xref: /linux/fs/proc/task_mmu.c (revision 247dbcdbf790c52fc76cf8e327cd0a5778e41e66)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/pagewalk.h>
3 #include <linux/mm_inline.h>
4 #include <linux/hugetlb.h>
5 #include <linux/huge_mm.h>
6 #include <linux/mount.h>
7 #include <linux/ksm.h>
8 #include <linux/seq_file.h>
9 #include <linux/highmem.h>
10 #include <linux/ptrace.h>
11 #include <linux/slab.h>
12 #include <linux/pagemap.h>
13 #include <linux/mempolicy.h>
14 #include <linux/rmap.h>
15 #include <linux/swap.h>
16 #include <linux/sched/mm.h>
17 #include <linux/swapops.h>
18 #include <linux/mmu_notifier.h>
19 #include <linux/page_idle.h>
20 #include <linux/shmem_fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/pkeys.h>
23 #include <linux/minmax.h>
24 #include <linux/overflow.h>
25 
26 #include <asm/elf.h>
27 #include <asm/tlb.h>
28 #include <asm/tlbflush.h>
29 #include "internal.h"
30 
31 #define SEQ_PUT_DEC(str, val) \
32 		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
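/*
 * Editor's note: (val) << (PAGE_SHIFT - 10) converts a page count to
 * KiB, e.g. with 4K pages (PAGE_SHIFT == 12) a count of 25 pages is
 * printed as 100 kB.
 */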
33 void task_mem(struct seq_file *m, struct mm_struct *mm)
34 {
35 	unsigned long text, lib, swap, anon, file, shmem;
36 	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
37 
38 	anon = get_mm_counter(mm, MM_ANONPAGES);
39 	file = get_mm_counter(mm, MM_FILEPAGES);
40 	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
41 
42 	/*
43 	 * Note: to minimize their overhead, mm maintains hiwater_vm and
44 	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
45 	 * collector of these hiwater stats must therefore get total_vm
46 	 * and rss too, which will usually be the higher.  Barriers? Not
47 	 * worth the effort; such snapshots can always be inconsistent.
48 	 */
49 	hiwater_vm = total_vm = mm->total_vm;
50 	if (hiwater_vm < mm->hiwater_vm)
51 		hiwater_vm = mm->hiwater_vm;
52 	hiwater_rss = total_rss = anon + file + shmem;
53 	if (hiwater_rss < mm->hiwater_rss)
54 		hiwater_rss = mm->hiwater_rss;
55 
56 	/* split executable areas between text and lib */
57 	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
58 	text = min(text, mm->exec_vm << PAGE_SHIFT);
59 	lib = (mm->exec_vm << PAGE_SHIFT) - text;
60 
61 	swap = get_mm_counter(mm, MM_SWAPENTS);
62 	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
63 	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
64 	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
65 	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
66 	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
67 	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
68 	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
69 	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
70 	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
71 	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
72 	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
73 	seq_put_decimal_ull_width(m,
74 		    " kB\nVmExe:\t", text >> 10, 8);
75 	seq_put_decimal_ull_width(m,
76 		    " kB\nVmLib:\t", lib >> 10, 8);
77 	seq_put_decimal_ull_width(m,
78 		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
79 	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
80 	seq_puts(m, " kB\n");
81 	hugetlb_report_usage(m, mm);
82 }
83 #undef SEQ_PUT_DEC
84 
85 unsigned long task_vsize(struct mm_struct *mm)
86 {
87 	return PAGE_SIZE * mm->total_vm;
88 }
89 
90 unsigned long task_statm(struct mm_struct *mm,
91 			 unsigned long *shared, unsigned long *text,
92 			 unsigned long *data, unsigned long *resident)
93 {
94 	*shared = get_mm_counter(mm, MM_FILEPAGES) +
95 			get_mm_counter(mm, MM_SHMEMPAGES);
96 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
97 								>> PAGE_SHIFT;
98 	*data = mm->data_vm + mm->stack_vm;
99 	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
100 	return mm->total_vm;
101 }
102 
103 #ifdef CONFIG_NUMA
104 /*
105  * Save get_task_policy() for show_numa_map().
106  */
107 static void hold_task_mempolicy(struct proc_maps_private *priv)
108 {
109 	struct task_struct *task = priv->task;
110 
111 	task_lock(task);
112 	priv->task_mempolicy = get_task_policy(task);
113 	mpol_get(priv->task_mempolicy);
114 	task_unlock(task);
115 }
116 static void release_task_mempolicy(struct proc_maps_private *priv)
117 {
118 	mpol_put(priv->task_mempolicy);
119 }
120 #else
121 static void hold_task_mempolicy(struct proc_maps_private *priv)
122 {
123 }
124 static void release_task_mempolicy(struct proc_maps_private *priv)
125 {
126 }
127 #endif
128 
129 static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
130 						loff_t *ppos)
131 {
132 	struct vm_area_struct *vma = vma_next(&priv->iter);
133 
134 	if (vma) {
135 		*ppos = vma->vm_start;
136 	} else {
137 		*ppos = -2UL;
138 		vma = get_gate_vma(priv->mm);
139 	}
140 
141 	return vma;
142 }
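/*
 * Editor's note on the *ppos sentinels used by proc_get_vma(),
 * m_start() and m_next(): a regular value is the start address at
 * which to resume iteration (e.g. after an lseek), -2UL means "show
 * the gate VMA next", and -1UL means iteration is complete.
 */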
143 
144 static void *m_start(struct seq_file *m, loff_t *ppos)
145 {
146 	struct proc_maps_private *priv = m->private;
147 	unsigned long last_addr = *ppos;
148 	struct mm_struct *mm;
149 
150 	/* See m_next(). Zero at the start or after lseek. */
151 	if (last_addr == -1UL)
152 		return NULL;
153 
154 	priv->task = get_proc_task(priv->inode);
155 	if (!priv->task)
156 		return ERR_PTR(-ESRCH);
157 
158 	mm = priv->mm;
159 	if (!mm || !mmget_not_zero(mm)) {
160 		put_task_struct(priv->task);
161 		priv->task = NULL;
162 		return NULL;
163 	}
164 
165 	if (mmap_read_lock_killable(mm)) {
166 		mmput(mm);
167 		put_task_struct(priv->task);
168 		priv->task = NULL;
169 		return ERR_PTR(-EINTR);
170 	}
171 
172 	vma_iter_init(&priv->iter, mm, last_addr);
173 	hold_task_mempolicy(priv);
174 	if (last_addr == -2UL)
175 		return get_gate_vma(mm);
176 
177 	return proc_get_vma(priv, ppos);
178 }
179 
180 static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
181 {
182 	if (*ppos == -2UL) {
183 		*ppos = -1UL;
184 		return NULL;
185 	}
186 	return proc_get_vma(m->private, ppos);
187 }
188 
189 static void m_stop(struct seq_file *m, void *v)
190 {
191 	struct proc_maps_private *priv = m->private;
192 	struct mm_struct *mm = priv->mm;
193 
194 	if (!priv->task)
195 		return;
196 
197 	release_task_mempolicy(priv);
198 	mmap_read_unlock(mm);
199 	mmput(mm);
200 	put_task_struct(priv->task);
201 	priv->task = NULL;
202 }
203 
204 static int proc_maps_open(struct inode *inode, struct file *file,
205 			const struct seq_operations *ops, int psize)
206 {
207 	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
208 
209 	if (!priv)
210 		return -ENOMEM;
211 
212 	priv->inode = inode;
213 	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
214 	if (IS_ERR(priv->mm)) {
215 		int err = PTR_ERR(priv->mm);
216 
217 		seq_release_private(inode, file);
218 		return err;
219 	}
220 
221 	return 0;
222 }
223 
224 static int proc_map_release(struct inode *inode, struct file *file)
225 {
226 	struct seq_file *seq = file->private_data;
227 	struct proc_maps_private *priv = seq->private;
228 
229 	if (priv->mm)
230 		mmdrop(priv->mm);
231 
232 	return seq_release_private(inode, file);
233 }
234 
235 static int do_maps_open(struct inode *inode, struct file *file,
236 			const struct seq_operations *ops)
237 {
238 	return proc_maps_open(inode, file, ops,
239 				sizeof(struct proc_maps_private));
240 }
241 
242 static void show_vma_header_prefix(struct seq_file *m,
243 				   unsigned long start, unsigned long end,
244 				   vm_flags_t flags, unsigned long long pgoff,
245 				   dev_t dev, unsigned long ino)
246 {
247 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
248 	seq_put_hex_ll(m, NULL, start, 8);
249 	seq_put_hex_ll(m, "-", end, 8);
250 	seq_putc(m, ' ');
251 	seq_putc(m, flags & VM_READ ? 'r' : '-');
252 	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
253 	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
254 	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
255 	seq_put_hex_ll(m, " ", pgoff, 8);
256 	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
257 	seq_put_hex_ll(m, ":", MINOR(dev), 2);
258 	seq_put_decimal_ull(m, " ", ino);
259 	seq_putc(m, ' ');
260 }
261 
262 static void
263 show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
264 {
265 	struct anon_vma_name *anon_name = NULL;
266 	struct mm_struct *mm = vma->vm_mm;
267 	struct file *file = vma->vm_file;
268 	vm_flags_t flags = vma->vm_flags;
269 	unsigned long ino = 0;
270 	unsigned long long pgoff = 0;
271 	unsigned long start, end;
272 	dev_t dev = 0;
273 	const char *name = NULL;
274 
275 	if (file) {
276 		struct inode *inode = file_inode(vma->vm_file);
277 		dev = inode->i_sb->s_dev;
278 		ino = inode->i_ino;
279 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
280 	}
281 
282 	start = vma->vm_start;
283 	end = vma->vm_end;
284 	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
285 	if (mm)
286 		anon_name = anon_vma_name(vma);
287 
288 	/*
289 	 * Print the dentry name for named mappings, and a
290 	 * special [heap] marker for the heap:
291 	 */
292 	if (file) {
293 		seq_pad(m, ' ');
294 		/*
295 		 * If the user named this anon shared memory via
296 		 * prctl(PR_SET_VMA, ...), use the provided name.
297 		 */
298 		if (anon_name)
299 			seq_printf(m, "[anon_shmem:%s]", anon_name->name);
300 		else
301 			seq_file_path(m, file, "\n");
302 		goto done;
303 	}
304 
305 	if (vma->vm_ops && vma->vm_ops->name) {
306 		name = vma->vm_ops->name(vma);
307 		if (name)
308 			goto done;
309 	}
310 
311 	name = arch_vma_name(vma);
312 	if (!name) {
313 		if (!mm) {
314 			name = "[vdso]";
315 			goto done;
316 		}
317 
318 		if (vma_is_initial_heap(vma)) {
319 			name = "[heap]";
320 			goto done;
321 		}
322 
323 		if (vma_is_initial_stack(vma)) {
324 			name = "[stack]";
325 			goto done;
326 		}
327 
328 		if (anon_name) {
329 			seq_pad(m, ' ');
330 			seq_printf(m, "[anon:%s]", anon_name->name);
331 		}
332 	}
333 
334 done:
335 	if (name) {
336 		seq_pad(m, ' ');
337 		seq_puts(m, name);
338 	}
339 	seq_putc(m, '\n');
340 }
341 
342 static int show_map(struct seq_file *m, void *v)
343 {
344 	show_map_vma(m, v);
345 	return 0;
346 }
347 
348 static const struct seq_operations proc_pid_maps_op = {
349 	.start	= m_start,
350 	.next	= m_next,
351 	.stop	= m_stop,
352 	.show	= show_map
353 };
354 
355 static int pid_maps_open(struct inode *inode, struct file *file)
356 {
357 	return do_maps_open(inode, file, &proc_pid_maps_op);
358 }
359 
360 const struct file_operations proc_pid_maps_operations = {
361 	.open		= pid_maps_open,
362 	.read		= seq_read,
363 	.llseek		= seq_lseek,
364 	.release	= proc_map_release,
365 };
366 
367 /*
368  * Proportional Set Size (PSS): my share of RSS.
369  *
370  * PSS of a process is the count of pages it has in memory, where each
371  * page is divided by the number of processes sharing it.  So if a
372  * process has 1000 pages all to itself, and 1000 shared with one other
373  * process, its PSS will be 1500.
374  *
375  * To keep accumulated division errors low, we use a 64-bit
376  * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real
377  * byte count.
378  *
379  * A shift of 12 before division means (assuming 4K page size):
380  * 	- 1M pages each shared by 3 users add up to 8KB of error;
381  * 	- supports mapcount up to 2^24, or 16M;
382  * 	- supports PSS up to 2^52 bytes, or 4PB.
383  */
384 #define PSS_SHIFT 12
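/*
 * Worked example of the fixed-point scheme above (editor's sketch,
 * assuming 4K pages): a page mapped by three processes adds
 * (4096 << PSS_SHIFT) / 3 = 5592405 to each of their pss counters,
 * and 5592405 >> PSS_SHIFT = 1365 bytes, i.e. roughly a third of the
 * page per process, with under a byte of rounding error per page.
 */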
385 
386 #ifdef CONFIG_PROC_PAGE_MONITOR
387 struct mem_size_stats {
388 	unsigned long resident;
389 	unsigned long shared_clean;
390 	unsigned long shared_dirty;
391 	unsigned long private_clean;
392 	unsigned long private_dirty;
393 	unsigned long referenced;
394 	unsigned long anonymous;
395 	unsigned long lazyfree;
396 	unsigned long anonymous_thp;
397 	unsigned long shmem_thp;
398 	unsigned long file_thp;
399 	unsigned long swap;
400 	unsigned long shared_hugetlb;
401 	unsigned long private_hugetlb;
402 	unsigned long ksm;
403 	u64 pss;
404 	u64 pss_anon;
405 	u64 pss_file;
406 	u64 pss_shmem;
407 	u64 pss_dirty;
408 	u64 pss_locked;
409 	u64 swap_pss;
410 };
411 
412 static void smaps_page_accumulate(struct mem_size_stats *mss,
413 		struct page *page, unsigned long size, unsigned long pss,
414 		bool dirty, bool locked, bool private)
415 {
416 	mss->pss += pss;
417 
418 	if (PageAnon(page))
419 		mss->pss_anon += pss;
420 	else if (PageSwapBacked(page))
421 		mss->pss_shmem += pss;
422 	else
423 		mss->pss_file += pss;
424 
425 	if (locked)
426 		mss->pss_locked += pss;
427 
428 	if (dirty || PageDirty(page)) {
429 		mss->pss_dirty += pss;
430 		if (private)
431 			mss->private_dirty += size;
432 		else
433 			mss->shared_dirty += size;
434 	} else {
435 		if (private)
436 			mss->private_clean += size;
437 		else
438 			mss->shared_clean += size;
439 	}
440 }
441 
442 static void smaps_account(struct mem_size_stats *mss, struct page *page,
443 		bool compound, bool young, bool dirty, bool locked,
444 		bool migration)
445 {
446 	int i, nr = compound ? compound_nr(page) : 1;
447 	unsigned long size = nr * PAGE_SIZE;
448 
449 	/*
450 	 * First accumulate quantities that depend only on |size| and the type
451 	 * of the compound page.
452 	 */
453 	if (PageAnon(page)) {
454 		mss->anonymous += size;
455 		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
456 			mss->lazyfree += size;
457 	}
458 
459 	if (PageKsm(page))
460 		mss->ksm += size;
461 
462 	mss->resident += size;
463 	/* Accumulate the size in pages that have been accessed. */
464 	if (young || page_is_young(page) || PageReferenced(page))
465 		mss->referenced += size;
466 
467 	/*
468 	 * Then accumulate quantities that may depend on sharing, or that may
469 	 * differ page-by-page.
470 	 *
471 	 * page_count(page) == 1 guarantees the page is mapped exactly once.
472 	 * If any subpage of the compound page were mapped by PTE, it
473 	 * would elevate page_count().
474 	 *
475 	 * page_mapcount() is called to get a snapshot of the mapcount.
476 	 * Without holding the page lock this snapshot can be slightly wrong as
477 	 * we cannot always read the mapcount atomically.  It is not safe to
478 	 * call page_mapcount() even with PTL held if the page is not mapped,
479 	 * especially for migration entries.  Treat regular migration entries
480 	 * as mapcount == 1.
481 	 */
482 	if ((page_count(page) == 1) || migration) {
483 		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
484 			locked, true);
485 		return;
486 	}
487 	for (i = 0; i < nr; i++, page++) {
488 		int mapcount = page_mapcount(page);
489 		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
490 		if (mapcount >= 2)
491 			pss /= mapcount;
492 		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
493 				      mapcount < 2);
494 	}
495 }
496 
497 #ifdef CONFIG_SHMEM
498 static int smaps_pte_hole(unsigned long addr, unsigned long end,
499 			  __always_unused int depth, struct mm_walk *walk)
500 {
501 	struct mem_size_stats *mss = walk->private;
502 	struct vm_area_struct *vma = walk->vma;
503 
504 	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
505 					      linear_page_index(vma, addr),
506 					      linear_page_index(vma, end));
507 
508 	return 0;
509 }
510 #else
511 #define smaps_pte_hole		NULL
512 #endif /* CONFIG_SHMEM */
513 
514 static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
515 {
516 #ifdef CONFIG_SHMEM
517 	if (walk->ops->pte_hole) {
518 		/* depth is not used */
519 		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
520 	}
521 #endif
522 }
523 
524 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
525 		struct mm_walk *walk)
526 {
527 	struct mem_size_stats *mss = walk->private;
528 	struct vm_area_struct *vma = walk->vma;
529 	bool locked = !!(vma->vm_flags & VM_LOCKED);
530 	struct page *page = NULL;
531 	bool migration = false, young = false, dirty = false;
532 	pte_t ptent = ptep_get(pte);
533 
534 	if (pte_present(ptent)) {
535 		page = vm_normal_page(vma, addr, ptent);
536 		young = pte_young(ptent);
537 		dirty = pte_dirty(ptent);
538 	} else if (is_swap_pte(ptent)) {
539 		swp_entry_t swpent = pte_to_swp_entry(ptent);
540 
541 		if (!non_swap_entry(swpent)) {
542 			int mapcount;
543 
544 			mss->swap += PAGE_SIZE;
545 			mapcount = swp_swapcount(swpent);
546 			if (mapcount >= 2) {
547 				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
548 
549 				do_div(pss_delta, mapcount);
550 				mss->swap_pss += pss_delta;
551 			} else {
552 				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
553 			}
554 		} else if (is_pfn_swap_entry(swpent)) {
555 			if (is_migration_entry(swpent))
556 				migration = true;
557 			page = pfn_swap_entry_to_page(swpent);
558 		}
559 	} else {
560 		smaps_pte_hole_lookup(addr, walk);
561 		return;
562 	}
563 
564 	if (!page)
565 		return;
566 
567 	smaps_account(mss, page, false, young, dirty, locked, migration);
568 }
569 
570 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
571 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
572 		struct mm_walk *walk)
573 {
574 	struct mem_size_stats *mss = walk->private;
575 	struct vm_area_struct *vma = walk->vma;
576 	bool locked = !!(vma->vm_flags & VM_LOCKED);
577 	struct page *page = NULL;
578 	bool migration = false;
579 
580 	if (pmd_present(*pmd)) {
581 		page = vm_normal_page_pmd(vma, addr, *pmd);
582 	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
583 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
584 
585 		if (is_migration_entry(entry)) {
586 			migration = true;
587 			page = pfn_swap_entry_to_page(entry);
588 		}
589 	}
590 	if (IS_ERR_OR_NULL(page))
591 		return;
592 	if (PageAnon(page))
593 		mss->anonymous_thp += HPAGE_PMD_SIZE;
594 	else if (PageSwapBacked(page))
595 		mss->shmem_thp += HPAGE_PMD_SIZE;
596 	else if (is_zone_device_page(page))
597 		/* pass */;
598 	else
599 		mss->file_thp += HPAGE_PMD_SIZE;
600 
601 	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
602 		      locked, migration);
603 }
604 #else
605 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
606 		struct mm_walk *walk)
607 {
608 }
609 #endif
610 
611 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
612 			   struct mm_walk *walk)
613 {
614 	struct vm_area_struct *vma = walk->vma;
615 	pte_t *pte;
616 	spinlock_t *ptl;
617 
618 	ptl = pmd_trans_huge_lock(pmd, vma);
619 	if (ptl) {
620 		smaps_pmd_entry(pmd, addr, walk);
621 		spin_unlock(ptl);
622 		goto out;
623 	}
624 
625 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
626 	if (!pte) {
627 		walk->action = ACTION_AGAIN;
628 		return 0;
629 	}
630 	for (; addr != end; pte++, addr += PAGE_SIZE)
631 		smaps_pte_entry(pte, addr, walk);
632 	pte_unmap_unlock(pte - 1, ptl);
633 out:
634 	cond_resched();
635 	return 0;
636 }
637 
638 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
639 {
640 	/*
641 	 * Don't forget to update Documentation/ on changes.
642 	 */
643 	static const char mnemonics[BITS_PER_LONG][2] = {
644 		/*
645 		 * In case we meet a flag we don't know about.
646 		 */
647 		[0 ... (BITS_PER_LONG-1)] = "??",
648 
649 		[ilog2(VM_READ)]	= "rd",
650 		[ilog2(VM_WRITE)]	= "wr",
651 		[ilog2(VM_EXEC)]	= "ex",
652 		[ilog2(VM_SHARED)]	= "sh",
653 		[ilog2(VM_MAYREAD)]	= "mr",
654 		[ilog2(VM_MAYWRITE)]	= "mw",
655 		[ilog2(VM_MAYEXEC)]	= "me",
656 		[ilog2(VM_MAYSHARE)]	= "ms",
657 		[ilog2(VM_GROWSDOWN)]	= "gd",
658 		[ilog2(VM_PFNMAP)]	= "pf",
659 		[ilog2(VM_LOCKED)]	= "lo",
660 		[ilog2(VM_IO)]		= "io",
661 		[ilog2(VM_SEQ_READ)]	= "sr",
662 		[ilog2(VM_RAND_READ)]	= "rr",
663 		[ilog2(VM_DONTCOPY)]	= "dc",
664 		[ilog2(VM_DONTEXPAND)]	= "de",
665 		[ilog2(VM_LOCKONFAULT)]	= "lf",
666 		[ilog2(VM_ACCOUNT)]	= "ac",
667 		[ilog2(VM_NORESERVE)]	= "nr",
668 		[ilog2(VM_HUGETLB)]	= "ht",
669 		[ilog2(VM_SYNC)]	= "sf",
670 		[ilog2(VM_ARCH_1)]	= "ar",
671 		[ilog2(VM_WIPEONFORK)]	= "wf",
672 		[ilog2(VM_DONTDUMP)]	= "dd",
673 #ifdef CONFIG_ARM64_BTI
674 		[ilog2(VM_ARM64_BTI)]	= "bt",
675 #endif
676 #ifdef CONFIG_MEM_SOFT_DIRTY
677 		[ilog2(VM_SOFTDIRTY)]	= "sd",
678 #endif
679 		[ilog2(VM_MIXEDMAP)]	= "mm",
680 		[ilog2(VM_HUGEPAGE)]	= "hg",
681 		[ilog2(VM_NOHUGEPAGE)]	= "nh",
682 		[ilog2(VM_MERGEABLE)]	= "mg",
683 		[ilog2(VM_UFFD_MISSING)]= "um",
684 		[ilog2(VM_UFFD_WP)]	= "uw",
685 #ifdef CONFIG_ARM64_MTE
686 		[ilog2(VM_MTE)]		= "mt",
687 		[ilog2(VM_MTE_ALLOWED)]	= "",
688 #endif
689 #ifdef CONFIG_ARCH_HAS_PKEYS
690 		/* These come out via ProtectionKey: */
691 		[ilog2(VM_PKEY_BIT0)]	= "",
692 		[ilog2(VM_PKEY_BIT1)]	= "",
693 		[ilog2(VM_PKEY_BIT2)]	= "",
694 		[ilog2(VM_PKEY_BIT3)]	= "",
695 #if VM_PKEY_BIT4
696 		[ilog2(VM_PKEY_BIT4)]	= "",
697 #endif
698 #endif /* CONFIG_ARCH_HAS_PKEYS */
699 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
700 		[ilog2(VM_UFFD_MINOR)]	= "ui",
701 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
702 #ifdef CONFIG_X86_USER_SHADOW_STACK
703 		[ilog2(VM_SHADOW_STACK)] = "ss",
704 #endif
705 	};
706 	size_t i;
707 
708 	seq_puts(m, "VmFlags: ");
709 	for (i = 0; i < BITS_PER_LONG; i++) {
710 		if (!mnemonics[i][0])
711 			continue;
712 		if (vma->vm_flags & (1UL << i)) {
713 			seq_putc(m, mnemonics[i][0]);
714 			seq_putc(m, mnemonics[i][1]);
715 			seq_putc(m, ' ');
716 		}
717 	}
718 	seq_putc(m, '\n');
719 }
720 
721 #ifdef CONFIG_HUGETLB_PAGE
722 static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
723 				 unsigned long addr, unsigned long end,
724 				 struct mm_walk *walk)
725 {
726 	struct mem_size_stats *mss = walk->private;
727 	struct vm_area_struct *vma = walk->vma;
728 	struct page *page = NULL;
729 	pte_t ptent = ptep_get(pte);
730 
731 	if (pte_present(ptent)) {
732 		page = vm_normal_page(vma, addr, ptent);
733 	} else if (is_swap_pte(ptent)) {
734 		swp_entry_t swpent = pte_to_swp_entry(ptent);
735 
736 		if (is_pfn_swap_entry(swpent))
737 			page = pfn_swap_entry_to_page(swpent);
738 	}
739 	if (page) {
740 		if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
741 			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
742 		else
743 			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
744 	}
745 	return 0;
746 }
747 #else
748 #define smaps_hugetlb_range	NULL
749 #endif /* HUGETLB_PAGE */
750 
751 static const struct mm_walk_ops smaps_walk_ops = {
752 	.pmd_entry		= smaps_pte_range,
753 	.hugetlb_entry		= smaps_hugetlb_range,
754 	.walk_lock		= PGWALK_RDLOCK,
755 };
756 
757 static const struct mm_walk_ops smaps_shmem_walk_ops = {
758 	.pmd_entry		= smaps_pte_range,
759 	.hugetlb_entry		= smaps_hugetlb_range,
760 	.pte_hole		= smaps_pte_hole,
761 	.walk_lock		= PGWALK_RDLOCK,
762 };
763 
764 /*
765  * Gather mem stats from @vma with the indicated beginning
766  * address @start, and keep them in @mss.
767  *
768  * Use vm_start of @vma as the beginning address if @start is 0.
769  */
770 static void smap_gather_stats(struct vm_area_struct *vma,
771 		struct mem_size_stats *mss, unsigned long start)
772 {
773 	const struct mm_walk_ops *ops = &smaps_walk_ops;
774 
775 	/* Invalid start */
776 	if (start >= vma->vm_end)
777 		return;
778 
779 	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
780 		/*
781 		 * For shared or readonly shmem mappings we know that all
782 		 * swapped out pages belong to the shmem object, and we can
783 		 * obtain the swap value much more efficiently. For private
784 		 * writable mappings, we might have COW pages that are
785 		 * not affected by the swapped-out pages of the underlying
786 		 * shmem object, so we have to distinguish them during the page
787 		 * walk, unless we know that the shmem object (or the part
788 		 * mapped by our VMA) has no swapped-out pages at all.
789 		 */
790 		unsigned long shmem_swapped = shmem_swap_usage(vma);
791 
792 		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
793 					!(vma->vm_flags & VM_WRITE))) {
794 			mss->swap += shmem_swapped;
795 		} else {
796 			ops = &smaps_shmem_walk_ops;
797 		}
798 	}
799 
800 	/* mmap_lock is held in m_start */
801 	if (!start)
802 		walk_page_vma(vma, ops, mss);
803 	else
804 		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
805 }
806 
807 #define SEQ_PUT_DEC(str, val) \
808 		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
809 
810 /* Show the contents common to smaps and smaps_rollup */
811 static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
812 	bool rollup_mode)
813 {
814 	SEQ_PUT_DEC("Rss:            ", mss->resident);
815 	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
816 	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
817 	if (rollup_mode) {
818 		/*
819 		 * These are meaningful only for smaps_rollup, otherwise two of
820 		 * them are zero, and the other one is the same as Pss.
821 		 */
822 		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
823 			mss->pss_anon >> PSS_SHIFT);
824 		SEQ_PUT_DEC(" kB\nPss_File:       ",
825 			mss->pss_file >> PSS_SHIFT);
826 		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
827 			mss->pss_shmem >> PSS_SHIFT);
828 	}
829 	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
830 	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
831 	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
832 	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
833 	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
834 	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
835 	SEQ_PUT_DEC(" kB\nKSM:            ", mss->ksm);
836 	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
837 	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
838 	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
839 	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
840 	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
841 	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
842 				  mss->private_hugetlb >> 10, 7);
843 	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
844 	SEQ_PUT_DEC(" kB\nSwapPss:        ",
845 					mss->swap_pss >> PSS_SHIFT);
846 	SEQ_PUT_DEC(" kB\nLocked:         ",
847 					mss->pss_locked >> PSS_SHIFT);
848 	seq_puts(m, " kB\n");
849 }
850 
851 static int show_smap(struct seq_file *m, void *v)
852 {
853 	struct vm_area_struct *vma = v;
854 	struct mem_size_stats mss;
855 
856 	memset(&mss, 0, sizeof(mss));
857 
858 	smap_gather_stats(vma, &mss, 0);
859 
860 	show_map_vma(m, vma);
861 
862 	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
863 	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
864 	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
865 	seq_puts(m, " kB\n");
866 
867 	__show_smap(m, &mss, false);
868 
869 	seq_printf(m, "THPeligible:    %8u\n",
870 		   hugepage_vma_check(vma, vma->vm_flags, true, false, true));
871 
872 	if (arch_pkeys_enabled())
873 		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
874 	show_smap_vma_flags(m, vma);
875 
876 	return 0;
877 }
878 
879 static int show_smaps_rollup(struct seq_file *m, void *v)
880 {
881 	struct proc_maps_private *priv = m->private;
882 	struct mem_size_stats mss;
883 	struct mm_struct *mm = priv->mm;
884 	struct vm_area_struct *vma;
885 	unsigned long vma_start = 0, last_vma_end = 0;
886 	int ret = 0;
887 	VMA_ITERATOR(vmi, mm, 0);
888 
889 	priv->task = get_proc_task(priv->inode);
890 	if (!priv->task)
891 		return -ESRCH;
892 
893 	if (!mm || !mmget_not_zero(mm)) {
894 		ret = -ESRCH;
895 		goto out_put_task;
896 	}
897 
898 	memset(&mss, 0, sizeof(mss));
899 
900 	ret = mmap_read_lock_killable(mm);
901 	if (ret)
902 		goto out_put_mm;
903 
904 	hold_task_mempolicy(priv);
905 	vma = vma_next(&vmi);
906 
907 	if (unlikely(!vma))
908 		goto empty_set;
909 
910 	vma_start = vma->vm_start;
911 	do {
912 		smap_gather_stats(vma, &mss, 0);
913 		last_vma_end = vma->vm_end;
914 
915 		/*
916 		 * Release mmap_lock temporarily if someone else wants to
917 		 * take it for a write request.
918 		 */
919 		if (mmap_lock_is_contended(mm)) {
920 			vma_iter_invalidate(&vmi);
921 			mmap_read_unlock(mm);
922 			ret = mmap_read_lock_killable(mm);
923 			if (ret) {
924 				release_task_mempolicy(priv);
925 				goto out_put_mm;
926 			}
927 
928 			/*
929 			 * After dropping the lock, there are four cases to
930 			 * consider. See the following example for explanation.
931 			 *
932 			 *   +------+------+-----------+
933 			 *   | VMA1 | VMA2 | VMA3      |
934 			 *   +------+------+-----------+
935 			 *   |      |      |           |
936 			 *  4k     8k     16k         400k
937 			 *
938 			 * Suppose we drop the lock after reading VMA2 due to
939 			 * contention, then we get:
940 			 *
941 			 *	last_vma_end = 16k
942 			 *
943 			 * 1) VMA2 is freed, but VMA3 exists:
944 			 *
945 			 *    vma_next(vmi) will return VMA3.
946 			 *    In this case, just continue from VMA3.
947 			 *
948 			 * 2) VMA2 still exists:
949 			 *
950 			 *    vma_next(vmi) will return VMA3.
951 			 *    In this case, just continue from VMA3.
952 			 *
953 			 * 3) No more VMAs can be found:
954 			 *
955 			 *    vma_next(vmi) will return NULL.
956 			 *    No more things to do, just break.
957 			 *
958 			 * 4) (last_vma_end - 1) is in the middle of a vma (VMA'):
959 			 *
960 			 *    vma_next(vmi) will return VMA' whose range
961 			 *    contains last_vma_end.
962 			 *    Iterate VMA' from last_vma_end.
963 			 */
964 			vma = vma_next(&vmi);
965 			/* Case 3 above */
966 			if (!vma)
967 				break;
968 
969 			/* Case 1 and 2 above */
970 			if (vma->vm_start >= last_vma_end)
971 				continue;
972 
973 			/* Case 4 above */
974 			if (vma->vm_end > last_vma_end)
975 				smap_gather_stats(vma, &mss, last_vma_end);
976 		}
977 	} for_each_vma(vmi, vma);
978 
979 empty_set:
980 	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
981 	seq_pad(m, ' ');
982 	seq_puts(m, "[rollup]\n");
983 
984 	__show_smap(m, &mss, true);
985 
986 	release_task_mempolicy(priv);
987 	mmap_read_unlock(mm);
988 
989 out_put_mm:
990 	mmput(mm);
991 out_put_task:
992 	put_task_struct(priv->task);
993 	priv->task = NULL;
994 
995 	return ret;
996 }
997 #undef SEQ_PUT_DEC
998 
999 static const struct seq_operations proc_pid_smaps_op = {
1000 	.start	= m_start,
1001 	.next	= m_next,
1002 	.stop	= m_stop,
1003 	.show	= show_smap
1004 };
1005 
1006 static int pid_smaps_open(struct inode *inode, struct file *file)
1007 {
1008 	return do_maps_open(inode, file, &proc_pid_smaps_op);
1009 }
1010 
1011 static int smaps_rollup_open(struct inode *inode, struct file *file)
1012 {
1013 	int ret;
1014 	struct proc_maps_private *priv;
1015 
1016 	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1017 	if (!priv)
1018 		return -ENOMEM;
1019 
1020 	ret = single_open(file, show_smaps_rollup, priv);
1021 	if (ret)
1022 		goto out_free;
1023 
1024 	priv->inode = inode;
1025 	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1026 	if (IS_ERR(priv->mm)) {
1027 		ret = PTR_ERR(priv->mm);
1028 
1029 		single_release(inode, file);
1030 		goto out_free;
1031 	}
1032 
1033 	return 0;
1034 
1035 out_free:
1036 	kfree(priv);
1037 	return ret;
1038 }
1039 
1040 static int smaps_rollup_release(struct inode *inode, struct file *file)
1041 {
1042 	struct seq_file *seq = file->private_data;
1043 	struct proc_maps_private *priv = seq->private;
1044 
1045 	if (priv->mm)
1046 		mmdrop(priv->mm);
1047 
1048 	kfree(priv);
1049 	return single_release(inode, file);
1050 }
1051 
1052 const struct file_operations proc_pid_smaps_operations = {
1053 	.open		= pid_smaps_open,
1054 	.read		= seq_read,
1055 	.llseek		= seq_lseek,
1056 	.release	= proc_map_release,
1057 };
1058 
1059 const struct file_operations proc_pid_smaps_rollup_operations = {
1060 	.open		= smaps_rollup_open,
1061 	.read		= seq_read,
1062 	.llseek		= seq_lseek,
1063 	.release	= smaps_rollup_release,
1064 };
1065 
1066 enum clear_refs_types {
1067 	CLEAR_REFS_ALL = 1,
1068 	CLEAR_REFS_ANON,
1069 	CLEAR_REFS_MAPPED,
1070 	CLEAR_REFS_SOFT_DIRTY,
1071 	CLEAR_REFS_MM_HIWATER_RSS,
1072 	CLEAR_REFS_LAST,
1073 };
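/*
 * Editor's note: the enumerators map 1:1 onto the digits written to
 * /proc/pid/clear_refs (1 = all pages, 2 = anonymous only,
 * 3 = file-mapped only, 4 = clear soft-dirty bits, 5 = reset peak
 * RSS); see clear_refs_test_walk() and clear_refs_write() below.
 */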
1074 
1075 struct clear_refs_private {
1076 	enum clear_refs_types type;
1077 };
1078 
1079 #ifdef CONFIG_MEM_SOFT_DIRTY
1080 
1081 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1082 {
1083 	struct page *page;
1084 
1085 	if (!pte_write(pte))
1086 		return false;
1087 	if (!is_cow_mapping(vma->vm_flags))
1088 		return false;
1089 	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1090 		return false;
1091 	page = vm_normal_page(vma, addr, pte);
1092 	if (!page)
1093 		return false;
1094 	return page_maybe_dma_pinned(page);
1095 }
1096 
1097 static inline void clear_soft_dirty(struct vm_area_struct *vma,
1098 		unsigned long addr, pte_t *pte)
1099 {
1100 	/*
1101 	 * The soft-dirty tracker uses page faults (#PF) to catch writes
1102 	 * to pages, so write-protect the pte as well. See
1103 	 * Documentation/admin-guide/mm/soft-dirty.rst for a full
1104 	 * description of how soft-dirty works.
1105 	 */
1106 	pte_t ptent = ptep_get(pte);
1107 
1108 	if (pte_present(ptent)) {
1109 		pte_t old_pte;
1110 
1111 		if (pte_is_pinned(vma, addr, ptent))
1112 			return;
1113 		old_pte = ptep_modify_prot_start(vma, addr, pte);
1114 		ptent = pte_wrprotect(old_pte);
1115 		ptent = pte_clear_soft_dirty(ptent);
1116 		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1117 	} else if (is_swap_pte(ptent)) {
1118 		ptent = pte_swp_clear_soft_dirty(ptent);
1119 		set_pte_at(vma->vm_mm, addr, pte, ptent);
1120 	}
1121 }
1122 #else
1123 static inline void clear_soft_dirty(struct vm_area_struct *vma,
1124 		unsigned long addr, pte_t *pte)
1125 {
1126 }
1127 #endif
1128 
1129 #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1130 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1131 		unsigned long addr, pmd_t *pmdp)
1132 {
1133 	pmd_t old, pmd = *pmdp;
1134 
1135 	if (pmd_present(pmd)) {
1136 		/* See comment in change_huge_pmd() */
1137 		old = pmdp_invalidate(vma, addr, pmdp);
1138 		if (pmd_dirty(old))
1139 			pmd = pmd_mkdirty(pmd);
1140 		if (pmd_young(old))
1141 			pmd = pmd_mkyoung(pmd);
1142 
1143 		pmd = pmd_wrprotect(pmd);
1144 		pmd = pmd_clear_soft_dirty(pmd);
1145 
1146 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1147 	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1148 		pmd = pmd_swp_clear_soft_dirty(pmd);
1149 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1150 	}
1151 }
1152 #else
1153 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1154 		unsigned long addr, pmd_t *pmdp)
1155 {
1156 }
1157 #endif
1158 
1159 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1160 				unsigned long end, struct mm_walk *walk)
1161 {
1162 	struct clear_refs_private *cp = walk->private;
1163 	struct vm_area_struct *vma = walk->vma;
1164 	pte_t *pte, ptent;
1165 	spinlock_t *ptl;
1166 	struct page *page;
1167 
1168 	ptl = pmd_trans_huge_lock(pmd, vma);
1169 	if (ptl) {
1170 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1171 			clear_soft_dirty_pmd(vma, addr, pmd);
1172 			goto out;
1173 		}
1174 
1175 		if (!pmd_present(*pmd))
1176 			goto out;
1177 
1178 		page = pmd_page(*pmd);
1179 
1180 		/* Clear accessed and referenced bits. */
1181 		pmdp_test_and_clear_young(vma, addr, pmd);
1182 		test_and_clear_page_young(page);
1183 		ClearPageReferenced(page);
1184 out:
1185 		spin_unlock(ptl);
1186 		return 0;
1187 	}
1188 
1189 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1190 	if (!pte) {
1191 		walk->action = ACTION_AGAIN;
1192 		return 0;
1193 	}
1194 	for (; addr != end; pte++, addr += PAGE_SIZE) {
1195 		ptent = ptep_get(pte);
1196 
1197 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1198 			clear_soft_dirty(vma, addr, pte);
1199 			continue;
1200 		}
1201 
1202 		if (!pte_present(ptent))
1203 			continue;
1204 
1205 		page = vm_normal_page(vma, addr, ptent);
1206 		if (!page)
1207 			continue;
1208 
1209 		/* Clear accessed and referenced bits. */
1210 		ptep_test_and_clear_young(vma, addr, pte);
1211 		test_and_clear_page_young(page);
1212 		ClearPageReferenced(page);
1213 	}
1214 	pte_unmap_unlock(pte - 1, ptl);
1215 	cond_resched();
1216 	return 0;
1217 }
1218 
1219 static int clear_refs_test_walk(unsigned long start, unsigned long end,
1220 				struct mm_walk *walk)
1221 {
1222 	struct clear_refs_private *cp = walk->private;
1223 	struct vm_area_struct *vma = walk->vma;
1224 
1225 	if (vma->vm_flags & VM_PFNMAP)
1226 		return 1;
1227 
1228 	/*
1229 	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1230 	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1231 	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1232 	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1233 	 */
1234 	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1235 		return 1;
1236 	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1237 		return 1;
1238 	return 0;
1239 }
1240 
1241 static const struct mm_walk_ops clear_refs_walk_ops = {
1242 	.pmd_entry		= clear_refs_pte_range,
1243 	.test_walk		= clear_refs_test_walk,
1244 	.walk_lock		= PGWALK_WRLOCK,
1245 };
1246 
1247 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1248 				size_t count, loff_t *ppos)
1249 {
1250 	struct task_struct *task;
1251 	char buffer[PROC_NUMBUF];
1252 	struct mm_struct *mm;
1253 	struct vm_area_struct *vma;
1254 	enum clear_refs_types type;
1255 	int itype;
1256 	int rv;
1257 
1258 	memset(buffer, 0, sizeof(buffer));
1259 	if (count > sizeof(buffer) - 1)
1260 		count = sizeof(buffer) - 1;
1261 	if (copy_from_user(buffer, buf, count))
1262 		return -EFAULT;
1263 	rv = kstrtoint(strstrip(buffer), 10, &itype);
1264 	if (rv < 0)
1265 		return rv;
1266 	type = (enum clear_refs_types)itype;
1267 	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1268 		return -EINVAL;
1269 
1270 	task = get_proc_task(file_inode(file));
1271 	if (!task)
1272 		return -ESRCH;
1273 	mm = get_task_mm(task);
1274 	if (mm) {
1275 		VMA_ITERATOR(vmi, mm, 0);
1276 		struct mmu_notifier_range range;
1277 		struct clear_refs_private cp = {
1278 			.type = type,
1279 		};
1280 
1281 		if (mmap_write_lock_killable(mm)) {
1282 			count = -EINTR;
1283 			goto out_mm;
1284 		}
1285 		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1286 			/*
1287 			 * Writing 5 to /proc/pid/clear_refs resets the peak
1288 			 * resident set size to this mm's current rss value.
1289 			 */
1290 			reset_mm_hiwater_rss(mm);
1291 			goto out_unlock;
1292 		}
1293 
1294 		if (type == CLEAR_REFS_SOFT_DIRTY) {
1295 			for_each_vma(vmi, vma) {
1296 				if (!(vma->vm_flags & VM_SOFTDIRTY))
1297 					continue;
1298 				vm_flags_clear(vma, VM_SOFTDIRTY);
1299 				vma_set_page_prot(vma);
1300 			}
1301 
1302 			inc_tlb_flush_pending(mm);
1303 			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1304 						0, mm, 0, -1UL);
1305 			mmu_notifier_invalidate_range_start(&range);
1306 		}
1307 		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1308 		if (type == CLEAR_REFS_SOFT_DIRTY) {
1309 			mmu_notifier_invalidate_range_end(&range);
1310 			flush_tlb_mm(mm);
1311 			dec_tlb_flush_pending(mm);
1312 		}
1313 out_unlock:
1314 		mmap_write_unlock(mm);
1315 out_mm:
1316 		mmput(mm);
1317 	}
1318 	put_task_struct(task);
1319 
1320 	return count;
1321 }
1322 
1323 const struct file_operations proc_clear_refs_operations = {
1324 	.write		= clear_refs_write,
1325 	.llseek		= noop_llseek,
1326 };
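/*
 * Usage sketch (editor's illustration, not part of the kernel): the
 * soft-dirty workflow from Documentation/admin-guide/mm/soft-dirty.rst
 * built on this interface is roughly
 *
 *	echo 4 > /proc/$pid/clear_refs   # clear soft-dirty bits
 *	... let the task run ...
 *	read /proc/$pid/pagemap          # entries with bit 55 set were
 *	                                 # written since the clear
 */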
1327 
1328 typedef struct {
1329 	u64 pme;
1330 } pagemap_entry_t;
1331 
1332 struct pagemapread {
1333 	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1334 	pagemap_entry_t *buffer;
1335 	bool show_pfn;
1336 };
1337 
1338 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1339 #define PAGEMAP_WALK_MASK	(PMD_MASK)
1340 
1341 #define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1342 #define PM_PFRAME_BITS		55
1343 #define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1344 #define PM_SOFT_DIRTY		BIT_ULL(55)
1345 #define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1346 #define PM_UFFD_WP		BIT_ULL(57)
1347 #define PM_FILE			BIT_ULL(61)
1348 #define PM_SWAP			BIT_ULL(62)
1349 #define PM_PRESENT		BIT_ULL(63)
1350 
1351 #define PM_END_OF_BUFFER    1
1352 
1353 static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1354 {
1355 	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1356 }
1357 
1358 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1359 			  struct pagemapread *pm)
1360 {
1361 	pm->buffer[pm->pos++] = *pme;
1362 	if (pm->pos >= pm->len)
1363 		return PM_END_OF_BUFFER;
1364 	return 0;
1365 }
1366 
1367 static int pagemap_pte_hole(unsigned long start, unsigned long end,
1368 			    __always_unused int depth, struct mm_walk *walk)
1369 {
1370 	struct pagemapread *pm = walk->private;
1371 	unsigned long addr = start;
1372 	int err = 0;
1373 
1374 	while (addr < end) {
1375 		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1376 		pagemap_entry_t pme = make_pme(0, 0);
1377 		/* End of address space hole, which we mark as non-present. */
1378 		unsigned long hole_end;
1379 
1380 		if (vma)
1381 			hole_end = min(end, vma->vm_start);
1382 		else
1383 			hole_end = end;
1384 
1385 		for (; addr < hole_end; addr += PAGE_SIZE) {
1386 			err = add_to_pagemap(addr, &pme, pm);
1387 			if (err)
1388 				goto out;
1389 		}
1390 
1391 		if (!vma)
1392 			break;
1393 
1394 		/* Addresses in the VMA. */
1395 		if (vma->vm_flags & VM_SOFTDIRTY)
1396 			pme = make_pme(0, PM_SOFT_DIRTY);
1397 		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1398 			err = add_to_pagemap(addr, &pme, pm);
1399 			if (err)
1400 				goto out;
1401 		}
1402 	}
1403 out:
1404 	return err;
1405 }
1406 
1407 static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1408 		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1409 {
1410 	u64 frame = 0, flags = 0;
1411 	struct page *page = NULL;
1412 	bool migration = false;
1413 
1414 	if (pte_present(pte)) {
1415 		if (pm->show_pfn)
1416 			frame = pte_pfn(pte);
1417 		flags |= PM_PRESENT;
1418 		page = vm_normal_page(vma, addr, pte);
1419 		if (pte_soft_dirty(pte))
1420 			flags |= PM_SOFT_DIRTY;
1421 		if (pte_uffd_wp(pte))
1422 			flags |= PM_UFFD_WP;
1423 	} else if (is_swap_pte(pte)) {
1424 		swp_entry_t entry;
1425 		if (pte_swp_soft_dirty(pte))
1426 			flags |= PM_SOFT_DIRTY;
1427 		if (pte_swp_uffd_wp(pte))
1428 			flags |= PM_UFFD_WP;
1429 		entry = pte_to_swp_entry(pte);
1430 		if (pm->show_pfn) {
1431 			pgoff_t offset;
1432 			/*
1433 			 * For PFN swap offsets, keep the offset field
1434 			 * as the bare PFN, to stay compatible with old smaps.
1435 			 */
1436 			if (is_pfn_swap_entry(entry))
1437 				offset = swp_offset_pfn(entry);
1438 			else
1439 				offset = swp_offset(entry);
1440 			frame = swp_type(entry) |
1441 			    (offset << MAX_SWAPFILES_SHIFT);
1442 		}
1443 		flags |= PM_SWAP;
1444 		migration = is_migration_entry(entry);
1445 		if (is_pfn_swap_entry(entry))
1446 			page = pfn_swap_entry_to_page(entry);
1447 		if (pte_marker_entry_uffd_wp(entry))
1448 			flags |= PM_UFFD_WP;
1449 	}
1450 
1451 	if (page && !PageAnon(page))
1452 		flags |= PM_FILE;
1453 	if (page && !migration && page_mapcount(page) == 1)
1454 		flags |= PM_MMAP_EXCLUSIVE;
1455 	if (vma->vm_flags & VM_SOFTDIRTY)
1456 		flags |= PM_SOFT_DIRTY;
1457 
1458 	return make_pme(frame, flags);
1459 }
1460 
1461 static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1462 			     struct mm_walk *walk)
1463 {
1464 	struct vm_area_struct *vma = walk->vma;
1465 	struct pagemapread *pm = walk->private;
1466 	spinlock_t *ptl;
1467 	pte_t *pte, *orig_pte;
1468 	int err = 0;
1469 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1470 	bool migration = false;
1471 
1472 	ptl = pmd_trans_huge_lock(pmdp, vma);
1473 	if (ptl) {
1474 		u64 flags = 0, frame = 0;
1475 		pmd_t pmd = *pmdp;
1476 		struct page *page = NULL;
1477 
1478 		if (vma->vm_flags & VM_SOFTDIRTY)
1479 			flags |= PM_SOFT_DIRTY;
1480 
1481 		if (pmd_present(pmd)) {
1482 			page = pmd_page(pmd);
1483 
1484 			flags |= PM_PRESENT;
1485 			if (pmd_soft_dirty(pmd))
1486 				flags |= PM_SOFT_DIRTY;
1487 			if (pmd_uffd_wp(pmd))
1488 				flags |= PM_UFFD_WP;
1489 			if (pm->show_pfn)
1490 				frame = pmd_pfn(pmd) +
1491 					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1492 		}
1493 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1494 		else if (is_swap_pmd(pmd)) {
1495 			swp_entry_t entry = pmd_to_swp_entry(pmd);
1496 			unsigned long offset;
1497 
1498 			if (pm->show_pfn) {
1499 				if (is_pfn_swap_entry(entry))
1500 					offset = swp_offset_pfn(entry);
1501 				else
1502 					offset = swp_offset(entry);
1503 				offset = offset +
1504 					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1505 				frame = swp_type(entry) |
1506 					(offset << MAX_SWAPFILES_SHIFT);
1507 			}
1508 			flags |= PM_SWAP;
1509 			if (pmd_swp_soft_dirty(pmd))
1510 				flags |= PM_SOFT_DIRTY;
1511 			if (pmd_swp_uffd_wp(pmd))
1512 				flags |= PM_UFFD_WP;
1513 			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1514 			migration = is_migration_entry(entry);
1515 			page = pfn_swap_entry_to_page(entry);
1516 		}
1517 #endif
1518 
1519 		if (page && !migration && page_mapcount(page) == 1)
1520 			flags |= PM_MMAP_EXCLUSIVE;
1521 
1522 		for (; addr != end; addr += PAGE_SIZE) {
1523 			pagemap_entry_t pme = make_pme(frame, flags);
1524 
1525 			err = add_to_pagemap(addr, &pme, pm);
1526 			if (err)
1527 				break;
1528 			if (pm->show_pfn) {
1529 				if (flags & PM_PRESENT)
1530 					frame++;
1531 				else if (flags & PM_SWAP)
1532 					frame += (1 << MAX_SWAPFILES_SHIFT);
1533 			}
1534 		}
1535 		spin_unlock(ptl);
1536 		return err;
1537 	}
1538 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1539 
1540 	/*
1541 	 * We can assume that @vma always points to a valid VMA and @end never
1542 	 * goes beyond vma->vm_end.
1543 	 */
1544 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1545 	if (!pte) {
1546 		walk->action = ACTION_AGAIN;
1547 		return err;
1548 	}
1549 	for (; addr < end; pte++, addr += PAGE_SIZE) {
1550 		pagemap_entry_t pme;
1551 
1552 		pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
1553 		err = add_to_pagemap(addr, &pme, pm);
1554 		if (err)
1555 			break;
1556 	}
1557 	pte_unmap_unlock(orig_pte, ptl);
1558 
1559 	cond_resched();
1560 
1561 	return err;
1562 }
1563 
1564 #ifdef CONFIG_HUGETLB_PAGE
1565 /* This function walks within one hugetlb entry in a single call */
1566 static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1567 				 unsigned long addr, unsigned long end,
1568 				 struct mm_walk *walk)
1569 {
1570 	struct pagemapread *pm = walk->private;
1571 	struct vm_area_struct *vma = walk->vma;
1572 	u64 flags = 0, frame = 0;
1573 	int err = 0;
1574 	pte_t pte;
1575 
1576 	if (vma->vm_flags & VM_SOFTDIRTY)
1577 		flags |= PM_SOFT_DIRTY;
1578 
1579 	pte = huge_ptep_get(ptep);
1580 	if (pte_present(pte)) {
1581 		struct page *page = pte_page(pte);
1582 
1583 		if (!PageAnon(page))
1584 			flags |= PM_FILE;
1585 
1586 		if (page_mapcount(page) == 1)
1587 			flags |= PM_MMAP_EXCLUSIVE;
1588 
1589 		if (huge_pte_uffd_wp(pte))
1590 			flags |= PM_UFFD_WP;
1591 
1592 		flags |= PM_PRESENT;
1593 		if (pm->show_pfn)
1594 			frame = pte_pfn(pte) +
1595 				((addr & ~hmask) >> PAGE_SHIFT);
1596 	} else if (pte_swp_uffd_wp_any(pte)) {
1597 		flags |= PM_UFFD_WP;
1598 	}
1599 
1600 	for (; addr != end; addr += PAGE_SIZE) {
1601 		pagemap_entry_t pme = make_pme(frame, flags);
1602 
1603 		err = add_to_pagemap(addr, &pme, pm);
1604 		if (err)
1605 			return err;
1606 		if (pm->show_pfn && (flags & PM_PRESENT))
1607 			frame++;
1608 	}
1609 
1610 	cond_resched();
1611 
1612 	return err;
1613 }
1614 #else
1615 #define pagemap_hugetlb_range	NULL
1616 #endif /* HUGETLB_PAGE */
1617 
1618 static const struct mm_walk_ops pagemap_ops = {
1619 	.pmd_entry	= pagemap_pmd_range,
1620 	.pte_hole	= pagemap_pte_hole,
1621 	.hugetlb_entry	= pagemap_hugetlb_range,
1622 	.walk_lock	= PGWALK_RDLOCK,
1623 };
1624 
1625 /*
1626  * /proc/pid/pagemap - an array mapping virtual pages to pfns
1627  *
1628  * For each page in the address space, this file contains one 64-bit entry
1629  * consisting of the following:
1630  *
1631  * Bits 0-54  page frame number (PFN) if present
1632  * Bits 0-4   swap type if swapped
1633  * Bits 5-54  swap offset if swapped
1634  * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1635  * Bit  56    page exclusively mapped
1636  * Bit  57    pte is uffd-wp write-protected
1637  * Bits 58-60 zero
1638  * Bit  61    page is file-page or shared-anon
1639  * Bit  62    page swapped
1640  * Bit  63    page present
1641  *
1642  * If the page is not present but in swap, then the PFN contains an
1643  * encoding of the swap file number and the page's offset into the
1644  * swap. Unmapped pages return a null PFN. This allows determining
1645  * precisely which pages are mapped (or in swap) and comparing mapped
1646  * pages between processes.
1647  *
1648  * Efficient users of this interface will use /proc/pid/maps to
1649  * determine which areas of memory are actually mapped and llseek to
1650  * skip over unmapped regions.
1651  */
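/*
 * Userspace sketch (editor's illustration, not kernel code; error
 * handling omitted).  Fetches the pagemap entry for one virtual
 * address of the current task:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static uint64_t pagemap_entry(unsigned long vaddr, long pagesz)
 *	{
 *		int fd = open("/proc/self/pagemap", O_RDONLY);
 *		uint64_t ent = 0;
 *
 *		// one 8-byte entry per virtual page
 *		pread(fd, &ent, sizeof(ent), (vaddr / pagesz) * 8);
 *		close(fd);
 *		return ent;	// bit 63: present; bits 0-54: PFN
 *	}
 *
 * Note that the PFN field reads back as zero unless the opener has
 * CAP_SYS_ADMIN; see the pm.show_pfn check in pagemap_read() below.
 */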
1652 static ssize_t pagemap_read(struct file *file, char __user *buf,
1653 			    size_t count, loff_t *ppos)
1654 {
1655 	struct mm_struct *mm = file->private_data;
1656 	struct pagemapread pm;
1657 	unsigned long src;
1658 	unsigned long svpfn;
1659 	unsigned long start_vaddr;
1660 	unsigned long end_vaddr;
1661 	int ret = 0, copied = 0;
1662 
1663 	if (!mm || !mmget_not_zero(mm))
1664 		goto out;
1665 
1666 	ret = -EINVAL;
1667 	/* file position must be aligned */
1668 	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1669 		goto out_mm;
1670 
1671 	ret = 0;
1672 	if (!count)
1673 		goto out_mm;
1674 
1675 	/* do not disclose physical addresses: attack vector */
1676 	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1677 
1678 	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1679 	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1680 	ret = -ENOMEM;
1681 	if (!pm.buffer)
1682 		goto out_mm;
1683 
1684 	src = *ppos;
1685 	svpfn = src / PM_ENTRY_BYTES;
1686 	end_vaddr = mm->task_size;
1687 
1688 	/* watch out for wraparound */
1689 	start_vaddr = end_vaddr;
1690 	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
1691 		unsigned long end;
1692 
1693 		ret = mmap_read_lock_killable(mm);
1694 		if (ret)
1695 			goto out_free;
1696 		start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
1697 		mmap_read_unlock(mm);
1698 
1699 		end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
1700 		if (end >= start_vaddr && end < mm->task_size)
1701 			end_vaddr = end;
1702 	}
1703 
1704 	/* Ensure the address is inside the task's address space */
1705 	if (start_vaddr > mm->task_size)
1706 		start_vaddr = end_vaddr;
1707 
1708 	ret = 0;
1709 	while (count && (start_vaddr < end_vaddr)) {
1710 		int len;
1711 		unsigned long end;
1712 
1713 		pm.pos = 0;
1714 		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1715 		/* overflow ? */
1716 		if (end < start_vaddr || end > end_vaddr)
1717 			end = end_vaddr;
1718 		ret = mmap_read_lock_killable(mm);
1719 		if (ret)
1720 			goto out_free;
1721 		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1722 		mmap_read_unlock(mm);
1723 		start_vaddr = end;
1724 
1725 		len = min(count, PM_ENTRY_BYTES * pm.pos);
1726 		if (copy_to_user(buf, pm.buffer, len)) {
1727 			ret = -EFAULT;
1728 			goto out_free;
1729 		}
1730 		copied += len;
1731 		buf += len;
1732 		count -= len;
1733 	}
1734 	*ppos += copied;
1735 	if (!ret || ret == PM_END_OF_BUFFER)
1736 		ret = copied;
1737 
1738 out_free:
1739 	kfree(pm.buffer);
1740 out_mm:
1741 	mmput(mm);
1742 out:
1743 	return ret;
1744 }
1745 
1746 static int pagemap_open(struct inode *inode, struct file *file)
1747 {
1748 	struct mm_struct *mm;
1749 
1750 	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1751 	if (IS_ERR(mm))
1752 		return PTR_ERR(mm);
1753 	file->private_data = mm;
1754 	return 0;
1755 }
1756 
1757 static int pagemap_release(struct inode *inode, struct file *file)
1758 {
1759 	struct mm_struct *mm = file->private_data;
1760 
1761 	if (mm)
1762 		mmdrop(mm);
1763 	return 0;
1764 }
1765 
1766 #define PM_SCAN_CATEGORIES	(PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN |	\
1767 				 PAGE_IS_FILE |	PAGE_IS_PRESENT |	\
1768 				 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |	\
1769 				 PAGE_IS_HUGE)
1770 #define PM_SCAN_FLAGS		(PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
1771 
1772 struct pagemap_scan_private {
1773 	struct pm_scan_arg arg;
1774 	unsigned long masks_of_interest, cur_vma_category;
1775 	struct page_region *vec_buf;
1776 	unsigned long vec_buf_len, vec_buf_index, found_pages;
1777 	struct page_region __user *vec_out;
1778 };
1779 
1780 static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
1781 					   struct vm_area_struct *vma,
1782 					   unsigned long addr, pte_t pte)
1783 {
1784 	unsigned long categories = 0;
1785 
1786 	if (pte_present(pte)) {
1787 		struct page *page;
1788 
1789 		categories |= PAGE_IS_PRESENT;
1790 		if (!pte_uffd_wp(pte))
1791 			categories |= PAGE_IS_WRITTEN;
1792 
1793 		if (p->masks_of_interest & PAGE_IS_FILE) {
1794 			page = vm_normal_page(vma, addr, pte);
1795 			if (page && !PageAnon(page))
1796 				categories |= PAGE_IS_FILE;
1797 		}
1798 
1799 		if (is_zero_pfn(pte_pfn(pte)))
1800 			categories |= PAGE_IS_PFNZERO;
1801 	} else if (is_swap_pte(pte)) {
1802 		swp_entry_t swp;
1803 
1804 		categories |= PAGE_IS_SWAPPED;
1805 		if (!pte_swp_uffd_wp_any(pte))
1806 			categories |= PAGE_IS_WRITTEN;
1807 
1808 		if (p->masks_of_interest & PAGE_IS_FILE) {
1809 			swp = pte_to_swp_entry(pte);
1810 			if (is_pfn_swap_entry(swp) &&
1811 			    !PageAnon(pfn_swap_entry_to_page(swp)))
1812 				categories |= PAGE_IS_FILE;
1813 		}
1814 	}
1815 
1816 	return categories;
1817 }
1818 
1819 static void make_uffd_wp_pte(struct vm_area_struct *vma,
1820 			     unsigned long addr, pte_t *pte)
1821 {
1822 	pte_t ptent = ptep_get(pte);
1823 
1824 	if (pte_present(ptent)) {
1825 		pte_t old_pte;
1826 
1827 		old_pte = ptep_modify_prot_start(vma, addr, pte);
1828 		ptent = pte_mkuffd_wp(ptent);
1829 		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1830 	} else if (is_swap_pte(ptent)) {
1831 		ptent = pte_swp_mkuffd_wp(ptent);
1832 		set_pte_at(vma->vm_mm, addr, pte, ptent);
1833 	} else {
1834 		set_pte_at(vma->vm_mm, addr, pte,
1835 			   make_pte_marker(PTE_MARKER_UFFD_WP));
1836 	}
1837 }
1838 
1839 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1840 static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
1841 					  struct vm_area_struct *vma,
1842 					  unsigned long addr, pmd_t pmd)
1843 {
1844 	unsigned long categories = PAGE_IS_HUGE;
1845 
1846 	if (pmd_present(pmd)) {
1847 		struct page *page;
1848 
1849 		categories |= PAGE_IS_PRESENT;
1850 		if (!pmd_uffd_wp(pmd))
1851 			categories |= PAGE_IS_WRITTEN;
1852 
1853 		if (p->masks_of_interest & PAGE_IS_FILE) {
1854 			page = vm_normal_page_pmd(vma, addr, pmd);
1855 			if (page && !PageAnon(page))
1856 				categories |= PAGE_IS_FILE;
1857 		}
1858 
1859 		if (is_zero_pfn(pmd_pfn(pmd)))
1860 			categories |= PAGE_IS_PFNZERO;
1861 	} else if (is_swap_pmd(pmd)) {
1862 		swp_entry_t swp;
1863 
1864 		categories |= PAGE_IS_SWAPPED;
1865 		if (!pmd_swp_uffd_wp(pmd))
1866 			categories |= PAGE_IS_WRITTEN;
1867 
1868 		if (p->masks_of_interest & PAGE_IS_FILE) {
1869 			swp = pmd_to_swp_entry(pmd);
1870 			if (is_pfn_swap_entry(swp) &&
1871 			    !PageAnon(pfn_swap_entry_to_page(swp)))
1872 				categories |= PAGE_IS_FILE;
1873 		}
1874 	}
1875 
1876 	return categories;
1877 }
1878 
1879 static void make_uffd_wp_pmd(struct vm_area_struct *vma,
1880 			     unsigned long addr, pmd_t *pmdp)
1881 {
1882 	pmd_t old, pmd = *pmdp;
1883 
1884 	if (pmd_present(pmd)) {
1885 		old = pmdp_invalidate_ad(vma, addr, pmdp);
1886 		pmd = pmd_mkuffd_wp(old);
1887 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1888 	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1889 		pmd = pmd_swp_mkuffd_wp(pmd);
1890 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1891 	}
1892 }
1893 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1894 
1895 #ifdef CONFIG_HUGETLB_PAGE
1896 static unsigned long pagemap_hugetlb_category(pte_t pte)
1897 {
1898 	unsigned long categories = PAGE_IS_HUGE;
1899 
1900 	/*
1901 	 * According to pagemap_hugetlb_range(), a file-backed HugeTLB
1902 	 * page cannot be swapped out, so PAGE_IS_FILE is not checked
1903 	 * for swapped pages.
1904 	 */
1905 	if (pte_present(pte)) {
1906 		categories |= PAGE_IS_PRESENT;
1907 		if (!huge_pte_uffd_wp(pte))
1908 			categories |= PAGE_IS_WRITTEN;
1909 		if (!PageAnon(pte_page(pte)))
1910 			categories |= PAGE_IS_FILE;
1911 		if (is_zero_pfn(pte_pfn(pte)))
1912 			categories |= PAGE_IS_PFNZERO;
1913 	} else if (is_swap_pte(pte)) {
1914 		categories |= PAGE_IS_SWAPPED;
1915 		if (!pte_swp_uffd_wp_any(pte))
1916 			categories |= PAGE_IS_WRITTEN;
1917 	}
1918 
1919 	return categories;
1920 }
1921 
1922 static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
1923 				  unsigned long addr, pte_t *ptep,
1924 				  pte_t ptent)
1925 {
1926 	unsigned long psize;
1927 
1928 	if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent))
1929 		return;
1930 
1931 	psize = huge_page_size(hstate_vma(vma));
1932 
1933 	if (is_hugetlb_entry_migration(ptent))
1934 		set_huge_pte_at(vma->vm_mm, addr, ptep,
1935 				pte_swp_mkuffd_wp(ptent), psize);
1936 	else if (!huge_pte_none(ptent))
1937 		huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
1938 					     huge_pte_mkuffd_wp(ptent));
1939 	else
1940 		set_huge_pte_at(vma->vm_mm, addr, ptep,
1941 				make_pte_marker(PTE_MARKER_UFFD_WP), psize);
1942 }
1943 #endif /* CONFIG_HUGETLB_PAGE */
1944 
1945 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
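/*
 * Drop the tail of the last pushed range again.  Used when a huge mapping
 * turns out to need a re-walk at small-page granularity (e.g. after
 * splitting a THP), so its pages must not be reported or counted twice.
 */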
1946 static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
1947 				       unsigned long addr, unsigned long end)
1948 {
1949 	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
1950 
1951 	if (cur_buf->start != addr)
1952 		cur_buf->end = addr;
1953 	else
1954 		cur_buf->start = cur_buf->end = 0;
1955 
1956 	p->found_pages -= (end - addr) / PAGE_SIZE;
1957 }
1958 #endif
1959 
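/*
 * A page is reported only if, after applying category_inverted, it has all
 * bits of category_mask set and (when category_anyof_mask is non-zero) at
 * least one bit of category_anyof_mask.
 */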
1960 static bool pagemap_scan_is_interesting_page(unsigned long categories,
1961 					     const struct pagemap_scan_private *p)
1962 {
1963 	categories ^= p->arg.category_inverted;
1964 	if ((categories & p->arg.category_mask) != p->arg.category_mask)
1965 		return false;
1966 	if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
1967 		return false;
1968 
1969 	return true;
1970 }
1971 
1972 static bool pagemap_scan_is_interesting_vma(unsigned long categories,
1973 					    const struct pagemap_scan_private *p)
1974 {
1975 	unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;
1976 
1977 	categories ^= p->arg.category_inverted;
1978 	if ((categories & required) != required)
1979 		return false;
1980 
1981 	return true;
1982 }
1983 
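/*
 * Per-VMA filter for the walk: async-WP-capable VMAs contribute
 * PAGE_IS_WPALLOWED, PM_SCAN_CHECK_WPASYNC fails the scan with -EPERM on
 * VMAs that cannot be async write-protected, and PFN mappings or VMAs that
 * cannot match are skipped (return 1) without aborting the walk.
 */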
1984 static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
1985 				  struct mm_walk *walk)
1986 {
1987 	struct pagemap_scan_private *p = walk->private;
1988 	struct vm_area_struct *vma = walk->vma;
1989 	unsigned long vma_category = 0;
1990 
1991 	if (userfaultfd_wp_async(vma) && userfaultfd_wp_use_markers(vma))
1992 		vma_category |= PAGE_IS_WPALLOWED;
1993 	else if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
1994 		return -EPERM;
1995 
1996 	if (vma->vm_flags & VM_PFNMAP)
1997 		return 1;
1998 
1999 	if (!pagemap_scan_is_interesting_vma(vma_category, p))
2000 		return 1;
2001 
2002 	p->cur_vma_category = vma_category;
2003 
2004 	return 0;
2005 }
2006 
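/*
 * Append [addr, end) with the given categories to the bounce buffer,
 * extending the current entry when the new range is adjacent and carries
 * the same categories.  Returns false once no further entry fits.
 */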
2007 static bool pagemap_scan_push_range(unsigned long categories,
2008 				    struct pagemap_scan_private *p,
2009 				    unsigned long addr, unsigned long end)
2010 {
2011 	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2012 
2013 	/*
2014 	 * When no output buffer is provided at all, the sentinel values won't
2015 	 * match here: `cur_buf->end` can only be non-zero once the current
2016 	 * entry holds a real (non-empty) range.
2017 	 */
2018 	if (addr == cur_buf->end && categories == cur_buf->categories) {
2019 		cur_buf->end = end;
2020 		return true;
2021 	}
2022 
2023 	if (cur_buf->end) {
2024 		if (p->vec_buf_index >= p->vec_buf_len - 1)
2025 			return false;
2026 
2027 		cur_buf = &p->vec_buf[++p->vec_buf_index];
2028 	}
2029 
2030 	cur_buf->start = addr;
2031 	cur_buf->end = end;
2032 	cur_buf->categories = categories;
2033 
2034 	return true;
2035 }
2036 
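/*
 * Record a matching range, clipping it to the remaining max_pages budget.
 * On clipping or a full bounce buffer, *end is moved back to the first page
 * that was not stored and -ENOSPC tells the walk to stop there.
 */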
2037 static int pagemap_scan_output(unsigned long categories,
2038 			       struct pagemap_scan_private *p,
2039 			       unsigned long addr, unsigned long *end)
2040 {
2041 	unsigned long n_pages, total_pages;
2042 	int ret = 0;
2043 
2044 	if (!p->vec_buf)
2045 		return 0;
2046 
2047 	categories &= p->arg.return_mask;
2048 
2049 	n_pages = (*end - addr) / PAGE_SIZE;
2050 	if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
2051 	    total_pages > p->arg.max_pages) {
2052 		size_t n_too_much = total_pages - p->arg.max_pages;
2053 		*end -= n_too_much * PAGE_SIZE;
2054 		n_pages -= n_too_much;
2055 		ret = -ENOSPC;
2056 	}
2057 
2058 	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
2059 		*end = addr;
2060 		n_pages = 0;
2061 		ret = -ENOSPC;
2062 	}
2063 
2064 	p->found_pages += n_pages;
2065 	if (ret)
2066 		p->arg.walk_end = *end;
2067 
2068 	return ret;
2069 }
2070 
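/*
 * Handle a PMD-mapped THP in one go.  Returns -ENOENT when there is no THP
 * (or when one had to be split because only part of it needs write
 * protection) so that the caller falls back to the per-PTE loop.
 */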
2071 static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
2072 				  unsigned long end, struct mm_walk *walk)
2073 {
2074 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2075 	struct pagemap_scan_private *p = walk->private;
2076 	struct vm_area_struct *vma = walk->vma;
2077 	unsigned long categories;
2078 	spinlock_t *ptl;
2079 	int ret = 0;
2080 
2081 	ptl = pmd_trans_huge_lock(pmd, vma);
2082 	if (!ptl)
2083 		return -ENOENT;
2084 
2085 	categories = p->cur_vma_category |
2086 		     pagemap_thp_category(p, vma, start, *pmd);
2087 
2088 	if (!pagemap_scan_is_interesting_page(categories, p))
2089 		goto out_unlock;
2090 
2091 	ret = pagemap_scan_output(categories, p, start, &end);
2092 	if (start == end)
2093 		goto out_unlock;
2094 
2095 	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2096 		goto out_unlock;
2097 	if (~categories & PAGE_IS_WRITTEN)
2098 		goto out_unlock;
2099 
2100 	/*
2101 	 * Break huge page into small pages if the WP operation
2102 	 * needs to be performed on a portion of the huge page.
2103 	 */
2104 	if (end != start + HPAGE_SIZE) {
2105 		spin_unlock(ptl);
2106 		split_huge_pmd(vma, pmd, start);
2107 		pagemap_scan_backout_range(p, start, end);
2108 		/* Report as if there was no THP */
2109 		return -ENOENT;
2110 	}
2111 
2112 	make_uffd_wp_pmd(vma, start, pmd);
2113 	flush_tlb_range(vma, start, end);
2114 out_unlock:
2115 	spin_unlock(ptl);
2116 	return ret;
2117 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
2118 	return -ENOENT;
2119 #endif
2120 }
2121 
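/*
 * Walk one PTE table.  Two fast paths avoid full categorization: a scan
 * without an output buffer that only write-protects, and the common
 * "report/WP written pages only" case where checking the uffd-wp bit is
 * enough.  The TLB is flushed once for the range actually write-protected.
 */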
2122 static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
2123 				  unsigned long end, struct mm_walk *walk)
2124 {
2125 	struct pagemap_scan_private *p = walk->private;
2126 	struct vm_area_struct *vma = walk->vma;
2127 	unsigned long addr, flush_end = 0;
2128 	pte_t *pte, *start_pte;
2129 	spinlock_t *ptl;
2130 	int ret;
2131 
2132 	arch_enter_lazy_mmu_mode();
2133 
2134 	ret = pagemap_scan_thp_entry(pmd, start, end, walk);
2135 	if (ret != -ENOENT) {
2136 		arch_leave_lazy_mmu_mode();
2137 		return ret;
2138 	}
2139 
2140 	ret = 0;
2141 	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
2142 	if (!pte) {
2143 		arch_leave_lazy_mmu_mode();
2144 		walk->action = ACTION_AGAIN;
2145 		return 0;
2146 	}
2147 
2148 	if (!p->vec_out) {
2149 		/* Fast path: no output requested, just write-protect. */
2150 		for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2151 			if (pte_uffd_wp(ptep_get(pte)))
2152 				continue;
2153 			make_uffd_wp_pte(vma, addr, pte);
2154 			if (!flush_end)
2155 				start = addr;
2156 			flush_end = addr + PAGE_SIZE;
2157 		}
2158 		goto flush_and_return;
2159 	}
2160 
2161 	if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
2162 	    p->arg.category_mask == PAGE_IS_WRITTEN &&
2163 	    p->arg.return_mask == PAGE_IS_WRITTEN) {
2164 		for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
2165 			unsigned long next = addr + PAGE_SIZE;
2166 
2167 			if (pte_uffd_wp(ptep_get(pte)))
2168 				continue;
2169 			ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
2170 						  p, addr, &next);
2171 			if (next == addr)
2172 				break;
2173 			if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2174 				continue;
2175 			make_uffd_wp_pte(vma, addr, pte);
2176 			if (!flush_end)
2177 				start = addr;
2178 			flush_end = next;
2179 		}
2180 		goto flush_and_return;
2181 	}
2182 
2183 	for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2184 		unsigned long categories = p->cur_vma_category |
2185 					   pagemap_page_category(p, vma, addr, ptep_get(pte));
2186 		unsigned long next = addr + PAGE_SIZE;
2187 
2188 		if (!pagemap_scan_is_interesting_page(categories, p))
2189 			continue;
2190 
2191 		ret = pagemap_scan_output(categories, p, addr, &next);
2192 		if (next == addr)
2193 			break;
2194 
2195 		if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2196 			continue;
2197 		if (~categories & PAGE_IS_WRITTEN)
2198 			continue;
2199 
2200 		make_uffd_wp_pte(vma, addr, pte);
2201 		if (!flush_end)
2202 			start = addr;
2203 		flush_end = next;
2204 	}
2205 
2206 flush_and_return:
2207 	if (flush_end)
2208 		flush_tlb_range(vma, start, addr);
2209 
2210 	pte_unmap_unlock(start_pte, ptl);
2211 	arch_leave_lazy_mmu_mode();
2212 
2213 	cond_resched();
2214 	return ret;
2215 }
2216 
2217 #ifdef CONFIG_HUGETLB_PAGE
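/*
 * HugeTLB ranges are reported as a whole.  Write protection additionally
 * takes the mapping's i_mmap lock to keep the huge PTE stable, and partial
 * huge-page WP is refused by stopping the scan at the start of the page.
 */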
2218 static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
2219 				      unsigned long start, unsigned long end,
2220 				      struct mm_walk *walk)
2221 {
2222 	struct pagemap_scan_private *p = walk->private;
2223 	struct vm_area_struct *vma = walk->vma;
2224 	unsigned long categories;
2225 	spinlock_t *ptl;
2226 	int ret = 0;
2227 	pte_t pte;
2228 
2229 	if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
2230 		/* Go the short route when not write-protecting pages. */
2231 
2232 		pte = huge_ptep_get(ptep);
2233 		categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2234 
2235 		if (!pagemap_scan_is_interesting_page(categories, p))
2236 			return 0;
2237 
2238 		return pagemap_scan_output(categories, p, start, &end);
2239 	}
2240 
2241 	i_mmap_lock_write(vma->vm_file->f_mapping);
2242 	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
2243 
2244 	pte = huge_ptep_get(ptep);
2245 	categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2246 
2247 	if (!pagemap_scan_is_interesting_page(categories, p))
2248 		goto out_unlock;
2249 
2250 	ret = pagemap_scan_output(categories, p, start, &end);
2251 	if (start == end)
2252 		goto out_unlock;
2253 
2254 	if (~categories & PAGE_IS_WRITTEN)
2255 		goto out_unlock;
2256 
2257 	if (end != start + HPAGE_SIZE) {
2258 		/* Partial HugeTLB page WP isn't possible. */
2259 		pagemap_scan_backout_range(p, start, end);
2260 		p->arg.walk_end = start;
2261 		ret = 0;
2262 		goto out_unlock;
2263 	}
2264 
2265 	make_uffd_wp_huge_pte(vma, start, ptep, pte);
2266 	flush_hugetlb_tlb_range(vma, start, end);
2267 
2268 out_unlock:
2269 	spin_unlock(ptl);
2270 	i_mmap_unlock_write(vma->vm_file->f_mapping);
2271 
2272 	return ret;
2273 }
2274 #else
2275 #define pagemap_scan_hugetlb_entry NULL
2276 #endif
2277 
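/*
 * Holes (unmapped ranges) only carry the VMA-level categories.  When
 * PM_SCAN_WP_MATCHING is set, the whole hole is handed to uffd_wp_range().
 */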
2278 static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
2279 				 int depth, struct mm_walk *walk)
2280 {
2281 	struct pagemap_scan_private *p = walk->private;
2282 	struct vm_area_struct *vma = walk->vma;
2283 	int ret, err;
2284 
2285 	if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
2286 		return 0;
2287 
2288 	ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
2289 	if (addr == end)
2290 		return ret;
2291 
2292 	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2293 		return ret;
2294 
2295 	err = uffd_wp_range(vma, addr, end - addr, true);
2296 	if (err < 0)
2297 		ret = err;
2298 
2299 	return ret;
2300 }
2301 
2302 static const struct mm_walk_ops pagemap_scan_ops = {
2303 	.test_walk = pagemap_scan_test_walk,
2304 	.pmd_entry = pagemap_scan_pmd_entry,
2305 	.pte_hole = pagemap_scan_pte_hole,
2306 	.hugetlb_entry = pagemap_scan_hugetlb_entry,
2307 };
2308 
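/*
 * Copy and sanity-check the ioctl argument: unknown flags or categories are
 * rejected, addresses are untagged and must be page-aligned and accessible,
 * and max_pages == 0 means "no limit".
 */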
2309 static int pagemap_scan_get_args(struct pm_scan_arg *arg,
2310 				 unsigned long uarg)
2311 {
2312 	if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
2313 		return -EFAULT;
2314 
2315 	if (arg->size != sizeof(struct pm_scan_arg))
2316 		return -EINVAL;
2317 
2318 	/* Validate requested features */
2319 	if (arg->flags & ~PM_SCAN_FLAGS)
2320 		return -EINVAL;
2321 	if ((arg->category_inverted | arg->category_mask |
2322 	     arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
2323 		return -EINVAL;
2324 
2325 	arg->start = untagged_addr((unsigned long)arg->start);
2326 	arg->end = untagged_addr((unsigned long)arg->end);
2327 	arg->vec = untagged_addr((unsigned long)arg->vec);
2328 
2329 	/* Validate memory pointers */
2330 	if (!IS_ALIGNED(arg->start, PAGE_SIZE))
2331 		return -EINVAL;
2332 	if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
2333 		return -EFAULT;
2334 	if (!arg->vec && arg->vec_len)
2335 		return -EINVAL;
2336 	if (arg->vec && !access_ok((void __user *)(long)arg->vec,
2337 			      arg->vec_len * sizeof(struct page_region)))
2338 		return -EFAULT;
2339 
2340 	/* Fixup default values */
2341 	arg->end = ALIGN(arg->end, PAGE_SIZE);
2342 	arg->walk_end = 0;
2343 	if (!arg->max_pages)
2344 		arg->max_pages = ULONG_MAX;
2345 
2346 	return 0;
2347 }
2348 
2349 static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
2350 				       unsigned long uargl)
2351 {
2352 	struct pm_scan_arg __user *uarg	= (void __user *)uargl;
2353 
2354 	if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
2355 		return -EFAULT;
2356 
2357 	return 0;
2358 }
2359 
2360 static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
2361 {
2362 	if (!p->arg.vec_len)
2363 		return 0;
2364 
2365 	p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
2366 			       p->arg.vec_len);
2367 	p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
2368 				   GFP_KERNEL);
2369 	if (!p->vec_buf)
2370 		return -ENOMEM;
2371 
2372 	p->vec_buf->start = p->vec_buf->end = 0;
2373 	p->vec_out = (struct page_region __user *)(long)p->arg.vec;
2374 
2375 	return 0;
2376 }
2377 
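/*
 * Copy the completed bounce-buffer entries (plus a trailing non-empty one,
 * if any) to userspace, advance the output pointer and reset the buffer.
 * Returns the number of entries copied or -EFAULT.
 */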
2378 static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
2379 {
2380 	const struct page_region *buf = p->vec_buf;
2381 	long n = p->vec_buf_index;
2382 
2383 	if (!p->vec_buf)
2384 		return 0;
2385 
2386 	if (buf[n].end != buf[n].start)
2387 		n++;
2388 
2389 	if (!n)
2390 		return 0;
2391 
2392 	if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
2393 		return -EFAULT;
2394 
2395 	p->arg.vec_len -= n;
2396 	p->vec_out += n;
2397 
2398 	p->vec_buf_index = 0;
2399 	p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
2400 	p->vec_buf->start = p->vec_buf->end = 0;
2401 
2402 	return n;
2403 }
2404 
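/*
 * Top-level PAGEMAP_SCAN handler: walk [start, end) in chunks under the mmap
 * read lock, flushing the bounce buffer to userspace between chunks, and
 * return the number of page_region entries written (or an error).  walk_end
 * is written back so userspace can see where the scan stopped and restart.
 */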
2405 static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
2406 {
2407 	struct mmu_notifier_range range;
2408 	struct pagemap_scan_private p = {0};
2409 	unsigned long walk_start;
2410 	size_t n_ranges_out = 0;
2411 	int ret;
2412 
2413 	ret = pagemap_scan_get_args(&p.arg, uarg);
2414 	if (ret)
2415 		return ret;
2416 
2417 	p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
2418 			      p.arg.return_mask;
2419 	ret = pagemap_scan_init_bounce_buffer(&p);
2420 	if (ret)
2421 		return ret;
2422 
2423 	/* Protection change for the range is going to happen. */
2424 	if (p.arg.flags & PM_SCAN_WP_MATCHING) {
2425 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
2426 					mm, p.arg.start, p.arg.end);
2427 		mmu_notifier_invalidate_range_start(&range);
2428 	}
2429 
2430 	for (walk_start = p.arg.start; walk_start < p.arg.end;
2431 			walk_start = p.arg.walk_end) {
2432 		long n_out;
2433 
2434 		if (fatal_signal_pending(current)) {
2435 			ret = -EINTR;
2436 			break;
2437 		}
2438 
2439 		ret = mmap_read_lock_killable(mm);
2440 		if (ret)
2441 			break;
2442 		ret = walk_page_range(mm, walk_start, p.arg.end,
2443 				      &pagemap_scan_ops, &p);
2444 		mmap_read_unlock(mm);
2445 
2446 		n_out = pagemap_scan_flush_buffer(&p);
2447 		if (n_out < 0)
2448 			ret = n_out;
2449 		else
2450 			n_ranges_out += n_out;
2451 
2452 		if (ret != -ENOSPC)
2453 			break;
2454 
2455 		if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
2456 			break;
2457 	}
2458 
2459 	/* ENOSPC signifies early stop (buffer full) from the walk. */
2460 	if (!ret || ret == -ENOSPC)
2461 		ret = n_ranges_out;
2462 
2463 	/* The walk_end isn't set when ret is zero */
2464 	if (!p.arg.walk_end)
2465 		p.arg.walk_end = p.arg.end;
2466 	if (pagemap_scan_writeback_args(&p.arg, uarg))
2467 		ret = -EFAULT;
2468 
2469 	if (p.arg.flags & PM_SCAN_WP_MATCHING)
2470 		mmu_notifier_invalidate_range_end(&range);
2471 
2472 	kfree(p.vec_buf);
2473 	return ret;
2474 }
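/*
 * Illustrative userspace sketch (not part of this file): roughly how a
 * PAGEMAP_SCAN call could look.  The struct, flag and category names are
 * the ones handled above; the exact UAPI header (assumed to be <linux/fs.h>
 * here) and error handling are omitted/assumed.
 *
 *	struct page_region regions[64];
 *	struct pm_scan_arg arg = {
 *		.size          = sizeof(arg),
 *		.start         = start_addr,		// must be page-aligned
 *		.end           = end_addr,
 *		.vec           = (unsigned long)regions,
 *		.vec_len       = 64,
 *		.max_pages     = 0,			// 0 == no limit
 *		.category_mask = PAGE_IS_WRITTEN,	// only written pages
 *		.return_mask   = PAGE_IS_WRITTEN,
 *		.flags         = 0,			// or PM_SCAN_WP_MATCHING
 *	};
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	long n = ioctl(fd, PAGEMAP_SCAN, &arg);		// n = regions filled
 */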
2475 
2476 static long do_pagemap_cmd(struct file *file, unsigned int cmd,
2477 			   unsigned long arg)
2478 {
2479 	struct mm_struct *mm = file->private_data;
2480 
2481 	switch (cmd) {
2482 	case PAGEMAP_SCAN:
2483 		return do_pagemap_scan(mm, arg);
2484 
2485 	default:
2486 		return -EINVAL;
2487 	}
2488 }
2489 
2490 const struct file_operations proc_pagemap_operations = {
2491 	.llseek		= mem_lseek, /* borrow this */
2492 	.read		= pagemap_read,
2493 	.open		= pagemap_open,
2494 	.release	= pagemap_release,
2495 	.unlocked_ioctl = do_pagemap_cmd,
2496 	.compat_ioctl	= do_pagemap_cmd,
2497 };
2498 #endif /* CONFIG_PROC_PAGE_MONITOR */
2499 
2500 #ifdef CONFIG_NUMA
2501 
2502 struct numa_maps {
2503 	unsigned long pages;
2504 	unsigned long anon;
2505 	unsigned long active;
2506 	unsigned long writeback;
2507 	unsigned long mapcount_max;
2508 	unsigned long dirty;
2509 	unsigned long swapcache;
2510 	unsigned long node[MAX_NUMNODES];
2511 };
2512 
2513 struct numa_maps_private {
2514 	struct proc_maps_private proc_maps;
2515 	struct numa_maps md;
2516 };
2517 
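/*
 * Accumulate per-node and per-state counters for nr_pages pages backed by
 * the given page into the numa_maps scratch area.
 */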
2518 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
2519 			unsigned long nr_pages)
2520 {
2521 	int count = page_mapcount(page);
2522 
2523 	md->pages += nr_pages;
2524 	if (pte_dirty || PageDirty(page))
2525 		md->dirty += nr_pages;
2526 
2527 	if (PageSwapCache(page))
2528 		md->swapcache += nr_pages;
2529 
2530 	if (PageActive(page) || PageUnevictable(page))
2531 		md->active += nr_pages;
2532 
2533 	if (PageWriteback(page))
2534 		md->writeback += nr_pages;
2535 
2536 	if (PageAnon(page))
2537 		md->anon += nr_pages;
2538 
2539 	if (count > md->mapcount_max)
2540 		md->mapcount_max = count;
2541 
2542 	md->node[page_to_nid(page)] += nr_pages;
2543 }
2544 
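/*
 * Return the page behind a present, normal (non-device, non-reserved) PTE
 * on a node that actually has memory, or NULL if it should be skipped.
 */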
2545 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
2546 		unsigned long addr)
2547 {
2548 	struct page *page;
2549 	int nid;
2550 
2551 	if (!pte_present(pte))
2552 		return NULL;
2553 
2554 	page = vm_normal_page(vma, addr, pte);
2555 	if (!page || is_zone_device_page(page))
2556 		return NULL;
2557 
2558 	if (PageReserved(page))
2559 		return NULL;
2560 
2561 	nid = page_to_nid(page);
2562 	if (!node_isset(nid, node_states[N_MEMORY]))
2563 		return NULL;
2564 
2565 	return page;
2566 }
2567 
2568 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2569 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
2570 					      struct vm_area_struct *vma,
2571 					      unsigned long addr)
2572 {
2573 	struct page *page;
2574 	int nid;
2575 
2576 	if (!pmd_present(pmd))
2577 		return NULL;
2578 
2579 	page = vm_normal_page_pmd(vma, addr, pmd);
2580 	if (!page)
2581 		return NULL;
2582 
2583 	if (PageReserved(page))
2584 		return NULL;
2585 
2586 	nid = page_to_nid(page);
2587 	if (!node_isset(nid, node_states[N_MEMORY]))
2588 		return NULL;
2589 
2590 	return page;
2591 }
2592 #endif
2593 
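/*
 * Page-table walker callback feeding gather_stats(): a mapped THP is
 * accounted as HPAGE_PMD_SIZE worth of pages, otherwise each present PTE is
 * accounted individually.
 */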
2594 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
2595 		unsigned long end, struct mm_walk *walk)
2596 {
2597 	struct numa_maps *md = walk->private;
2598 	struct vm_area_struct *vma = walk->vma;
2599 	spinlock_t *ptl;
2600 	pte_t *orig_pte;
2601 	pte_t *pte;
2602 
2603 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2604 	ptl = pmd_trans_huge_lock(pmd, vma);
2605 	if (ptl) {
2606 		struct page *page;
2607 
2608 		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
2609 		if (page)
2610 			gather_stats(page, md, pmd_dirty(*pmd),
2611 				     HPAGE_PMD_SIZE/PAGE_SIZE);
2612 		spin_unlock(ptl);
2613 		return 0;
2614 	}
2615 #endif
2616 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2617 	if (!pte) {
2618 		walk->action = ACTION_AGAIN;
2619 		return 0;
2620 	}
2621 	do {
2622 		pte_t ptent = ptep_get(pte);
2623 		struct page *page = can_gather_numa_stats(ptent, vma, addr);
2624 		if (!page)
2625 			continue;
2626 		gather_stats(page, md, pte_dirty(ptent), 1);
2627 
2628 	} while (pte++, addr += PAGE_SIZE, addr != end);
2629 	pte_unmap_unlock(orig_pte, ptl);
2630 	cond_resched();
2631 	return 0;
2632 }
2633 #ifdef CONFIG_HUGETLB_PAGE
2634 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2635 		unsigned long addr, unsigned long end, struct mm_walk *walk)
2636 {
2637 	pte_t huge_pte = huge_ptep_get(pte);
2638 	struct numa_maps *md;
2639 	struct page *page;
2640 
2641 	if (!pte_present(huge_pte))
2642 		return 0;
2643 
2644 	page = pte_page(huge_pte);
2645 
2646 	md = walk->private;
2647 	gather_stats(page, md, pte_dirty(huge_pte), 1);
2648 	return 0;
2649 }
2650 
2651 #else
2652 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2653 		unsigned long addr, unsigned long end, struct mm_walk *walk)
2654 {
2655 	return 0;
2656 }
2657 #endif
2658 
2659 static const struct mm_walk_ops show_numa_ops = {
2660 	.hugetlb_entry = gather_hugetlb_stats,
2661 	.pmd_entry = gather_pte_stats,
2662 	.walk_lock = PGWALK_RDLOCK,
2663 };
2664 
2665 /*
2666  * Display pages allocated per node and memory policy via /proc.
2667  */
2668 static int show_numa_map(struct seq_file *m, void *v)
2669 {
2670 	struct numa_maps_private *numa_priv = m->private;
2671 	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
2672 	struct vm_area_struct *vma = v;
2673 	struct numa_maps *md = &numa_priv->md;
2674 	struct file *file = vma->vm_file;
2675 	struct mm_struct *mm = vma->vm_mm;
2676 	struct mempolicy *pol;
2677 	char buffer[64];
2678 	int nid;
2679 
2680 	if (!mm)
2681 		return 0;
2682 
2683 	/* Ensure we start with an empty set of numa_maps statistics. */
2684 	memset(md, 0, sizeof(*md));
2685 
2686 	pol = __get_vma_policy(vma, vma->vm_start);
2687 	if (pol) {
2688 		mpol_to_str(buffer, sizeof(buffer), pol);
2689 		mpol_cond_put(pol);
2690 	} else {
2691 		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
2692 	}
2693 
2694 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2695 
2696 	if (file) {
2697 		seq_puts(m, " file=");
2698 		seq_file_path(m, file, "\n\t= ");
2699 	} else if (vma_is_initial_heap(vma)) {
2700 		seq_puts(m, " heap");
2701 	} else if (vma_is_initial_stack(vma)) {
2702 		seq_puts(m, " stack");
2703 	}
2704 
2705 	if (is_vm_hugetlb_page(vma))
2706 		seq_puts(m, " huge");
2707 
2708 	/* mmap_lock is held by m_start */
2709 	walk_page_vma(vma, &show_numa_ops, md);
2710 
2711 	if (!md->pages)
2712 		goto out;
2713 
2714 	if (md->anon)
2715 		seq_printf(m, " anon=%lu", md->anon);
2716 
2717 	if (md->dirty)
2718 		seq_printf(m, " dirty=%lu", md->dirty);
2719 
2720 	if (md->pages != md->anon && md->pages != md->dirty)
2721 		seq_printf(m, " mapped=%lu", md->pages);
2722 
2723 	if (md->mapcount_max > 1)
2724 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2725 
2726 	if (md->swapcache)
2727 		seq_printf(m, " swapcache=%lu", md->swapcache);
2728 
2729 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2730 		seq_printf(m, " active=%lu", md->active);
2731 
2732 	if (md->writeback)
2733 		seq_printf(m, " writeback=%lu", md->writeback);
2734 
2735 	for_each_node_state(nid, N_MEMORY)
2736 		if (md->node[nid])
2737 			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
2738 
2739 	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
2740 out:
2741 	seq_putc(m, '\n');
2742 	return 0;
2743 }
2744 
2745 static const struct seq_operations proc_pid_numa_maps_op = {
2746 	.start  = m_start,
2747 	.next   = m_next,
2748 	.stop   = m_stop,
2749 	.show   = show_numa_map,
2750 };
2751 
2752 static int pid_numa_maps_open(struct inode *inode, struct file *file)
2753 {
2754 	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
2755 				sizeof(struct numa_maps_private));
2756 }
2757 
2758 const struct file_operations proc_pid_numa_maps_operations = {
2759 	.open		= pid_numa_maps_open,
2760 	.read		= seq_read,
2761 	.llseek		= seq_lseek,
2762 	.release	= proc_map_release,
2763 };
2764 
2765 #endif /* CONFIG_NUMA */
2766