xref: /linux/fs/proc/task_mmu.c (revision cc8c418b4fc09ed58ddd27b8e90ec797e9ca1e67)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/pagewalk.h>
3 #include <linux/mm_inline.h>
4 #include <linux/hugetlb.h>
5 #include <linux/huge_mm.h>
6 #include <linux/mount.h>
7 #include <linux/seq_file.h>
8 #include <linux/highmem.h>
9 #include <linux/ptrace.h>
10 #include <linux/slab.h>
11 #include <linux/pagemap.h>
12 #include <linux/mempolicy.h>
13 #include <linux/rmap.h>
14 #include <linux/swap.h>
15 #include <linux/sched/mm.h>
16 #include <linux/swapops.h>
17 #include <linux/mmu_notifier.h>
18 #include <linux/page_idle.h>
19 #include <linux/shmem_fs.h>
20 #include <linux/uaccess.h>
21 #include <linux/pkeys.h>
22 
23 #include <asm/elf.h>
24 #include <asm/tlb.h>
25 #include <asm/tlbflush.h>
26 #include "internal.h"
27 
28 #define SEQ_PUT_DEC(str, val) \
29 		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
30 void task_mem(struct seq_file *m, struct mm_struct *mm)
31 {
32 	unsigned long text, lib, swap, anon, file, shmem;
33 	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
34 
35 	anon = get_mm_counter(mm, MM_ANONPAGES);
36 	file = get_mm_counter(mm, MM_FILEPAGES);
37 	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
38 
39 	/*
40 	 * Note: to minimize their overhead, mm maintains hiwater_vm and
41 	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
42 	 * collector of these hiwater stats must therefore get total_vm
43 	 * and rss too, which will usually be the higher.  Barriers? not
44 	 * worth the effort, such snapshots can always be inconsistent.
45 	 */
46 	hiwater_vm = total_vm = mm->total_vm;
47 	if (hiwater_vm < mm->hiwater_vm)
48 		hiwater_vm = mm->hiwater_vm;
49 	hiwater_rss = total_rss = anon + file + shmem;
50 	if (hiwater_rss < mm->hiwater_rss)
51 		hiwater_rss = mm->hiwater_rss;
52 
53 	/* split executable areas between text and lib */
54 	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
55 	text = min(text, mm->exec_vm << PAGE_SHIFT);
56 	lib = (mm->exec_vm << PAGE_SHIFT) - text;
57 
58 	swap = get_mm_counter(mm, MM_SWAPENTS);
59 	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
60 	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
61 	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
62 	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
63 	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
64 	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
65 	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
66 	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
67 	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
68 	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
69 	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
70 	seq_put_decimal_ull_width(m,
71 		    " kB\nVmExe:\t", text >> 10, 8);
72 	seq_put_decimal_ull_width(m,
73 		    " kB\nVmLib:\t", lib >> 10, 8);
74 	seq_put_decimal_ull_width(m,
75 		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
76 	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
77 	seq_puts(m, " kB\n");
78 	hugetlb_report_usage(m, mm);
79 }
80 #undef SEQ_PUT_DEC
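
/*
 * Illustrative userspace sketch (not part of this file): consuming the
 * block that task_mem() emits into /proc/pid/status.  The key strings are
 * the literals printed above; status_kb() is a hypothetical helper invented
 * for this example.  task_mem() computes total_rss as anon + file + shmem,
 * so the three Rss* fields should sum to VmRSS (modulo racy, unlocked
 * counter reads, per the hiwater comment above).
 */
#include <stdio.h>
#include <string.h>

static long status_kb(const char *key)
{
	char line[256];
	long val = -1;
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, key, strlen(key))) {
			sscanf(line + strlen(key), "%ld", &val);
			break;
		}
	}
	fclose(f);
	return val;		/* in kB, as scaled by SEQ_PUT_DEC */
}

int main(void)
{
	long rss = status_kb("VmRSS:");
	long sum = status_kb("RssAnon:") + status_kb("RssFile:") +
		   status_kb("RssShmem:");

	printf("VmRSS=%ld kB, RssAnon+RssFile+RssShmem=%ld kB\n", rss, sum);
	return 0;
}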
81 
82 unsigned long task_vsize(struct mm_struct *mm)
83 {
84 	return PAGE_SIZE * mm->total_vm;
85 }
86 
87 unsigned long task_statm(struct mm_struct *mm,
88 			 unsigned long *shared, unsigned long *text,
89 			 unsigned long *data, unsigned long *resident)
90 {
91 	*shared = get_mm_counter(mm, MM_FILEPAGES) +
92 			get_mm_counter(mm, MM_SHMEMPAGES);
93 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
94 								>> PAGE_SHIFT;
95 	*data = mm->data_vm + mm->stack_vm;
96 	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
97 	return mm->total_vm;
98 }
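
/*
 * Illustrative reader for the task_statm() side (a sketch, assuming the
 * usual /proc/pid/statm field order: size, resident, shared, text, lib,
 * data, dt, all counted in pages).
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long size, resident, shared, text, lib, data, dt;
	long page_kb = sysconf(_SC_PAGESIZE) / 1024;
	FILE *f = fopen("/proc/self/statm", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu", &size, &resident,
		   &shared, &text, &lib, &data, &dt) != 7) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* Fields are page counts: scale by the page size for kB. */
	printf("VSZ %lu kB, RSS %lu kB\n", size * page_kb, resident * page_kb);
	return 0;
}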
99 
100 #ifdef CONFIG_NUMA
101 /*
102  * Save get_task_policy() for show_numa_map().
103  */
104 static void hold_task_mempolicy(struct proc_maps_private *priv)
105 {
106 	struct task_struct *task = priv->task;
107 
108 	task_lock(task);
109 	priv->task_mempolicy = get_task_policy(task);
110 	mpol_get(priv->task_mempolicy);
111 	task_unlock(task);
112 }
113 static void release_task_mempolicy(struct proc_maps_private *priv)
114 {
115 	mpol_put(priv->task_mempolicy);
116 }
117 #else
118 static void hold_task_mempolicy(struct proc_maps_private *priv)
119 {
120 }
121 static void release_task_mempolicy(struct proc_maps_private *priv)
122 {
123 }
124 #endif
125 
126 static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
127 						loff_t *ppos)
128 {
129 	struct vm_area_struct *vma = vma_next(&priv->iter);
130 
131 	if (vma) {
132 		*ppos = vma->vm_start;
133 	} else {
134 		*ppos = -2UL;
135 		vma = get_gate_vma(priv->mm);
136 	}
137 
138 	return vma;
139 }
140 
141 static void *m_start(struct seq_file *m, loff_t *ppos)
142 {
143 	struct proc_maps_private *priv = m->private;
144 	unsigned long last_addr = *ppos;
145 	struct mm_struct *mm;
146 
147 	/* See m_next(). Zero at the start or after lseek. */
148 	if (last_addr == -1UL)
149 		return NULL;
150 
151 	priv->task = get_proc_task(priv->inode);
152 	if (!priv->task)
153 		return ERR_PTR(-ESRCH);
154 
155 	mm = priv->mm;
156 	if (!mm || !mmget_not_zero(mm)) {
157 		put_task_struct(priv->task);
158 		priv->task = NULL;
159 		return NULL;
160 	}
161 
162 	if (mmap_read_lock_killable(mm)) {
163 		mmput(mm);
164 		put_task_struct(priv->task);
165 		priv->task = NULL;
166 		return ERR_PTR(-EINTR);
167 	}
168 
169 	vma_iter_init(&priv->iter, mm, last_addr);
170 	hold_task_mempolicy(priv);
171 	if (last_addr == -2UL)
172 		return get_gate_vma(mm);
173 
174 	return proc_get_vma(priv, ppos);
175 }
176 
177 static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
178 {
179 	if (*ppos == -2UL) {
180 		*ppos = -1UL;
181 		return NULL;
182 	}
183 	return proc_get_vma(m->private, ppos);
184 }
185 
186 static void m_stop(struct seq_file *m, void *v)
187 {
188 	struct proc_maps_private *priv = m->private;
189 	struct mm_struct *mm = priv->mm;
190 
191 	if (!priv->task)
192 		return;
193 
194 	release_task_mempolicy(priv);
195 	mmap_read_unlock(mm);
196 	mmput(mm);
197 	put_task_struct(priv->task);
198 	priv->task = NULL;
199 }
200 
201 static int proc_maps_open(struct inode *inode, struct file *file,
202 			const struct seq_operations *ops, int psize)
203 {
204 	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
205 
206 	if (!priv)
207 		return -ENOMEM;
208 
209 	priv->inode = inode;
210 	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
211 	if (IS_ERR(priv->mm)) {
212 		int err = PTR_ERR(priv->mm);
213 
214 		seq_release_private(inode, file);
215 		return err;
216 	}
217 
218 	return 0;
219 }
220 
221 static int proc_map_release(struct inode *inode, struct file *file)
222 {
223 	struct seq_file *seq = file->private_data;
224 	struct proc_maps_private *priv = seq->private;
225 
226 	if (priv->mm)
227 		mmdrop(priv->mm);
228 
229 	return seq_release_private(inode, file);
230 }
231 
232 static int do_maps_open(struct inode *inode, struct file *file,
233 			const struct seq_operations *ops)
234 {
235 	return proc_maps_open(inode, file, ops,
236 				sizeof(struct proc_maps_private));
237 }
238 
239 /*
240  * Indicate if the VMA is a stack for the given task; for
241  * /proc/PID/maps that is the stack of the main task.
242  */
243 static int is_stack(struct vm_area_struct *vma)
244 {
245 	/*
246 	 * We make no effort to guess what a given thread considers to be
247 	 * its "stack".  It's not even well-defined for programs written
248 	 * in languages like Go.
249 	 */
250 	return vma->vm_start <= vma->vm_mm->start_stack &&
251 		vma->vm_end >= vma->vm_mm->start_stack;
252 }
253 
254 static void show_vma_header_prefix(struct seq_file *m,
255 				   unsigned long start, unsigned long end,
256 				   vm_flags_t flags, unsigned long long pgoff,
257 				   dev_t dev, unsigned long ino)
258 {
259 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
260 	seq_put_hex_ll(m, NULL, start, 8);
261 	seq_put_hex_ll(m, "-", end, 8);
262 	seq_putc(m, ' ');
263 	seq_putc(m, flags & VM_READ ? 'r' : '-');
264 	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
265 	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
266 	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
267 	seq_put_hex_ll(m, " ", pgoff, 8);
268 	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
269 	seq_put_hex_ll(m, ":", MINOR(dev), 2);
270 	seq_put_decimal_ull(m, " ", ino);
271 	seq_putc(m, ' ');
272 }
273 
274 static void
275 show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
276 {
277 	struct mm_struct *mm = vma->vm_mm;
278 	struct file *file = vma->vm_file;
279 	vm_flags_t flags = vma->vm_flags;
280 	unsigned long ino = 0;
281 	unsigned long long pgoff = 0;
282 	unsigned long start, end;
283 	dev_t dev = 0;
284 	const char *name = NULL;
285 
286 	if (file) {
287 		struct inode *inode = file_inode(vma->vm_file);
288 		dev = inode->i_sb->s_dev;
289 		ino = inode->i_ino;
290 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
291 	}
292 
293 	start = vma->vm_start;
294 	end = vma->vm_end;
295 	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
296 
297 	/*
298 	 * Print the dentry name for named mappings, and a
299 	 * special [heap] marker for the heap:
300 	 */
301 	if (file) {
302 		seq_pad(m, ' ');
303 		seq_file_path(m, file, "\n");
304 		goto done;
305 	}
306 
307 	if (vma->vm_ops && vma->vm_ops->name) {
308 		name = vma->vm_ops->name(vma);
309 		if (name)
310 			goto done;
311 	}
312 
313 	name = arch_vma_name(vma);
314 	if (!name) {
315 		struct anon_vma_name *anon_name;
316 
317 		if (!mm) {
318 			name = "[vdso]";
319 			goto done;
320 		}
321 
322 		if (vma->vm_start <= mm->brk &&
323 		    vma->vm_end >= mm->start_brk) {
324 			name = "[heap]";
325 			goto done;
326 		}
327 
328 		if (is_stack(vma)) {
329 			name = "[stack]";
330 			goto done;
331 		}
332 
333 		anon_name = anon_vma_name(vma);
334 		if (anon_name) {
335 			seq_pad(m, ' ');
336 			seq_printf(m, "[anon:%s]", anon_name->name);
337 		}
338 	}
339 
340 done:
341 	if (name) {
342 		seq_pad(m, ' ');
343 		seq_puts(m, name);
344 	}
345 	seq_putc(m, '\n');
346 }
347 
348 static int show_map(struct seq_file *m, void *v)
349 {
350 	show_map_vma(m, v);
351 	return 0;
352 }
353 
354 static const struct seq_operations proc_pid_maps_op = {
355 	.start	= m_start,
356 	.next	= m_next,
357 	.stop	= m_stop,
358 	.show	= show_map
359 };
360 
361 static int pid_maps_open(struct inode *inode, struct file *file)
362 {
363 	return do_maps_open(inode, file, &proc_pid_maps_op);
364 }
365 
366 const struct file_operations proc_pid_maps_operations = {
367 	.open		= pid_maps_open,
368 	.read		= seq_read,
369 	.llseek		= seq_lseek,
370 	.release	= proc_map_release,
371 };
372 
373 /*
374  * Proportional Set Size (PSS): my share of RSS.
375  *
376  * PSS of a process is the count of pages it has in memory, where each
377  * page is divided by the number of processes sharing it.  So if a
378  * process has 1000 pages all to itself, and 1000 shared with one other
379  * process, its PSS will be 1500.
380  *
381  * To keep (accumulated) division errors low, we adopt a 64bit
382  * fixed-point pss counter. So (pss >> PSS_SHIFT) would be the
383  * real byte count.
384  *
385  * A shift of 12 before division means (assuming 4K page size):
386  * 	- 1M 3-user-pages add up to 8KB errors;
387  * 	- supports mapcount up to 2^24, or 16M;
388  * 	- supports PSS up to 2^52 bytes, or 4PB.
389  */
390 #define PSS_SHIFT 12
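
/*
 * Worked example of the fixed-point accounting (an illustrative userspace
 * sketch mirroring smaps_account() below, assuming 4K pages): 1000 private
 * pages plus 1000 pages shared with one other process yield the 1500-page
 * PSS from the comment above.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE	4096ULL
#define EX_PSS_SHIFT	12

int main(void)
{
	uint64_t pss = 0;
	int i;

	for (i = 0; i < 1000; i++)	/* mapcount == 1: full weight */
		pss += EX_PAGE_SIZE << EX_PSS_SHIFT;
	for (i = 0; i < 1000; i++)	/* mapcount == 2: half weight */
		pss += (EX_PAGE_SIZE << EX_PSS_SHIFT) / 2;

	/* (pss >> PSS_SHIFT) is the real byte count; print it in kB. */
	printf("PSS = %llu kB\n",
	       (unsigned long long)((pss >> EX_PSS_SHIFT) >> 10));
	return 0;
}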
391 
392 #ifdef CONFIG_PROC_PAGE_MONITOR
393 struct mem_size_stats {
394 	unsigned long resident;
395 	unsigned long shared_clean;
396 	unsigned long shared_dirty;
397 	unsigned long private_clean;
398 	unsigned long private_dirty;
399 	unsigned long referenced;
400 	unsigned long anonymous;
401 	unsigned long lazyfree;
402 	unsigned long anonymous_thp;
403 	unsigned long shmem_thp;
404 	unsigned long file_thp;
405 	unsigned long swap;
406 	unsigned long shared_hugetlb;
407 	unsigned long private_hugetlb;
408 	u64 pss;
409 	u64 pss_anon;
410 	u64 pss_file;
411 	u64 pss_shmem;
412 	u64 pss_dirty;
413 	u64 pss_locked;
414 	u64 swap_pss;
415 };
416 
417 static void smaps_page_accumulate(struct mem_size_stats *mss,
418 		struct page *page, unsigned long size, unsigned long pss,
419 		bool dirty, bool locked, bool private)
420 {
421 	mss->pss += pss;
422 
423 	if (PageAnon(page))
424 		mss->pss_anon += pss;
425 	else if (PageSwapBacked(page))
426 		mss->pss_shmem += pss;
427 	else
428 		mss->pss_file += pss;
429 
430 	if (locked)
431 		mss->pss_locked += pss;
432 
433 	if (dirty || PageDirty(page)) {
434 		mss->pss_dirty += pss;
435 		if (private)
436 			mss->private_dirty += size;
437 		else
438 			mss->shared_dirty += size;
439 	} else {
440 		if (private)
441 			mss->private_clean += size;
442 		else
443 			mss->shared_clean += size;
444 	}
445 }
446 
447 static void smaps_account(struct mem_size_stats *mss, struct page *page,
448 		bool compound, bool young, bool dirty, bool locked,
449 		bool migration)
450 {
451 	int i, nr = compound ? compound_nr(page) : 1;
452 	unsigned long size = nr * PAGE_SIZE;
453 
454 	/*
455 	 * First accumulate quantities that depend only on |size| and the type
456 	 * of the compound page.
457 	 */
458 	if (PageAnon(page)) {
459 		mss->anonymous += size;
460 		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
461 			mss->lazyfree += size;
462 	}
463 
464 	mss->resident += size;
465 	/* Accumulate the size in pages that have been accessed. */
466 	if (young || page_is_young(page) || PageReferenced(page))
467 		mss->referenced += size;
468 
469 	/*
470 	 * Then accumulate quantities that may depend on sharing, or that may
471 	 * differ page-by-page.
472 	 *
473 	 * page_count(page) == 1 guarantees the page is mapped exactly once.
474 	 * If any subpage of the compound page is mapped with a PTE it would
475 	 * elevate page_count().
476 	 *
477 	 * The page_mapcount() is called to get a snapshot of the mapcount.
478 	 * Without holding the page lock this snapshot can be slightly wrong as
479 	 * we cannot always read the mapcount atomically.  It is not safe to
480 	 * call page_mapcount() even with PTL held if the page is not mapped,
481 	 * especially for migration entries.  Treat regular migration entries
482 	 * as mapcount == 1.
483 	 */
484 	if ((page_count(page) == 1) || migration) {
485 		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
486 			locked, true);
487 		return;
488 	}
489 	for (i = 0; i < nr; i++, page++) {
490 		int mapcount = page_mapcount(page);
491 		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
492 		if (mapcount >= 2)
493 			pss /= mapcount;
494 		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
495 				      mapcount < 2);
496 	}
497 }
498 
499 #ifdef CONFIG_SHMEM
500 static int smaps_pte_hole(unsigned long addr, unsigned long end,
501 			  __always_unused int depth, struct mm_walk *walk)
502 {
503 	struct mem_size_stats *mss = walk->private;
504 	struct vm_area_struct *vma = walk->vma;
505 
506 	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
507 					      linear_page_index(vma, addr),
508 					      linear_page_index(vma, end));
509 
510 	return 0;
511 }
512 #else
513 #define smaps_pte_hole		NULL
514 #endif /* CONFIG_SHMEM */
515 
516 static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
517 {
518 #ifdef CONFIG_SHMEM
519 	if (walk->ops->pte_hole) {
520 		/* depth is not used */
521 		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
522 	}
523 #endif
524 }
525 
526 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
527 		struct mm_walk *walk)
528 {
529 	struct mem_size_stats *mss = walk->private;
530 	struct vm_area_struct *vma = walk->vma;
531 	bool locked = !!(vma->vm_flags & VM_LOCKED);
532 	struct page *page = NULL;
533 	bool migration = false, young = false, dirty = false;
534 
535 	if (pte_present(*pte)) {
536 		page = vm_normal_page(vma, addr, *pte);
537 		young = pte_young(*pte);
538 		dirty = pte_dirty(*pte);
539 	} else if (is_swap_pte(*pte)) {
540 		swp_entry_t swpent = pte_to_swp_entry(*pte);
541 
542 		if (!non_swap_entry(swpent)) {
543 			int mapcount;
544 
545 			mss->swap += PAGE_SIZE;
546 			mapcount = swp_swapcount(swpent);
547 			if (mapcount >= 2) {
548 				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
549 
550 				do_div(pss_delta, mapcount);
551 				mss->swap_pss += pss_delta;
552 			} else {
553 				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
554 			}
555 		} else if (is_pfn_swap_entry(swpent)) {
556 			if (is_migration_entry(swpent))
557 				migration = true;
558 			page = pfn_swap_entry_to_page(swpent);
559 		}
560 	} else {
561 		smaps_pte_hole_lookup(addr, walk);
562 		return;
563 	}
564 
565 	if (!page)
566 		return;
567 
568 	smaps_account(mss, page, false, young, dirty, locked, migration);
569 }
570 
571 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
572 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
573 		struct mm_walk *walk)
574 {
575 	struct mem_size_stats *mss = walk->private;
576 	struct vm_area_struct *vma = walk->vma;
577 	bool locked = !!(vma->vm_flags & VM_LOCKED);
578 	struct page *page = NULL;
579 	bool migration = false;
580 
581 	if (pmd_present(*pmd)) {
582 		/* FOLL_DUMP will return -EFAULT on huge zero page */
583 		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
584 	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
585 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
586 
587 		if (is_migration_entry(entry)) {
588 			migration = true;
589 			page = pfn_swap_entry_to_page(entry);
590 		}
591 	}
592 	if (IS_ERR_OR_NULL(page))
593 		return;
594 	if (PageAnon(page))
595 		mss->anonymous_thp += HPAGE_PMD_SIZE;
596 	else if (PageSwapBacked(page))
597 		mss->shmem_thp += HPAGE_PMD_SIZE;
598 	else if (is_zone_device_page(page))
599 		/* pass */;
600 	else
601 		mss->file_thp += HPAGE_PMD_SIZE;
602 
603 	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
604 		      locked, migration);
605 }
606 #else
607 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
608 		struct mm_walk *walk)
609 {
610 }
611 #endif
612 
613 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
614 			   struct mm_walk *walk)
615 {
616 	struct vm_area_struct *vma = walk->vma;
617 	pte_t *pte;
618 	spinlock_t *ptl;
619 
620 	ptl = pmd_trans_huge_lock(pmd, vma);
621 	if (ptl) {
622 		smaps_pmd_entry(pmd, addr, walk);
623 		spin_unlock(ptl);
624 		goto out;
625 	}
626 
627 	if (pmd_trans_unstable(pmd))
628 		goto out;
629 	/*
630 	 * The mmap_lock held all the way back in m_start() is what
631 	 * keeps khugepaged out of here and from collapsing things
632 	 * in here.
633 	 */
634 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
635 	for (; addr != end; pte++, addr += PAGE_SIZE)
636 		smaps_pte_entry(pte, addr, walk);
637 	pte_unmap_unlock(pte - 1, ptl);
638 out:
639 	cond_resched();
640 	return 0;
641 }
642 
643 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
644 {
645 	/*
646 	 * Don't forget to update Documentation/ on changes.
647 	 */
648 	static const char mnemonics[BITS_PER_LONG][2] = {
649 		/*
650 		 * In case we meet a flag we don't know about.
651 		 */
652 		[0 ... (BITS_PER_LONG-1)] = "??",
653 
654 		[ilog2(VM_READ)]	= "rd",
655 		[ilog2(VM_WRITE)]	= "wr",
656 		[ilog2(VM_EXEC)]	= "ex",
657 		[ilog2(VM_SHARED)]	= "sh",
658 		[ilog2(VM_MAYREAD)]	= "mr",
659 		[ilog2(VM_MAYWRITE)]	= "mw",
660 		[ilog2(VM_MAYEXEC)]	= "me",
661 		[ilog2(VM_MAYSHARE)]	= "ms",
662 		[ilog2(VM_GROWSDOWN)]	= "gd",
663 		[ilog2(VM_PFNMAP)]	= "pf",
664 		[ilog2(VM_LOCKED)]	= "lo",
665 		[ilog2(VM_IO)]		= "io",
666 		[ilog2(VM_SEQ_READ)]	= "sr",
667 		[ilog2(VM_RAND_READ)]	= "rr",
668 		[ilog2(VM_DONTCOPY)]	= "dc",
669 		[ilog2(VM_DONTEXPAND)]	= "de",
670 		[ilog2(VM_ACCOUNT)]	= "ac",
671 		[ilog2(VM_NORESERVE)]	= "nr",
672 		[ilog2(VM_HUGETLB)]	= "ht",
673 		[ilog2(VM_SYNC)]	= "sf",
674 		[ilog2(VM_ARCH_1)]	= "ar",
675 		[ilog2(VM_WIPEONFORK)]	= "wf",
676 		[ilog2(VM_DONTDUMP)]	= "dd",
677 #ifdef CONFIG_ARM64_BTI
678 		[ilog2(VM_ARM64_BTI)]	= "bt",
679 #endif
680 #ifdef CONFIG_MEM_SOFT_DIRTY
681 		[ilog2(VM_SOFTDIRTY)]	= "sd",
682 #endif
683 		[ilog2(VM_MIXEDMAP)]	= "mm",
684 		[ilog2(VM_HUGEPAGE)]	= "hg",
685 		[ilog2(VM_NOHUGEPAGE)]	= "nh",
686 		[ilog2(VM_MERGEABLE)]	= "mg",
687 		[ilog2(VM_UFFD_MISSING)]= "um",
688 		[ilog2(VM_UFFD_WP)]	= "uw",
689 #ifdef CONFIG_ARM64_MTE
690 		[ilog2(VM_MTE)]		= "mt",
691 		[ilog2(VM_MTE_ALLOWED)]	= "",
692 #endif
693 #ifdef CONFIG_ARCH_HAS_PKEYS
694 		/* These come out via ProtectionKey: */
695 		[ilog2(VM_PKEY_BIT0)]	= "",
696 		[ilog2(VM_PKEY_BIT1)]	= "",
697 		[ilog2(VM_PKEY_BIT2)]	= "",
698 		[ilog2(VM_PKEY_BIT3)]	= "",
699 #if VM_PKEY_BIT4
700 		[ilog2(VM_PKEY_BIT4)]	= "",
701 #endif
702 #endif /* CONFIG_ARCH_HAS_PKEYS */
703 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
704 		[ilog2(VM_UFFD_MINOR)]	= "ui",
705 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
706 	};
707 	size_t i;
708 
709 	seq_puts(m, "VmFlags: ");
710 	for (i = 0; i < BITS_PER_LONG; i++) {
711 		if (!mnemonics[i][0])
712 			continue;
713 		if (vma->vm_flags & (1UL << i)) {
714 			seq_putc(m, mnemonics[i][0]);
715 			seq_putc(m, mnemonics[i][1]);
716 			seq_putc(m, ' ');
717 		}
718 	}
719 	seq_putc(m, '\n');
720 }
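
/*
 * Illustrative check against the two-letter mnemonics above: scan the
 * VmFlags lines of /proc/self/smaps for a given code (vma_has_flag() is
 * a hypothetical helper; needs CONFIG_PROC_PAGE_MONITOR).
 */
#include <stdio.h>
#include <string.h>

static int vma_has_flag(const char *mnemonic)
{
	char line[1024];
	int found = 0;
	FILE *f = fopen("/proc/self/smaps", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		char *tok;

		if (strncmp(line, "VmFlags:", 8))
			continue;
		for (tok = strtok(line + 8, " \n"); tok;
		     tok = strtok(NULL, " \n"))
			if (!strcmp(tok, mnemonic))
				found = 1;
	}
	fclose(f);
	return found;
}

int main(void)
{
	printf("some VMA is mlocked (\"lo\"): %d\n", vma_has_flag("lo"));
	return 0;
}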
721 
722 #ifdef CONFIG_HUGETLB_PAGE
723 static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
724 				 unsigned long addr, unsigned long end,
725 				 struct mm_walk *walk)
726 {
727 	struct mem_size_stats *mss = walk->private;
728 	struct vm_area_struct *vma = walk->vma;
729 	struct page *page = NULL;
730 
731 	if (pte_present(*pte)) {
732 		page = vm_normal_page(vma, addr, *pte);
733 	} else if (is_swap_pte(*pte)) {
734 		swp_entry_t swpent = pte_to_swp_entry(*pte);
735 
736 		if (is_pfn_swap_entry(swpent))
737 			page = pfn_swap_entry_to_page(swpent);
738 	}
739 	if (page) {
740 		int mapcount = page_mapcount(page);
741 
742 		if (mapcount >= 2)
743 			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
744 		else
745 			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
746 	}
747 	return 0;
748 }
749 #else
750 #define smaps_hugetlb_range	NULL
751 #endif /* HUGETLB_PAGE */
752 
753 static const struct mm_walk_ops smaps_walk_ops = {
754 	.pmd_entry		= smaps_pte_range,
755 	.hugetlb_entry		= smaps_hugetlb_range,
756 };
757 
758 static const struct mm_walk_ops smaps_shmem_walk_ops = {
759 	.pmd_entry		= smaps_pte_range,
760 	.hugetlb_entry		= smaps_hugetlb_range,
761 	.pte_hole		= smaps_pte_hole,
762 };
763 
764 /*
765  * Gather mem stats from @vma with the indicated beginning
766  * address @start, and keep them in @mss.
767  *
768  * Use vm_start of @vma as the beginning address if @start is 0.
769  */
770 static void smap_gather_stats(struct vm_area_struct *vma,
771 		struct mem_size_stats *mss, unsigned long start)
772 {
773 	const struct mm_walk_ops *ops = &smaps_walk_ops;
774 
775 	/* Invalid start */
776 	if (start >= vma->vm_end)
777 		return;
778 
779 #ifdef CONFIG_SHMEM
780 	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
781 		/*
782 		 * For shared or readonly shmem mappings we know that all
783 		 * swapped out pages belong to the shmem object, and we can
784 		 * obtain the swap value much more efficiently. For private
785 		 * writable mappings, we might have COW pages that are
786 		 * not affected by the parent swapped out pages of the shmem
787 		 * object, so we have to distinguish them during the page walk.
788 		 * Unless we know that the shmem object (or the part mapped by
789 		 * our VMA) has no swapped out pages at all.
790 		 */
791 		unsigned long shmem_swapped = shmem_swap_usage(vma);
792 
793 		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
794 					!(vma->vm_flags & VM_WRITE))) {
795 			mss->swap += shmem_swapped;
796 		} else {
797 			ops = &smaps_shmem_walk_ops;
798 		}
799 	}
800 #endif
801 	/* mmap_lock is held in m_start */
802 	if (!start)
803 		walk_page_vma(vma, ops, mss);
804 	else
805 		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
806 }
807 
808 #define SEQ_PUT_DEC(str, val) \
809 		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
810 
811 /* Show the contents common for smaps and smaps_rollup */
812 static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
813 	bool rollup_mode)
814 {
815 	SEQ_PUT_DEC("Rss:            ", mss->resident);
816 	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
817 	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
818 	if (rollup_mode) {
819 		/*
820 		 * These are meaningful only for smaps_rollup, otherwise two of
821 		 * them are zero, and the other one is the same as Pss.
822 		 */
823 		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
824 			mss->pss_anon >> PSS_SHIFT);
825 		SEQ_PUT_DEC(" kB\nPss_File:       ",
826 			mss->pss_file >> PSS_SHIFT);
827 		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
828 			mss->pss_shmem >> PSS_SHIFT);
829 	}
830 	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
831 	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
832 	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
833 	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
834 	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
835 	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
836 	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
837 	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
838 	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
839 	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
840 	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
841 	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
842 				  mss->private_hugetlb >> 10, 7);
843 	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
844 	SEQ_PUT_DEC(" kB\nSwapPss:        ",
845 					mss->swap_pss >> PSS_SHIFT);
846 	SEQ_PUT_DEC(" kB\nLocked:         ",
847 					mss->pss_locked >> PSS_SHIFT);
848 	seq_puts(m, " kB\n");
849 }
850 
851 static int show_smap(struct seq_file *m, void *v)
852 {
853 	struct vm_area_struct *vma = v;
854 	struct mem_size_stats mss;
855 
856 	memset(&mss, 0, sizeof(mss));
857 
858 	smap_gather_stats(vma, &mss, 0);
859 
860 	show_map_vma(m, vma);
861 
862 	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
863 	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
864 	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
865 	seq_puts(m, " kB\n");
866 
867 	__show_smap(m, &mss, false);
868 
869 	seq_printf(m, "THPeligible:    %d\n",
870 		   hugepage_vma_check(vma, vma->vm_flags, true, false, true));
871 
872 	if (arch_pkeys_enabled())
873 		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
874 	show_smap_vma_flags(m, vma);
875 
876 	return 0;
877 }
878 
879 static int show_smaps_rollup(struct seq_file *m, void *v)
880 {
881 	struct proc_maps_private *priv = m->private;
882 	struct mem_size_stats mss;
883 	struct mm_struct *mm = priv->mm;
884 	struct vm_area_struct *vma;
885 	unsigned long vma_start = 0, last_vma_end = 0;
886 	int ret = 0;
887 	MA_STATE(mas, &mm->mm_mt, 0, 0);
888 
889 	priv->task = get_proc_task(priv->inode);
890 	if (!priv->task)
891 		return -ESRCH;
892 
893 	if (!mm || !mmget_not_zero(mm)) {
894 		ret = -ESRCH;
895 		goto out_put_task;
896 	}
897 
898 	memset(&mss, 0, sizeof(mss));
899 
900 	ret = mmap_read_lock_killable(mm);
901 	if (ret)
902 		goto out_put_mm;
903 
904 	hold_task_mempolicy(priv);
905 	vma = mas_find(&mas, ULONG_MAX);
906 
907 	if (unlikely(!vma))
908 		goto empty_set;
909 
910 	vma_start = vma->vm_start;
911 	do {
912 		smap_gather_stats(vma, &mss, 0);
913 		last_vma_end = vma->vm_end;
914 
915 		/*
916 		 * Release mmap_lock temporarily if someone wants to
917 		 * access it for a write request.
918 		 */
919 		if (mmap_lock_is_contended(mm)) {
920 			mas_pause(&mas);
921 			mmap_read_unlock(mm);
922 			ret = mmap_read_lock_killable(mm);
923 			if (ret) {
924 				release_task_mempolicy(priv);
925 				goto out_put_mm;
926 			}
927 
928 			/*
929 			 * After dropping the lock, there are four cases to
930 			 * consider. See the following example for explanation.
931 			 *
932 			 *   +------+------+-----------+
933 			 *   | VMA1 | VMA2 | VMA3      |
934 			 *   +------+------+-----------+
935 			 *   |      |      |           |
936 			 *  4k     8k     16k         400k
937 			 *
938 			 * Suppose we drop the lock after reading VMA2 due to
939 			 * contention, then we get:
940 			 *
941 			 *	last_vma_end = 16k
942 			 *
943 			 * 1) VMA2 is freed, but VMA3 exists:
944 			 *
945 			 *    find_vma(mm, 16k - 1) will return VMA3.
946 			 *    In this case, just continue from VMA3.
947 			 *
948 			 * 2) VMA2 still exists:
949 			 *
950 			 *    find_vma(mm, 16k - 1) will return VMA2.
951 			 *    Iterate the loop like the original one.
952 			 *
953 			 * 3) No more VMAs can be found:
954 			 *
955 			 *    find_vma(mm, 16k - 1) will return NULL.
956 			 *    No more things to do, just break.
957 			 *
958 			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
959 			 *
960 			 *    find_vma(mm, 16k - 1) will return VMA' whose range
961 			 *    contains last_vma_end.
962 			 *    Iterate VMA' from last_vma_end.
963 			 */
964 			vma = mas_find(&mas, ULONG_MAX);
965 			/* Case 3 above */
966 			if (!vma)
967 				break;
968 
969 			/* Case 1 above */
970 			if (vma->vm_start >= last_vma_end)
971 				continue;
972 
973 			/* Case 4 above */
974 			if (vma->vm_end > last_vma_end)
975 				smap_gather_stats(vma, &mss, last_vma_end);
976 		}
977 		/* Case 2 above */
978 	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
979 
980 empty_set:
981 	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
982 	seq_pad(m, ' ');
983 	seq_puts(m, "[rollup]\n");
984 
985 	__show_smap(m, &mss, true);
986 
987 	release_task_mempolicy(priv);
988 	mmap_read_unlock(mm);
989 
990 out_put_mm:
991 	mmput(mm);
992 out_put_task:
993 	put_task_struct(priv->task);
994 	priv->task = NULL;
995 
996 	return ret;
997 }
998 #undef SEQ_PUT_DEC
999 
1000 static const struct seq_operations proc_pid_smaps_op = {
1001 	.start	= m_start,
1002 	.next	= m_next,
1003 	.stop	= m_stop,
1004 	.show	= show_smap
1005 };
1006 
1007 static int pid_smaps_open(struct inode *inode, struct file *file)
1008 {
1009 	return do_maps_open(inode, file, &proc_pid_smaps_op);
1010 }
1011 
1012 static int smaps_rollup_open(struct inode *inode, struct file *file)
1013 {
1014 	int ret;
1015 	struct proc_maps_private *priv;
1016 
1017 	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1018 	if (!priv)
1019 		return -ENOMEM;
1020 
1021 	ret = single_open(file, show_smaps_rollup, priv);
1022 	if (ret)
1023 		goto out_free;
1024 
1025 	priv->inode = inode;
1026 	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1027 	if (IS_ERR(priv->mm)) {
1028 		ret = PTR_ERR(priv->mm);
1029 
1030 		single_release(inode, file);
1031 		goto out_free;
1032 	}
1033 
1034 	return 0;
1035 
1036 out_free:
1037 	kfree(priv);
1038 	return ret;
1039 }
1040 
1041 static int smaps_rollup_release(struct inode *inode, struct file *file)
1042 {
1043 	struct seq_file *seq = file->private_data;
1044 	struct proc_maps_private *priv = seq->private;
1045 
1046 	if (priv->mm)
1047 		mmdrop(priv->mm);
1048 
1049 	kfree(priv);
1050 	return single_release(inode, file);
1051 }
1052 
1053 const struct file_operations proc_pid_smaps_operations = {
1054 	.open		= pid_smaps_open,
1055 	.read		= seq_read,
1056 	.llseek		= seq_lseek,
1057 	.release	= proc_map_release,
1058 };
1059 
1060 const struct file_operations proc_pid_smaps_rollup_operations = {
1061 	.open		= smaps_rollup_open,
1062 	.read		= seq_read,
1063 	.llseek		= seq_lseek,
1064 	.release	= smaps_rollup_release,
1065 };
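
/*
 * Illustrative consumer (a sketch): one read of /proc/pid/smaps_rollup
 * makes show_smaps_rollup() walk every VMA once and print summed fields,
 * which is much cheaper than parsing the per-VMA records of smaps.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	long rss_kb = -1, pss_kb = -1;
	FILE *f = fopen("/proc/self/smaps_rollup", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		sscanf(line, "Rss: %ld", &rss_kb);
		sscanf(line, "Pss: %ld", &pss_kb);
	}
	fclose(f);
	printf("Rss %ld kB, Pss %ld kB\n", rss_kb, pss_kb);
	return 0;
}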
1066 
1067 enum clear_refs_types {
1068 	CLEAR_REFS_ALL = 1,
1069 	CLEAR_REFS_ANON,
1070 	CLEAR_REFS_MAPPED,
1071 	CLEAR_REFS_SOFT_DIRTY,
1072 	CLEAR_REFS_MM_HIWATER_RSS,
1073 	CLEAR_REFS_LAST,
1074 };
1075 
1076 struct clear_refs_private {
1077 	enum clear_refs_types type;
1078 };
1079 
1080 #ifdef CONFIG_MEM_SOFT_DIRTY
1081 
1082 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1083 {
1084 	struct page *page;
1085 
1086 	if (!pte_write(pte))
1087 		return false;
1088 	if (!is_cow_mapping(vma->vm_flags))
1089 		return false;
1090 	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1091 		return false;
1092 	page = vm_normal_page(vma, addr, pte);
1093 	if (!page)
1094 		return false;
1095 	return page_maybe_dma_pinned(page);
1096 }
1097 
1098 static inline void clear_soft_dirty(struct vm_area_struct *vma,
1099 		unsigned long addr, pte_t *pte)
1100 {
1101 	/*
1102 	 * The soft-dirty tracker uses #PF-s to catch writes
1103 	 * to pages, so write-protect the pte as well. See the
1104 	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
1105 	 * of how soft-dirty works.
1106 	 */
1107 	pte_t ptent = *pte;
1108 
1109 	if (pte_present(ptent)) {
1110 		pte_t old_pte;
1111 
1112 		if (pte_is_pinned(vma, addr, ptent))
1113 			return;
1114 		old_pte = ptep_modify_prot_start(vma, addr, pte);
1115 		ptent = pte_wrprotect(old_pte);
1116 		ptent = pte_clear_soft_dirty(ptent);
1117 		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1118 	} else if (is_swap_pte(ptent)) {
1119 		ptent = pte_swp_clear_soft_dirty(ptent);
1120 		set_pte_at(vma->vm_mm, addr, pte, ptent);
1121 	}
1122 }
1123 #else
1124 static inline void clear_soft_dirty(struct vm_area_struct *vma,
1125 		unsigned long addr, pte_t *pte)
1126 {
1127 }
1128 #endif
1129 
1130 #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1131 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1132 		unsigned long addr, pmd_t *pmdp)
1133 {
1134 	pmd_t old, pmd = *pmdp;
1135 
1136 	if (pmd_present(pmd)) {
1137 		/* See comment in change_huge_pmd() */
1138 		old = pmdp_invalidate(vma, addr, pmdp);
1139 		if (pmd_dirty(old))
1140 			pmd = pmd_mkdirty(pmd);
1141 		if (pmd_young(old))
1142 			pmd = pmd_mkyoung(pmd);
1143 
1144 		pmd = pmd_wrprotect(pmd);
1145 		pmd = pmd_clear_soft_dirty(pmd);
1146 
1147 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1148 	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1149 		pmd = pmd_swp_clear_soft_dirty(pmd);
1150 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1151 	}
1152 }
1153 #else
1154 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1155 		unsigned long addr, pmd_t *pmdp)
1156 {
1157 }
1158 #endif
1159 
1160 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1161 				unsigned long end, struct mm_walk *walk)
1162 {
1163 	struct clear_refs_private *cp = walk->private;
1164 	struct vm_area_struct *vma = walk->vma;
1165 	pte_t *pte, ptent;
1166 	spinlock_t *ptl;
1167 	struct page *page;
1168 
1169 	ptl = pmd_trans_huge_lock(pmd, vma);
1170 	if (ptl) {
1171 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1172 			clear_soft_dirty_pmd(vma, addr, pmd);
1173 			goto out;
1174 		}
1175 
1176 		if (!pmd_present(*pmd))
1177 			goto out;
1178 
1179 		page = pmd_page(*pmd);
1180 
1181 		/* Clear accessed and referenced bits. */
1182 		pmdp_test_and_clear_young(vma, addr, pmd);
1183 		test_and_clear_page_young(page);
1184 		ClearPageReferenced(page);
1185 out:
1186 		spin_unlock(ptl);
1187 		return 0;
1188 	}
1189 
1190 	if (pmd_trans_unstable(pmd))
1191 		return 0;
1192 
1193 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1194 	for (; addr != end; pte++, addr += PAGE_SIZE) {
1195 		ptent = *pte;
1196 
1197 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1198 			clear_soft_dirty(vma, addr, pte);
1199 			continue;
1200 		}
1201 
1202 		if (!pte_present(ptent))
1203 			continue;
1204 
1205 		page = vm_normal_page(vma, addr, ptent);
1206 		if (!page)
1207 			continue;
1208 
1209 		/* Clear accessed and referenced bits. */
1210 		ptep_test_and_clear_young(vma, addr, pte);
1211 		test_and_clear_page_young(page);
1212 		ClearPageReferenced(page);
1213 	}
1214 	pte_unmap_unlock(pte - 1, ptl);
1215 	cond_resched();
1216 	return 0;
1217 }
1218 
1219 static int clear_refs_test_walk(unsigned long start, unsigned long end,
1220 				struct mm_walk *walk)
1221 {
1222 	struct clear_refs_private *cp = walk->private;
1223 	struct vm_area_struct *vma = walk->vma;
1224 
1225 	if (vma->vm_flags & VM_PFNMAP)
1226 		return 1;
1227 
1228 	/*
1229 	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1230 	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1231 	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1232 	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1233 	 */
1234 	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1235 		return 1;
1236 	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1237 		return 1;
1238 	return 0;
1239 }
1240 
1241 static const struct mm_walk_ops clear_refs_walk_ops = {
1242 	.pmd_entry		= clear_refs_pte_range,
1243 	.test_walk		= clear_refs_test_walk,
1244 };
1245 
1246 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1247 				size_t count, loff_t *ppos)
1248 {
1249 	struct task_struct *task;
1250 	char buffer[PROC_NUMBUF];
1251 	struct mm_struct *mm;
1252 	struct vm_area_struct *vma;
1253 	enum clear_refs_types type;
1254 	int itype;
1255 	int rv;
1256 
1257 	memset(buffer, 0, sizeof(buffer));
1258 	if (count > sizeof(buffer) - 1)
1259 		count = sizeof(buffer) - 1;
1260 	if (copy_from_user(buffer, buf, count))
1261 		return -EFAULT;
1262 	rv = kstrtoint(strstrip(buffer), 10, &itype);
1263 	if (rv < 0)
1264 		return rv;
1265 	type = (enum clear_refs_types)itype;
1266 	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1267 		return -EINVAL;
1268 
1269 	task = get_proc_task(file_inode(file));
1270 	if (!task)
1271 		return -ESRCH;
1272 	mm = get_task_mm(task);
1273 	if (mm) {
1274 		MA_STATE(mas, &mm->mm_mt, 0, 0);
1275 		struct mmu_notifier_range range;
1276 		struct clear_refs_private cp = {
1277 			.type = type,
1278 		};
1279 
1280 		if (mmap_write_lock_killable(mm)) {
1281 			count = -EINTR;
1282 			goto out_mm;
1283 		}
1284 		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1285 			/*
1286 			 * Writing 5 to /proc/pid/clear_refs resets the peak
1287 			 * resident set size to this mm's current rss value.
1288 			 */
1289 			reset_mm_hiwater_rss(mm);
1290 			goto out_unlock;
1291 		}
1292 
1293 		if (type == CLEAR_REFS_SOFT_DIRTY) {
1294 			mas_for_each(&mas, vma, ULONG_MAX) {
1295 				if (!(vma->vm_flags & VM_SOFTDIRTY))
1296 					continue;
1297 				vma->vm_flags &= ~VM_SOFTDIRTY;
1298 				vma_set_page_prot(vma);
1299 			}
1300 
1301 			inc_tlb_flush_pending(mm);
1302 			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1303 						0, NULL, mm, 0, -1UL);
1304 			mmu_notifier_invalidate_range_start(&range);
1305 		}
1306 		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1307 		if (type == CLEAR_REFS_SOFT_DIRTY) {
1308 			mmu_notifier_invalidate_range_end(&range);
1309 			flush_tlb_mm(mm);
1310 			dec_tlb_flush_pending(mm);
1311 		}
1312 out_unlock:
1313 		mmap_write_unlock(mm);
1314 out_mm:
1315 		mmput(mm);
1316 	}
1317 	put_task_struct(task);
1318 
1319 	return count;
1320 }
1321 
1322 const struct file_operations proc_clear_refs_operations = {
1323 	.write		= clear_refs_write,
1324 	.llseek		= noop_llseek,
1325 };
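
/*
 * Illustrative soft-dirty round trip (a sketch; needs
 * CONFIG_MEM_SOFT_DIRTY): write "4" (CLEAR_REFS_SOFT_DIRTY) to
 * clear_refs, re-dirty a page, then observe PM_SOFT_DIRTY (bit 55) set
 * for it in /proc/self/pagemap.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, psize, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uint64_t pme = 0;
	int cr, pm;

	if (buf == MAP_FAILED)
		return 1;
	buf[0] = 1;				/* fault the page in */

	cr = open("/proc/self/clear_refs", O_WRONLY);
	if (cr < 0 || write(cr, "4", 1) != 1)
		return 1;
	close(cr);

	buf[0] = 2;				/* write fault sets soft-dirty */

	pm = open("/proc/self/pagemap", O_RDONLY);
	if (pm < 0 || pread(pm, &pme, sizeof(pme),
			    (uintptr_t)buf / psize * sizeof(pme)) != sizeof(pme))
		return 1;
	close(pm);

	printf("soft-dirty: %d\n", (int)((pme >> 55) & 1));
	return 0;
}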
1326 
1327 typedef struct {
1328 	u64 pme;
1329 } pagemap_entry_t;
1330 
1331 struct pagemapread {
1332 	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1333 	pagemap_entry_t *buffer;
1334 	bool show_pfn;
1335 };
1336 
1337 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1338 #define PAGEMAP_WALK_MASK	(PMD_MASK)
1339 
1340 #define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1341 #define PM_PFRAME_BITS		55
1342 #define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1343 #define PM_SOFT_DIRTY		BIT_ULL(55)
1344 #define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1345 #define PM_UFFD_WP		BIT_ULL(57)
1346 #define PM_FILE			BIT_ULL(61)
1347 #define PM_SWAP			BIT_ULL(62)
1348 #define PM_PRESENT		BIT_ULL(63)
1349 
1350 #define PM_END_OF_BUFFER    1
1351 
1352 static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1353 {
1354 	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1355 }
1356 
1357 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1358 			  struct pagemapread *pm)
1359 {
1360 	pm->buffer[pm->pos++] = *pme;
1361 	if (pm->pos >= pm->len)
1362 		return PM_END_OF_BUFFER;
1363 	return 0;
1364 }
1365 
1366 static int pagemap_pte_hole(unsigned long start, unsigned long end,
1367 			    __always_unused int depth, struct mm_walk *walk)
1368 {
1369 	struct pagemapread *pm = walk->private;
1370 	unsigned long addr = start;
1371 	int err = 0;
1372 
1373 	while (addr < end) {
1374 		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1375 		pagemap_entry_t pme = make_pme(0, 0);
1376 		/* End of address space hole, which we mark as non-present. */
1377 		unsigned long hole_end;
1378 
1379 		if (vma)
1380 			hole_end = min(end, vma->vm_start);
1381 		else
1382 			hole_end = end;
1383 
1384 		for (; addr < hole_end; addr += PAGE_SIZE) {
1385 			err = add_to_pagemap(addr, &pme, pm);
1386 			if (err)
1387 				goto out;
1388 		}
1389 
1390 		if (!vma)
1391 			break;
1392 
1393 		/* Addresses in the VMA. */
1394 		if (vma->vm_flags & VM_SOFTDIRTY)
1395 			pme = make_pme(0, PM_SOFT_DIRTY);
1396 		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1397 			err = add_to_pagemap(addr, &pme, pm);
1398 			if (err)
1399 				goto out;
1400 		}
1401 	}
1402 out:
1403 	return err;
1404 }
1405 
1406 static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1407 		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1408 {
1409 	u64 frame = 0, flags = 0;
1410 	struct page *page = NULL;
1411 	bool migration = false;
1412 
1413 	if (pte_present(pte)) {
1414 		if (pm->show_pfn)
1415 			frame = pte_pfn(pte);
1416 		flags |= PM_PRESENT;
1417 		page = vm_normal_page(vma, addr, pte);
1418 		if (pte_soft_dirty(pte))
1419 			flags |= PM_SOFT_DIRTY;
1420 		if (pte_uffd_wp(pte))
1421 			flags |= PM_UFFD_WP;
1422 	} else if (is_swap_pte(pte)) {
1423 		swp_entry_t entry;
1424 		if (pte_swp_soft_dirty(pte))
1425 			flags |= PM_SOFT_DIRTY;
1426 		if (pte_swp_uffd_wp(pte))
1427 			flags |= PM_UFFD_WP;
1428 		entry = pte_to_swp_entry(pte);
1429 		if (pm->show_pfn) {
1430 			pgoff_t offset;
1431 			/*
1432 			 * For PFN swap offsets, keep the offset field as a
1433 			 * PFN, to be compatible with old smaps.
1434 			 */
1435 			if (is_pfn_swap_entry(entry))
1436 				offset = swp_offset_pfn(entry);
1437 			else
1438 				offset = swp_offset(entry);
1439 			frame = swp_type(entry) |
1440 			    (offset << MAX_SWAPFILES_SHIFT);
1441 		}
1442 		flags |= PM_SWAP;
1443 		migration = is_migration_entry(entry);
1444 		if (is_pfn_swap_entry(entry))
1445 			page = pfn_swap_entry_to_page(entry);
1446 		if (pte_marker_entry_uffd_wp(entry))
1447 			flags |= PM_UFFD_WP;
1448 	}
1449 
1450 	if (page && !PageAnon(page))
1451 		flags |= PM_FILE;
1452 	if (page && !migration && page_mapcount(page) == 1)
1453 		flags |= PM_MMAP_EXCLUSIVE;
1454 	if (vma->vm_flags & VM_SOFTDIRTY)
1455 		flags |= PM_SOFT_DIRTY;
1456 
1457 	return make_pme(frame, flags);
1458 }
1459 
1460 static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1461 			     struct mm_walk *walk)
1462 {
1463 	struct vm_area_struct *vma = walk->vma;
1464 	struct pagemapread *pm = walk->private;
1465 	spinlock_t *ptl;
1466 	pte_t *pte, *orig_pte;
1467 	int err = 0;
1468 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1469 	bool migration = false;
1470 
1471 	ptl = pmd_trans_huge_lock(pmdp, vma);
1472 	if (ptl) {
1473 		u64 flags = 0, frame = 0;
1474 		pmd_t pmd = *pmdp;
1475 		struct page *page = NULL;
1476 
1477 		if (vma->vm_flags & VM_SOFTDIRTY)
1478 			flags |= PM_SOFT_DIRTY;
1479 
1480 		if (pmd_present(pmd)) {
1481 			page = pmd_page(pmd);
1482 
1483 			flags |= PM_PRESENT;
1484 			if (pmd_soft_dirty(pmd))
1485 				flags |= PM_SOFT_DIRTY;
1486 			if (pmd_uffd_wp(pmd))
1487 				flags |= PM_UFFD_WP;
1488 			if (pm->show_pfn)
1489 				frame = pmd_pfn(pmd) +
1490 					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1491 		}
1492 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1493 		else if (is_swap_pmd(pmd)) {
1494 			swp_entry_t entry = pmd_to_swp_entry(pmd);
1495 			unsigned long offset;
1496 
1497 			if (pm->show_pfn) {
1498 				if (is_pfn_swap_entry(entry))
1499 					offset = swp_offset_pfn(entry);
1500 				else
1501 					offset = swp_offset(entry);
1502 				offset = offset +
1503 					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1504 				frame = swp_type(entry) |
1505 					(offset << MAX_SWAPFILES_SHIFT);
1506 			}
1507 			flags |= PM_SWAP;
1508 			if (pmd_swp_soft_dirty(pmd))
1509 				flags |= PM_SOFT_DIRTY;
1510 			if (pmd_swp_uffd_wp(pmd))
1511 				flags |= PM_UFFD_WP;
1512 			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1513 			migration = is_migration_entry(entry);
1514 			page = pfn_swap_entry_to_page(entry);
1515 		}
1516 #endif
1517 
1518 		if (page && !migration && page_mapcount(page) == 1)
1519 			flags |= PM_MMAP_EXCLUSIVE;
1520 
1521 		for (; addr != end; addr += PAGE_SIZE) {
1522 			pagemap_entry_t pme = make_pme(frame, flags);
1523 
1524 			err = add_to_pagemap(addr, &pme, pm);
1525 			if (err)
1526 				break;
1527 			if (pm->show_pfn) {
1528 				if (flags & PM_PRESENT)
1529 					frame++;
1530 				else if (flags & PM_SWAP)
1531 					frame += (1 << MAX_SWAPFILES_SHIFT);
1532 			}
1533 		}
1534 		spin_unlock(ptl);
1535 		return err;
1536 	}
1537 
1538 	if (pmd_trans_unstable(pmdp))
1539 		return 0;
1540 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1541 
1542 	/*
1543 	 * We can assume that @vma always points to a valid one and @end never
1544 	 * goes beyond vma->vm_end.
1545 	 */
1546 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1547 	for (; addr < end; pte++, addr += PAGE_SIZE) {
1548 		pagemap_entry_t pme;
1549 
1550 		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1551 		err = add_to_pagemap(addr, &pme, pm);
1552 		if (err)
1553 			break;
1554 	}
1555 	pte_unmap_unlock(orig_pte, ptl);
1556 
1557 	cond_resched();
1558 
1559 	return err;
1560 }
1561 
1562 #ifdef CONFIG_HUGETLB_PAGE
1563 /* This function walks within one hugetlb entry in a single call */
1564 static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1565 				 unsigned long addr, unsigned long end,
1566 				 struct mm_walk *walk)
1567 {
1568 	struct pagemapread *pm = walk->private;
1569 	struct vm_area_struct *vma = walk->vma;
1570 	u64 flags = 0, frame = 0;
1571 	int err = 0;
1572 	pte_t pte;
1573 
1574 	if (vma->vm_flags & VM_SOFTDIRTY)
1575 		flags |= PM_SOFT_DIRTY;
1576 
1577 	pte = huge_ptep_get(ptep);
1578 	if (pte_present(pte)) {
1579 		struct page *page = pte_page(pte);
1580 
1581 		if (!PageAnon(page))
1582 			flags |= PM_FILE;
1583 
1584 		if (page_mapcount(page) == 1)
1585 			flags |= PM_MMAP_EXCLUSIVE;
1586 
1587 		if (huge_pte_uffd_wp(pte))
1588 			flags |= PM_UFFD_WP;
1589 
1590 		flags |= PM_PRESENT;
1591 		if (pm->show_pfn)
1592 			frame = pte_pfn(pte) +
1593 				((addr & ~hmask) >> PAGE_SHIFT);
1594 	} else if (pte_swp_uffd_wp_any(pte)) {
1595 		flags |= PM_UFFD_WP;
1596 	}
1597 
1598 	for (; addr != end; addr += PAGE_SIZE) {
1599 		pagemap_entry_t pme = make_pme(frame, flags);
1600 
1601 		err = add_to_pagemap(addr, &pme, pm);
1602 		if (err)
1603 			return err;
1604 		if (pm->show_pfn && (flags & PM_PRESENT))
1605 			frame++;
1606 	}
1607 
1608 	cond_resched();
1609 
1610 	return err;
1611 }
1612 #else
1613 #define pagemap_hugetlb_range	NULL
1614 #endif /* HUGETLB_PAGE */
1615 
1616 static const struct mm_walk_ops pagemap_ops = {
1617 	.pmd_entry	= pagemap_pmd_range,
1618 	.pte_hole	= pagemap_pte_hole,
1619 	.hugetlb_entry	= pagemap_hugetlb_range,
1620 };
1621 
1622 /*
1623  * /proc/pid/pagemap - an array mapping virtual pages to pfns
1624  *
1625  * For each page in the address space, this file contains one 64-bit entry
1626  * consisting of the following:
1627  *
1628  * Bits 0-54  page frame number (PFN) if present
1629  * Bits 0-4   swap type if swapped
1630  * Bits 5-54  swap offset if swapped
1631  * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1632  * Bit  56    page exclusively mapped
1633  * Bit  57    pte is uffd-wp write-protected
1634  * Bits 58-60 zero
1635  * Bit  61    page is file-page or shared-anon
1636  * Bit  62    page swapped
1637  * Bit  63    page present
1638  *
1639  * If the page is not present but in swap, then the PFN contains an
1640  * encoding of the swap file number and the page's offset into the
1641  * swap. Unmapped pages return a null PFN. This allows determining
1642  * precisely which pages are mapped (or in swap) and comparing mapped
1643  * pages between processes.
1644  *
1645  * Efficient users of this interface will use /proc/pid/maps to
1646  * determine which areas of memory are actually mapped and llseek to
1647  * skip over unmapped regions.
1648  */
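
/*
 * Illustrative decoder (a sketch) for one entry of the table above; the
 * PFN field reads back as zero without CAP_SYS_ADMIN (see the show_pfn
 * check in pagemap_read() below).
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	uint64_t pme = 0;
	long psize = sysconf(_SC_PAGESIZE);
	uintptr_t vaddr = (uintptr_t)&pme;	/* any mapped address */
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &pme, sizeof(pme),
		  vaddr / psize * sizeof(pme)) != sizeof(pme))
		return 1;
	close(fd);

	printf("present=%d swap=%d file=%d excl=%d soft-dirty=%d pfn=%llx\n",
	       (int)(pme >> 63 & 1), (int)(pme >> 62 & 1),
	       (int)(pme >> 61 & 1), (int)(pme >> 56 & 1),
	       (int)(pme >> 55 & 1),
	       (unsigned long long)(pme & ((1ULL << 55) - 1)));
	return 0;
}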
1649 static ssize_t pagemap_read(struct file *file, char __user *buf,
1650 			    size_t count, loff_t *ppos)
1651 {
1652 	struct mm_struct *mm = file->private_data;
1653 	struct pagemapread pm;
1654 	unsigned long src;
1655 	unsigned long svpfn;
1656 	unsigned long start_vaddr;
1657 	unsigned long end_vaddr;
1658 	int ret = 0, copied = 0;
1659 
1660 	if (!mm || !mmget_not_zero(mm))
1661 		goto out;
1662 
1663 	ret = -EINVAL;
1664 	/* file position must be aligned */
1665 	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1666 		goto out_mm;
1667 
1668 	ret = 0;
1669 	if (!count)
1670 		goto out_mm;
1671 
1672 	/* do not disclose physical addresses: attack vector */
1673 	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1674 
1675 	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1676 	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1677 	ret = -ENOMEM;
1678 	if (!pm.buffer)
1679 		goto out_mm;
1680 
1681 	src = *ppos;
1682 	svpfn = src / PM_ENTRY_BYTES;
1683 	end_vaddr = mm->task_size;
1684 
1685 	/* watch out for wraparound */
1686 	start_vaddr = end_vaddr;
1687 	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
1688 		start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
1689 
1690 	/* Ensure the address is inside the task */
1691 	if (start_vaddr > mm->task_size)
1692 		start_vaddr = end_vaddr;
1693 
1694 	/*
1695 	 * The odds are that this will stop walking way
1696 	 * before end_vaddr, because the length of the
1697 	 * user buffer is tracked in "pm", and the walk
1698 	 * will stop when we hit the end of the buffer.
1699 	 */
1700 	ret = 0;
1701 	while (count && (start_vaddr < end_vaddr)) {
1702 		int len;
1703 		unsigned long end;
1704 
1705 		pm.pos = 0;
1706 		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1707 		/* overflow ? */
1708 		if (end < start_vaddr || end > end_vaddr)
1709 			end = end_vaddr;
1710 		ret = mmap_read_lock_killable(mm);
1711 		if (ret)
1712 			goto out_free;
1713 		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1714 		mmap_read_unlock(mm);
1715 		start_vaddr = end;
1716 
1717 		len = min(count, PM_ENTRY_BYTES * pm.pos);
1718 		if (copy_to_user(buf, pm.buffer, len)) {
1719 			ret = -EFAULT;
1720 			goto out_free;
1721 		}
1722 		copied += len;
1723 		buf += len;
1724 		count -= len;
1725 	}
1726 	*ppos += copied;
1727 	if (!ret || ret == PM_END_OF_BUFFER)
1728 		ret = copied;
1729 
1730 out_free:
1731 	kfree(pm.buffer);
1732 out_mm:
1733 	mmput(mm);
1734 out:
1735 	return ret;
1736 }
1737 
1738 static int pagemap_open(struct inode *inode, struct file *file)
1739 {
1740 	struct mm_struct *mm;
1741 
1742 	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1743 	if (IS_ERR(mm))
1744 		return PTR_ERR(mm);
1745 	file->private_data = mm;
1746 	return 0;
1747 }
1748 
1749 static int pagemap_release(struct inode *inode, struct file *file)
1750 {
1751 	struct mm_struct *mm = file->private_data;
1752 
1753 	if (mm)
1754 		mmdrop(mm);
1755 	return 0;
1756 }
1757 
1758 const struct file_operations proc_pagemap_operations = {
1759 	.llseek		= mem_lseek, /* borrow this */
1760 	.read		= pagemap_read,
1761 	.open		= pagemap_open,
1762 	.release	= pagemap_release,
1763 };
1764 #endif /* CONFIG_PROC_PAGE_MONITOR */
1765 
1766 #ifdef CONFIG_NUMA
1767 
1768 struct numa_maps {
1769 	unsigned long pages;
1770 	unsigned long anon;
1771 	unsigned long active;
1772 	unsigned long writeback;
1773 	unsigned long mapcount_max;
1774 	unsigned long dirty;
1775 	unsigned long swapcache;
1776 	unsigned long node[MAX_NUMNODES];
1777 };
1778 
1779 struct numa_maps_private {
1780 	struct proc_maps_private proc_maps;
1781 	struct numa_maps md;
1782 };
1783 
1784 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1785 			unsigned long nr_pages)
1786 {
1787 	int count = page_mapcount(page);
1788 
1789 	md->pages += nr_pages;
1790 	if (pte_dirty || PageDirty(page))
1791 		md->dirty += nr_pages;
1792 
1793 	if (PageSwapCache(page))
1794 		md->swapcache += nr_pages;
1795 
1796 	if (PageActive(page) || PageUnevictable(page))
1797 		md->active += nr_pages;
1798 
1799 	if (PageWriteback(page))
1800 		md->writeback += nr_pages;
1801 
1802 	if (PageAnon(page))
1803 		md->anon += nr_pages;
1804 
1805 	if (count > md->mapcount_max)
1806 		md->mapcount_max = count;
1807 
1808 	md->node[page_to_nid(page)] += nr_pages;
1809 }
1810 
1811 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1812 		unsigned long addr)
1813 {
1814 	struct page *page;
1815 	int nid;
1816 
1817 	if (!pte_present(pte))
1818 		return NULL;
1819 
1820 	page = vm_normal_page(vma, addr, pte);
1821 	if (!page || is_zone_device_page(page))
1822 		return NULL;
1823 
1824 	if (PageReserved(page))
1825 		return NULL;
1826 
1827 	nid = page_to_nid(page);
1828 	if (!node_isset(nid, node_states[N_MEMORY]))
1829 		return NULL;
1830 
1831 	return page;
1832 }
1833 
1834 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1835 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1836 					      struct vm_area_struct *vma,
1837 					      unsigned long addr)
1838 {
1839 	struct page *page;
1840 	int nid;
1841 
1842 	if (!pmd_present(pmd))
1843 		return NULL;
1844 
1845 	page = vm_normal_page_pmd(vma, addr, pmd);
1846 	if (!page)
1847 		return NULL;
1848 
1849 	if (PageReserved(page))
1850 		return NULL;
1851 
1852 	nid = page_to_nid(page);
1853 	if (!node_isset(nid, node_states[N_MEMORY]))
1854 		return NULL;
1855 
1856 	return page;
1857 }
1858 #endif
1859 
1860 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1861 		unsigned long end, struct mm_walk *walk)
1862 {
1863 	struct numa_maps *md = walk->private;
1864 	struct vm_area_struct *vma = walk->vma;
1865 	spinlock_t *ptl;
1866 	pte_t *orig_pte;
1867 	pte_t *pte;
1868 
1869 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1870 	ptl = pmd_trans_huge_lock(pmd, vma);
1871 	if (ptl) {
1872 		struct page *page;
1873 
1874 		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1875 		if (page)
1876 			gather_stats(page, md, pmd_dirty(*pmd),
1877 				     HPAGE_PMD_SIZE/PAGE_SIZE);
1878 		spin_unlock(ptl);
1879 		return 0;
1880 	}
1881 
1882 	if (pmd_trans_unstable(pmd))
1883 		return 0;
1884 #endif
1885 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1886 	do {
1887 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1888 		if (!page)
1889 			continue;
1890 		gather_stats(page, md, pte_dirty(*pte), 1);
1891 
1892 	} while (pte++, addr += PAGE_SIZE, addr != end);
1893 	pte_unmap_unlock(orig_pte, ptl);
1894 	cond_resched();
1895 	return 0;
1896 }
1897 #ifdef CONFIG_HUGETLB_PAGE
1898 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1899 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1900 {
1901 	pte_t huge_pte = huge_ptep_get(pte);
1902 	struct numa_maps *md;
1903 	struct page *page;
1904 
1905 	if (!pte_present(huge_pte))
1906 		return 0;
1907 
1908 	page = pte_page(huge_pte);
1909 
1910 	md = walk->private;
1911 	gather_stats(page, md, pte_dirty(huge_pte), 1);
1912 	return 0;
1913 }
1914 
1915 #else
1916 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1917 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1918 {
1919 	return 0;
1920 }
1921 #endif
1922 
1923 static const struct mm_walk_ops show_numa_ops = {
1924 	.hugetlb_entry = gather_hugetlb_stats,
1925 	.pmd_entry = gather_pte_stats,
1926 };
1927 
1928 /*
1929  * Display pages allocated per node and memory policy via /proc.
1930  */
1931 static int show_numa_map(struct seq_file *m, void *v)
1932 {
1933 	struct numa_maps_private *numa_priv = m->private;
1934 	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1935 	struct vm_area_struct *vma = v;
1936 	struct numa_maps *md = &numa_priv->md;
1937 	struct file *file = vma->vm_file;
1938 	struct mm_struct *mm = vma->vm_mm;
1939 	struct mempolicy *pol;
1940 	char buffer[64];
1941 	int nid;
1942 
1943 	if (!mm)
1944 		return 0;
1945 
1946 	/* Ensure we start with an empty set of numa_maps statistics. */
1947 	memset(md, 0, sizeof(*md));
1948 
1949 	pol = __get_vma_policy(vma, vma->vm_start);
1950 	if (pol) {
1951 		mpol_to_str(buffer, sizeof(buffer), pol);
1952 		mpol_cond_put(pol);
1953 	} else {
1954 		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1955 	}
1956 
1957 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1958 
1959 	if (file) {
1960 		seq_puts(m, " file=");
1961 		seq_file_path(m, file, "\n\t= ");
1962 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1963 		seq_puts(m, " heap");
1964 	} else if (is_stack(vma)) {
1965 		seq_puts(m, " stack");
1966 	}
1967 
1968 	if (is_vm_hugetlb_page(vma))
1969 		seq_puts(m, " huge");
1970 
1971 	/* mmap_lock is held by m_start */
1972 	walk_page_vma(vma, &show_numa_ops, md);
1973 
1974 	if (!md->pages)
1975 		goto out;
1976 
1977 	if (md->anon)
1978 		seq_printf(m, " anon=%lu", md->anon);
1979 
1980 	if (md->dirty)
1981 		seq_printf(m, " dirty=%lu", md->dirty);
1982 
1983 	if (md->pages != md->anon && md->pages != md->dirty)
1984 		seq_printf(m, " mapped=%lu", md->pages);
1985 
1986 	if (md->mapcount_max > 1)
1987 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1988 
1989 	if (md->swapcache)
1990 		seq_printf(m, " swapcache=%lu", md->swapcache);
1991 
1992 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1993 		seq_printf(m, " active=%lu", md->active);
1994 
1995 	if (md->writeback)
1996 		seq_printf(m, " writeback=%lu", md->writeback);
1997 
1998 	for_each_node_state(nid, N_MEMORY)
1999 		if (md->node[nid])
2000 			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
2001 
2002 	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
2003 out:
2004 	seq_putc(m, '\n');
2005 	return 0;
2006 }
2007 
2008 static const struct seq_operations proc_pid_numa_maps_op = {
2009 	.start  = m_start,
2010 	.next   = m_next,
2011 	.stop   = m_stop,
2012 	.show   = show_numa_map,
2013 };
2014 
2015 static int pid_numa_maps_open(struct inode *inode, struct file *file)
2016 {
2017 	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
2018 				sizeof(struct numa_maps_private));
2019 }
2020 
2021 const struct file_operations proc_pid_numa_maps_operations = {
2022 	.open		= pid_numa_maps_open,
2023 	.read		= seq_read,
2024 	.llseek		= seq_lseek,
2025 	.release	= proc_map_release,
2026 };
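
/*
 * Illustrative consumer (a sketch) of the records show_numa_map() emits:
 * "<start> <policy> [attrs...] N<node>=<pages> ... kernelpagesize_kB=<n>".
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/self/numa_maps", "r");

	if (!f)
		return 1;	/* e.g. kernel built without CONFIG_NUMA */
	while (fgets(line, sizeof(line), f)) {
		unsigned long start, pages;
		char *tok;
		int nid;

		if (sscanf(line, "%lx", &start) != 1)
			continue;
		for (tok = strtok(line, " \n"); tok;
		     tok = strtok(NULL, " \n"))
			if (sscanf(tok, "N%d=%lu", &nid, &pages) == 2)
				printf("%lx: %lu pages on node %d\n",
				       start, pages, nid);
	}
	fclose(f);
	return 0;
}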
2027 
2028 #endif /* CONFIG_NUMA */
2029