xref: /linux/fs/proc/task_mmu.c (revision c0e297dc61f8d4453e07afbea1fa8d0e67cd4a34)
1 #include <linux/mm.h>
2 #include <linux/vmacache.h>
3 #include <linux/hugetlb.h>
4 #include <linux/huge_mm.h>
5 #include <linux/mount.h>
6 #include <linux/seq_file.h>
7 #include <linux/highmem.h>
8 #include <linux/ptrace.h>
9 #include <linux/slab.h>
10 #include <linux/pagemap.h>
11 #include <linux/mempolicy.h>
12 #include <linux/rmap.h>
13 #include <linux/swap.h>
14 #include <linux/swapops.h>
15 #include <linux/mmu_notifier.h>
16 
17 #include <asm/elf.h>
18 #include <asm/uaccess.h>
19 #include <asm/tlbflush.h>
20 #include "internal.h"
21 
22 void task_mem(struct seq_file *m, struct mm_struct *mm)
23 {
24 	unsigned long data, text, lib, swap, ptes, pmds;
25 	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
26 
27 	/*
28 	 * Note: to minimize their overhead, mm maintains hiwater_vm and
29 	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
30 	 * collector of these hiwater stats must therefore get total_vm
31 	 * and rss too, which will usually be the higher.  Barriers? Not
32 	 * worth the effort; such snapshots can always be inconsistent.
33 	 */
34 	hiwater_vm = total_vm = mm->total_vm;
35 	if (hiwater_vm < mm->hiwater_vm)
36 		hiwater_vm = mm->hiwater_vm;
37 	hiwater_rss = total_rss = get_mm_rss(mm);
38 	if (hiwater_rss < mm->hiwater_rss)
39 		hiwater_rss = mm->hiwater_rss;
40 
41 	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
42 	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
43 	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
44 	swap = get_mm_counter(mm, MM_SWAPENTS);
45 	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
46 	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
47 	seq_printf(m,
48 		"VmPeak:\t%8lu kB\n"
49 		"VmSize:\t%8lu kB\n"
50 		"VmLck:\t%8lu kB\n"
51 		"VmPin:\t%8lu kB\n"
52 		"VmHWM:\t%8lu kB\n"
53 		"VmRSS:\t%8lu kB\n"
54 		"VmData:\t%8lu kB\n"
55 		"VmStk:\t%8lu kB\n"
56 		"VmExe:\t%8lu kB\n"
57 		"VmLib:\t%8lu kB\n"
58 		"VmPTE:\t%8lu kB\n"
59 		"VmPMD:\t%8lu kB\n"
60 		"VmSwap:\t%8lu kB\n",
61 		hiwater_vm << (PAGE_SHIFT-10),
62 		total_vm << (PAGE_SHIFT-10),
63 		mm->locked_vm << (PAGE_SHIFT-10),
64 		mm->pinned_vm << (PAGE_SHIFT-10),
65 		hiwater_rss << (PAGE_SHIFT-10),
66 		total_rss << (PAGE_SHIFT-10),
67 		data << (PAGE_SHIFT-10),
68 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
69 		ptes >> 10,
70 		pmds >> 10,
71 		swap << (PAGE_SHIFT-10));
72 }
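
/*
 * Illustrative rendering in /proc/PID/status (values are made up; every
 * counter is reported in kB):
 *
 *	VmPeak:	    8464 kB
 *	VmSize:	    8400 kB
 *	VmLck:	       0 kB
 *	VmPin:	       0 kB
 *	VmHWM:	     628 kB
 *	VmRSS:	     628 kB
 *	VmData:	     212 kB
 *	VmStk:	     136 kB
 *	VmExe:	      44 kB
 *	VmLib:	    1828 kB
 *	VmPTE:	      40 kB
 *	VmPMD:	      12 kB
 *	VmSwap:	       0 kB
 */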
73 
74 unsigned long task_vsize(struct mm_struct *mm)
75 {
76 	return PAGE_SIZE * mm->total_vm;
77 }
78 
79 unsigned long task_statm(struct mm_struct *mm,
80 			 unsigned long *shared, unsigned long *text,
81 			 unsigned long *data, unsigned long *resident)
82 {
83 	*shared = get_mm_counter(mm, MM_FILEPAGES);
84 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
85 								>> PAGE_SHIFT;
86 	*data = mm->total_vm - mm->shared_vm;
87 	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
88 	return mm->total_vm;
89 }
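
/*
 * These counts feed /proc/PID/statm, which reports them in pages in the
 * order "size resident shared text lib data dt" (lib and dt print as 0).
 */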
90 
91 #ifdef CONFIG_NUMA
92 /*
93  * Save get_task_policy() for show_numa_map().
94  */
95 static void hold_task_mempolicy(struct proc_maps_private *priv)
96 {
97 	struct task_struct *task = priv->task;
98 
99 	task_lock(task);
100 	priv->task_mempolicy = get_task_policy(task);
101 	mpol_get(priv->task_mempolicy);
102 	task_unlock(task);
103 }
104 static void release_task_mempolicy(struct proc_maps_private *priv)
105 {
106 	mpol_put(priv->task_mempolicy);
107 }
108 #else
109 static void hold_task_mempolicy(struct proc_maps_private *priv)
110 {
111 }
112 static void release_task_mempolicy(struct proc_maps_private *priv)
113 {
114 }
115 #endif
116 
117 static void vma_stop(struct proc_maps_private *priv)
118 {
119 	struct mm_struct *mm = priv->mm;
120 
121 	release_task_mempolicy(priv);
122 	up_read(&mm->mmap_sem);
123 	mmput(mm);
124 }
125 
126 static struct vm_area_struct *
127 m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
128 {
129 	if (vma == priv->tail_vma)
130 		return NULL;
131 	return vma->vm_next ?: priv->tail_vma;
132 }
133 
134 static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
135 {
136 	if (m->count < m->size)	/* vma is copied successfully */
137 		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
138 }
139 
140 static void *m_start(struct seq_file *m, loff_t *ppos)
141 {
142 	struct proc_maps_private *priv = m->private;
143 	unsigned long last_addr = m->version;
144 	struct mm_struct *mm;
145 	struct vm_area_struct *vma;
146 	unsigned int pos = *ppos;
147 
148 	/* See m_cache_vma(). Zero at the start or after lseek. */
149 	if (last_addr == -1UL)
150 		return NULL;
151 
152 	priv->task = get_proc_task(priv->inode);
153 	if (!priv->task)
154 		return ERR_PTR(-ESRCH);
155 
156 	mm = priv->mm;
157 	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
158 		return NULL;
159 
160 	down_read(&mm->mmap_sem);
161 	hold_task_mempolicy(priv);
162 	priv->tail_vma = get_gate_vma(mm);
163 
164 	if (last_addr) {
165 		vma = find_vma(mm, last_addr);
166 		if (vma && (vma = m_next_vma(priv, vma)))
167 			return vma;
168 	}
169 
170 	m->version = 0;
171 	if (pos < mm->map_count) {
172 		for (vma = mm->mmap; pos; pos--) {
173 			m->version = vma->vm_start;
174 			vma = vma->vm_next;
175 		}
176 		return vma;
177 	}
178 
179 	/* we do not bother to update m->version in this case */
180 	if (pos == mm->map_count && priv->tail_vma)
181 		return priv->tail_vma;
182 
183 	vma_stop(priv);
184 	return NULL;
185 }
186 
187 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
188 {
189 	struct proc_maps_private *priv = m->private;
190 	struct vm_area_struct *next;
191 
192 	(*pos)++;
193 	next = m_next_vma(priv, v);
194 	if (!next)
195 		vma_stop(priv);
196 	return next;
197 }
198 
199 static void m_stop(struct seq_file *m, void *v)
200 {
201 	struct proc_maps_private *priv = m->private;
202 
203 	if (!IS_ERR_OR_NULL(v))
204 		vma_stop(priv);
205 	if (priv->task) {
206 		put_task_struct(priv->task);
207 		priv->task = NULL;
208 	}
209 }
210 
211 static int proc_maps_open(struct inode *inode, struct file *file,
212 			const struct seq_operations *ops, int psize)
213 {
214 	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
215 
216 	if (!priv)
217 		return -ENOMEM;
218 
219 	priv->inode = inode;
220 	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
221 	if (IS_ERR(priv->mm)) {
222 		int err = PTR_ERR(priv->mm);
223 
224 		seq_release_private(inode, file);
225 		return err;
226 	}
227 
228 	return 0;
229 }
230 
231 static int proc_map_release(struct inode *inode, struct file *file)
232 {
233 	struct seq_file *seq = file->private_data;
234 	struct proc_maps_private *priv = seq->private;
235 
236 	if (priv->mm)
237 		mmdrop(priv->mm);
238 
239 	return seq_release_private(inode, file);
240 }
241 
242 static int do_maps_open(struct inode *inode, struct file *file,
243 			const struct seq_operations *ops)
244 {
245 	return proc_maps_open(inode, file, ops,
246 				sizeof(struct proc_maps_private));
247 }
248 
249 static pid_t pid_of_stack(struct proc_maps_private *priv,
250 				struct vm_area_struct *vma, bool is_pid)
251 {
252 	struct inode *inode = priv->inode;
253 	struct task_struct *task;
254 	pid_t ret = 0;
255 
256 	rcu_read_lock();
257 	task = pid_task(proc_pid(inode), PIDTYPE_PID);
258 	if (task) {
259 		task = task_of_stack(task, vma, is_pid);
260 		if (task)
261 			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
262 	}
263 	rcu_read_unlock();
264 
265 	return ret;
266 }
267 
268 static void
269 show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
270 {
271 	struct mm_struct *mm = vma->vm_mm;
272 	struct file *file = vma->vm_file;
273 	struct proc_maps_private *priv = m->private;
274 	vm_flags_t flags = vma->vm_flags;
275 	unsigned long ino = 0;
276 	unsigned long long pgoff = 0;
277 	unsigned long start, end;
278 	dev_t dev = 0;
279 	const char *name = NULL;
280 
281 	if (file) {
282 		struct inode *inode = file_inode(vma->vm_file);
283 		dev = inode->i_sb->s_dev;
284 		ino = inode->i_ino;
285 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
286 	}
287 
288 	/* We don't show the stack guard page in /proc/maps */
289 	start = vma->vm_start;
290 	if (stack_guard_page_start(vma, start))
291 		start += PAGE_SIZE;
292 	end = vma->vm_end;
293 	if (stack_guard_page_end(vma, end))
294 		end -= PAGE_SIZE;
295 
296 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
297 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
298 			start,
299 			end,
300 			flags & VM_READ ? 'r' : '-',
301 			flags & VM_WRITE ? 'w' : '-',
302 			flags & VM_EXEC ? 'x' : '-',
303 			flags & VM_MAYSHARE ? 's' : 'p',
304 			pgoff,
305 			MAJOR(dev), MINOR(dev), ino);
306 
307 	/*
308 	 * Print the dentry name for named mappings, and a
309 	 * special [heap] marker for the heap:
310 	 */
311 	if (file) {
312 		seq_pad(m, ' ');
313 		seq_file_path(m, file, "\n");
314 		goto done;
315 	}
316 
317 	if (vma->vm_ops && vma->vm_ops->name) {
318 		name = vma->vm_ops->name(vma);
319 		if (name)
320 			goto done;
321 	}
322 
323 	name = arch_vma_name(vma);
324 	if (!name) {
325 		pid_t tid;
326 
327 		if (!mm) {
328 			name = "[vdso]";
329 			goto done;
330 		}
331 
332 		if (vma->vm_start <= mm->brk &&
333 		    vma->vm_end >= mm->start_brk) {
334 			name = "[heap]";
335 			goto done;
336 		}
337 
338 		tid = pid_of_stack(priv, vma, is_pid);
339 		if (tid != 0) {
340 			/*
341 			 * Thread stack in /proc/PID/task/TID/maps or
342 			 * the main process stack.
343 			 */
344 			if (!is_pid || (vma->vm_start <= mm->start_stack &&
345 			    vma->vm_end >= mm->start_stack)) {
346 				name = "[stack]";
347 			} else {
348 				/* Thread stack in /proc/PID/maps */
349 				seq_pad(m, ' ');
350 				seq_printf(m, "[stack:%d]", tid);
351 			}
352 		}
353 	}
354 
355 done:
356 	if (name) {
357 		seq_pad(m, ' ');
358 		seq_puts(m, name);
359 	}
360 	seq_putc(m, '\n');
361 }
362 
363 static int show_map(struct seq_file *m, void *v, int is_pid)
364 {
365 	show_map_vma(m, v, is_pid);
366 	m_cache_vma(m, v);
367 	return 0;
368 }
369 
370 static int show_pid_map(struct seq_file *m, void *v)
371 {
372 	return show_map(m, v, 1);
373 }
374 
375 static int show_tid_map(struct seq_file *m, void *v)
376 {
377 	return show_map(m, v, 0);
378 }
379 
380 static const struct seq_operations proc_pid_maps_op = {
381 	.start	= m_start,
382 	.next	= m_next,
383 	.stop	= m_stop,
384 	.show	= show_pid_map
385 };
386 
387 static const struct seq_operations proc_tid_maps_op = {
388 	.start	= m_start,
389 	.next	= m_next,
390 	.stop	= m_stop,
391 	.show	= show_tid_map
392 };
393 
394 static int pid_maps_open(struct inode *inode, struct file *file)
395 {
396 	return do_maps_open(inode, file, &proc_pid_maps_op);
397 }
398 
399 static int tid_maps_open(struct inode *inode, struct file *file)
400 {
401 	return do_maps_open(inode, file, &proc_tid_maps_op);
402 }
403 
404 const struct file_operations proc_pid_maps_operations = {
405 	.open		= pid_maps_open,
406 	.read		= seq_read,
407 	.llseek		= seq_lseek,
408 	.release	= proc_map_release,
409 };
410 
411 const struct file_operations proc_tid_maps_operations = {
412 	.open		= tid_maps_open,
413 	.read		= seq_read,
414 	.llseek		= seq_lseek,
415 	.release	= proc_map_release,
416 };
417 
418 /*
419  * Proportional Set Size (PSS): my share of RSS.
420  *
421  * PSS of a process is the count of pages it has in memory, where each
422  * page is divided by the number of processes sharing it.  So if a
423  * process has 1000 pages all to itself, and 1000 shared with one other
424  * process, its PSS will be 1500.
425  *
426  * To keep accumulated division errors low, we maintain pss as a
427  * 64-bit fixed-point counter, so (pss >> PSS_SHIFT) is the real
428  * byte count.
429  *
430  * A shift of 12 before division means (assuming 4K page size):
431  * 	- 1M 3-user-pages add up to 8KB errors;
432  * 	- supports mapcount up to 2^24, or 16M;
433  * 	- supports PSS up to 2^52 bytes, or 4PB.
434  */
435 #define PSS_SHIFT 12
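
/*
 * Worked example (illustrative): a 4K page mapped by three processes
 * contributes (4096 << PSS_SHIFT) / 3 = 5592405 to each pss counter;
 * reading back, 5592405 >> PSS_SHIFT = 1365 of the exact 4096/3 =
 * 1365.33 bytes, so rounding loses less than one byte per page.
 */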
436 
437 #ifdef CONFIG_PROC_PAGE_MONITOR
438 struct mem_size_stats {
439 	unsigned long resident;
440 	unsigned long shared_clean;
441 	unsigned long shared_dirty;
442 	unsigned long private_clean;
443 	unsigned long private_dirty;
444 	unsigned long referenced;
445 	unsigned long anonymous;
446 	unsigned long anonymous_thp;
447 	unsigned long swap;
448 	u64 pss;
449 };
450 
451 static void smaps_account(struct mem_size_stats *mss, struct page *page,
452 		unsigned long size, bool young, bool dirty)
453 {
454 	int mapcount;
455 
456 	if (PageAnon(page))
457 		mss->anonymous += size;
458 
459 	mss->resident += size;
460 	/* Accumulate the size in pages that have been accessed. */
461 	/* Accumulate the size of the pages that have been accessed. */
462 		mss->referenced += size;
463 	mapcount = page_mapcount(page);
464 	if (mapcount >= 2) {
465 		u64 pss_delta;
466 
467 		if (dirty || PageDirty(page))
468 			mss->shared_dirty += size;
469 		else
470 			mss->shared_clean += size;
471 		pss_delta = (u64)size << PSS_SHIFT;
472 		do_div(pss_delta, mapcount);
473 		mss->pss += pss_delta;
474 	} else {
475 		if (dirty || PageDirty(page))
476 			mss->private_dirty += size;
477 		else
478 			mss->private_clean += size;
479 		mss->pss += (u64)size << PSS_SHIFT;
480 	}
481 }
482 
483 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
484 		struct mm_walk *walk)
485 {
486 	struct mem_size_stats *mss = walk->private;
487 	struct vm_area_struct *vma = walk->vma;
488 	struct page *page = NULL;
489 
490 	if (pte_present(*pte)) {
491 		page = vm_normal_page(vma, addr, *pte);
492 	} else if (is_swap_pte(*pte)) {
493 		swp_entry_t swpent = pte_to_swp_entry(*pte);
494 
495 		if (!non_swap_entry(swpent))
496 			mss->swap += PAGE_SIZE;
497 		else if (is_migration_entry(swpent))
498 			page = migration_entry_to_page(swpent);
499 	}
500 
501 	if (!page)
502 		return;
503 	smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
504 }
505 
506 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
507 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
508 		struct mm_walk *walk)
509 {
510 	struct mem_size_stats *mss = walk->private;
511 	struct vm_area_struct *vma = walk->vma;
512 	struct page *page;
513 
514 	/* FOLL_DUMP will return -EFAULT on the huge zero page */
515 	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
516 	if (IS_ERR_OR_NULL(page))
517 		return;
518 	mss->anonymous_thp += HPAGE_PMD_SIZE;
519 	smaps_account(mss, page, HPAGE_PMD_SIZE,
520 			pmd_young(*pmd), pmd_dirty(*pmd));
521 }
522 #else
523 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
524 		struct mm_walk *walk)
525 {
526 }
527 #endif
528 
529 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
530 			   struct mm_walk *walk)
531 {
532 	struct vm_area_struct *vma = walk->vma;
533 	pte_t *pte;
534 	spinlock_t *ptl;
535 
536 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
537 		smaps_pmd_entry(pmd, addr, walk);
538 		spin_unlock(ptl);
539 		return 0;
540 	}
541 
542 	if (pmd_trans_unstable(pmd))
543 		return 0;
544 	/*
545 	 * The mmap_sem held all the way back in m_start() is what
546 	 * keeps khugepaged out of here and from collapsing things
547 	 * in here.
548 	 */
549 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
550 	for (; addr != end; pte++, addr += PAGE_SIZE)
551 		smaps_pte_entry(pte, addr, walk);
552 	pte_unmap_unlock(pte - 1, ptl);
553 	cond_resched();
554 	return 0;
555 }
556 
557 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
558 {
559 	/*
560 	 * Don't forget to update Documentation/ on changes.
561 	 */
562 	static const char mnemonics[BITS_PER_LONG][2] = {
563 		/*
564 		 * In case we meet a flag we don't know about.
565 		 */
566 		[0 ... (BITS_PER_LONG-1)] = "??",
567 
568 		[ilog2(VM_READ)]	= "rd",
569 		[ilog2(VM_WRITE)]	= "wr",
570 		[ilog2(VM_EXEC)]	= "ex",
571 		[ilog2(VM_SHARED)]	= "sh",
572 		[ilog2(VM_MAYREAD)]	= "mr",
573 		[ilog2(VM_MAYWRITE)]	= "mw",
574 		[ilog2(VM_MAYEXEC)]	= "me",
575 		[ilog2(VM_MAYSHARE)]	= "ms",
576 		[ilog2(VM_GROWSDOWN)]	= "gd",
577 		[ilog2(VM_PFNMAP)]	= "pf",
578 		[ilog2(VM_DENYWRITE)]	= "dw",
579 #ifdef CONFIG_X86_INTEL_MPX
580 		[ilog2(VM_MPX)]		= "mp",
581 #endif
582 		[ilog2(VM_LOCKED)]	= "lo",
583 		[ilog2(VM_IO)]		= "io",
584 		[ilog2(VM_SEQ_READ)]	= "sr",
585 		[ilog2(VM_RAND_READ)]	= "rr",
586 		[ilog2(VM_DONTCOPY)]	= "dc",
587 		[ilog2(VM_DONTEXPAND)]	= "de",
588 		[ilog2(VM_ACCOUNT)]	= "ac",
589 		[ilog2(VM_NORESERVE)]	= "nr",
590 		[ilog2(VM_HUGETLB)]	= "ht",
591 		[ilog2(VM_ARCH_1)]	= "ar",
592 		[ilog2(VM_DONTDUMP)]	= "dd",
593 #ifdef CONFIG_MEM_SOFT_DIRTY
594 		[ilog2(VM_SOFTDIRTY)]	= "sd",
595 #endif
596 		[ilog2(VM_MIXEDMAP)]	= "mm",
597 		[ilog2(VM_HUGEPAGE)]	= "hg",
598 		[ilog2(VM_NOHUGEPAGE)]	= "nh",
599 		[ilog2(VM_MERGEABLE)]	= "mg",
600 	};
601 	size_t i;
602 
603 	seq_puts(m, "VmFlags: ");
604 	for (i = 0; i < BITS_PER_LONG; i++) {
605 		if (vma->vm_flags & (1UL << i)) {
606 			seq_printf(m, "%c%c ",
607 				   mnemonics[i][0], mnemonics[i][1]);
608 		}
609 	}
610 	seq_putc(m, '\n');
611 }
612 
613 static int show_smap(struct seq_file *m, void *v, int is_pid)
614 {
615 	struct vm_area_struct *vma = v;
616 	struct mem_size_stats mss;
617 	struct mm_walk smaps_walk = {
618 		.pmd_entry = smaps_pte_range,
619 		.mm = vma->vm_mm,
620 		.private = &mss,
621 	};
622 
623 	memset(&mss, 0, sizeof mss);
624 	/* mmap_sem is held in m_start */
625 	walk_page_vma(vma, &smaps_walk);
626 
627 	show_map_vma(m, vma, is_pid);
628 
629 	seq_printf(m,
630 		   "Size:           %8lu kB\n"
631 		   "Rss:            %8lu kB\n"
632 		   "Pss:            %8lu kB\n"
633 		   "Shared_Clean:   %8lu kB\n"
634 		   "Shared_Dirty:   %8lu kB\n"
635 		   "Private_Clean:  %8lu kB\n"
636 		   "Private_Dirty:  %8lu kB\n"
637 		   "Referenced:     %8lu kB\n"
638 		   "Anonymous:      %8lu kB\n"
639 		   "AnonHugePages:  %8lu kB\n"
640 		   "Swap:           %8lu kB\n"
641 		   "KernelPageSize: %8lu kB\n"
642 		   "MMUPageSize:    %8lu kB\n"
643 		   "Locked:         %8lu kB\n",
644 		   (vma->vm_end - vma->vm_start) >> 10,
645 		   mss.resident >> 10,
646 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
647 		   mss.shared_clean  >> 10,
648 		   mss.shared_dirty  >> 10,
649 		   mss.private_clean >> 10,
650 		   mss.private_dirty >> 10,
651 		   mss.referenced >> 10,
652 		   mss.anonymous >> 10,
653 		   mss.anonymous_thp >> 10,
654 		   mss.swap >> 10,
655 		   vma_kernel_pagesize(vma) >> 10,
656 		   vma_mmu_pagesize(vma) >> 10,
657 		   (vma->vm_flags & VM_LOCKED) ?
658 			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
659 
660 	show_smap_vma_flags(m, vma);
661 	m_cache_vma(m, vma);
662 	return 0;
663 }
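
/*
 * Sample entry (illustrative values) as rendered in /proc/PID/smaps:
 *
 *	00400000-0040b000 r-xp 00000000 08:01 786433    /bin/cat
 *	Size:                 44 kB
 *	Rss:                  20 kB
 *	Pss:                  12 kB
 *	Shared_Clean:         16 kB
 *	Shared_Dirty:          0 kB
 *	Private_Clean:         4 kB
 *	Private_Dirty:         0 kB
 *	Referenced:           20 kB
 *	Anonymous:             0 kB
 *	AnonHugePages:         0 kB
 *	Swap:                  0 kB
 *	KernelPageSize:        4 kB
 *	MMUPageSize:           4 kB
 *	Locked:                0 kB
 *	VmFlags: rd ex mr mw me dw
 */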
664 
665 static int show_pid_smap(struct seq_file *m, void *v)
666 {
667 	return show_smap(m, v, 1);
668 }
669 
670 static int show_tid_smap(struct seq_file *m, void *v)
671 {
672 	return show_smap(m, v, 0);
673 }
674 
675 static const struct seq_operations proc_pid_smaps_op = {
676 	.start	= m_start,
677 	.next	= m_next,
678 	.stop	= m_stop,
679 	.show	= show_pid_smap
680 };
681 
682 static const struct seq_operations proc_tid_smaps_op = {
683 	.start	= m_start,
684 	.next	= m_next,
685 	.stop	= m_stop,
686 	.show	= show_tid_smap
687 };
688 
689 static int pid_smaps_open(struct inode *inode, struct file *file)
690 {
691 	return do_maps_open(inode, file, &proc_pid_smaps_op);
692 }
693 
694 static int tid_smaps_open(struct inode *inode, struct file *file)
695 {
696 	return do_maps_open(inode, file, &proc_tid_smaps_op);
697 }
698 
699 const struct file_operations proc_pid_smaps_operations = {
700 	.open		= pid_smaps_open,
701 	.read		= seq_read,
702 	.llseek		= seq_lseek,
703 	.release	= proc_map_release,
704 };
705 
706 const struct file_operations proc_tid_smaps_operations = {
707 	.open		= tid_smaps_open,
708 	.read		= seq_read,
709 	.llseek		= seq_lseek,
710 	.release	= proc_map_release,
711 };
712 
713 /*
714  * We do not want to have constant page-shift bits sitting in
715  * pagemap entries and are about to reuse them some time soon.
716  *
717  * Here's the "migration strategy":
718  * 1. when the system boots, these bits remain what they are,
719  *    but a warning about the future change is printed in the log;
720  * 2. once anyone clears soft-dirty bits via the clear_refs file,
721  *    this flag is set to denote that the user is aware of the
722  *    new API and those page-shift bits change their meaning.
723  *    The respective warning is printed in dmesg;
724  * 3. in a couple of releases we will remove all mentions
725  *    of page-shift in pagemap entries.
726  */
727 
728 static bool soft_dirty_cleared __read_mostly;
729 
730 enum clear_refs_types {
731 	CLEAR_REFS_ALL = 1,
732 	CLEAR_REFS_ANON,
733 	CLEAR_REFS_MAPPED,
734 	CLEAR_REFS_SOFT_DIRTY,
735 	CLEAR_REFS_MM_HIWATER_RSS,
736 	CLEAR_REFS_LAST,
737 };
738 
739 struct clear_refs_private {
740 	enum clear_refs_types type;
741 };
742 
743 #ifdef CONFIG_MEM_SOFT_DIRTY
744 static inline void clear_soft_dirty(struct vm_area_struct *vma,
745 		unsigned long addr, pte_t *pte)
746 {
747 	/*
748 	 * The soft-dirty tracker uses page faults (#PF) to catch writes
749 	 * to pages, so write-protect the pte as well. See
750 	 * Documentation/vm/soft-dirty.txt for a full description
751 	 * of how soft-dirty works.
752 	 */
753 	pte_t ptent = *pte;
754 
755 	if (pte_present(ptent)) {
756 		ptent = pte_wrprotect(ptent);
757 		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
758 	} else if (is_swap_pte(ptent)) {
759 		ptent = pte_swp_clear_soft_dirty(ptent);
760 	}
761 
762 	set_pte_at(vma->vm_mm, addr, pte, ptent);
763 }
764 
765 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
766 		unsigned long addr, pmd_t *pmdp)
767 {
768 	pmd_t pmd = *pmdp;
769 
770 	pmd = pmd_wrprotect(pmd);
771 	pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
772 
773 	if (vma->vm_flags & VM_SOFTDIRTY)
774 		vma->vm_flags &= ~VM_SOFTDIRTY;
775 
776 	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
777 }
778 
779 #else
780 
781 static inline void clear_soft_dirty(struct vm_area_struct *vma,
782 		unsigned long addr, pte_t *pte)
783 {
784 }
785 
786 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
787 		unsigned long addr, pmd_t *pmdp)
788 {
789 }
790 #endif
791 
792 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
793 				unsigned long end, struct mm_walk *walk)
794 {
795 	struct clear_refs_private *cp = walk->private;
796 	struct vm_area_struct *vma = walk->vma;
797 	pte_t *pte, ptent;
798 	spinlock_t *ptl;
799 	struct page *page;
800 
801 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
802 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
803 			clear_soft_dirty_pmd(vma, addr, pmd);
804 			goto out;
805 		}
806 
807 		page = pmd_page(*pmd);
808 
809 		/* Clear accessed and referenced bits. */
810 		pmdp_test_and_clear_young(vma, addr, pmd);
811 		ClearPageReferenced(page);
812 out:
813 		spin_unlock(ptl);
814 		return 0;
815 	}
816 
817 	if (pmd_trans_unstable(pmd))
818 		return 0;
819 
820 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
821 	for (; addr != end; pte++, addr += PAGE_SIZE) {
822 		ptent = *pte;
823 
824 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
825 			clear_soft_dirty(vma, addr, pte);
826 			continue;
827 		}
828 
829 		if (!pte_present(ptent))
830 			continue;
831 
832 		page = vm_normal_page(vma, addr, ptent);
833 		if (!page)
834 			continue;
835 
836 		/* Clear accessed and referenced bits. */
837 		ptep_test_and_clear_young(vma, addr, pte);
838 		ClearPageReferenced(page);
839 	}
840 	pte_unmap_unlock(pte - 1, ptl);
841 	cond_resched();
842 	return 0;
843 }
844 
845 static int clear_refs_test_walk(unsigned long start, unsigned long end,
846 				struct mm_walk *walk)
847 {
848 	struct clear_refs_private *cp = walk->private;
849 	struct vm_area_struct *vma = walk->vma;
850 
851 	if (vma->vm_flags & VM_PFNMAP)
852 		return 1;
853 
854 	/*
855 	 * Writing 1 to /proc/pid/clear_refs affects all pages.
856 	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
857 	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
858 	 * Writing 4 to /proc/pid/clear_refs affects all pages.
859 	 */
860 	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
861 		return 1;
862 	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
863 		return 1;
864 	return 0;
865 }
866 
867 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
868 				size_t count, loff_t *ppos)
869 {
870 	struct task_struct *task;
871 	char buffer[PROC_NUMBUF];
872 	struct mm_struct *mm;
873 	struct vm_area_struct *vma;
874 	enum clear_refs_types type;
875 	int itype;
876 	int rv;
877 
878 	memset(buffer, 0, sizeof(buffer));
879 	if (count > sizeof(buffer) - 1)
880 		count = sizeof(buffer) - 1;
881 	if (copy_from_user(buffer, buf, count))
882 		return -EFAULT;
883 	rv = kstrtoint(strstrip(buffer), 10, &itype);
884 	if (rv < 0)
885 		return rv;
886 	type = (enum clear_refs_types)itype;
887 	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
888 		return -EINVAL;
889 
890 	if (type == CLEAR_REFS_SOFT_DIRTY) {
891 		soft_dirty_cleared = true;
892 		pr_warn_once("The pagemap bits 55-60 have changed their meaning!"
893 			     " See linux/Documentation/vm/pagemap.txt for "
894 			     "details.\n");
895 	}
896 
897 	task = get_proc_task(file_inode(file));
898 	if (!task)
899 		return -ESRCH;
900 	mm = get_task_mm(task);
901 	if (mm) {
902 		struct clear_refs_private cp = {
903 			.type = type,
904 		};
905 		struct mm_walk clear_refs_walk = {
906 			.pmd_entry = clear_refs_pte_range,
907 			.test_walk = clear_refs_test_walk,
908 			.mm = mm,
909 			.private = &cp,
910 		};
911 
912 		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
913 			/*
914 			 * Writing 5 to /proc/pid/clear_refs resets the peak
915 			 * resident set size to this mm's current rss value.
916 			 */
917 			down_write(&mm->mmap_sem);
918 			reset_mm_hiwater_rss(mm);
919 			up_write(&mm->mmap_sem);
920 			goto out_mm;
921 		}
922 
923 		down_read(&mm->mmap_sem);
924 		if (type == CLEAR_REFS_SOFT_DIRTY) {
925 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
926 				if (!(vma->vm_flags & VM_SOFTDIRTY))
927 					continue;
928 				up_read(&mm->mmap_sem);
929 				down_write(&mm->mmap_sem);
930 				for (vma = mm->mmap; vma; vma = vma->vm_next) {
931 					vma->vm_flags &= ~VM_SOFTDIRTY;
932 					vma_set_page_prot(vma);
933 				}
934 				downgrade_write(&mm->mmap_sem);
935 				break;
936 			}
937 			mmu_notifier_invalidate_range_start(mm, 0, -1);
938 		}
939 		walk_page_range(0, ~0UL, &clear_refs_walk);
940 		if (type == CLEAR_REFS_SOFT_DIRTY)
941 			mmu_notifier_invalidate_range_end(mm, 0, -1);
942 		flush_tlb_mm(mm);
943 		up_read(&mm->mmap_sem);
944 out_mm:
945 		mmput(mm);
946 	}
947 	put_task_struct(task);
948 
949 	return count;
950 }
951 
952 const struct file_operations proc_clear_refs_operations = {
953 	.write		= clear_refs_write,
954 	.llseek		= noop_llseek,
955 };
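
/*
 * Userspace sketch (illustrative; the helper name is hypothetical and
 * error handling is elided): arm soft-dirty tracking by writing "4"
 * (CLEAR_REFS_SOFT_DIRTY) to a task's clear_refs file, then sample bit
 * 55 of its pagemap entries to see which pages were written since.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static void clear_soft_dirty_bits(pid_t pid)
 *	{
 *		char path[64];
 *		int fd;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/clear_refs", pid);
 *		fd = open(path, O_WRONLY);
 *		write(fd, "4", 1);
 *		close(fd);
 *	}
 */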
956 
957 typedef struct {
958 	u64 pme;
959 } pagemap_entry_t;
960 
961 struct pagemapread {
962 	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
963 	pagemap_entry_t *buffer;
964 	bool v2;
965 };
966 
967 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
968 #define PAGEMAP_WALK_MASK	(PMD_MASK)
969 
970 #define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
971 #define PM_STATUS_BITS      3
972 #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
973 #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
974 #define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
975 #define PM_PSHIFT_BITS      6
976 #define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
977 #define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
978 #define __PM_PSHIFT(x)      (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
979 #define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
980 #define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
981 /* in "new" pagemap pshift bits are occupied with more status bits */
982 #define PM_STATUS2(v2, x)   (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))
983 
984 #define __PM_SOFT_DIRTY      (1LL)
985 #define PM_PRESENT          PM_STATUS(4LL)
986 #define PM_SWAP             PM_STATUS(2LL)
987 #define PM_FILE             PM_STATUS(1LL)
988 #define PM_NOT_PRESENT(v2)  PM_STATUS2(v2, 0)
989 #define PM_END_OF_BUFFER    1
990 
991 static inline pagemap_entry_t make_pme(u64 val)
992 {
993 	return (pagemap_entry_t) { .pme = val };
994 }
995 
996 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
997 			  struct pagemapread *pm)
998 {
999 	pm->buffer[pm->pos++] = *pme;
1000 	if (pm->pos >= pm->len)
1001 		return PM_END_OF_BUFFER;
1002 	return 0;
1003 }
1004 
1005 static int pagemap_pte_hole(unsigned long start, unsigned long end,
1006 				struct mm_walk *walk)
1007 {
1008 	struct pagemapread *pm = walk->private;
1009 	unsigned long addr = start;
1010 	int err = 0;
1011 
1012 	while (addr < end) {
1013 		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1014 		pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
1015 		/* End of address space hole, which we mark as non-present. */
1016 		unsigned long hole_end;
1017 
1018 		if (vma)
1019 			hole_end = min(end, vma->vm_start);
1020 		else
1021 			hole_end = end;
1022 
1023 		for (; addr < hole_end; addr += PAGE_SIZE) {
1024 			err = add_to_pagemap(addr, &pme, pm);
1025 			if (err)
1026 				goto out;
1027 		}
1028 
1029 		if (!vma)
1030 			break;
1031 
1032 		/* Addresses in the VMA. */
1033 		if (vma->vm_flags & VM_SOFTDIRTY)
1034 			pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
1035 		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1036 			err = add_to_pagemap(addr, &pme, pm);
1037 			if (err)
1038 				goto out;
1039 		}
1040 	}
1041 out:
1042 	return err;
1043 }
1044 
1045 static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
1046 		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1047 {
1048 	u64 frame, flags;
1049 	struct page *page = NULL;
1050 	int flags2 = 0;
1051 
1052 	if (pte_present(pte)) {
1053 		frame = pte_pfn(pte);
1054 		flags = PM_PRESENT;
1055 		page = vm_normal_page(vma, addr, pte);
1056 		if (pte_soft_dirty(pte))
1057 			flags2 |= __PM_SOFT_DIRTY;
1058 	} else if (is_swap_pte(pte)) {
1059 		swp_entry_t entry;
1060 		if (pte_swp_soft_dirty(pte))
1061 			flags2 |= __PM_SOFT_DIRTY;
1062 		entry = pte_to_swp_entry(pte);
1063 		frame = swp_type(entry) |
1064 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
1065 		flags = PM_SWAP;
1066 		if (is_migration_entry(entry))
1067 			page = migration_entry_to_page(entry);
1068 	} else {
1069 		if (vma->vm_flags & VM_SOFTDIRTY)
1070 			flags2 |= __PM_SOFT_DIRTY;
1071 		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
1072 		return;
1073 	}
1074 
1075 	if (page && !PageAnon(page))
1076 		flags |= PM_FILE;
1077 	if ((vma->vm_flags & VM_SOFTDIRTY))
1078 		flags2 |= __PM_SOFT_DIRTY;
1079 
1080 	*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
1081 }
1082 
1083 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1084 static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
1085 		pmd_t pmd, int offset, int pmd_flags2)
1086 {
1087 	/*
1088 	 * Currently the pmd for a thp is always present, because a thp cannot
1089 	 * be swapped out, migrated, or HWPOISONed (it is split in such cases
1090 	 * instead). This if-check just prepares for a future implementation.
1091 	 */
1092 	if (pmd_present(pmd))
1093 		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
1094 				| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
1095 	else
1096 		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
1097 }
1098 #else
1099 static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
1100 		pmd_t pmd, int offset, int pmd_flags2)
1101 {
1102 }
1103 #endif
1104 
1105 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
1106 			     struct mm_walk *walk)
1107 {
1108 	struct vm_area_struct *vma = walk->vma;
1109 	struct pagemapread *pm = walk->private;
1110 	spinlock_t *ptl;
1111 	pte_t *pte, *orig_pte;
1112 	int err = 0;
1113 
1114 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1115 		int pmd_flags2;
1116 
1117 		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
1118 			pmd_flags2 = __PM_SOFT_DIRTY;
1119 		else
1120 			pmd_flags2 = 0;
1121 
1122 		for (; addr != end; addr += PAGE_SIZE) {
1123 			unsigned long offset;
1124 			pagemap_entry_t pme;
1125 
1126 			offset = (addr & ~PAGEMAP_WALK_MASK) >>
1127 					PAGE_SHIFT;
1128 			thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
1129 			err = add_to_pagemap(addr, &pme, pm);
1130 			if (err)
1131 				break;
1132 		}
1133 		spin_unlock(ptl);
1134 		return err;
1135 	}
1136 
1137 	if (pmd_trans_unstable(pmd))
1138 		return 0;
1139 
1140 	/*
1141 	 * We can assume that @vma always points to a valid VMA and that @end
1142 	 * never goes beyond vma->vm_end.
1143 	 */
1144 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1145 	for (; addr < end; pte++, addr += PAGE_SIZE) {
1146 		pagemap_entry_t pme;
1147 
1148 		pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
1149 		err = add_to_pagemap(addr, &pme, pm);
1150 		if (err)
1151 			break;
1152 	}
1153 	pte_unmap_unlock(orig_pte, ptl);
1154 
1155 	cond_resched();
1156 
1157 	return err;
1158 }
1159 
1160 #ifdef CONFIG_HUGETLB_PAGE
1161 static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
1162 					pte_t pte, int offset, int flags2)
1163 {
1164 	if (pte_present(pte))
1165 		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)	|
1166 				PM_STATUS2(pm->v2, flags2)		|
1167 				PM_PRESENT);
1168 	else
1169 		*pme = make_pme(PM_NOT_PRESENT(pm->v2)			|
1170 				PM_STATUS2(pm->v2, flags2));
1171 }
1172 
1173 /* This function walks within one hugetlb entry in a single call */
1174 static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
1175 				 unsigned long addr, unsigned long end,
1176 				 struct mm_walk *walk)
1177 {
1178 	struct pagemapread *pm = walk->private;
1179 	struct vm_area_struct *vma = walk->vma;
1180 	int err = 0;
1181 	int flags2;
1182 	pagemap_entry_t pme;
1183 
1184 	if (vma->vm_flags & VM_SOFTDIRTY)
1185 		flags2 = __PM_SOFT_DIRTY;
1186 	else
1187 		flags2 = 0;
1188 
1189 	for (; addr != end; addr += PAGE_SIZE) {
1190 		int offset = (addr & ~hmask) >> PAGE_SHIFT;
1191 		huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
1192 		err = add_to_pagemap(addr, &pme, pm);
1193 		if (err)
1194 			return err;
1195 	}
1196 
1197 	cond_resched();
1198 
1199 	return err;
1200 }
1201 #endif /* CONFIG_HUGETLB_PAGE */
1202 
1203 /*
1204  * /proc/pid/pagemap - an array mapping virtual pages to pfns
1205  *
1206  * For each page in the address space, this file contains one 64-bit entry
1207  * consisting of the following:
1208  *
1209  * Bits 0-54  page frame number (PFN) if present
1210  * Bits 0-4   swap type if swapped
1211  * Bits 5-54  swap offset if swapped
1212  * Bits 55-60 page shift (page size = 1<<page shift)
1213  * Bit  61    page is file-page or shared-anon
1214  * Bit  62    page swapped
1215  * Bit  63    page present
1216  *
1217  * If the page is not present but in swap, then the PFN contains an
1218  * encoding of the swap file number and the page's offset into the
1219  * swap. Unmapped pages return a null PFN. This allows determining
1220  * precisely which pages are mapped (or in swap) and comparing mapped
1221  * pages between processes.
1222  *
1223  * Efficient users of this interface will use /proc/pid/maps to
1224  * determine which areas of memory are actually mapped and llseek to
1225  * skip over unmapped regions.
1226  */
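
/*
 * Userspace sketch (illustrative; the helper name is hypothetical and
 * error handling is elided): fetch the pagemap entry covering one
 * virtual address of the calling process, to be decoded per the layout
 * described above.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static uint64_t pagemap_entry(void *vaddr)
 *	{
 *		uint64_t entry = 0;
 *		long psize = sysconf(_SC_PAGESIZE);
 *		int fd = open("/proc/self/pagemap", O_RDONLY);
 *
 *		pread(fd, &entry, sizeof(entry),
 *		      (uintptr_t)vaddr / psize * sizeof(entry));
 *		close(fd);
 *		return entry;	// bit 63 = present, bits 0-54 = PFN
 *	}
 */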
1227 static ssize_t pagemap_read(struct file *file, char __user *buf,
1228 			    size_t count, loff_t *ppos)
1229 {
1230 	struct task_struct *task = get_proc_task(file_inode(file));
1231 	struct mm_struct *mm;
1232 	struct pagemapread pm;
1233 	int ret = -ESRCH;
1234 	struct mm_walk pagemap_walk = {};
1235 	unsigned long src;
1236 	unsigned long svpfn;
1237 	unsigned long start_vaddr;
1238 	unsigned long end_vaddr;
1239 	int copied = 0;
1240 
1241 	if (!task)
1242 		goto out;
1243 
1244 	ret = -EINVAL;
1245 	/* file position must be aligned */
1246 	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1247 		goto out_task;
1248 
1249 	ret = 0;
1250 	if (!count)
1251 		goto out_task;
1252 
1253 	pm.v2 = soft_dirty_cleared;
1254 	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1255 	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
1256 	ret = -ENOMEM;
1257 	if (!pm.buffer)
1258 		goto out_task;
1259 
1260 	mm = mm_access(task, PTRACE_MODE_READ);
1261 	ret = PTR_ERR(mm);
1262 	if (!mm || IS_ERR(mm))
1263 		goto out_free;
1264 
1265 	pagemap_walk.pmd_entry = pagemap_pte_range;
1266 	pagemap_walk.pte_hole = pagemap_pte_hole;
1267 #ifdef CONFIG_HUGETLB_PAGE
1268 	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1269 #endif
1270 	pagemap_walk.mm = mm;
1271 	pagemap_walk.private = &pm;
1272 
1273 	src = *ppos;
1274 	svpfn = src / PM_ENTRY_BYTES;
1275 	start_vaddr = svpfn << PAGE_SHIFT;
1276 	end_vaddr = TASK_SIZE_OF(task);
1277 
1278 	/* watch out for wraparound */
1279 	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
1280 		start_vaddr = end_vaddr;
1281 
1282 	/*
1283 	 * The odds are that this will stop walking way
1284 	 * before end_vaddr, because the length of the
1285 	 * user buffer is tracked in "pm", and the walk
1286 	 * will stop when we hit the end of the buffer.
1287 	 */
1288 	ret = 0;
1289 	while (count && (start_vaddr < end_vaddr)) {
1290 		int len;
1291 		unsigned long end;
1292 
1293 		pm.pos = 0;
1294 		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1295 		/* overflow? */
1296 		if (end < start_vaddr || end > end_vaddr)
1297 			end = end_vaddr;
1298 		down_read(&mm->mmap_sem);
1299 		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1300 		up_read(&mm->mmap_sem);
1301 		start_vaddr = end;
1302 
1303 		len = min(count, PM_ENTRY_BYTES * pm.pos);
1304 		if (copy_to_user(buf, pm.buffer, len)) {
1305 			ret = -EFAULT;
1306 			goto out_mm;
1307 		}
1308 		copied += len;
1309 		buf += len;
1310 		count -= len;
1311 	}
1312 	*ppos += copied;
1313 	if (!ret || ret == PM_END_OF_BUFFER)
1314 		ret = copied;
1315 
1316 out_mm:
1317 	mmput(mm);
1318 out_free:
1319 	kfree(pm.buffer);
1320 out_task:
1321 	put_task_struct(task);
1322 out:
1323 	return ret;
1324 }
1325 
1326 static int pagemap_open(struct inode *inode, struct file *file)
1327 {
1328 	/* do not disclose physical addresses: attack vector */
1329 	if (!capable(CAP_SYS_ADMIN))
1330 		return -EPERM;
1331 	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries will stop "
1332 			"being page-shift some time soon. See "
1333 			"linux/Documentation/vm/pagemap.txt for details.\n");
1334 	return 0;
1335 }
1336 
1337 const struct file_operations proc_pagemap_operations = {
1338 	.llseek		= mem_lseek, /* borrow this */
1339 	.read		= pagemap_read,
1340 	.open		= pagemap_open,
1341 };
1342 #endif /* CONFIG_PROC_PAGE_MONITOR */
1343 
1344 #ifdef CONFIG_NUMA
1345 
1346 struct numa_maps {
1347 	unsigned long pages;
1348 	unsigned long anon;
1349 	unsigned long active;
1350 	unsigned long writeback;
1351 	unsigned long mapcount_max;
1352 	unsigned long dirty;
1353 	unsigned long swapcache;
1354 	unsigned long node[MAX_NUMNODES];
1355 };
1356 
1357 struct numa_maps_private {
1358 	struct proc_maps_private proc_maps;
1359 	struct numa_maps md;
1360 };
1361 
1362 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1363 			unsigned long nr_pages)
1364 {
1365 	int count = page_mapcount(page);
1366 
1367 	md->pages += nr_pages;
1368 	if (pte_dirty || PageDirty(page))
1369 		md->dirty += nr_pages;
1370 
1371 	if (PageSwapCache(page))
1372 		md->swapcache += nr_pages;
1373 
1374 	if (PageActive(page) || PageUnevictable(page))
1375 		md->active += nr_pages;
1376 
1377 	if (PageWriteback(page))
1378 		md->writeback += nr_pages;
1379 
1380 	if (PageAnon(page))
1381 		md->anon += nr_pages;
1382 
1383 	if (count > md->mapcount_max)
1384 		md->mapcount_max = count;
1385 
1386 	md->node[page_to_nid(page)] += nr_pages;
1387 }
1388 
1389 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1390 		unsigned long addr)
1391 {
1392 	struct page *page;
1393 	int nid;
1394 
1395 	if (!pte_present(pte))
1396 		return NULL;
1397 
1398 	page = vm_normal_page(vma, addr, pte);
1399 	if (!page)
1400 		return NULL;
1401 
1402 	if (PageReserved(page))
1403 		return NULL;
1404 
1405 	nid = page_to_nid(page);
1406 	if (!node_isset(nid, node_states[N_MEMORY]))
1407 		return NULL;
1408 
1409 	return page;
1410 }
1411 
1412 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1413 		unsigned long end, struct mm_walk *walk)
1414 {
1415 	struct numa_maps *md = walk->private;
1416 	struct vm_area_struct *vma = walk->vma;
1417 	spinlock_t *ptl;
1418 	pte_t *orig_pte;
1419 	pte_t *pte;
1420 
1421 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1422 		pte_t huge_pte = *(pte_t *)pmd;
1423 		struct page *page;
1424 
1425 		page = can_gather_numa_stats(huge_pte, vma, addr);
1426 		if (page)
1427 			gather_stats(page, md, pte_dirty(huge_pte),
1428 				     HPAGE_PMD_SIZE/PAGE_SIZE);
1429 		spin_unlock(ptl);
1430 		return 0;
1431 	}
1432 
1433 	if (pmd_trans_unstable(pmd))
1434 		return 0;
1435 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1436 	do {
1437 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1438 		if (!page)
1439 			continue;
1440 		gather_stats(page, md, pte_dirty(*pte), 1);
1441 
1442 	} while (pte++, addr += PAGE_SIZE, addr != end);
1443 	pte_unmap_unlock(orig_pte, ptl);
1444 	return 0;
1445 }
1446 #ifdef CONFIG_HUGETLB_PAGE
1447 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1448 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1449 {
1450 	struct numa_maps *md;
1451 	struct page *page;
1452 
1453 	if (!pte_present(*pte))
1454 		return 0;
1455 
1456 	page = pte_page(*pte);
1457 	if (!page)
1458 		return 0;
1459 
1460 	md = walk->private;
1461 	gather_stats(page, md, pte_dirty(*pte), 1);
1462 	return 0;
1463 }
1464 
1465 #else
1466 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1467 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1468 {
1469 	return 0;
1470 }
1471 #endif
1472 
1473 /*
1474  * Display pages allocated per node and memory policy via /proc.
1475  */
1476 static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1477 {
1478 	struct numa_maps_private *numa_priv = m->private;
1479 	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1480 	struct vm_area_struct *vma = v;
1481 	struct numa_maps *md = &numa_priv->md;
1482 	struct file *file = vma->vm_file;
1483 	struct mm_struct *mm = vma->vm_mm;
1484 	struct mm_walk walk = {
1485 		.hugetlb_entry = gather_hugetlb_stats,
1486 		.pmd_entry = gather_pte_stats,
1487 		.private = md,
1488 		.mm = mm,
1489 	};
1490 	struct mempolicy *pol;
1491 	char buffer[64];
1492 	int nid;
1493 
1494 	if (!mm)
1495 		return 0;
1496 
1497 	/* Ensure we start with an empty set of numa_maps statistics. */
1498 	memset(md, 0, sizeof(*md));
1499 
1500 	pol = __get_vma_policy(vma, vma->vm_start);
1501 	if (pol) {
1502 		mpol_to_str(buffer, sizeof(buffer), pol);
1503 		mpol_cond_put(pol);
1504 	} else {
1505 		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1506 	}
1507 
1508 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1509 
1510 	if (file) {
1511 		seq_puts(m, " file=");
1512 		seq_file_path(m, file, "\n\t= ");
1513 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1514 		seq_puts(m, " heap");
1515 	} else {
1516 		pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
1517 		if (tid != 0) {
1518 			/*
1519 			 * Thread stack in /proc/PID/task/TID/maps or
1520 			 * the main process stack.
1521 			 */
1522 			if (!is_pid || (vma->vm_start <= mm->start_stack &&
1523 			    vma->vm_end >= mm->start_stack))
1524 				seq_puts(m, " stack");
1525 			else
1526 				seq_printf(m, " stack:%d", tid);
1527 		}
1528 	}
1529 
1530 	if (is_vm_hugetlb_page(vma))
1531 		seq_puts(m, " huge");
1532 
1533 	/* mmap_sem is held by m_start */
1534 	walk_page_vma(vma, &walk);
1535 
1536 	if (!md->pages)
1537 		goto out;
1538 
1539 	if (md->anon)
1540 		seq_printf(m, " anon=%lu", md->anon);
1541 
1542 	if (md->dirty)
1543 		seq_printf(m, " dirty=%lu", md->dirty);
1544 
1545 	if (md->pages != md->anon && md->pages != md->dirty)
1546 		seq_printf(m, " mapped=%lu", md->pages);
1547 
1548 	if (md->mapcount_max > 1)
1549 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1550 
1551 	if (md->swapcache)
1552 		seq_printf(m, " swapcache=%lu", md->swapcache);
1553 
1554 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1555 		seq_printf(m, " active=%lu", md->active);
1556 
1557 	if (md->writeback)
1558 		seq_printf(m, " writeback=%lu", md->writeback);
1559 
1560 	for_each_node_state(nid, N_MEMORY)
1561 		if (md->node[nid])
1562 			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1563 
1564 	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
1565 out:
1566 	seq_putc(m, '\n');
1567 	m_cache_vma(m, vma);
1568 	return 0;
1569 }
1570 
1571 static int show_pid_numa_map(struct seq_file *m, void *v)
1572 {
1573 	return show_numa_map(m, v, 1);
1574 }
1575 
1576 static int show_tid_numa_map(struct seq_file *m, void *v)
1577 {
1578 	return show_numa_map(m, v, 0);
1579 }
1580 
1581 static const struct seq_operations proc_pid_numa_maps_op = {
1582 	.start  = m_start,
1583 	.next   = m_next,
1584 	.stop   = m_stop,
1585 	.show   = show_pid_numa_map,
1586 };
1587 
1588 static const struct seq_operations proc_tid_numa_maps_op = {
1589 	.start  = m_start,
1590 	.next   = m_next,
1591 	.stop   = m_stop,
1592 	.show   = show_tid_numa_map,
1593 };
1594 
1595 static int numa_maps_open(struct inode *inode, struct file *file,
1596 			  const struct seq_operations *ops)
1597 {
1598 	return proc_maps_open(inode, file, ops,
1599 				sizeof(struct numa_maps_private));
1600 }
1601 
1602 static int pid_numa_maps_open(struct inode *inode, struct file *file)
1603 {
1604 	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
1605 }
1606 
1607 static int tid_numa_maps_open(struct inode *inode, struct file *file)
1608 {
1609 	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
1610 }
1611 
1612 const struct file_operations proc_pid_numa_maps_operations = {
1613 	.open		= pid_numa_maps_open,
1614 	.read		= seq_read,
1615 	.llseek		= seq_lseek,
1616 	.release	= proc_map_release,
1617 };
1618 
1619 const struct file_operations proc_tid_numa_maps_operations = {
1620 	.open		= tid_numa_maps_open,
1621 	.read		= seq_read,
1622 	.llseek		= seq_lseek,
1623 	.release	= proc_map_release,
1624 };
1625 #endif /* CONFIG_NUMA */
1626