/* xref: /linux/fs/proc/task_mmu.c (revision a33f32244d8550da8b4a26e277ce07d5c6d158b5) */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

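/* Feed the Vm* lines of /proc/<pid>/status from the given mm. */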
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
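
/*
 * Worked example (illustrative, not from this file): with 4K pages,
 * PAGE_SHIFT-10 == 2, so a counter of 300 pages is printed above as
 * 300 << 2 == 1200 kB; text and lib are already computed in kB.
 */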

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

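/*
 * Fill the page counts reported through /proc/<pid>/statm; the return
 * value is the total mapped size, in pages.
 */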
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

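/*
 * Pad to the column where the mapping name goes; len is the width of
 * the fixed fields already printed, obtained via the %n in
 * show_map_vma().
 */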
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

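/*
 * Drop mmap_sem and the mm reference taken in m_start(), unless @vma is
 * the tail_vma sentinel, for which m_start() already released them.
 */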
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

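/*
 * seq_file ->start() method: pin the task and its mm, take mmap_sem,
 * and return the vma at *pos, with the gate vma appended as a final
 * pseudo-entry (tail_vma).
 */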
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. last_addr is zero at the
	 * beginning and after lseek, and -1 once the end of the
	 * vmas has been reached.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check that the vma index is within range, and scan
	 * sequentially up to the *pos'th vma.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

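/*
 * Emit one /proc/<pid>/maps line for @vma: address range, permissions,
 * file offset, device, inode, and a name where one is known.
 */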
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	unsigned long flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				} else {
					unsigned long stack_start;
					struct proc_maps_private *pmp;

					pmp = m->private;
					stack_start = pmp->task->stack_start;

					if (vma->vm_start <= stack_start &&
					    vma->vm_end >= stack_start) {
						pad_len_spaces(m, len);
						seq_printf(m,
						 "[threadstack:%08lx]",
#ifdef CONFIG_STACK_GROWSUP
						 vma->vm_end - stack_start
#else
						 stack_start - vma->vm_start
#endif
						);
					}
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}

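/*
 * The resulting line looks like (values illustrative only):
 *
 *   08048000-08056000 r-xp 00000000 03:0c 64593      /usr/sbin/gpm
 */
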
static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, we use a 64-bit fixed-point
 * pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
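
/*
 * Worked example of the fixed point (illustrative): a 4K page shared by
 * three tasks contributes (4096 << 12) / 3 == 5592405 to each task's
 * pss, i.e. 5592405 >> 12 == 1365 bytes, against an exact share of
 * 4096 / 3 == 1365.33 bytes; the truncation loses well under one byte
 * per page.
 */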

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long swap;
	u64 pss;
};

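/*
 * mm_walk ->pmd_entry callback: accumulate the smaps counters for every
 * pte in [addr, end), under the page table lock.
 */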
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (is_swap_pte(ptent)) {
			mss->swap += PAGE_SIZE;
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		mss->resident += PAGE_SIZE;
		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

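/*
 * mm_walk ->pmd_entry callback for clear_refs: clear the accessed bit
 * of every present pte and the referenced flag of its page.
 */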
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3

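/*
 * Illustrative use from userspace: write "1" to /proc/<pid>/clear_refs,
 * let the workload run, then read the Referenced: fields of
 * /proc/<pid>/smaps to estimate the working set touched since the
 * write.
 */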
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	long type;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (strict_strtol(strstrip(buffer), 10, &type))
		return -EINVAL;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};

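/*
 * Per-read state for /proc/<pid>/pagemap: a kernel buffer of u64
 * entries and the current fill position within it.
 */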
struct pagemapread {
	int pos, len;
	u64 *buffer;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

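/*
 * Append one entry to pm->buffer; PM_END_OF_BUFFER tells the caller the
 * buffer is full and the walk should stop for this round.
 */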
static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = pfn;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}

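/*
 * Pack a swapped-out pte: swap type in the low MAX_SWAPFILES_SHIFT
 * bits, swap offset in the bits above them.
 */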
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;
	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	u64 pme = 0;
	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
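/*
 * Minimal userspace sketch of the interface above (illustrative only,
 * assuming 4K pages; error handling omitted, masks restated by hand):
 *
 *	uint64_t ent;
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *
 *	pread(fd, &ent, sizeof(ent), (vaddr / 4096) * sizeof(ent));
 *	if (ent & (1ULL << 63))				// present
 *		printf("pfn %llu\n", ent & ((1ULL << 55) - 1));
 *	else if (ent & (1ULL << 62))			// swapped
 *		printf("swap type %llu\n", ent & 0x1f);
 */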
#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;

	if (!count)
		goto out_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = start_vaddr + PAGEMAP_WALK_SIZE;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif