xref: /linux/fs/proc/task_mmu.c (revision 861e10be08c69808065d755d3e3cab5d520a2d32)
1 #include <linux/mm.h>
2 #include <linux/hugetlb.h>
3 #include <linux/huge_mm.h>
4 #include <linux/mount.h>
5 #include <linux/seq_file.h>
6 #include <linux/highmem.h>
7 #include <linux/ptrace.h>
8 #include <linux/slab.h>
9 #include <linux/pagemap.h>
10 #include <linux/mempolicy.h>
11 #include <linux/rmap.h>
12 #include <linux/swap.h>
13 #include <linux/swapops.h>
14 
15 #include <asm/elf.h>
16 #include <asm/uaccess.h>
17 #include <asm/tlbflush.h>
18 #include "internal.h"
19 
20 void task_mem(struct seq_file *m, struct mm_struct *mm)
21 {
22 	unsigned long data, text, lib, swap;
23 	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
24 
25 	/*
26 	 * Note: to minimize their overhead, mm maintains hiwater_vm and
27 	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
28 	 * collector of these hiwater stats must therefore get total_vm
29 	 * and rss too, which will usually be the higher.  Barriers? Not
30 	 * worth the effort, since such snapshots can always be inconsistent.
31 	 */
32 	hiwater_vm = total_vm = mm->total_vm;
33 	if (hiwater_vm < mm->hiwater_vm)
34 		hiwater_vm = mm->hiwater_vm;
35 	hiwater_rss = total_rss = get_mm_rss(mm);
36 	if (hiwater_rss < mm->hiwater_rss)
37 		hiwater_rss = mm->hiwater_rss;
38 
39 	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
40 	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
41 	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
42 	swap = get_mm_counter(mm, MM_SWAPENTS);
43 	seq_printf(m,
44 		"VmPeak:\t%8lu kB\n"
45 		"VmSize:\t%8lu kB\n"
46 		"VmLck:\t%8lu kB\n"
47 		"VmPin:\t%8lu kB\n"
48 		"VmHWM:\t%8lu kB\n"
49 		"VmRSS:\t%8lu kB\n"
50 		"VmData:\t%8lu kB\n"
51 		"VmStk:\t%8lu kB\n"
52 		"VmExe:\t%8lu kB\n"
53 		"VmLib:\t%8lu kB\n"
54 		"VmPTE:\t%8lu kB\n"
55 		"VmSwap:\t%8lu kB\n",
56 		hiwater_vm << (PAGE_SHIFT-10),
57 		total_vm << (PAGE_SHIFT-10),
58 		mm->locked_vm << (PAGE_SHIFT-10),
59 		mm->pinned_vm << (PAGE_SHIFT-10),
60 		hiwater_rss << (PAGE_SHIFT-10),
61 		total_rss << (PAGE_SHIFT-10),
62 		data << (PAGE_SHIFT-10),
63 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
64 		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
65 		swap << (PAGE_SHIFT-10));
66 }
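/*
 * A minimal userspace sketch (illustration only, assuming a Linux /proc)
 * of how the Vm* lines printed by task_mem() above can be consumed:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* task_mem() emits e.g. "VmRSS:\t    1234 kB" */
		if (!strncmp(line, "VmRSS:", 6) ||
		    !strncmp(line, "VmSwap:", 7))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}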
67 
68 unsigned long task_vsize(struct mm_struct *mm)
69 {
70 	return PAGE_SIZE * mm->total_vm;
71 }
72 
73 unsigned long task_statm(struct mm_struct *mm,
74 			 unsigned long *shared, unsigned long *text,
75 			 unsigned long *data, unsigned long *resident)
76 {
77 	*shared = get_mm_counter(mm, MM_FILEPAGES);
78 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
79 								>> PAGE_SHIFT;
80 	*data = mm->total_vm - mm->shared_vm;
81 	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
82 	return mm->total_vm;
83 }
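/*
 * The counts task_statm() fills in surface as the seven space-separated
 * page counts in /proc/<pid>/statm: size resident shared text lib data dt
 * (lib and dt are reported as 0). A minimal userspace sketch, illustration
 * only:
 */
#include <stdio.h>

int main(void)
{
	unsigned long size, resident, shared, text, lib, data, dt;
	FILE *f = fopen("/proc/self/statm", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
		   &size, &resident, &shared, &text, &lib, &data, &dt) == 7)
		printf("resident: %lu pages, shared: %lu pages\n",
		       resident, shared);
	fclose(f);
	return 0;
}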
84 
85 static void pad_len_spaces(struct seq_file *m, int len)
86 {
87 	len = 25 + sizeof(void*) * 6 - len;
88 	if (len < 1)
89 		len = 1;
90 	seq_printf(m, "%*c", len, ' ');
91 }
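/*
 * For example, with 8-byte pointers the target column works out to
 * 25 + 8 * 6 = 73, wide enough for the address range, permission,
 * offset, device and inode fields printed before the file name.
 */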
92 
93 #ifdef CONFIG_NUMA
94 /*
95  * These functions are for numa_maps, but are called from the generic
96  * **maps seq_file ->start() and ->stop() ops.
97  *
98  * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
99  * Each mempolicy object is controlled by reference counting. The problem here
100  * is how to avoid accessing a dead mempolicy object.
101  *
102  * Because we're holding mmap_sem while reading seq_file, it's safe to access
103  * each vma's mempolicy; no vma can drop its reference to a mempolicy under us.
104  *
105  * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
106  * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
107  * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
108  * guarantee the task never exits under us. But taking task_lock() around
109  * get_vma_policy() causes a lock-ordering problem.
110  *
111  * To access task->mempolicy without a lock, we take a reference on the
112  * object pointed to by task->mempolicy and remember it. This guarantees
113  * that task->mempolicy points to a live object (or is NULL) during numa_maps accesses.
114  */
115 static void hold_task_mempolicy(struct proc_maps_private *priv)
116 {
117 	struct task_struct *task = priv->task;
118 
119 	task_lock(task);
120 	priv->task_mempolicy = task->mempolicy;
121 	mpol_get(priv->task_mempolicy);
122 	task_unlock(task);
123 }
124 static void release_task_mempolicy(struct proc_maps_private *priv)
125 {
126 	mpol_put(priv->task_mempolicy);
127 }
128 #else
129 static void hold_task_mempolicy(struct proc_maps_private *priv)
130 {
131 }
132 static void release_task_mempolicy(struct proc_maps_private *priv)
133 {
134 }
135 #endif
136 
137 static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
138 {
139 	if (vma && vma != priv->tail_vma) {
140 		struct mm_struct *mm = vma->vm_mm;
141 		release_task_mempolicy(priv);
142 		up_read(&mm->mmap_sem);
143 		mmput(mm);
144 	}
145 }
146 
147 static void *m_start(struct seq_file *m, loff_t *pos)
148 {
149 	struct proc_maps_private *priv = m->private;
150 	unsigned long last_addr = m->version;
151 	struct mm_struct *mm;
152 	struct vm_area_struct *vma, *tail_vma = NULL;
153 	loff_t l = *pos;
154 
155 	/* Clear the per-syscall fields in priv */
156 	priv->task = NULL;
157 	priv->tail_vma = NULL;
158 
159 	/*
160 	 * We remember last_addr rather than next_addr so as to hit the
161 	 * mmap_cache most of the time. last_addr is zero at the beginning
162 	 * and also after an lseek; it becomes -1 once the end of the
163 	 * vmas has been reached.
164 	 */
165 
166 	if (last_addr == -1UL)
167 		return NULL;
168 
169 	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
170 	if (!priv->task)
171 		return ERR_PTR(-ESRCH);
172 
173 	mm = mm_access(priv->task, PTRACE_MODE_READ);
174 	if (!mm || IS_ERR(mm))
175 		return mm;
176 	down_read(&mm->mmap_sem);
177 
178 	tail_vma = get_gate_vma(priv->task->mm);
179 	priv->tail_vma = tail_vma;
180 	hold_task_mempolicy(priv);
181 	/* Start with last addr hint */
182 	vma = find_vma(mm, last_addr);
183 	if (last_addr && vma) {
184 		vma = vma->vm_next;
185 		goto out;
186 	}
187 
188 	/*
189 	 * Check that the vma index is within range and do a
190 	 * sequential scan up to the index given in *pos.
191 	 */
192 	vma = NULL;
193 	if ((unsigned long)l < mm->map_count) {
194 		vma = mm->mmap;
195 		while (l-- && vma)
196 			vma = vma->vm_next;
197 		goto out;
198 	}
199 
200 	if (l != mm->map_count)
201 		tail_vma = NULL; /* After gate vma */
202 
203 out:
204 	if (vma)
205 		return vma;
206 
207 	release_task_mempolicy(priv);
208 	/* End of vmas has been reached */
209 	m->version = (tail_vma != NULL)? 0: -1UL;
210 	up_read(&mm->mmap_sem);
211 	mmput(mm);
212 	return tail_vma;
213 }
214 
215 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
216 {
217 	struct proc_maps_private *priv = m->private;
218 	struct vm_area_struct *vma = v;
219 	struct vm_area_struct *tail_vma = priv->tail_vma;
220 
221 	(*pos)++;
222 	if (vma && (vma != tail_vma) && vma->vm_next)
223 		return vma->vm_next;
224 	vma_stop(priv, vma);
225 	return (vma != tail_vma)? tail_vma: NULL;
226 }
227 
228 static void m_stop(struct seq_file *m, void *v)
229 {
230 	struct proc_maps_private *priv = m->private;
231 	struct vm_area_struct *vma = v;
232 
233 	if (!IS_ERR(vma))
234 		vma_stop(priv, vma);
235 	if (priv->task)
236 		put_task_struct(priv->task);
237 }
238 
239 static int do_maps_open(struct inode *inode, struct file *file,
240 			const struct seq_operations *ops)
241 {
242 	struct proc_maps_private *priv;
243 	int ret = -ENOMEM;
244 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
245 	if (priv) {
246 		priv->pid = proc_pid(inode);
247 		ret = seq_open(file, ops);
248 		if (!ret) {
249 			struct seq_file *m = file->private_data;
250 			m->private = priv;
251 		} else {
252 			kfree(priv);
253 		}
254 	}
255 	return ret;
256 }
257 
258 static void
259 show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
260 {
261 	struct mm_struct *mm = vma->vm_mm;
262 	struct file *file = vma->vm_file;
263 	struct proc_maps_private *priv = m->private;
264 	struct task_struct *task = priv->task;
265 	vm_flags_t flags = vma->vm_flags;
266 	unsigned long ino = 0;
267 	unsigned long long pgoff = 0;
268 	unsigned long start, end;
269 	dev_t dev = 0;
270 	int len;
271 	const char *name = NULL;
272 
273 	if (file) {
274 		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
275 		dev = inode->i_sb->s_dev;
276 		ino = inode->i_ino;
277 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
278 	}
279 
280 	/* We don't show the stack guard page in /proc/maps */
281 	start = vma->vm_start;
282 	if (stack_guard_page_start(vma, start))
283 		start += PAGE_SIZE;
284 	end = vma->vm_end;
285 	if (stack_guard_page_end(vma, end))
286 		end -= PAGE_SIZE;
287 
288 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
289 			start,
290 			end,
291 			flags & VM_READ ? 'r' : '-',
292 			flags & VM_WRITE ? 'w' : '-',
293 			flags & VM_EXEC ? 'x' : '-',
294 			flags & VM_MAYSHARE ? 's' : 'p',
295 			pgoff,
296 			MAJOR(dev), MINOR(dev), ino, &len);
297 
298 	/*
299 	 * Print the dentry name for named mappings, and a
300 	 * special [heap] marker for the heap:
301 	 */
302 	if (file) {
303 		pad_len_spaces(m, len);
304 		seq_path(m, &file->f_path, "\n");
305 		goto done;
306 	}
307 
308 	name = arch_vma_name(vma);
309 	if (!name) {
310 		pid_t tid;
311 
312 		if (!mm) {
313 			name = "[vdso]";
314 			goto done;
315 		}
316 
317 		if (vma->vm_start <= mm->brk &&
318 		    vma->vm_end >= mm->start_brk) {
319 			name = "[heap]";
320 			goto done;
321 		}
322 
323 		tid = vm_is_stack(task, vma, is_pid);
324 
325 		if (tid != 0) {
326 			/*
327 			 * Thread stack in /proc/PID/task/TID/maps or
328 			 * the main process stack.
329 			 */
330 			if (!is_pid || (vma->vm_start <= mm->start_stack &&
331 			    vma->vm_end >= mm->start_stack)) {
332 				name = "[stack]";
333 			} else {
334 				/* Thread stack in /proc/PID/maps */
335 				pad_len_spaces(m, len);
336 				seq_printf(m, "[stack:%d]", tid);
337 			}
338 		}
339 	}
340 
341 done:
342 	if (name) {
343 		pad_len_spaces(m, len);
344 		seq_puts(m, name);
345 	}
346 	seq_putc(m, '\n');
347 }
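/*
 * A userspace sketch of parsing one line produced by show_map_vma()
 * (e.g. from /proc/self/maps); the fields mirror the seq_printf()
 * format string above. Illustration only:
 */
#include <stdio.h>

int main(void)
{
	char line[512], perms[5], path[256];
	unsigned long start, end, ino;
	unsigned long long pgoff;
	unsigned int major, minor;
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		path[0] = '\0';
		/* the pathname (or [heap]/[stack] marker) may be absent */
		if (sscanf(line, "%lx-%lx %4s %llx %x:%x %lu %255s",
			   &start, &end, perms, &pgoff,
			   &major, &minor, &ino, path) >= 7)
			printf("%lx-%lx %s %s\n", start, end, perms, path);
	}
	fclose(f);
	return 0;
}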
348 
349 static int show_map(struct seq_file *m, void *v, int is_pid)
350 {
351 	struct vm_area_struct *vma = v;
352 	struct proc_maps_private *priv = m->private;
353 	struct task_struct *task = priv->task;
354 
355 	show_map_vma(m, vma, is_pid);
356 
357 	if (m->count < m->size)  /* vma is copied successfully */
358 		m->version = (vma != get_gate_vma(task->mm))
359 			? vma->vm_start : 0;
360 	return 0;
361 }
362 
363 static int show_pid_map(struct seq_file *m, void *v)
364 {
365 	return show_map(m, v, 1);
366 }
367 
368 static int show_tid_map(struct seq_file *m, void *v)
369 {
370 	return show_map(m, v, 0);
371 }
372 
373 static const struct seq_operations proc_pid_maps_op = {
374 	.start	= m_start,
375 	.next	= m_next,
376 	.stop	= m_stop,
377 	.show	= show_pid_map
378 };
379 
380 static const struct seq_operations proc_tid_maps_op = {
381 	.start	= m_start,
382 	.next	= m_next,
383 	.stop	= m_stop,
384 	.show	= show_tid_map
385 };
386 
387 static int pid_maps_open(struct inode *inode, struct file *file)
388 {
389 	return do_maps_open(inode, file, &proc_pid_maps_op);
390 }
391 
392 static int tid_maps_open(struct inode *inode, struct file *file)
393 {
394 	return do_maps_open(inode, file, &proc_tid_maps_op);
395 }
396 
397 const struct file_operations proc_pid_maps_operations = {
398 	.open		= pid_maps_open,
399 	.read		= seq_read,
400 	.llseek		= seq_lseek,
401 	.release	= seq_release_private,
402 };
403 
404 const struct file_operations proc_tid_maps_operations = {
405 	.open		= tid_maps_open,
406 	.read		= seq_read,
407 	.llseek		= seq_lseek,
408 	.release	= seq_release_private,
409 };
410 
411 /*
412  * Proportional Set Size (PSS): my share of RSS.
413  *
414  * PSS of a process is the count of pages it has in memory, where each
415  * page is divided by the number of processes sharing it.  So if a
416  * process has 1000 pages all to itself, and 1000 shared with one other
417  * process, its PSS will be 1500.
418  *
419  * To keep accumulated division errors low, we adopt a 64-bit
420  * fixed-point pss counter: (pss >> PSS_SHIFT) is then the real
421  * byte count.
422  *
423  * A shift of 12 before division means (assuming 4K page size):
424  * 	- 1M pages each shared by 3 users add up to 8KB of error;
425  * 	- supports mapcount up to 2^24, or 16M;
426  * 	- supports PSS up to 2^52 bytes, or 4PB.
427  */
428 #define PSS_SHIFT 12
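/*
 * A worked example of the fixed-point arithmetic (illustration only):
 * a 4K page shared by three processes contributes (4096 << 12) / 3 to
 * pss, and each such division truncates away less than one 2^-12-byte
 * unit; a private page contributes the full 4096 << 12.
 */
#include <stdio.h>

#define EXAMPLE_PSS_SHIFT 12	/* mirrors PSS_SHIFT above */

int main(void)
{
	unsigned long long pss = 0;

	pss += (4096ULL << EXAMPLE_PSS_SHIFT) / 3;	/* shared by 3 */
	pss += 4096ULL << EXAMPLE_PSS_SHIFT;		/* private */
	printf("pss: %llu bytes\n", pss >> EXAMPLE_PSS_SHIFT);	/* 5461 */
	return 0;
}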
429 
430 #ifdef CONFIG_PROC_PAGE_MONITOR
431 struct mem_size_stats {
432 	struct vm_area_struct *vma;
433 	unsigned long resident;
434 	unsigned long shared_clean;
435 	unsigned long shared_dirty;
436 	unsigned long private_clean;
437 	unsigned long private_dirty;
438 	unsigned long referenced;
439 	unsigned long anonymous;
440 	unsigned long anonymous_thp;
441 	unsigned long swap;
442 	unsigned long nonlinear;
443 	u64 pss;
444 };
445 
446 
447 static void smaps_pte_entry(pte_t ptent, unsigned long addr,
448 		unsigned long ptent_size, struct mm_walk *walk)
449 {
450 	struct mem_size_stats *mss = walk->private;
451 	struct vm_area_struct *vma = mss->vma;
452 	pgoff_t pgoff = linear_page_index(vma, addr);
453 	struct page *page = NULL;
454 	int mapcount;
455 
456 	if (pte_present(ptent)) {
457 		page = vm_normal_page(vma, addr, ptent);
458 	} else if (is_swap_pte(ptent)) {
459 		swp_entry_t swpent = pte_to_swp_entry(ptent);
460 
461 		if (!non_swap_entry(swpent))
462 			mss->swap += ptent_size;
463 		else if (is_migration_entry(swpent))
464 			page = migration_entry_to_page(swpent);
465 	} else if (pte_file(ptent)) {
466 		if (pte_to_pgoff(ptent) != pgoff)
467 			mss->nonlinear += ptent_size;
468 	}
469 
470 	if (!page)
471 		return;
472 
473 	if (PageAnon(page))
474 		mss->anonymous += ptent_size;
475 
476 	if (page->index != pgoff)
477 		mss->nonlinear += ptent_size;
478 
479 	mss->resident += ptent_size;
480 	/* Accumulate the size of the pages that have been accessed. */
481 	if (pte_young(ptent) || PageReferenced(page))
482 		mss->referenced += ptent_size;
483 	mapcount = page_mapcount(page);
484 	if (mapcount >= 2) {
485 		if (pte_dirty(ptent) || PageDirty(page))
486 			mss->shared_dirty += ptent_size;
487 		else
488 			mss->shared_clean += ptent_size;
489 		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
490 	} else {
491 		if (pte_dirty(ptent) || PageDirty(page))
492 			mss->private_dirty += ptent_size;
493 		else
494 			mss->private_clean += ptent_size;
495 		mss->pss += (ptent_size << PSS_SHIFT);
496 	}
497 }
498 
499 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
500 			   struct mm_walk *walk)
501 {
502 	struct mem_size_stats *mss = walk->private;
503 	struct vm_area_struct *vma = mss->vma;
504 	pte_t *pte;
505 	spinlock_t *ptl;
506 
507 	if (pmd_trans_huge_lock(pmd, vma) == 1) {
508 		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
509 		spin_unlock(&walk->mm->page_table_lock);
510 		mss->anonymous_thp += HPAGE_PMD_SIZE;
511 		return 0;
512 	}
513 
514 	if (pmd_trans_unstable(pmd))
515 		return 0;
516 	/*
517 	 * The mmap_sem held all the way back in m_start() is what
518 	 * keeps khugepaged out of here and prevents it from
519 	 * collapsing things under us.
520 	 */
521 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
522 	for (; addr != end; pte++, addr += PAGE_SIZE)
523 		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
524 	pte_unmap_unlock(pte - 1, ptl);
525 	cond_resched();
526 	return 0;
527 }
528 
529 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
530 {
531 	/*
532 	 * Don't forget to update Documentation/ on changes.
533 	 */
534 	static const char mnemonics[BITS_PER_LONG][2] = {
535 		/*
536 		 * In case we meet a flag we don't know about.
537 		 */
538 		[0 ... (BITS_PER_LONG-1)] = "??",
539 
540 		[ilog2(VM_READ)]	= "rd",
541 		[ilog2(VM_WRITE)]	= "wr",
542 		[ilog2(VM_EXEC)]	= "ex",
543 		[ilog2(VM_SHARED)]	= "sh",
544 		[ilog2(VM_MAYREAD)]	= "mr",
545 		[ilog2(VM_MAYWRITE)]	= "mw",
546 		[ilog2(VM_MAYEXEC)]	= "me",
547 		[ilog2(VM_MAYSHARE)]	= "ms",
548 		[ilog2(VM_GROWSDOWN)]	= "gd",
549 		[ilog2(VM_PFNMAP)]	= "pf",
550 		[ilog2(VM_DENYWRITE)]	= "dw",
551 		[ilog2(VM_LOCKED)]	= "lo",
552 		[ilog2(VM_IO)]		= "io",
553 		[ilog2(VM_SEQ_READ)]	= "sr",
554 		[ilog2(VM_RAND_READ)]	= "rr",
555 		[ilog2(VM_DONTCOPY)]	= "dc",
556 		[ilog2(VM_DONTEXPAND)]	= "de",
557 		[ilog2(VM_ACCOUNT)]	= "ac",
558 		[ilog2(VM_NORESERVE)]	= "nr",
559 		[ilog2(VM_HUGETLB)]	= "ht",
560 		[ilog2(VM_NONLINEAR)]	= "nl",
561 		[ilog2(VM_ARCH_1)]	= "ar",
562 		[ilog2(VM_DONTDUMP)]	= "dd",
563 		[ilog2(VM_MIXEDMAP)]	= "mm",
564 		[ilog2(VM_HUGEPAGE)]	= "hg",
565 		[ilog2(VM_NOHUGEPAGE)]	= "nh",
566 		[ilog2(VM_MERGEABLE)]	= "mg",
567 	};
568 	size_t i;
569 
570 	seq_puts(m, "VmFlags: ");
571 	for (i = 0; i < BITS_PER_LONG; i++) {
572 		if (vma->vm_flags & (1UL << i)) {
573 			seq_printf(m, "%c%c ",
574 				   mnemonics[i][0], mnemonics[i][1]);
575 		}
576 	}
577 	seq_putc(m, '\n');
578 }
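/*
 * A minimal userspace sketch (illustration only) of checking the VmFlags
 * line emitted above, here looking for the "lo" (VM_LOCKED) mnemonic;
 * each two-letter mnemonic is followed by a space:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/self/smaps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "VmFlags:", 8) && strstr(line, " lo "))
			puts("found a VM_LOCKED mapping");
	fclose(f);
	return 0;
}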
579 
580 static int show_smap(struct seq_file *m, void *v, int is_pid)
581 {
582 	struct proc_maps_private *priv = m->private;
583 	struct task_struct *task = priv->task;
584 	struct vm_area_struct *vma = v;
585 	struct mem_size_stats mss;
586 	struct mm_walk smaps_walk = {
587 		.pmd_entry = smaps_pte_range,
588 		.mm = vma->vm_mm,
589 		.private = &mss,
590 	};
591 
592 	memset(&mss, 0, sizeof mss);
593 	mss.vma = vma;
594 	/* mmap_sem is held in m_start */
595 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
596 		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
597 
598 	show_map_vma(m, vma, is_pid);
599 
600 	seq_printf(m,
601 		   "Size:           %8lu kB\n"
602 		   "Rss:            %8lu kB\n"
603 		   "Pss:            %8lu kB\n"
604 		   "Shared_Clean:   %8lu kB\n"
605 		   "Shared_Dirty:   %8lu kB\n"
606 		   "Private_Clean:  %8lu kB\n"
607 		   "Private_Dirty:  %8lu kB\n"
608 		   "Referenced:     %8lu kB\n"
609 		   "Anonymous:      %8lu kB\n"
610 		   "AnonHugePages:  %8lu kB\n"
611 		   "Swap:           %8lu kB\n"
612 		   "KernelPageSize: %8lu kB\n"
613 		   "MMUPageSize:    %8lu kB\n"
614 		   "Locked:         %8lu kB\n",
615 		   (vma->vm_end - vma->vm_start) >> 10,
616 		   mss.resident >> 10,
617 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
618 		   mss.shared_clean  >> 10,
619 		   mss.shared_dirty  >> 10,
620 		   mss.private_clean >> 10,
621 		   mss.private_dirty >> 10,
622 		   mss.referenced >> 10,
623 		   mss.anonymous >> 10,
624 		   mss.anonymous_thp >> 10,
625 		   mss.swap >> 10,
626 		   vma_kernel_pagesize(vma) >> 10,
627 		   vma_mmu_pagesize(vma) >> 10,
628 		   (vma->vm_flags & VM_LOCKED) ?
629 			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
630 
631 	if (vma->vm_flags & VM_NONLINEAR)
632 		seq_printf(m, "Nonlinear:      %8lu kB\n",
633 				mss.nonlinear >> 10);
634 
635 	show_smap_vma_flags(m, vma);
636 
637 	if (m->count < m->size)  /* vma is copied successfully */
638 		m->version = (vma != get_gate_vma(task->mm))
639 			? vma->vm_start : 0;
640 	return 0;
641 }
642 
643 static int show_pid_smap(struct seq_file *m, void *v)
644 {
645 	return show_smap(m, v, 1);
646 }
647 
648 static int show_tid_smap(struct seq_file *m, void *v)
649 {
650 	return show_smap(m, v, 0);
651 }
652 
653 static const struct seq_operations proc_pid_smaps_op = {
654 	.start	= m_start,
655 	.next	= m_next,
656 	.stop	= m_stop,
657 	.show	= show_pid_smap
658 };
659 
660 static const struct seq_operations proc_tid_smaps_op = {
661 	.start	= m_start,
662 	.next	= m_next,
663 	.stop	= m_stop,
664 	.show	= show_tid_smap
665 };
666 
667 static int pid_smaps_open(struct inode *inode, struct file *file)
668 {
669 	return do_maps_open(inode, file, &proc_pid_smaps_op);
670 }
671 
672 static int tid_smaps_open(struct inode *inode, struct file *file)
673 {
674 	return do_maps_open(inode, file, &proc_tid_smaps_op);
675 }
676 
677 const struct file_operations proc_pid_smaps_operations = {
678 	.open		= pid_smaps_open,
679 	.read		= seq_read,
680 	.llseek		= seq_lseek,
681 	.release	= seq_release_private,
682 };
683 
684 const struct file_operations proc_tid_smaps_operations = {
685 	.open		= tid_smaps_open,
686 	.read		= seq_read,
687 	.llseek		= seq_lseek,
688 	.release	= seq_release_private,
689 };
690 
691 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
692 				unsigned long end, struct mm_walk *walk)
693 {
694 	struct vm_area_struct *vma = walk->private;
695 	pte_t *pte, ptent;
696 	spinlock_t *ptl;
697 	struct page *page;
698 
699 	split_huge_page_pmd(vma, addr, pmd);
700 	if (pmd_trans_unstable(pmd))
701 		return 0;
702 
703 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
704 	for (; addr != end; pte++, addr += PAGE_SIZE) {
705 		ptent = *pte;
706 		if (!pte_present(ptent))
707 			continue;
708 
709 		page = vm_normal_page(vma, addr, ptent);
710 		if (!page)
711 			continue;
712 
713 		/* Clear accessed and referenced bits. */
714 		ptep_test_and_clear_young(vma, addr, pte);
715 		ClearPageReferenced(page);
716 	}
717 	pte_unmap_unlock(pte - 1, ptl);
718 	cond_resched();
719 	return 0;
720 }
721 
722 #define CLEAR_REFS_ALL 1
723 #define CLEAR_REFS_ANON 2
724 #define CLEAR_REFS_MAPPED 3
725 
726 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
727 				size_t count, loff_t *ppos)
728 {
729 	struct task_struct *task;
730 	char buffer[PROC_NUMBUF];
731 	struct mm_struct *mm;
732 	struct vm_area_struct *vma;
733 	int type;
734 	int rv;
735 
736 	memset(buffer, 0, sizeof(buffer));
737 	if (count > sizeof(buffer) - 1)
738 		count = sizeof(buffer) - 1;
739 	if (copy_from_user(buffer, buf, count))
740 		return -EFAULT;
741 	rv = kstrtoint(strstrip(buffer), 10, &type);
742 	if (rv < 0)
743 		return rv;
744 	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
745 		return -EINVAL;
746 	task = get_proc_task(file->f_path.dentry->d_inode);
747 	if (!task)
748 		return -ESRCH;
749 	mm = get_task_mm(task);
750 	if (mm) {
751 		struct mm_walk clear_refs_walk = {
752 			.pmd_entry = clear_refs_pte_range,
753 			.mm = mm,
754 		};
755 		down_read(&mm->mmap_sem);
756 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
757 			clear_refs_walk.private = vma;
758 			if (is_vm_hugetlb_page(vma))
759 				continue;
760 			/*
761 			 * Writing 1 to /proc/pid/clear_refs affects all pages.
762 			 *
763 			 * Writing 2 to /proc/pid/clear_refs only affects
764 			 * Anonymous pages.
765 			 *
766 			 * Writing 3 to /proc/pid/clear_refs only affects file
767 			 * mapped pages.
768 			 */
769 			if (type == CLEAR_REFS_ANON && vma->vm_file)
770 				continue;
771 			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
772 				continue;
773 			walk_page_range(vma->vm_start, vma->vm_end,
774 					&clear_refs_walk);
775 		}
776 		flush_tlb_mm(mm);
777 		up_read(&mm->mmap_sem);
778 		mmput(mm);
779 	}
780 	put_task_struct(task);
781 
782 	return count;
783 }
784 
785 const struct file_operations proc_clear_refs_operations = {
786 	.write		= clear_refs_write,
787 	.llseek		= noop_llseek,
788 };
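/*
 * A sketch of the intended usage (illustration only): write "1" to clear
 * the referenced bits, let the workload run, then sum the Referenced:
 * lines of smaps to estimate the recently touched working set.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char line[256];
	unsigned long kb, total = 0;
	FILE *f = fopen("/proc/self/clear_refs", "w");

	if (!f)
		return 1;
	fputs("1", f);			/* CLEAR_REFS_ALL */
	fclose(f);

	sleep(1);			/* let the workload touch pages */

	f = fopen("/proc/self/smaps", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "Referenced: %lu kB", &kb) == 1)
			total += kb;
	fclose(f);
	printf("referenced since clear: %lu kB\n", total);
	return 0;
}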
789 
790 typedef struct {
791 	u64 pme;
792 } pagemap_entry_t;
793 
794 struct pagemapread {
795 	int pos, len;
796 	pagemap_entry_t *buffer;
797 };
798 
799 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
800 #define PAGEMAP_WALK_MASK	(PMD_MASK)
801 
802 #define PM_ENTRY_BYTES      sizeof(u64)
803 #define PM_STATUS_BITS      3
804 #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
805 #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
806 #define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
807 #define PM_PSHIFT_BITS      6
808 #define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
809 #define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
810 #define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
811 #define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
812 #define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
813 
814 #define PM_PRESENT          PM_STATUS(4LL)
815 #define PM_SWAP             PM_STATUS(2LL)
816 #define PM_FILE             PM_STATUS(1LL)
817 #define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
818 #define PM_END_OF_BUFFER    1
819 
820 static inline pagemap_entry_t make_pme(u64 val)
821 {
822 	return (pagemap_entry_t) { .pme = val };
823 }
824 
825 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
826 			  struct pagemapread *pm)
827 {
828 	pm->buffer[pm->pos++] = *pme;
829 	if (pm->pos >= pm->len)
830 		return PM_END_OF_BUFFER;
831 	return 0;
832 }
833 
834 static int pagemap_pte_hole(unsigned long start, unsigned long end,
835 				struct mm_walk *walk)
836 {
837 	struct pagemapread *pm = walk->private;
838 	unsigned long addr;
839 	int err = 0;
840 	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);
841 
842 	for (addr = start; addr < end; addr += PAGE_SIZE) {
843 		err = add_to_pagemap(addr, &pme, pm);
844 		if (err)
845 			break;
846 	}
847 	return err;
848 }
849 
850 static void pte_to_pagemap_entry(pagemap_entry_t *pme,
851 		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
852 {
853 	u64 frame, flags;
854 	struct page *page = NULL;
855 
856 	if (pte_present(pte)) {
857 		frame = pte_pfn(pte);
858 		flags = PM_PRESENT;
859 		page = vm_normal_page(vma, addr, pte);
860 	} else if (is_swap_pte(pte)) {
861 		swp_entry_t entry = pte_to_swp_entry(pte);
862 
863 		frame = swp_type(entry) |
864 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
865 		flags = PM_SWAP;
866 		if (is_migration_entry(entry))
867 			page = migration_entry_to_page(entry);
868 	} else {
869 		*pme = make_pme(PM_NOT_PRESENT);
870 		return;
871 	}
872 
873 	if (page && !PageAnon(page))
874 		flags |= PM_FILE;
875 
876 	*pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
877 }
878 
879 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
880 static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
881 					pmd_t pmd, int offset)
882 {
883 	/*
884 	 * Currently a pmd for thp is always present because thp cannot be
885 	 * swapped out, migrated, or HWPOISONed (it is split in such cases instead).
886 	 * This if-check just prepares for a future implementation.
887 	 */
888 	if (pmd_present(pmd))
889 		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
890 				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
891 	else
892 		*pme = make_pme(PM_NOT_PRESENT);
893 }
894 #else
895 static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
896 						pmd_t pmd, int offset)
897 {
898 }
899 #endif
900 
901 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
902 			     struct mm_walk *walk)
903 {
904 	struct vm_area_struct *vma;
905 	struct pagemapread *pm = walk->private;
906 	pte_t *pte;
907 	int err = 0;
908 	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);
909 
910 	/* find the first VMA at or above 'addr' */
911 	vma = find_vma(walk->mm, addr);
912 	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
913 		for (; addr != end; addr += PAGE_SIZE) {
914 			unsigned long offset;
915 
916 			offset = (addr & ~PAGEMAP_WALK_MASK) >>
917 					PAGE_SHIFT;
918 			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
919 			err = add_to_pagemap(addr, &pme, pm);
920 			if (err)
921 				break;
922 		}
923 		spin_unlock(&walk->mm->page_table_lock);
924 		return err;
925 	}
926 
927 	if (pmd_trans_unstable(pmd))
928 		return 0;
929 	for (; addr != end; addr += PAGE_SIZE) {
930 
931 		/* check to see if we've left 'vma' behind
932 		 * and need a new, higher one */
933 		if (vma && (addr >= vma->vm_end)) {
934 			vma = find_vma(walk->mm, addr);
935 			pme = make_pme(PM_NOT_PRESENT);
936 		}
937 
938 		/* check that 'vma' actually covers this address,
939 		 * and that it isn't a huge page vma */
940 		if (vma && (vma->vm_start <= addr) &&
941 		    !is_vm_hugetlb_page(vma)) {
942 			pte = pte_offset_map(pmd, addr);
943 			pte_to_pagemap_entry(&pme, vma, addr, *pte);
944 			/* unmap before userspace copy */
945 			pte_unmap(pte);
946 		}
947 		err = add_to_pagemap(addr, &pme, pm);
948 		if (err)
949 			return err;
950 	}
951 
952 	cond_resched();
953 
954 	return err;
955 }
956 
957 #ifdef CONFIG_HUGETLB_PAGE
958 static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
959 					pte_t pte, int offset)
960 {
961 	if (pte_present(pte))
962 		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
963 				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
964 	else
965 		*pme = make_pme(PM_NOT_PRESENT);
966 }
967 
968 /* This function walks within one hugetlb entry in a single call */
969 static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
970 				 unsigned long addr, unsigned long end,
971 				 struct mm_walk *walk)
972 {
973 	struct pagemapread *pm = walk->private;
974 	int err = 0;
975 	pagemap_entry_t pme;
976 
977 	for (; addr != end; addr += PAGE_SIZE) {
978 		int offset = (addr & ~hmask) >> PAGE_SHIFT;
979 		huge_pte_to_pagemap_entry(&pme, *pte, offset);
980 		err = add_to_pagemap(addr, &pme, pm);
981 		if (err)
982 			return err;
983 	}
984 
985 	cond_resched();
986 
987 	return err;
988 }
989 #endif /* CONFIG_HUGETLB_PAGE */
990 
991 /*
992  * /proc/pid/pagemap - an array mapping virtual pages to pfns
993  *
994  * For each page in the address space, this file contains one 64-bit entry
995  * consisting of the following:
996  *
997  * Bits 0-54  page frame number (PFN) if present
998  * Bits 0-4   swap type if swapped
999  * Bits 5-54  swap offset if swapped
1000  * Bits 55-60 page shift (page size = 1<<page shift)
1001  * Bit  61    page is file-page or shared-anon
1002  * Bit  62    page swapped
1003  * Bit  63    page present
1004  *
1005  * If the page is not present but in swap, then the PFN contains an
1006  * encoding of the swap file number and the page's offset into the
1007  * swap. Unmapped pages return a null PFN. This allows determining
1008  * precisely which pages are mapped (or in swap) and comparing mapped
1009  * pages between processes.
1010  *
1011  * Efficient users of this interface will use /proc/pid/maps to
1012  * determine which areas of memory are actually mapped and llseek to
1013  * skip over unmapped regions.
1014  */
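/*
 * A userspace sketch decoding one entry per the bit layout documented
 * above (illustration only; assumes this kernel's 64-bit entry format):
 */
#include <stdio.h>
#include <stdint.h>

static void decode_pagemap_entry(uint64_t e)
{
	if (e & (1ULL << 63)) {				/* present */
		uint64_t pfn = e & ((1ULL << 55) - 1);	/* bits 0-54 */
		printf("present, pfn %llu, file-backed: %d\n",
		       (unsigned long long)pfn, !!(e & (1ULL << 61)));
	} else if (e & (1ULL << 62)) {			/* swapped */
		printf("swap type %llu, offset %llu\n",
		       (unsigned long long)(e & 0x1f),		   /* bits 0-4 */
		       (unsigned long long)((e >> 5) & ((1ULL << 50) - 1)));
	} else {
		printf("not present\n");
	}
}

int main(void)
{
	uint64_t entry;
	FILE *f = fopen("/proc/self/pagemap", "rb");

	if (!f)
		return 1;
	/* look up the entry for the page holding 'entry' itself */
	fseek(f, (long)(((uintptr_t)&entry / 4096) * 8), SEEK_SET);
	if (fread(&entry, sizeof(entry), 1, f) == 1)
		decode_pagemap_entry(entry);
	fclose(f);
	return 0;
}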
1015 static ssize_t pagemap_read(struct file *file, char __user *buf,
1016 			    size_t count, loff_t *ppos)
1017 {
1018 	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
1019 	struct mm_struct *mm;
1020 	struct pagemapread pm;
1021 	int ret = -ESRCH;
1022 	struct mm_walk pagemap_walk = {};
1023 	unsigned long src;
1024 	unsigned long svpfn;
1025 	unsigned long start_vaddr;
1026 	unsigned long end_vaddr;
1027 	int copied = 0;
1028 
1029 	if (!task)
1030 		goto out;
1031 
1032 	ret = -EINVAL;
1033 	/* file position must be aligned */
1034 	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1035 		goto out_task;
1036 
1037 	ret = 0;
1038 	if (!count)
1039 		goto out_task;
1040 
1041 	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1042 	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
1043 	ret = -ENOMEM;
1044 	if (!pm.buffer)
1045 		goto out_task;
1046 
1047 	mm = mm_access(task, PTRACE_MODE_READ);
1048 	ret = PTR_ERR(mm);
1049 	if (!mm || IS_ERR(mm))
1050 		goto out_free;
1051 
1052 	pagemap_walk.pmd_entry = pagemap_pte_range;
1053 	pagemap_walk.pte_hole = pagemap_pte_hole;
1054 #ifdef CONFIG_HUGETLB_PAGE
1055 	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1056 #endif
1057 	pagemap_walk.mm = mm;
1058 	pagemap_walk.private = &pm;
1059 
1060 	src = *ppos;
1061 	svpfn = src / PM_ENTRY_BYTES;
1062 	start_vaddr = svpfn << PAGE_SHIFT;
1063 	end_vaddr = TASK_SIZE_OF(task);
1064 
1065 	/* watch out for wraparound */
1066 	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
1067 		start_vaddr = end_vaddr;
1068 
1069 	/*
1070 	 * The odds are that this will stop walking way
1071 	 * before end_vaddr, because the length of the
1072 	 * user buffer is tracked in "pm", and the walk
1073 	 * will stop when we hit the end of the buffer.
1074 	 */
1075 	ret = 0;
1076 	while (count && (start_vaddr < end_vaddr)) {
1077 		int len;
1078 		unsigned long end;
1079 
1080 		pm.pos = 0;
1081 		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1082 		/* overflow? */
1083 		if (end < start_vaddr || end > end_vaddr)
1084 			end = end_vaddr;
1085 		down_read(&mm->mmap_sem);
1086 		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1087 		up_read(&mm->mmap_sem);
1088 		start_vaddr = end;
1089 
1090 		len = min(count, PM_ENTRY_BYTES * pm.pos);
1091 		if (copy_to_user(buf, pm.buffer, len)) {
1092 			ret = -EFAULT;
1093 			goto out_mm;
1094 		}
1095 		copied += len;
1096 		buf += len;
1097 		count -= len;
1098 	}
1099 	*ppos += copied;
1100 	if (!ret || ret == PM_END_OF_BUFFER)
1101 		ret = copied;
1102 
1103 out_mm:
1104 	mmput(mm);
1105 out_free:
1106 	kfree(pm.buffer);
1107 out_task:
1108 	put_task_struct(task);
1109 out:
1110 	return ret;
1111 }
1112 
1113 const struct file_operations proc_pagemap_operations = {
1114 	.llseek		= mem_lseek, /* borrow this */
1115 	.read		= pagemap_read,
1116 };
1117 #endif /* CONFIG_PROC_PAGE_MONITOR */
1118 
1119 #ifdef CONFIG_NUMA
1120 
1121 struct numa_maps {
1122 	struct vm_area_struct *vma;
1123 	unsigned long pages;
1124 	unsigned long anon;
1125 	unsigned long active;
1126 	unsigned long writeback;
1127 	unsigned long mapcount_max;
1128 	unsigned long dirty;
1129 	unsigned long swapcache;
1130 	unsigned long node[MAX_NUMNODES];
1131 };
1132 
1133 struct numa_maps_private {
1134 	struct proc_maps_private proc_maps;
1135 	struct numa_maps md;
1136 };
1137 
1138 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1139 			unsigned long nr_pages)
1140 {
1141 	int count = page_mapcount(page);
1142 
1143 	md->pages += nr_pages;
1144 	if (pte_dirty || PageDirty(page))
1145 		md->dirty += nr_pages;
1146 
1147 	if (PageSwapCache(page))
1148 		md->swapcache += nr_pages;
1149 
1150 	if (PageActive(page) || PageUnevictable(page))
1151 		md->active += nr_pages;
1152 
1153 	if (PageWriteback(page))
1154 		md->writeback += nr_pages;
1155 
1156 	if (PageAnon(page))
1157 		md->anon += nr_pages;
1158 
1159 	if (count > md->mapcount_max)
1160 		md->mapcount_max = count;
1161 
1162 	md->node[page_to_nid(page)] += nr_pages;
1163 }
1164 
1165 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1166 		unsigned long addr)
1167 {
1168 	struct page *page;
1169 	int nid;
1170 
1171 	if (!pte_present(pte))
1172 		return NULL;
1173 
1174 	page = vm_normal_page(vma, addr, pte);
1175 	if (!page)
1176 		return NULL;
1177 
1178 	if (PageReserved(page))
1179 		return NULL;
1180 
1181 	nid = page_to_nid(page);
1182 	if (!node_isset(nid, node_states[N_MEMORY]))
1183 		return NULL;
1184 
1185 	return page;
1186 }
1187 
1188 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1189 		unsigned long end, struct mm_walk *walk)
1190 {
1191 	struct numa_maps *md;
1192 	spinlock_t *ptl;
1193 	pte_t *orig_pte;
1194 	pte_t *pte;
1195 
1196 	md = walk->private;
1197 
1198 	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
1199 		pte_t huge_pte = *(pte_t *)pmd;
1200 		struct page *page;
1201 
1202 		page = can_gather_numa_stats(huge_pte, md->vma, addr);
1203 		if (page)
1204 			gather_stats(page, md, pte_dirty(huge_pte),
1205 				     HPAGE_PMD_SIZE/PAGE_SIZE);
1206 		spin_unlock(&walk->mm->page_table_lock);
1207 		return 0;
1208 	}
1209 
1210 	if (pmd_trans_unstable(pmd))
1211 		return 0;
1212 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1213 	do {
1214 		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
1215 		if (!page)
1216 			continue;
1217 		gather_stats(page, md, pte_dirty(*pte), 1);
1218 
1219 	} while (pte++, addr += PAGE_SIZE, addr != end);
1220 	pte_unmap_unlock(orig_pte, ptl);
1221 	return 0;
1222 }
1223 #ifdef CONFIG_HUGETLB_PAGE
1224 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1225 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1226 {
1227 	struct numa_maps *md;
1228 	struct page *page;
1229 
1230 	if (pte_none(*pte))
1231 		return 0;
1232 
1233 	page = pte_page(*pte);
1234 	if (!page)
1235 		return 0;
1236 
1237 	md = walk->private;
1238 	gather_stats(page, md, pte_dirty(*pte), 1);
1239 	return 0;
1240 }
1241 
1242 #else
1243 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1244 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1245 {
1246 	return 0;
1247 }
1248 #endif
1249 
1250 /*
1251  * Display pages allocated per node and memory policy via /proc.
1252  */
1253 static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1254 {
1255 	struct numa_maps_private *numa_priv = m->private;
1256 	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1257 	struct vm_area_struct *vma = v;
1258 	struct numa_maps *md = &numa_priv->md;
1259 	struct file *file = vma->vm_file;
1260 	struct task_struct *task = proc_priv->task;
1261 	struct mm_struct *mm = vma->vm_mm;
1262 	struct mm_walk walk = {};
1263 	struct mempolicy *pol;
1264 	int n;
1265 	char buffer[50];
1266 
1267 	if (!mm)
1268 		return 0;
1269 
1270 	/* Ensure we start with an empty set of numa_maps statistics. */
1271 	memset(md, 0, sizeof(*md));
1272 
1273 	md->vma = vma;
1274 
1275 	walk.hugetlb_entry = gather_hugetlb_stats;
1276 	walk.pmd_entry = gather_pte_stats;
1277 	walk.private = md;
1278 	walk.mm = mm;
1279 
1280 	pol = get_vma_policy(task, vma, vma->vm_start);
1281 	mpol_to_str(buffer, sizeof(buffer), pol);
1282 	mpol_cond_put(pol);
1283 
1284 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1285 
1286 	if (file) {
1287 		seq_printf(m, " file=");
1288 		seq_path(m, &file->f_path, "\n\t= ");
1289 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1290 		seq_printf(m, " heap");
1291 	} else {
1292 		pid_t tid = vm_is_stack(task, vma, is_pid);
1293 		if (tid != 0) {
1294 			/*
1295 			 * Thread stack in /proc/PID/task/TID/maps or
1296 			 * the main process stack.
1297 			 */
1298 			if (!is_pid || (vma->vm_start <= mm->start_stack &&
1299 			    vma->vm_end >= mm->start_stack))
1300 				seq_printf(m, " stack");
1301 			else
1302 				seq_printf(m, " stack:%d", tid);
1303 		}
1304 	}
1305 
1306 	if (is_vm_hugetlb_page(vma))
1307 		seq_printf(m, " huge");
1308 
1309 	walk_page_range(vma->vm_start, vma->vm_end, &walk);
1310 
1311 	if (!md->pages)
1312 		goto out;
1313 
1314 	if (md->anon)
1315 		seq_printf(m, " anon=%lu", md->anon);
1316 
1317 	if (md->dirty)
1318 		seq_printf(m, " dirty=%lu", md->dirty);
1319 
1320 	if (md->pages != md->anon && md->pages != md->dirty)
1321 		seq_printf(m, " mapped=%lu", md->pages);
1322 
1323 	if (md->mapcount_max > 1)
1324 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1325 
1326 	if (md->swapcache)
1327 		seq_printf(m, " swapcache=%lu", md->swapcache);
1328 
1329 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1330 		seq_printf(m, " active=%lu", md->active);
1331 
1332 	if (md->writeback)
1333 		seq_printf(m, " writeback=%lu", md->writeback);
1334 
1335 	for_each_node_state(n, N_MEMORY)
1336 		if (md->node[n])
1337 			seq_printf(m, " N%d=%lu", n, md->node[n]);
1338 out:
1339 	seq_putc(m, '\n');
1340 
1341 	if (m->count < m->size)
1342 		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
1343 	return 0;
1344 }
1345 
1346 static int show_pid_numa_map(struct seq_file *m, void *v)
1347 {
1348 	return show_numa_map(m, v, 1);
1349 }
1350 
1351 static int show_tid_numa_map(struct seq_file *m, void *v)
1352 {
1353 	return show_numa_map(m, v, 0);
1354 }
1355 
1356 static const struct seq_operations proc_pid_numa_maps_op = {
1357 	.start  = m_start,
1358 	.next   = m_next,
1359 	.stop   = m_stop,
1360 	.show   = show_pid_numa_map,
1361 };
1362 
1363 static const struct seq_operations proc_tid_numa_maps_op = {
1364 	.start  = m_start,
1365 	.next   = m_next,
1366 	.stop   = m_stop,
1367 	.show   = show_tid_numa_map,
1368 };
1369 
1370 static int numa_maps_open(struct inode *inode, struct file *file,
1371 			  const struct seq_operations *ops)
1372 {
1373 	struct numa_maps_private *priv;
1374 	int ret = -ENOMEM;
1375 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1376 	if (priv) {
1377 		priv->proc_maps.pid = proc_pid(inode);
1378 		ret = seq_open(file, ops);
1379 		if (!ret) {
1380 			struct seq_file *m = file->private_data;
1381 			m->private = priv;
1382 		} else {
1383 			kfree(priv);
1384 		}
1385 	}
1386 	return ret;
1387 }
1388 
1389 static int pid_numa_maps_open(struct inode *inode, struct file *file)
1390 {
1391 	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
1392 }
1393 
1394 static int tid_numa_maps_open(struct inode *inode, struct file *file)
1395 {
1396 	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
1397 }
1398 
1399 const struct file_operations proc_pid_numa_maps_operations = {
1400 	.open		= pid_numa_maps_open,
1401 	.read		= seq_read,
1402 	.llseek		= seq_lseek,
1403 	.release	= seq_release_private,
1404 };
1405 
1406 const struct file_operations proc_tid_numa_maps_operations = {
1407 	.open		= tid_numa_maps_open,
1408 	.read		= seq_read,
1409 	.llseek		= seq_lseek,
1410 	.release	= seq_release_private,
1411 };
1412 #endif /* CONFIG_NUMA */
1413