/* linux/fs/proc/task_mmu.c (revision eb2bce7f5e7ac1ca6da434461217fadf3c688d2c) */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

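/*
 * Format the "Vm*" lines of /proc/<pid>/status into @buffer and return
 * the advanced buffer position.
 */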
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}

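/* Total mapped virtual address space, in bytes. */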
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

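/*
 * Fill in the page counts for /proc/<pid>/statm; returns the total
 * number of mapped pages.
 */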
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}

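/*
 * Back /proc/<pid>/exe: find the first executable, file-backed VMA and
 * return references to its vfsmount and dentry.
 */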
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = NULL;

	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_path.mnt);
		*dentry = dget(vma->vm_file->f_path.dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

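/* Pad the maps line to a fixed column before printing the name. */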
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

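/* Per-VMA byte totals accumulated by smaps_pte_range(). */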
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
};

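/* Carries the VMA, PTE-table callback and callback argument through the walk. */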
struct pmd_walker {
	struct vm_area_struct *vma;
	void *private;
	void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
		       unsigned long, void *);
};

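/*
 * Print one /proc/<pid>/maps line for the VMA at @v; if @mss is
 * non-NULL, also print the /proc/<pid>/smaps statistics below it.
 */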
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size:           %8lu kB\n"
			   "Rss:            %8lu kB\n"
			   "Shared_Clean:   %8lu kB\n"
			   "Shared_Dirty:   %8lu kB\n"
			   "Private_Clean:  %8lu kB\n"
			   "Private_Dirty:  %8lu kB\n"
			   "Referenced:     %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   mss->shared_clean  >> 10,
			   mss->shared_dirty  >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10,
			   mss->referenced >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}

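/*
 * Accumulate resident, referenced, shared and private page totals for
 * one PTE page into the mem_size_stats passed via @private, with the
 * PTE lock held.
 */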
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    void *private)
{
	struct mem_size_stats *mss = private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		if (page_mapcount(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

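/*
 * Clear the pte young bit and the PageReferenced flag of every present
 * page in one PTE page, with the PTE lock held.
 */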
static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				 unsigned long addr, unsigned long end,
				 void *private)
{
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

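/*
 * Intermediate levels of the walk: iterate the PMDs of a PUD entry and
 * the PUDs of a PGD entry, skipping entries that are none or bad.
 */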
static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
				  unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	for (pmd = pmd_offset(pud, addr); addr != end;
	     pmd++, addr = next) {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		walker->action(walker->vma, pmd, addr, next, walker->private);
	}
}

static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
				  unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	for (pud = pud_offset(pgd, addr); addr != end;
	     pud++, addr = next) {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		walk_pmd_range(walker, pud, addr, next);
	}
}

/*
 * walk_page_range - walk the page tables of a VMA with a callback
 * @vma: VMA to walk
 * @action: callback invoked for every bottom-level (PTE) page table
 * @private: private data passed to the callback function
 *
 * Recursively walk the page table for the memory area in a VMA, calling
 * a callback for every bottom-level (PTE) page table.
 */
static inline void walk_page_range(struct vm_area_struct *vma,
				   void (*action)(struct vm_area_struct *,
						  pmd_t *, unsigned long,
						  unsigned long, void *),
				   void *private)
{
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	struct pmd_walker walker = {
		.vma		= vma,
		.private	= private,
		.action		= action,
	};
	pgd_t *pgd;
	unsigned long next;

	for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
	     pgd++, addr = next) {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		walk_pud_range(&walker, pgd, addr, next);
	}
}

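/*
 * seq_file ->show() for /proc/<pid>/smaps: gather the per-VMA totals,
 * then print the map line followed by the statistics.
 */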
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma, smaps_pte_range, &mss);
	return show_map_internal(m, v, &mss);
}

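/*
 * Clear the referenced bits over all of @mm's VMAs and flush the TLB so
 * the cleared young bits take effect; backs /proc/<pid>/clear_refs.
 */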
void clear_refs_smap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
			walk_page_range(vma, clear_refs_pte_range, NULL);
	flush_tlb_mm(mm);
	up_read(&mm->mmap_sem);
}

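/*
 * seq_file iterator over the task's VMAs. m_start() takes mmap_sem and
 * positions on the requested VMA, using m->version (the last address
 * shown) as a resume hint; m_next() advances, appending the gate vma at
 * the end; m_stop() drops the lock and the mm and task references.
 */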
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per-syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit the
	 * mmap_cache most of the time. last_addr is zero at the
	 * beginning and also after lseek, and becomes -1 once the
	 * end of the vmas has been reached.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = get_task_mm(priv->task);
	if (!mm)
		return NULL;

	priv->tail_vma = tail_vma = get_gate_vma(priv->task);
	down_read(&mm->mmap_sem);

	/* Start with the last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check that the vma index is within range and do a
	 * sequential scan until we reach it.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

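/*
 * Drop mmap_sem and the mm reference, unless @vma is the gate vma: by
 * the time the gate vma is returned, m_start()/m_next() have already
 * released both.
 */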
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

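/* Advance to the next VMA; the gate vma, if present, is returned last. */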
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

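/*
 * Common open routine for maps/numa_maps/smaps: allocate the
 * proc_maps_private, record the target pid, and attach it to the
 * seq_file.
 */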
static int do_maps_open(struct inode *inode, struct file *file,
			struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};