xref: /linux/fs/proc/task_mmu.c (revision 858259cf7d1c443c836a2022b78cb281f0a9b95e)
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

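/*
 * Format this mm's memory-usage counters for /proc/<pid>/status.
 * Illustrative output (all values in kB, numbers made up):
 *
 *	VmPeak:	    6584 kB
 *	VmSize:	    6584 kB
 *	VmLck:	       0 kB
 *	VmHWM:	     304 kB
 *	VmRSS:	     304 kB
 *	VmData:	     156 kB
 *	VmStk:	      88 kB
 *	VmExe:	      68 kB
 *	VmLib:	    1412 kB
 *	VmPTE:	      20 kB
 */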
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}

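/*
 * Total mapped address space in bytes; this backs the vsize field of
 * /proc/<pid>/stat.
 */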
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

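/*
 * The counters behind /proc/<pid>/statm, all in pages: the return
 * value is the total mapping size, the out-parameters its shared,
 * text, data and resident components.
 */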
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}

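/*
 * Resolve /proc/<pid>/exe: scan the vma list for the first file-backed
 * mapping with VM_EXECUTABLE set (i.e. a mapping of the exec'd binary
 * itself) and hand back a reference to its dentry and vfsmount.
 */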
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = proc_task(inode);
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

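/*
 * Pad the maps line so the file name (or [heap]/[stack] marker) starts
 * at a fixed column.  The column width scales with pointer size:
 * 25 + sizeof(void *) * 6 gives column 49 on 32-bit and 73 on 64-bit.
 */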
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

struct mem_size_stats
{
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
};

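/*
 * Emit one /proc/<pid>/maps line and, when an mss is passed in (the
 * smaps case), the per-vma size breakdown beneath it.  An illustrative
 * maps line (addresses and inode number made up):
 *
 *	08048000-0804c000 r-xp 00000000 03:01 12345      /bin/cat
 */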
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = file->f_dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
	} else if (mm) {
		if (vma->vm_start <= mm->start_brk &&
					vma->vm_end >= mm->brk) {
			pad_len_spaces(m, len);
			seq_puts(m, "[heap]");
		} else if (vma->vm_start <= mm->start_stack &&
					vma->vm_end >= mm->start_stack) {
			pad_len_spaces(m, len);
			seq_puts(m, "[stack]");
		}
	} else {
		pad_len_spaces(m, len);
		seq_puts(m, "[vdso]");
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size:          %8lu kB\n"
			   "Rss:           %8lu kB\n"
			   "Shared_Clean:  %8lu kB\n"
			   "Shared_Dirty:  %8lu kB\n"
			   "Private_Clean: %8lu kB\n"
			   "Private_Dirty: %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   mss->shared_clean  >> 10,
			   mss->shared_dirty  >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}

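/*
 * The smaps counters are gathered by an open-coded four-level page
 * table walk: pgd -> pud -> pmd -> pte, with the leaf level below
 * accumulating into mem_size_stats.  A present page is counted as
 * shared when its raw page_count() is at least 2, private otherwise;
 * note that this is an approximation, since page_count() also includes
 * transient references that are not mappings.
 */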
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pte_t *pte, ptent;
	spinlock_t *ptl;
	unsigned long pfn;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;
		pfn = pte_pfn(ptent);
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (page_count(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		smaps_pte_range(vma, pmd, addr, next, mss);
	} while (pmd++, addr = next, addr != end);
}

static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		smaps_pmd_range(vma, pud, addr, next, mss);
	} while (pud++, addr = next, addr != end);
}

static inline void smaps_pgd_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		smaps_pud_range(vma, pgd, addr, next, mss);
	} while (pgd++, addr = next, addr != end);
}

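/*
 * /proc/<pid>/smaps: walk this vma's page tables to fill in
 * mem_size_stats, then print the maps line plus the Size/Rss and
 * Shared/Private clean/dirty breakdown.
 */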
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	if (vma->vm_mm)
		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
	return show_map_internal(m, v, &mss);
}

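/*
 * seq_file iterator over the task's vmas.  Per the seq_file contract,
 * m_start takes mmap_sem and a reference on the mm, and m_stop (or the
 * end-of-walk path in m_start/m_next) releases both.  The gate vma, if
 * the architecture has one, is presented as one extra entry after the
 * last real vma.
 */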
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct task_struct *task = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma;
	loff_t l = *pos;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	mm = get_task_mm(task);
	if (!mm)
		return NULL;

	tail_vma = get_gate_vma(task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check that the vma index is within range, then do a
	 * sequential scan to the *pos'th vma.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL) ? 0 : -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	if (vma && vma != get_gate_vma(task)) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = get_gate_vma(task);

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	m_stop(m, v);
	return (vma != tail_vma) ? tail_vma : NULL;
}

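/*
 * Wire the iterator into the seq_file core: maps and smaps share
 * m_start/m_next/m_stop and differ only in their ->show method.
 */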
struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long mapped;
	unsigned long mapcount_max;
	unsigned long node[MAX_NUMNODES];
};

/*
 * Calculate numa node maps for a vma
 */
static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
{
	struct page *page;
	unsigned long vaddr;
	struct mm_struct *mm = vma->vm_mm;
	int i;
	struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);

	if (!md)
		return NULL;
	md->pages = 0;
	md->anon = 0;
	md->mapped = 0;
	md->mapcount_max = 0;
	for_each_node(i)
		md->node[i] = 0;

	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
		page = follow_page(mm, vaddr, 0);
		if (page) {
			int count = page_mapcount(page);

			if (count)
				md->mapped++;
			if (count > md->mapcount_max)
				md->mapcount_max = count;
			md->pages++;
			if (PageAnon(page))
				md->anon++;
			md->node[page_to_nid(page)]++;
		}
		cond_resched();
	}
	return md;
}

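/*
 * Print one /proc/<pid>/numa_maps line: the start address, the vma's
 * memory policy, the aggregate counters, then per-node page counts.
 * An illustrative line (numbers made up):
 *
 *	08048000 interleave={0,1} MaxRef=3 Pages=12 Mapped=12 N0=6 N1=6
 */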
static int show_numa_map(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct mempolicy *pol;
	struct numa_maps *md;
	struct zone **z;
	int n;
	int first;

	if (!vma->vm_mm)
		return 0;

	md = get_numa_maps(vma);
	if (!md)
		return 0;

	seq_printf(m, "%08lx", vma->vm_start);
	pol = get_vma_policy(task, vma, vma->vm_start);
	/* Print policy */
	switch (pol->policy) {
	case MPOL_PREFERRED:
		seq_printf(m, " prefer=%d", pol->v.preferred_node);
		break;
	case MPOL_BIND:
		seq_printf(m, " bind={");
		first = 1;
		for (z = pol->v.zonelist->zones; *z; z++) {
			if (!first)
				seq_putc(m, ',');
			else
				first = 0;
			seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
					(*z)->name);
		}
		seq_putc(m, '}');
		break;
	case MPOL_INTERLEAVE:
		seq_printf(m, " interleave={");
		first = 1;
		for_each_node(n) {
			if (node_isset(n, pol->v.nodes)) {
				if (!first)
					seq_putc(m, ',');
				else
					first = 0;
				seq_printf(m, "%d", n);
			}
		}
		seq_putc(m, '}');
		break;
	default:
		seq_printf(m, " default");
		break;
	}
	seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
			md->mapcount_max, md->pages, md->mapped);
	if (md->anon)
		seq_printf(m, " Anon=%lu", md->anon);

	for_each_online_node(n) {
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
	}
	seq_putc(m, '\n');
	kfree(md);
	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map
};
#endif