xref: /linux/fs/proc/task_nommu.c (revision f3539c12d8196ce0a1993364d30b3a18908470d1)

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared".  Shared memory may get counted more than once - once
 * for each process that owns it.  Non-shared memory is counted
 * accurately.
 */
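/*
 * For illustration, the lines emitted below land in /proc/<pid>/status
 * and look something like this (the values are made up):
 *
 *	Mem:	  611532 bytes
 *	Slack:	    2048 bytes
 *	Shared:	  327680 bytes
 */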
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		/* the VMA descriptor itself is always private to this mm */
		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		/*
		 * count the backing store as shared if the mm itself is
		 * shared or if the mapping may be shared with other
		 * processes
		 */
		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				/* space left over at the end of the region */
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}

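/*
 * sum up the size of all the VMAs attached to an mm - this is what
 * gets reported as the process's virtual memory size (for instance
 * through /proc/<pid>/stat)
 */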
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

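/*
 * work out the figures reported through /proc/<pid>/statm: the kernel
 * object overhead plus the sizes of the backing regions, with the text
 * and data figures derived from the mm's code/data markers - all the
 * results are in pages
 */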
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	/* without an MMU, everything that is mapped is resident */
	*resident = size;
	return size;
}

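/*
 * decide whether a VMA should be annotated "[stack]": for the
 * per-process maps file we can simply compare against the mm's
 * start_stack, but for a per-thread file we have to look the task up
 * and check where its own stack lives
 */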
static int is_stack(struct proc_maps_private *priv,
		    struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	int stack = 0;

	if (is_pid) {
		stack = vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack;
	} else {
		struct inode *inode = priv->inode;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(proc_pid(inode), PIDTYPE_PID);
		if (task)
			stack = vma_is_stack_for_task(vma, task);
		rcu_read_unlock();
	}
	return stack;
}

/*
 * display a single VMA to a sequenced file
 */
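/*
 * each line follows the usual maps format, e.g. (values are made up):
 *
 *	c0a8c000-c0a94000 rw-s 00000000 00:0b 1362	/dev/fb0
 */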
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
			  int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct proc_maps_private *priv = m->private;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	/* pad the line out so that the name column lines up */
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(priv, vma, is_pid)) {
		seq_pad(m, ' ');
		seq_printf(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
			      is_pid);
}

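/*
 * the pid and tid flavours differ only in how the "[stack]" annotation
 * is decided - see is_stack() above
 */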
static int show_pid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 0);
}

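/*
 * the seq_file iterator protocol: ->start pins the task and mm and
 * walks to the Nth VMA, ->next steps to the following rb-tree node,
 * ->show emits one line and ->stop drops the lock and the references
 * again
 */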
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	/* take a reference on the mm unless it's already being torn down */
	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	up_read(&mm->mmap_sem);
	mmput(mm);
	return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	/* drop the lock and the mm reference if we were handed a VMA */
	if (!IS_ERR_OR_NULL(_vml)) {
		up_read(&priv->mm->mmap_sem);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

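/*
 * proc_mem_open() performs the ptrace access check and hands back the
 * target mm with an mm_count reference held (the address space itself
 * is not pinned); map_release() pairs that reference with mmdrop()
 */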
static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_tid_maps_ops);
}

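/*
 * these are installed by fs/proc/base.c as the "maps" entries in the
 * per-process and per-thread proc directories respectively
 */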
const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};