xref: /linux/fs/proc/task_nommu.c (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

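/*
 * Memory reporting for /proc/<pid> on !CONFIG_MMU kernels: the nommu
 * counterpart of fs/proc/task_mmu.c.  Here VMAs live in the rbtree at
 * mm->mm_rb, and each may be backed by a struct vm_region that can be
 * shared with other processes.
 */
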
/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	mmap_read_lock(mm);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && refcount_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	mmap_read_unlock(mm);
}
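
/*
 * Example output (illustrative values only) as the above appears in
 * /proc/<pid>/status:
 *
 *	Mem:	  643072 bytes
 *	Slack:	    4096 bytes
 *	Shared:	  520192 bytes
 */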

/*
 * Sum of the extents of all VMAs in this mm: reported as the vsize
 * field of /proc/<pid>/stat.
 */
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	mmap_read_lock(mm);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	mmap_read_unlock(mm);
	return vsize;
}

/*
 * Fill in the page counts for /proc/<pid>/statm.  With no paging,
 * everything mapped is resident, so the total size doubles as
 * *resident; *shared is left untouched here.
 */
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	mmap_read_lock(mm);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	mmap_read_unlock(mm);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

/* a VMA is reported as "[stack]" if it covers the initial stack pointer */
static int is_stack(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= mm->start_stack &&
		vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
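/*
 * An illustrative line (made-up values), matching the format string
 * below:
 *
 *	c0101000-c0121000 r-xs 00000000 00:0b 14582      /bin/busybox
 */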
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	/* fix the line width so the file name column lines up */
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(vma)) {
		seq_pad(m, ' ');
		seq_puts(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
}

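/*
 * seq_file iterator: m_start() pins the task and mm and walks to the
 * *pos'th VMA, m_next() steps to the following one, and m_stop() drops
 * the lock and references taken in m_start().
 */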
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return NULL;

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		return ERR_PTR(-EINTR);
	}

	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	mmap_read_unlock(mm);
	mmput(mm);
	return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(_vml)) {
		mmap_read_unlock(priv->mm);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

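/*
 * On open, hang the iterator state off the seq_file and take a
 * reference on the target mm via proc_mem_open() (a bare mmgrab(), so
 * the address space itself is not pinned until m_start() runs).
 */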
static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

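/*
 * Drop the mm reference taken in maps_open(), then let seq_file tear
 * down the private state.
 */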
static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};