xref: /linux/mm/util.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

#include "internal.h"

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
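
/*
 * Illustrative kstrdup() call site (a sketch, not part of this file;
 * my_set_label() and label are hypothetical names):
 *
 *	static char *label;
 *
 *	static int my_set_label(const char *src)
 *	{
 *		char *copy = kstrdup(src, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree(label);
 *		label = copy;
 *		return 0;
 *	}
 */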

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
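
/*
 * Illustrative kstrndup() use (hypothetical; @hdr is assumed): safely
 * duplicating a fixed-size field that may not be NUL-terminated. The
 * result is always terminated.
 *
 *	char *name = kstrndup(hdr->name, sizeof(hdr->name), GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 */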

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
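
/*
 * Illustrative kmemdup() use (a sketch): unlike kstrdup(), exactly
 * @len bytes are copied, so it suits binary blobs such as a packet
 * payload in atomic context.
 *
 *	u8 *copy = kmemdup(skb->data, skb->len, GFP_ATOMIC);
 *
 *	if (!copy)
 *		return -ENOMEM;
 */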

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
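
/*
 * Illustrative memdup_user() call site (hypothetical; struct my_args
 * and uptr are assumed): pulling a fixed-size argument block in from
 * an ioctl handler. Note the ERR_PTR()-encoded return.
 *
 *	struct my_args *args = memdup_user(uptr, sizeof(*args));
 *
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 *	...
 *	kfree(args);
 */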

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
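
/*
 * Illustrative strndup_user() use (hypothetical; upath is assumed):
 * copying a user-supplied path with a sanity cap. As with
 * memdup_user(), failure comes back ERR_PTR()-encoded.
 *
 *	char *path = strndup_user(upath, PATH_MAX);
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 */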
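/*
 * Insert @vma into @mm's VMA list immediately after @prev (or at the
 * head when @prev is NULL), fixing up the vm_next/vm_prev links. When
 * there is no predecessor, @rb_parent identifies the rbtree node of
 * the successor VMA, if any. Callers are expected to hold mmap_sem
 * for writing.
 */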
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If @in_group is non-zero, check the entire thread group; otherwise
 * just check the current task. Returns the pid of the task that the
 * vma is a stack for, or 0 if it is not a stack.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;

		rcu_read_lock();
		for_each_thread(task, t) {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		}
done:
		rcu_read_unlock();
	}

	return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't
 * fall back to the regular GUP.
 * If the architecture does not support this function, simply return
 * with no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
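
/*
 * Illustrative get_user_pages_fast() call site (a sketch; start,
 * nr_pages and the pages array are assumed to exist): pin a user
 * buffer for writing, then drop each reference with put_page().
 *
 *	int i, got;
 *
 *	got = get_user_pages_fast(start, nr_pages, 1, pages);
 *	if (got < 0)
 *		return got;
 *	... access the (possibly partial) set of pinned pages ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */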

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
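
/*
 * Illustrative vm_mmap() use (hypothetical; file and size are
 * assumed): map the first @size bytes of @file read-only into the
 * caller's address space. Errors come back as small negative values,
 * as with mmap(2).
 *
 *	unsigned long uaddr;
 *
 *	uaddr = vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(uaddr))
 *		return (long)uaddr;
 */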

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
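
/*
 * kvfree() pairs with the common "try kmalloc, fall back to vmalloc"
 * allocation pattern; an illustrative sketch:
 *
 *	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *
 *	if (!buf)
 *		buf = vmalloc(size);
 *	...
 *	kvfree(buf);
 */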

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}
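/*
 * vm.overcommit_ratio and vm.overcommit_kbytes are mutually
 * exclusive: writing either sysctl clears the other, so
 * vm_commit_limit() below only ever consults one knob.
 */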
int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
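
/*
 * Worked example (hypothetical numbers): with the default
 * sysctl_overcommit_ratio of 50, 8 GiB of RAM, no hugetlb pages and
 * 2 GiB of swap, the OVERCOMMIT_NEVER limit is
 * 8 GiB * 50 / 100 + 2 GiB = 6 GiB, expressed in pages.
 */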

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a terminating NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	len = mm->arg_end - mm->arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	/*
	 * If the NUL at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, mm->env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
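
/*
 * Illustrative get_cmdline() call site (a sketch; @task is assumed):
 * since the copy is not guaranteed to be NUL-terminated, reserve a
 * byte and terminate it by hand.
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */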
410