xref: /linux/mm/util.c (revision d9c6a72d6fa29d3a7999dda726577e5d1fccafa5)
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Function returns the source string if it is in the .rodata section,
 * otherwise it falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
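
/*
 * Hypothetical usage sketch (not part of the original file): how a caller
 * might pair kstrdup_const() with kfree_const() so that string literals in
 * .rodata are never copied or freed. The struct and helper names below
 * (example_label, example_label_create/destroy) are invented for
 * illustration only.
 */
struct example_label {
	const char *name;
};

static struct example_label *example_label_create(const char *name, gfp_t gfp)
{
	struct example_label *label = kmalloc(sizeof(*label), gfp);

	if (!label)
		return NULL;

	/* No allocation happens if @name points into .rodata. */
	label->name = kstrdup_const(name, gfp);
	if (!label->name) {
		kfree(label);
		return NULL;
	}
	return label;
}

static void example_label_destroy(struct example_label *label)
{
	if (!label)
		return;
	/* Only frees the string if it was really duplicated. */
	kfree_const(label->name);
	kfree(label);
}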

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
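
/*
 * Hypothetical usage sketch (not part of the original file): duplicating a
 * default template structure with kmemdup() instead of a separate kmalloc()
 * plus memcpy() pair. "example_config" and the default table are invented
 * for illustration only.
 */
struct example_config {
	int depth;
	int timeout_ms;
};

static const struct example_config example_defaults = {
	.depth = 8,
	.timeout_ms = 1000,
};

static struct example_config *example_config_dup(gfp_t gfp)
{
	/* One call allocates and copies; returns NULL on allocation failure. */
	return kmemdup(&example_defaults, sizeof(example_defaults), gfp);
}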

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
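
/*
 * Hypothetical usage sketch (not part of the original file): an ioctl-style
 * handler that pulls a fixed-size argument block in from user space with
 * memdup_user() and follows the ERR_PTR() convention. "example_args" and
 * example_handle_args() are invented for illustration only.
 */
struct example_args {
	u32 flags;
	u64 value;
};

static int example_handle_args(const void __user *uarg)
{
	struct example_args *args;
	int ret = 0;

	args = memdup_user(uarg, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);	/* -ENOMEM or -EFAULT */

	if (args->flags)
		ret = -EINVAL;		/* no flags defined in this sketch */

	kfree(args);
	return ret;
}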

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
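
/*
 * Hypothetical usage sketch (not part of the original file): copying a short,
 * NUL-terminated name from user space with strndup_user(), bounded by a
 * made-up EXAMPLE_NAME_MAX. The helper example_set_name() is invented for
 * illustration only.
 */
#define EXAMPLE_NAME_MAX 64

static int example_set_name(const char __user *uname)
{
	char *name;

	name = strndup_user(uname, EXAMPLE_NAME_MAX);
	if (IS_ERR(name))
		return PTR_ERR(name);	/* -EFAULT, -EINVAL or -ENOMEM */

	pr_debug("example: new name '%s'\n", name);

	kfree(name);
	return 0;
}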

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
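
/*
 * Hypothetical usage sketch (not part of the original file): the common
 * procfs/debugfs write-handler pattern, where a text command of @count bytes
 * is copied and NUL-terminated in one step with memdup_user_nul() so it can
 * be parsed with ordinary string helpers. example_write() is invented for
 * illustration only.
 */
static ssize_t example_write(const char __user *ubuf, size_t count)
{
	char *cmd;
	ssize_t ret = count;

	cmd = memdup_user_nul(ubuf, count);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* cmd is now a NUL-terminated kernel copy of the user buffer. */
	if (strcmp(cmd, "reset\n") != 0)
		ret = -EINVAL;

	kfree(cmd);
	return ret;
}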

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
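
/*
 * Hypothetical usage sketch (not part of the original file): pinning a small
 * user buffer with get_user_pages_fast(), handling a short pin, and releasing
 * the pages with put_page() when done. example_pin_user_buffer() and the
 * EXAMPLE_MAX_PAGES bound are invented for illustration only.
 */
#define EXAMPLE_MAX_PAGES 16

static int example_pin_user_buffer(unsigned long start, int nr_pages,
				   struct page **pages)
{
	int pinned, i;

	if (nr_pages <= 0 || nr_pages > EXAMPLE_MAX_PAGES)
		return -EINVAL;

	/* Must not be called with mmap_sem held; may sleep. */
	pinned = get_user_pages_fast(start, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;		/* -errno, nothing was pinned */

	if (pinned < nr_pages) {
		/* Partial pin: release what we got and report failure. */
		for (i = 0; i < pinned; i++)
			put_page(pages[i]);
		return -EFAULT;
	}

	return 0;			/* caller must put_page() each page */
}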

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
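
/*
 * Hypothetical usage sketch (not part of the original file): mapping the
 * first @len bytes of a file into the current task's address space from
 * kernel code with vm_mmap(). Failures come back as negative error values
 * encoded in the unsigned return, hence the IS_ERR_VALUE() check.
 * example_map_file() is invented for illustration only.
 */
static unsigned long example_map_file(struct file *file, unsigned long len)
{
	unsigned long addr;

	addr = vm_mmap(file, 0, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED, 0);
	if (IS_ERR_VALUE(addr))
		pr_debug("example: vm_mmap failed: %ld\n", (long)addr);

	return addr;	/* user-space address on success */
}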

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be discussed with the mm
 * people first.
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
	 * tables) so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
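
/*
 * Hypothetical usage sketch (not part of the original file): allocating a
 * table whose size depends on a runtime count. kvmalloc_node() tries a
 * physically contiguous kmalloc first and silently falls back to vmalloc,
 * and kvfree() picks the matching release path. example_table and its
 * alloc/free helpers are invented for illustration only; a real caller
 * should also guard the size multiplication against overflow.
 */
struct example_table {
	size_t nr_entries;
	u64 *entries;
};

static int example_table_alloc(struct example_table *table, size_t nr_entries)
{
	table->entries = kvmalloc_node(nr_entries * sizeof(*table->entries),
				       GFP_KERNEL, NUMA_NO_NODE);
	if (!table->entries)
		return -ENOMEM;

	table->nr_entries = nr_entries;
	return 0;
}

static void example_table_free(struct example_table *table)
{
	/* Works for both the kmalloc and the vmalloc case. */
	kvfree(table->entries);
	table->entries = NULL;
	table->nr_entries = 0;
}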

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page it returns true if any sub-page of the compound page
 * is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
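
/*
 * Worked example (not part of the original file), assuming 4 KiB pages, no
 * hugetlb pages, the default overcommit_ratio of 50, 4 GiB of RAM and
 * 2 GiB of swap:
 *
 *	allowed  = (1048576 - 0) * 50 / 100	= 524288 pages (2 GiB)
 *	allowed += 524288			  (2 GiB of swap)
 *	=> commit limit = 1048576 pages		  (4 GiB)
 *
 * When overcommit_kbytes is set, the first term is instead
 * overcommit_kbytes >> (PAGE_SHIFT - 10), i.e. kbytes converted to pages.
 */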

/*
 * Make sure vm_committed_as is in one cacheline and not cacheline-shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case: they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages alone; they are not available
		 * for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
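
/*
 * Hypothetical usage sketch (not part of the original file): calling
 * get_cmdline() and respecting its documented caveat that the copy is not
 * guaranteed to be NUL-terminated, so the returned length must be used (or a
 * terminator added) before treating the buffer as a string. Since cmdline
 * arguments are NUL-separated, the %s below prints only the first argument.
 * example_log_cmdline() is invented for illustration only.
 */
static void example_log_cmdline(struct task_struct *task)
{
	char buf[128];
	int len;

	len = get_cmdline(task, buf, sizeof(buf) - 1);
	if (len <= 0)
		return;

	buf[len] = '\0';	/* get_cmdline() does not add the NUL for us */
	pr_debug("cmdline of pid %d: %s\n", task_pid_nr(task), buf);
}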