xref: /linux/mm/util.c (revision 8eecf1c9929aef24e9e75280a39ed1ba3c64fb71)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/mm.h>
3 #include <linux/slab.h>
4 #include <linux/string.h>
5 #include <linux/compiler.h>
6 #include <linux/export.h>
7 #include <linux/err.h>
8 #include <linux/sched.h>
9 #include <linux/sched/mm.h>
10 #include <linux/sched/signal.h>
11 #include <linux/sched/task_stack.h>
12 #include <linux/security.h>
13 #include <linux/swap.h>
14 #include <linux/swapops.h>
15 #include <linux/mman.h>
16 #include <linux/hugetlb.h>
17 #include <linux/vmalloc.h>
18 #include <linux/userfaultfd_k.h>
19 #include <linux/elf.h>
20 #include <linux/elf-randomize.h>
21 #include <linux/personality.h>
22 #include <linux/random.h>
23 #include <linux/processor.h>
24 #include <linux/sizes.h>
25 #include <linux/compat.h>
26 
27 #include <linux/uaccess.h>
28 
29 #include "internal.h"
30 
31 /**
32  * kfree_const - conditionally free memory
33  * @x: pointer to the memory
34  *
35  * This function calls kfree() only if @x is not in the .rodata section.
36  */
37 void kfree_const(const void *x)
38 {
39 	if (!is_kernel_rodata((unsigned long)x))
40 		kfree(x);
41 }
42 EXPORT_SYMBOL(kfree_const);
43 
44 /**
45  * kstrdup - allocate space for and copy an existing string
46  * @s: the string to duplicate
47  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
48  *
49  * Return: newly allocated copy of @s or %NULL in case of error
50  */
51 char *kstrdup(const char *s, gfp_t gfp)
52 {
53 	size_t len;
54 	char *buf;
55 
56 	if (!s)
57 		return NULL;
58 
59 	len = strlen(s) + 1;
60 	buf = kmalloc_track_caller(len, gfp);
61 	if (buf)
62 		memcpy(buf, s, len);
63 	return buf;
64 }
65 EXPORT_SYMBOL(kstrdup);
66 
67 /**
68  * kstrdup_const - conditionally duplicate an existing const string
69  * @s: the string to duplicate
70  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
71  *
72  * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
73  * must not be passed to krealloc().
74  *
75  * Return: source string if it is in the .rodata section, otherwise
76  * fall back to kstrdup().
77  */
78 const char *kstrdup_const(const char *s, gfp_t gfp)
79 {
80 	if (is_kernel_rodata((unsigned long)s))
81 		return s;
82 
83 	return kstrdup(s, gfp);
84 }
85 EXPORT_SYMBOL(kstrdup_const);
86 
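/*
 * Illustrative sketch (not part of util.c): pairing kstrdup_const() with
 * kfree_const() so that string literals living in .rodata are neither copied
 * nor freed.  The struct and function names below are hypothetical; only the
 * usual <linux/slab.h> / <linux/string.h> declarations are assumed.
 */
struct example_attr {
	const char *name;	/* may point into .rodata or at a kmalloc'd copy */
};

static int example_attr_set_name(struct example_attr *attr, const char *name)
{
	const char *copy = kstrdup_const(name, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;
	kfree_const(attr->name);	/* safe for both .rodata and heap copies */
	attr->name = copy;
	return 0;
}
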
87 /**
88  * kstrndup - allocate space for and copy an existing string
89  * @s: the string to duplicate
90  * @max: read at most @max chars from @s
91  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
92  *
93  * Note: Use kmemdup_nul() instead if the size is known exactly.
94  *
95  * Return: newly allocated copy of @s or %NULL in case of error
96  */
97 char *kstrndup(const char *s, size_t max, gfp_t gfp)
98 {
99 	size_t len;
100 	char *buf;
101 
102 	if (!s)
103 		return NULL;
104 
105 	len = strnlen(s, max);
106 	buf = kmalloc_track_caller(len + 1, gfp);
107 	if (buf) {
108 		memcpy(buf, s, len);
109 		buf[len] = '\0';
110 	}
111 	return buf;
112 }
113 EXPORT_SYMBOL(kstrndup);
114 
115 /**
116  * kmemdup - duplicate region of memory
117  *
118  * @src: memory region to duplicate
119  * @len: memory region length
120  * @gfp: GFP mask to use
121  *
122  * Return: newly allocated copy of @src or %NULL in case of error
123  */
124 void *kmemdup(const void *src, size_t len, gfp_t gfp)
125 {
126 	void *p;
127 
128 	p = kmalloc_track_caller(len, gfp);
129 	if (p)
130 		memcpy(p, src, len);
131 	return p;
132 }
133 EXPORT_SYMBOL(kmemdup);
134 
135 /**
136  * kmemdup_nul - Create a NUL-terminated string from unterminated data
137  * @s: The data to stringify
138  * @len: The size of the data
139  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
140  *
141  * Return: newly allocated copy of @s with NUL-termination or %NULL in
142  * case of error
143  */
144 char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
145 {
146 	char *buf;
147 
148 	if (!s)
149 		return NULL;
150 
151 	buf = kmalloc_track_caller(len + 1, gfp);
152 	if (buf) {
153 		memcpy(buf, s, len);
154 		buf[len] = '\0';
155 	}
156 	return buf;
157 }
158 EXPORT_SYMBOL(kmemdup_nul);
159 
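/*
 * Illustrative sketch (not part of util.c): turning a counted, unterminated
 * byte range into a regular C string.  kmemdup_nul() copies exactly @len
 * bytes and appends the terminating '\0' itself.  The helper name below is
 * hypothetical; the result is freed with kfree() as usual.
 */
static char *example_label_from_blob(const void *blob, size_t len)
{
	return kmemdup_nul(blob, len, GFP_KERNEL);	/* NULL on allocation failure */
}
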
160 /**
161  * memdup_user - duplicate memory region from user space
162  *
163  * @src: source address in user space
164  * @len: number of bytes to copy
165  *
166  * Return: an ERR_PTR() on failure.  Result is physically
167  * contiguous, to be freed by kfree().
168  */
169 void *memdup_user(const void __user *src, size_t len)
170 {
171 	void *p;
172 
173 	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
174 	if (!p)
175 		return ERR_PTR(-ENOMEM);
176 
177 	if (copy_from_user(p, src, len)) {
178 		kfree(p);
179 		return ERR_PTR(-EFAULT);
180 	}
181 
182 	return p;
183 }
184 EXPORT_SYMBOL(memdup_user);
185 
186 /**
187  * vmemdup_user - duplicate memory region from user space
188  *
189  * @src: source address in user space
190  * @len: number of bytes to copy
191  *
192  * Return: an ERR_PTR() on failure.  Result may not be
193  * physically contiguous.  Use kvfree() to free.
194  */
195 void *vmemdup_user(const void __user *src, size_t len)
196 {
197 	void *p;
198 
199 	p = kvmalloc(len, GFP_USER);
200 	if (!p)
201 		return ERR_PTR(-ENOMEM);
202 
203 	if (copy_from_user(p, src, len)) {
204 		kvfree(p);
205 		return ERR_PTR(-EFAULT);
206 	}
207 
208 	return p;
209 }
210 EXPORT_SYMBOL(vmemdup_user);
211 
212 /**
213  * strndup_user - duplicate an existing string from user space
214  * @s: The string to duplicate
215  * @n: Maximum number of bytes to copy, including the trailing NUL.
216  *
217  * Return: newly allocated copy of @s or an ERR_PTR() in case of error
218  */
219 char *strndup_user(const char __user *s, long n)
220 {
221 	char *p;
222 	long length;
223 
224 	length = strnlen_user(s, n);
225 
226 	if (!length)
227 		return ERR_PTR(-EFAULT);
228 
229 	if (length > n)
230 		return ERR_PTR(-EINVAL);
231 
232 	p = memdup_user(s, length);
233 
234 	if (IS_ERR(p))
235 		return p;
236 
237 	p[length - 1] = '\0';
238 
239 	return p;
240 }
241 EXPORT_SYMBOL(strndup_user);
242 
243 /**
244  * memdup_user_nul - duplicate memory region from user space and NUL-terminate
245  *
246  * @src: source address in user space
247  * @len: number of bytes to copy
248  *
249  * Return: an ERR_PTR() on failure.
250  */
251 void *memdup_user_nul(const void __user *src, size_t len)
252 {
253 	char *p;
254 
255 	/*
256 	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
257 	 * cause a page fault, which makes it pointless to use GFP_NOFS
258 	 * or GFP_ATOMIC.
259 	 */
260 	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
261 	if (!p)
262 		return ERR_PTR(-ENOMEM);
263 
264 	if (copy_from_user(p, src, len)) {
265 		kfree(p);
266 		return ERR_PTR(-EFAULT);
267 	}
268 	p[len] = '\0';
269 
270 	return p;
271 }
272 EXPORT_SYMBOL(memdup_user_nul);
273 
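/*
 * Illustrative sketch (not part of util.c): the common ioctl-style pattern of
 * pulling a fixed-size argument from user space with memdup_user() and
 * releasing it with kfree().  struct example_req and the handler name are
 * hypothetical.
 */
struct example_req {
	u32 flags;
	u64 addr;
};

static long example_ioctl_copyin(const void __user *uarg)
{
	struct example_req *req;

	req = memdup_user(uarg, sizeof(*req));
	if (IS_ERR(req))
		return PTR_ERR(req);	/* -ENOMEM or -EFAULT */

	pr_debug("example: flags=%u addr=0x%llx\n", req->flags, req->addr);
	kfree(req);
	return 0;
}
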
274 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
275 		struct vm_area_struct *prev)
276 {
277 	struct vm_area_struct *next;
278 
279 	vma->vm_prev = prev;
280 	if (prev) {
281 		next = prev->vm_next;
282 		prev->vm_next = vma;
283 	} else {
284 		next = mm->mmap;
285 		mm->mmap = vma;
286 	}
287 	vma->vm_next = next;
288 	if (next)
289 		next->vm_prev = vma;
290 }
291 
292 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
293 {
294 	struct vm_area_struct *prev, *next;
295 
296 	next = vma->vm_next;
297 	prev = vma->vm_prev;
298 	if (prev)
299 		prev->vm_next = next;
300 	else
301 		mm->mmap = next;
302 	if (next)
303 		next->vm_prev = prev;
304 }
305 
306 /* Check if the vma is being used as a stack by this task */
307 int vma_is_stack_for_current(struct vm_area_struct *vma)
308 {
309 	struct task_struct * __maybe_unused t = current;
310 
311 	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
312 }
313 
314 /*
315  * Change backing file, only valid to use during initial VMA setup.
316  */
317 void vma_set_file(struct vm_area_struct *vma, struct file *file)
318 {
319 	/* Changing an anonymous vma with this is illegal */
320 	get_file(file);
321 	swap(vma->vm_file, file);
322 	fput(file);
323 }
324 EXPORT_SYMBOL(vma_set_file);
325 
326 #ifndef STACK_RND_MASK
327 #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
328 #endif
329 
330 unsigned long randomize_stack_top(unsigned long stack_top)
331 {
332 	unsigned long random_variable = 0;
333 
334 	if (current->flags & PF_RANDOMIZE) {
335 		random_variable = get_random_long();
336 		random_variable &= STACK_RND_MASK;
337 		random_variable <<= PAGE_SHIFT;
338 	}
339 #ifdef CONFIG_STACK_GROWSUP
340 	return PAGE_ALIGN(stack_top) + random_variable;
341 #else
342 	return PAGE_ALIGN(stack_top) - random_variable;
343 #endif
344 }
345 
346 /**
347  * randomize_page - Generate a random, page aligned address
348  * @start:	The smallest acceptable address the caller will take.
349  * @range:	The size of the area, starting at @start, within which the
350  *		random address must fall.
351  *
352  * If @start + @range would overflow, @range is capped.
353  *
354  * NOTE: Historical use of randomize_range, which this replaces, presumed that
355  * @start was already page aligned.  We now align it regardless.
356  *
357  * Return: A page aligned address within [start, start + range).  On error,
358  * @start is returned.
359  */
360 unsigned long randomize_page(unsigned long start, unsigned long range)
361 {
362 	if (!PAGE_ALIGNED(start)) {
363 		range -= PAGE_ALIGN(start) - start;
364 		start = PAGE_ALIGN(start);
365 	}
366 
367 	if (start > ULONG_MAX - range)
368 		range = ULONG_MAX - start;
369 
370 	range >>= PAGE_SHIFT;
371 
372 	if (range == 0)
373 		return start;
374 
375 	return start + (get_random_long() % range << PAGE_SHIFT);
376 }
377 
378 #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
379 unsigned long arch_randomize_brk(struct mm_struct *mm)
380 {
381 	/* Is the current task 32-bit? */
382 	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
383 		return randomize_page(mm->brk, SZ_32M);
384 
385 	return randomize_page(mm->brk, SZ_1G);
386 }
387 
388 unsigned long arch_mmap_rnd(void)
389 {
390 	unsigned long rnd;
391 
392 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
393 	if (is_compat_task())
394 		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
395 	else
396 #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
397 		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
398 
399 	return rnd << PAGE_SHIFT;
400 }
401 
402 static int mmap_is_legacy(struct rlimit *rlim_stack)
403 {
404 	if (current->personality & ADDR_COMPAT_LAYOUT)
405 		return 1;
406 
407 	if (rlim_stack->rlim_cur == RLIM_INFINITY)
408 		return 1;
409 
410 	return sysctl_legacy_va_layout;
411 }
412 
413 /*
414  * Leave enough space between the mmap area and the stack to honour ulimit in
415  * the face of randomisation.
416  */
417 #define MIN_GAP		(SZ_128M)
418 #define MAX_GAP		(STACK_TOP / 6 * 5)
419 
420 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
421 {
422 	unsigned long gap = rlim_stack->rlim_cur;
423 	unsigned long pad = stack_guard_gap;
424 
425 	/* Account for stack randomization if necessary */
426 	if (current->flags & PF_RANDOMIZE)
427 		pad += (STACK_RND_MASK << PAGE_SHIFT);
428 
429 	/* Values close to RLIM_INFINITY can overflow. */
430 	if (gap + pad > gap)
431 		gap += pad;
432 
433 	if (gap < MIN_GAP)
434 		gap = MIN_GAP;
435 	else if (gap > MAX_GAP)
436 		gap = MAX_GAP;
437 
438 	return PAGE_ALIGN(STACK_TOP - gap - rnd);
439 }
440 
441 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
442 {
443 	unsigned long random_factor = 0UL;
444 
445 	if (current->flags & PF_RANDOMIZE)
446 		random_factor = arch_mmap_rnd();
447 
448 	if (mmap_is_legacy(rlim_stack)) {
449 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
450 		mm->get_unmapped_area = arch_get_unmapped_area;
451 	} else {
452 		mm->mmap_base = mmap_base(random_factor, rlim_stack);
453 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
454 	}
455 }
456 #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
457 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
458 {
459 	mm->mmap_base = TASK_UNMAPPED_BASE;
460 	mm->get_unmapped_area = arch_get_unmapped_area;
461 }
462 #endif
463 
464 /**
465  * __account_locked_vm - account locked pages to an mm's locked_vm
466  * @mm:          mm to account against
467  * @pages:       number of pages to account
468  * @inc:         %true if @pages should be considered positive, %false if not
469  * @task:        task used to check RLIMIT_MEMLOCK
470  * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
471  *
472  * Assumes @task and @mm are valid (i.e. at least one reference on each), and
473  * that mmap_lock is held as writer.
474  *
475  * Return:
476  * * 0       on success
477  * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
478  */
479 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
480 			struct task_struct *task, bool bypass_rlim)
481 {
482 	unsigned long locked_vm, limit;
483 	int ret = 0;
484 
485 	mmap_assert_write_locked(mm);
486 
487 	locked_vm = mm->locked_vm;
488 	if (inc) {
489 		if (!bypass_rlim) {
490 			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
491 			if (locked_vm + pages > limit)
492 				ret = -ENOMEM;
493 		}
494 		if (!ret)
495 			mm->locked_vm = locked_vm + pages;
496 	} else {
497 		WARN_ON_ONCE(pages > locked_vm);
498 		mm->locked_vm = locked_vm - pages;
499 	}
500 
501 	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
502 		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
503 		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
504 		 ret ? " - exceeded" : "");
505 
506 	return ret;
507 }
508 EXPORT_SYMBOL_GPL(__account_locked_vm);
509 
510 /**
511  * account_locked_vm - account locked pages to an mm's locked_vm
512  * @mm:          mm to account against, may be NULL
513  * @pages:       number of pages to account
514  * @inc:         %true if @pages should be considered positive, %false if not
515  *
516  * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
517  *
518  * Return:
519  * * 0       on success, or if mm is NULL
520  * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
521  */
522 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
523 {
524 	int ret;
525 
526 	if (pages == 0 || !mm)
527 		return 0;
528 
529 	mmap_write_lock(mm);
530 	ret = __account_locked_vm(mm, pages, inc, current,
531 				  capable(CAP_IPC_LOCK));
532 	mmap_write_unlock(mm);
533 
534 	return ret;
535 }
536 EXPORT_SYMBOL_GPL(account_locked_vm);
537 
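/*
 * Illustrative sketch (not part of util.c): a driver that pins user pages
 * charging them against RLIMIT_MEMLOCK with account_locked_vm() and undoing
 * the charge on teardown.  struct example_pin_ctx is hypothetical.
 */
struct example_pin_ctx {
	struct mm_struct *mm;
	unsigned long npages;
};

static int example_charge_pinned(struct example_pin_ctx *ctx)
{
	/* Takes mmap_lock internally and checks RLIMIT_MEMLOCK (unless CAP_IPC_LOCK). */
	return account_locked_vm(ctx->mm, ctx->npages, true);
}

static void example_uncharge_pinned(struct example_pin_ctx *ctx)
{
	account_locked_vm(ctx->mm, ctx->npages, false);
}
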
538 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
539 	unsigned long len, unsigned long prot,
540 	unsigned long flag, unsigned long pgoff)
541 {
542 	unsigned long ret;
543 	struct mm_struct *mm = current->mm;
544 	unsigned long populate;
545 	LIST_HEAD(uf);
546 
547 	ret = security_mmap_file(file, prot, flag);
548 	if (!ret) {
549 		if (mmap_write_lock_killable(mm))
550 			return -EINTR;
551 		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
552 			      &uf);
553 		mmap_write_unlock(mm);
554 		userfaultfd_unmap_complete(mm, &uf);
555 		if (populate)
556 			mm_populate(ret, populate);
557 	}
558 	return ret;
559 }
560 
561 unsigned long vm_mmap(struct file *file, unsigned long addr,
562 	unsigned long len, unsigned long prot,
563 	unsigned long flag, unsigned long offset)
564 {
565 	if (unlikely(offset + PAGE_ALIGN(len) < offset))
566 		return -EINVAL;
567 	if (unlikely(offset_in_page(offset)))
568 		return -EINVAL;
569 
570 	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
571 }
572 EXPORT_SYMBOL(vm_mmap);
573 
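/*
 * Illustrative sketch (not part of util.c): mapping the first @len bytes of a
 * file read-only into the current process, roughly the way binary loaders use
 * vm_mmap().  The wrapper name is hypothetical.
 */
static unsigned long example_map_file_ro(struct file *file, unsigned long len)
{
	/* On failure the result encodes a negative errno, not a pointer. */
	return vm_mmap(file, 0, len, PROT_READ, MAP_PRIVATE, 0);
}
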
574 /**
575  * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
576  * failure, fall back to non-contiguous (vmalloc) allocation.
577  * @size: size of the request.
578  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
579  * @node: numa node to allocate from
580  *
581  * Uses kmalloc to get the memory but if the allocation fails then falls back
582  * to the vmalloc allocator. Use kvfree for freeing the memory.
583  *
584  * GFP_NOWAIT and GFP_ATOMIC are not supported, nor is the __GFP_NORETRY modifier.
585  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
586  * preferable to the vmalloc fallback, due to visible performance drawbacks.
587  *
588  * Return: pointer to the allocated memory or %NULL in case of failure
589  */
590 void *kvmalloc_node(size_t size, gfp_t flags, int node)
591 {
592 	gfp_t kmalloc_flags = flags;
593 	void *ret;
594 
595 	/*
596 	 * We want to attempt a large physically contiguous block first because
597 	 * it is less likely to fragment multiple larger blocks and therefore
598 	 * contributes less to long-term fragmentation than the vmalloc fallback.
599 	 * However make sure that larger requests are not too disruptive - no
600 	 * OOM killer and no allocation failure warnings as we have a fallback.
601 	 */
602 	if (size > PAGE_SIZE) {
603 		kmalloc_flags |= __GFP_NOWARN;
604 
605 		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
606 			kmalloc_flags |= __GFP_NORETRY;
607 
608 		/* nofail semantics are implemented by the vmalloc fallback */
609 		kmalloc_flags &= ~__GFP_NOFAIL;
610 	}
611 
612 	ret = kmalloc_node(size, kmalloc_flags, node);
613 
614 	/*
615 	 * It doesn't really make sense to fall back to vmalloc for sub-page
616 	 * requests.
617 	 */
618 	if (ret || size <= PAGE_SIZE)
619 		return ret;
620 
621 	/* Don't even allow crazy sizes */
622 	if (unlikely(size > INT_MAX)) {
623 		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
624 		return NULL;
625 	}
626 
627 	/*
628 	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
629 	 * since the callers already cannot assume anything
630 	 * about the resulting pointer, and cannot play
631 	 * protection games.
632 	 */
633 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
634 			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
635 			node, __builtin_return_address(0));
636 }
637 EXPORT_SYMBOL(kvmalloc_node);
638 
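/*
 * Illustrative sketch (not part of util.c): allocating a possibly large
 * lookup table with kvmalloc_array() and always releasing it with kvfree(),
 * since the caller cannot tell whether kmalloc or vmalloc satisfied the
 * request.  struct example_table and its sizing are hypothetical.
 */
struct example_table {
	unsigned long nr_entries;
	u64 *entries;
};

static int example_table_alloc(struct example_table *t, unsigned long nr)
{
	t->entries = kvmalloc_array(nr, sizeof(*t->entries), GFP_KERNEL);
	if (!t->entries)
		return -ENOMEM;
	t->nr_entries = nr;
	return 0;
}

static void example_table_free(struct example_table *t)
{
	kvfree(t->entries);	/* works for both kmalloc and vmalloc backing */
	t->entries = NULL;
	t->nr_entries = 0;
}
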
639 /**
640  * kvfree() - Free memory.
641  * @addr: Pointer to allocated memory.
642  *
643  * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
644  * It is slightly more efficient to use kfree() or vfree() if you know
645  * which one to use.
646  *
647  * Context: Either preemptible task context or not-NMI interrupt.
648  */
649 void kvfree(const void *addr)
650 {
651 	if (is_vmalloc_addr(addr))
652 		vfree(addr);
653 	else
654 		kfree(addr);
655 }
656 EXPORT_SYMBOL(kvfree);
657 
658 /**
659  * kvfree_sensitive - Free a data object containing sensitive information.
660  * @addr: address of the data object to be freed.
661  * @len: length of the data object.
662  *
663  * Use the special memzero_explicit() function to clear the content of a
664  * kvmalloc'ed object containing sensitive data to make sure that the
665  * compiler won't optimize out the data clearing.
666  */
667 void kvfree_sensitive(const void *addr, size_t len)
668 {
669 	if (likely(!ZERO_OR_NULL_PTR(addr))) {
670 		memzero_explicit((void *)addr, len);
671 		kvfree(addr);
672 	}
673 }
674 EXPORT_SYMBOL(kvfree_sensitive);
675 
676 void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
677 {
678 	void *newp;
679 
680 	if (oldsize >= newsize)
681 		return (void *)p;
682 	newp = kvmalloc(newsize, flags);
683 	if (!newp)
684 		return NULL;
685 	memcpy(newp, p, oldsize);
686 	kvfree(p);
687 	return newp;
688 }
689 EXPORT_SYMBOL(kvrealloc);
690 
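/*
 * Illustrative sketch (not part of util.c): growing a kvmalloc'd buffer with
 * kvrealloc().  Unlike krealloc(), the caller must pass the old size, and on
 * failure the original buffer is left untouched.  The helper name is
 * hypothetical.
 */
static void *example_grow_buffer(void *buf, size_t oldsize, size_t newsize)
{
	void *newbuf = kvrealloc(buf, oldsize, newsize, GFP_KERNEL);

	if (!newbuf)
		return NULL;	/* @buf is still valid and must still be freed */
	return newbuf;
}
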
691 /**
692  * __vmalloc_array - allocate memory for a virtually contiguous array.
693  * @n: number of elements.
694  * @size: element size.
695  * @flags: the type of memory to allocate (see kmalloc).
696  */
697 void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
698 {
699 	size_t bytes;
700 
701 	if (unlikely(check_mul_overflow(n, size, &bytes)))
702 		return NULL;
703 	return __vmalloc(bytes, flags);
704 }
705 EXPORT_SYMBOL(__vmalloc_array);
706 
707 /**
708  * vmalloc_array - allocate memory for a virtually contiguous array.
709  * @n: number of elements.
710  * @size: element size.
711  */
712 void *vmalloc_array(size_t n, size_t size)
713 {
714 	return __vmalloc_array(n, size, GFP_KERNEL);
715 }
716 EXPORT_SYMBOL(vmalloc_array);
717 
718 /**
719  * __vcalloc - allocate and zero memory for a virtually contiguous array.
720  * @n: number of elements.
721  * @size: element size.
722  * @flags: the type of memory to allocate (see kmalloc).
723  */
724 void *__vcalloc(size_t n, size_t size, gfp_t flags)
725 {
726 	return __vmalloc_array(n, size, flags | __GFP_ZERO);
727 }
728 EXPORT_SYMBOL(__vcalloc);
729 
730 /**
731  * vcalloc - allocate and zero memory for a virtually contiguous array.
732  * @n: number of elements.
733  * @size: element size.
734  */
735 void *vcalloc(size_t n, size_t size)
736 {
737 	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
738 }
739 EXPORT_SYMBOL(vcalloc);
740 
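/*
 * Illustrative sketch (not part of util.c): allocating a zeroed, virtually
 * contiguous array with vcalloc(); the multiplication overflow check is done
 * internally by __vmalloc_array().  The element type is hypothetical.
 */
struct example_stat {
	u64 hits;
	u64 misses;
};

static struct example_stat *example_alloc_stats(size_t nr_cpus)
{
	/* Returns NULL on overflow or allocation failure; free with vfree(). */
	return vcalloc(nr_cpus, sizeof(struct example_stat));
}
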
741 /* Neutral page->mapping pointer to address_space or anon_vma or other */
742 void *page_rmapping(struct page *page)
743 {
744 	return folio_raw_mapping(page_folio(page));
745 }
746 
747 /**
748  * folio_mapped - Is this folio mapped into userspace?
749  * @folio: The folio.
750  *
751  * Return: True if any page in this folio is referenced by user page tables.
752  */
753 bool folio_mapped(struct folio *folio)
754 {
755 	long i, nr;
756 
757 	if (!folio_test_large(folio))
758 		return atomic_read(&folio->_mapcount) >= 0;
759 	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
760 		return true;
761 	if (folio_test_hugetlb(folio))
762 		return false;
763 
764 	nr = folio_nr_pages(folio);
765 	for (i = 0; i < nr; i++) {
766 		if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
767 			return true;
768 	}
769 	return false;
770 }
771 EXPORT_SYMBOL(folio_mapped);
772 
773 struct anon_vma *folio_anon_vma(struct folio *folio)
774 {
775 	unsigned long mapping = (unsigned long)folio->mapping;
776 
777 	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
778 		return NULL;
779 	return (void *)(mapping - PAGE_MAPPING_ANON);
780 }
781 
782 /**
783  * folio_mapping - Find the mapping where this folio is stored.
784  * @folio: The folio.
785  *
786  * For folios which are in the page cache, return the mapping that this
787  * page belongs to.  Folios in the swap cache return the swap mapping
788  * this page is stored in (which is different from the mapping for the
789  * swap file or swap device where the data is stored).
790  *
791  * You can call this for folios which aren't in the swap cache or page
792  * cache and it will return NULL.
793  */
794 struct address_space *folio_mapping(struct folio *folio)
795 {
796 	struct address_space *mapping;
797 
798 	/* This happens if someone calls flush_dcache_page on a slab page */
799 	if (unlikely(folio_test_slab(folio)))
800 		return NULL;
801 
802 	if (unlikely(folio_test_swapcache(folio)))
803 		return swap_address_space(folio_swap_entry(folio));
804 
805 	mapping = folio->mapping;
806 	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
807 		return NULL;
808 
809 	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
810 }
811 EXPORT_SYMBOL(folio_mapping);
812 
813 /* Slow path of page_mapcount() for compound pages */
814 int __page_mapcount(struct page *page)
815 {
816 	int ret;
817 
818 	ret = atomic_read(&page->_mapcount) + 1;
819 	/*
820 	 * For file THP, page->_mapcount contains the total number of mappings
821 	 * of the page: no need to look into compound_mapcount.
822 	 */
823 	if (!PageAnon(page) && !PageHuge(page))
824 		return ret;
825 	page = compound_head(page);
826 	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
827 	if (PageDoubleMap(page))
828 		ret--;
829 	return ret;
830 }
831 EXPORT_SYMBOL_GPL(__page_mapcount);
832 
833 /**
834  * folio_mapcount() - Calculate the number of mappings of this folio.
835  * @folio: The folio.
836  *
837  * A large folio tracks both how many times the entire folio is mapped,
838  * and how many times each individual page in the folio is mapped.
839  * This function calculates the total number of times the folio is
840  * mapped.
841  *
842  * Return: The number of times this folio is mapped.
843  */
844 int folio_mapcount(struct folio *folio)
845 {
846 	int i, compound, nr, ret;
847 
848 	if (likely(!folio_test_large(folio)))
849 		return atomic_read(&folio->_mapcount) + 1;
850 
851 	compound = folio_entire_mapcount(folio);
852 	nr = folio_nr_pages(folio);
853 	if (folio_test_hugetlb(folio))
854 		return compound;
855 	ret = compound;
856 	for (i = 0; i < nr; i++)
857 		ret += atomic_read(&folio_page(folio, i)->_mapcount) + 1;
858 	/* File pages have compound_mapcount included in _mapcount */
859 	if (!folio_test_anon(folio))
860 		return ret - compound * nr;
861 	if (folio_test_double_map(folio))
862 		ret -= nr;
863 	return ret;
864 }
865 
866 /**
867  * folio_copy - Copy the contents of one folio to another.
868  * @dst: Folio to copy to.
869  * @src: Folio to copy from.
870  *
871  * The bytes in the folio represented by @src are copied to @dst.
872  * Assumes the caller has validated that @dst is at least as large as @src.
873  * Can be called in atomic context for order-0 folios, but if the folio is
874  * larger, it may sleep.
875  */
876 void folio_copy(struct folio *dst, struct folio *src)
877 {
878 	long i = 0;
879 	long nr = folio_nr_pages(src);
880 
881 	for (;;) {
882 		copy_highpage(folio_page(dst, i), folio_page(src, i));
883 		if (++i == nr)
884 			break;
885 		cond_resched();
886 	}
887 }
888 
889 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
890 int sysctl_overcommit_ratio __read_mostly = 50;
891 unsigned long sysctl_overcommit_kbytes __read_mostly;
892 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
893 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
894 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
895 
896 int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
897 		size_t *lenp, loff_t *ppos)
898 {
899 	int ret;
900 
901 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
902 	if (ret == 0 && write)
903 		sysctl_overcommit_kbytes = 0;
904 	return ret;
905 }
906 
907 static void sync_overcommit_as(struct work_struct *dummy)
908 {
909 	percpu_counter_sync(&vm_committed_as);
910 }
911 
912 int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
913 		size_t *lenp, loff_t *ppos)
914 {
915 	struct ctl_table t;
916 	int new_policy = -1;
917 	int ret;
918 
919 	/*
920 	 * The deviation of sync_overcommit_as could be big with loose policies
921 	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing the policy to
922 	 * the strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
923 	 * with the strict "NEVER", and to avoid a possible race condition (even
924 	 * though users usually won't switch to OVERCOMMIT_NEVER very frequently),
925 	 * the switch is done in the following order:
926 	 *	1. changing the batch
927 	 *	2. sync percpu count on each CPU
928 	 *	3. switch the policy
929 	 */
930 	if (write) {
931 		t = *table;
932 		t.data = &new_policy;
933 		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
934 		if (ret || new_policy == -1)
935 			return ret;
936 
937 		mm_compute_batch(new_policy);
938 		if (new_policy == OVERCOMMIT_NEVER)
939 			schedule_on_each_cpu(sync_overcommit_as);
940 		sysctl_overcommit_memory = new_policy;
941 	} else {
942 		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
943 	}
944 
945 	return ret;
946 }
947 
948 int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
949 		size_t *lenp, loff_t *ppos)
950 {
951 	int ret;
952 
953 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
954 	if (ret == 0 && write)
955 		sysctl_overcommit_ratio = 0;
956 	return ret;
957 }
958 
959 /*
960  * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
961  */
962 unsigned long vm_commit_limit(void)
963 {
964 	unsigned long allowed;
965 
966 	if (sysctl_overcommit_kbytes)
967 		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
968 	else
969 		allowed = ((totalram_pages() - hugetlb_total_pages())
970 			   * sysctl_overcommit_ratio / 100);
971 	allowed += total_swap_pages;
972 
973 	return allowed;
974 }
975 
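/*
 * Worked example (illustrative numbers, not taken from this kernel): with
 * sysctl_overcommit_kbytes == 0, sysctl_overcommit_ratio == 50, 4 GiB of
 * usable RAM (no hugetlb pages) and 2 GiB of swap, the commit limit is
 * 4 GiB * 50 / 100 + 2 GiB = 4 GiB worth of pages.  If overcommit_kbytes is
 * set instead, it replaces the ratio-based term entirely.
 */
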
976 /*
977  * Make sure vm_committed_as is in its own cacheline and not shared with
978  * other variables. It can be updated by several CPUs frequently.
979  */
980 struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
981 
982 /*
983  * The global memory commitment made in the system can be a metric
984  * that can be used to drive ballooning decisions when Linux is hosted
985  * as a guest. On Hyper-V, the host implements a policy engine for dynamically
986  * balancing memory across competing virtual machines that are hosted.
987  * Several metrics drive this policy engine including the guest reported
988  * memory commitment.
989  *
990  * The time cost of this is very low for small platforms, and for big
991  * platforms like a 2S/36C/72T Skylake server, in the worst case where
992  * vm_committed_as's spinlock is under severe contention, the time cost
993  * could be about 30~40 microseconds.
994  */
995 unsigned long vm_memory_committed(void)
996 {
997 	return percpu_counter_sum_positive(&vm_committed_as);
998 }
999 EXPORT_SYMBOL_GPL(vm_memory_committed);
1000 
1001 /*
1002  * Check that a process has enough memory to allocate a new virtual
1003  * mapping. 0 means there is enough memory for the allocation to
1004  * succeed and -ENOMEM implies there is not.
1005  *
1006  * We currently support three overcommit policies, which are set via the
1007  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
1008  *
1009  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
1010  * Additional code 2002 Jul 20 by Robert Love.
1011  *
1012  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
1013  *
1014  * Note this is a helper function intended to be used by LSMs which
1015  * wish to use this logic.
1016  */
1017 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
1018 {
1019 	long allowed;
1020 
1021 	vm_acct_memory(pages);
1022 
1023 	/*
1024 	 * Sometimes we want to use more memory than we have
1025 	 */
1026 	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
1027 		return 0;
1028 
1029 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
1030 		if (pages > totalram_pages() + total_swap_pages)
1031 			goto error;
1032 		return 0;
1033 	}
1034 
1035 	allowed = vm_commit_limit();
1036 	/*
1037 	 * Reserve some for root
1038 	 */
1039 	if (!cap_sys_admin)
1040 		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
1041 
1042 	/*
1043 	 * Don't let a single process grow so big a user can't recover
1044 	 */
1045 	if (mm) {
1046 		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
1047 
1048 		allowed -= min_t(long, mm->total_vm / 32, reserve);
1049 	}
1050 
1051 	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
1052 		return 0;
1053 error:
1054 	vm_unacct_memory(pages);
1055 
1056 	return -ENOMEM;
1057 }
1058 
1059 /**
1060  * get_cmdline() - copy the cmdline value to a buffer.
1061  * @task:     the task whose cmdline value to copy.
1062  * @buffer:   the buffer to copy to.
1063  * @buflen:   the length of the buffer. Larger cmdline values are truncated
1064  *            to this length.
1065  *
1066  * Return: the size of the cmdline field copied. Note that the copy does
1067  * not guarantee a trailing NUL byte.
1068  */
1069 int get_cmdline(struct task_struct *task, char *buffer, int buflen)
1070 {
1071 	int res = 0;
1072 	unsigned int len;
1073 	struct mm_struct *mm = get_task_mm(task);
1074 	unsigned long arg_start, arg_end, env_start, env_end;
1075 	if (!mm)
1076 		goto out;
1077 	if (!mm->arg_end)
1078 		goto out_mm;	/* Shh! No looking before we're done */
1079 
1080 	spin_lock(&mm->arg_lock);
1081 	arg_start = mm->arg_start;
1082 	arg_end = mm->arg_end;
1083 	env_start = mm->env_start;
1084 	env_end = mm->env_end;
1085 	spin_unlock(&mm->arg_lock);
1086 
1087 	len = arg_end - arg_start;
1088 
1089 	if (len > buflen)
1090 		len = buflen;
1091 
1092 	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
1093 
1094 	/*
1095 	 * If the nul at the end of args has been overwritten, then
1096 	 * assume application is using setproctitle(3).
1097 	 * assume the application is using setproctitle(3).
1098 	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
1099 		len = strnlen(buffer, res);
1100 		if (len < res) {
1101 			res = len;
1102 		} else {
1103 			len = env_end - env_start;
1104 			if (len > buflen - res)
1105 				len = buflen - res;
1106 			res += access_process_vm(task, env_start,
1107 						 buffer+res, len,
1108 						 FOLL_FORCE);
1109 			res = strnlen(buffer, res);
1110 		}
1111 	}
1112 out_mm:
1113 	mmput(mm);
1114 out:
1115 	return res;
1116 }
1117 
1118 int __weak memcmp_pages(struct page *page1, struct page *page2)
1119 {
1120 	char *addr1, *addr2;
1121 	int ret;
1122 
1123 	addr1 = kmap_atomic(page1);
1124 	addr2 = kmap_atomic(page2);
1125 	ret = memcmp(addr1, addr2, PAGE_SIZE);
1126 	kunmap_atomic(addr2);
1127 	kunmap_atomic(addr1);
1128 	return ret;
1129 }
1130 
1131 #ifdef CONFIG_PRINTK
1132 /**
1133  * mem_dump_obj - Print available provenance information
1134  * @object: object for which to find provenance information.
1135  *
1136  * This function uses pr_cont(), so the caller is expected to have
1137  * printed out whatever preamble is appropriate.  The provenance information
1138  * depends on the type of object and on how much debugging is enabled.
1139  * For example, for a slab-cache object, the slab name is printed, and,
1140  * if available, the return address and stack trace from the allocation
1141  * and last free path of that object.
1142  */
1143 void mem_dump_obj(void *object)
1144 {
1145 	const char *type;
1146 
1147 	if (kmem_valid_obj(object)) {
1148 		kmem_dump_obj(object);
1149 		return;
1150 	}
1151 
1152 	if (vmalloc_dump_obj(object))
1153 		return;
1154 
1155 	if (virt_addr_valid(object))
1156 		type = "non-slab/vmalloc memory";
1157 	else if (object == NULL)
1158 		type = "NULL pointer";
1159 	else if (object == ZERO_SIZE_PTR)
1160 		type = "zero-size pointer";
1161 	else
1162 		type = "non-paged memory";
1163 
1164 	pr_cont(" %s\n", type);
1165 }
1166 EXPORT_SYMBOL_GPL(mem_dump_obj);
1167 #endif
1168 
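/*
 * Illustrative sketch (not part of util.c): how a debug path might use
 * mem_dump_obj() after printing its own preamble without a trailing newline,
 * since mem_dump_obj() continues the line with pr_cont().  The wrapper name
 * is hypothetical and CONFIG_PRINTK is assumed.
 */
static void example_report_bad_pointer(void *ptr)
{
	pr_info("example: suspicious pointer %px:", ptr);
	mem_dump_obj(ptr);	/* appends provenance info and a newline */
}
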
1169 /*
1170  * A driver might set a page logically offline -- PageOffline() -- and
1171  * make the page inaccessible in the hypervisor; after that, access to the
1172  * page's content can be fatal.
1173  *
1174  * Some special PFN walkers -- e.g., /proc/kcore -- read the content of random
1175  * pages after checking PageOffline(); however, these PFN walkers can race
1176  * with drivers that set PageOffline().
1177  *
1178  * page_offline_freeze()/page_offline_thaw() allow a subsystem to
1179  * synchronize with such drivers, ensuring that a page cannot be set
1180  * PageOffline() while frozen.
1181  *
1182  * page_offline_begin()/page_offline_end() are used by drivers that care about
1183  * such races when setting a page PageOffline().
1184  */
1185 static DECLARE_RWSEM(page_offline_rwsem);
1186 
1187 void page_offline_freeze(void)
1188 {
1189 	down_read(&page_offline_rwsem);
1190 }
1191 
1192 void page_offline_thaw(void)
1193 {
1194 	up_read(&page_offline_rwsem);
1195 }
1196 
1197 void page_offline_begin(void)
1198 {
1199 	down_write(&page_offline_rwsem);
1200 }
1201 EXPORT_SYMBOL(page_offline_begin);
1202 
1203 void page_offline_end(void)
1204 {
1205 	up_write(&page_offline_rwsem);
1206 }
1207 EXPORT_SYMBOL(page_offline_end);
1208 
1209 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
1210 void flush_dcache_folio(struct folio *folio)
1211 {
1212 	long i, nr = folio_nr_pages(folio);
1213 
1214 	for (i = 0; i < nr; i++)
1215 		flush_dcache_page(folio_page(folio, i));
1216 }
1217 EXPORT_SYMBOL(flush_dcache_folio);
1218 #endif
1219