/* xref: /linux/mm/util.c (revision 436381eaf2a423e60fc8340399f7d2458091b383) */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise a
 * newly allocated copy made with kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
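
/*
 * Example (illustrative sketch, not part of this file): a caller whose
 * name is usually a string literal can avoid the copy entirely when the
 * source lives in .rodata. The identifiers below are hypothetical.
 *
 *	const char *name = kstrdup_const(src_name, GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);	(frees only if @name is not in .rodata)
 */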

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
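
/*
 * Example (illustrative sketch): bounding the copy of an untrusted,
 * possibly unterminated source string. The names "src", "label" and
 * LABEL_MAX are hypothetical.
 *
 *	char *label = kstrndup(src, LABEL_MAX, GFP_KERNEL);
 *
 *	if (!label)
 *		return -ENOMEM;
 *	...
 *	kfree(label);
 */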

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error. The
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup_noprof);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error. The
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);
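
/*
 * Example (illustrative sketch): duplicating a table of elements while
 * letting size_mul() saturate on multiplication overflow. The struct and
 * variable names are hypothetical.
 *
 *	struct entry *copy;
 *
 *	copy = kmemdup_array(src_entries, nr_entries, sizeof(*copy),
 *			     GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 */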

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error. The
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
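
/*
 * Example (illustrative sketch): turning a pointer/length pair that is
 * not NUL-terminated (e.g. a substring of a larger buffer) into a proper
 * C string. The names "buf", "start" and "end" are hypothetical.
 *
 *	char *word = kmemdup_nul(buf + start, end - start, GFP_KERNEL);
 *
 *	if (!word)
 *		return -ENOMEM;
 */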

static kmem_buckets *user_buckets __ro_after_init;

static int __init init_user_buckets(void)
{
	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);

	return 0;
}
subsys_initcall(init_user_buckets);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
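
/*
 * Example (illustrative sketch): the typical ioctl-style pattern. Note
 * the ERR_PTR() convention - unlike kmalloc(), failure is not %NULL.
 * The struct and variable names are hypothetical.
 *
 *	struct demo_args *args = memdup_user(uarg, sizeof(*args));
 *
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 *	...
 *	kfree(args);
 */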

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
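
/*
 * Example (illustrative sketch): copying a bounded, user-supplied
 * path-like string; errors come back as ERR_PTR(). The "uname" variable
 * is hypothetical.
 *
 *	char *name = strndup_user(uname, PATH_MAX);
 *
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...
 *	kfree(name);
 */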

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
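
/*
 * Example (illustrative sketch): the common procfs/debugfs write-handler
 * pattern, where @count bytes of user data become a NUL-terminated
 * string. The demo_write() function is hypothetical.
 *
 *	static ssize_t demo_write(struct file *file, const char __user *ubuf,
 *				  size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		...
 *		kfree(kbuf);
 *		return count;
 *	}
 */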

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32-bit? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/*
	 * On parisc the stack always grows up, so an unlimited stack should
	 * not be an indicator to use the legacy memory layout.
	 */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
		!IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
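
/*
 * Example (illustrative sketch): a driver that pins user pages might
 * account them against RLIMIT_MEMLOCK before pinning and unaccount them
 * on teardown. The "nr_pages" variable is hypothetical.
 *
 *	ret = account_locked_vm(current->mm, nr_pages, true);
 *	if (ret)
 *		return ret;	(-ENOMEM: would exceed RLIMIT_MEMLOCK)
 *	...
 *	account_locked_vm(current->mm, nr_pages, false);
 */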

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @b: which set of kmalloc buckets to allocate from.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, nor is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as
	 * we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b), kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);
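
/*
 * Example (illustrative sketch): the usual kvmalloc()/kvfree() pairing
 * for an allocation whose size may be too large for kmalloc() to satisfy
 * reliably. The struct and variable names are hypothetical.
 *
 *	struct record *tbl = kvmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);	(correct for both the kmalloc and vmalloc cases)
 */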

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);
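
/*
 * Example (illustrative sketch): releasing key material so that the
 * clearing cannot be optimised away by the compiler. The "key" and
 * "key_len" names are hypothetical.
 *
 *	u8 *key = kvmalloc(key_len, GFP_KERNEL);
 *	...
 *	kvfree_sensitive(key, key_len);	(memzero_explicit() + kvfree())
 */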

void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc_noprof(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc_noprof);
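
/*
 * Example (illustrative sketch): growing a kvmalloc'ed buffer. On
 * failure the old buffer is left intact (only the success path frees
 * it above), so the result must not overwrite the old pointer blindly.
 * The variable names are hypothetical.
 *
 *	new = kvrealloc(buf, old_size, new_size, GFP_KERNEL);
 *	if (!new)
 *		goto err_keep_old;	(buf is still valid here)
 *	buf = new;
 */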

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);
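
/*
 * Example (illustrative sketch): a zeroed, virtually contiguous array
 * whose element count comes from userspace, relying on the built-in
 * multiplication overflow check. The names are hypothetical.
 *
 *	struct slot *slots = vcalloc(nr_slots, sizeof(*slots));
 *
 *	if (!slots)
 *		return -ENOMEM;
 *	...
 *	vfree(slots);
 */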

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be large with a loose
	 * policy like OVERCOMMIT_ALWAYS or OVERCOMMIT_GUESS. When changing
	 * policy to the strict OVERCOMMIT_NEVER, we need to reduce the
	 * deviation to comply with the strict "NEVER", and to avoid a
	 * possible race condition (even though users usually won't switch
	 * to OVERCOMMIT_NEVER frequently), the switch is done in the
	 * following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in its own cacheline, not sharing one with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
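
/*
 * Example (illustrative sketch): callers print their own preamble first,
 * since mem_dump_obj() continues the same line with pr_cont(). The "ptr"
 * variable is hypothetical.
 *
 *	pr_info("suspicious pointer %px:", ptr);
 *	mem_dump_obj(ptr);
 */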

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- e.g., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif
1176