/* xref: /linux/mm/util.c (revision f5f4745a7f057b58c9728ee4e2c5d6d79f382fe7) */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <kunit/visibility.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated.
 * @s: The data to copy
 * @len: The size of the data, not including the NUL terminator
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	/* '+1' for the NUL terminator */
	buf = kmalloc_track_caller(len + 1, gfp);
	if (!buf)
		return NULL;

	memcpy(buf, s, len);
	/* Ensure the buf is always NUL-terminated, regardless of @s. */
	buf[len] = '\0';
	return buf;
}

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: the source string if it is in the .rodata section, otherwise
 * fall back to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
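
/*
 * Usage sketch (illustrative, not part of the original file): duplicating
 * a name that may point into .rodata and releasing it with the matching
 * helper; @template->name is a hypothetical source string.
 *
 *	const char *name = kstrdup_const(template->name, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);	(frees only if the copy was heap-allocated)
 */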

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup_noprof);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);
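
/*
 * Usage sketch (illustrative): duplicating an array of fixed-size
 * elements; @res and @num_res are hypothetical caller variables.
 *
 *	struct resource *copy;
 *
 *	copy = kmemdup_array(res, num_res, sizeof(*res), GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 *	...
 *	kfree(copy);
 */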

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, len, gfp) : NULL;
}
EXPORT_SYMBOL(kmemdup_nul);

static kmem_buckets *user_buckets __ro_after_init;

static int __init init_user_buckets(void)
{
	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);

	return 0;
}
subsys_initcall(init_user_buckets);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
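
/*
 * Usage sketch (illustrative): a typical ioctl-style handler copying a
 * fixed-size argument block from user space; @uarg is a hypothetical
 * user pointer.
 *
 *	struct my_args *args = memdup_user(uarg, sizeof(*args));
 *
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 *	...
 *	kfree(args);
 */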

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
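
/*
 * Usage sketch (illustrative): duplicating a user-supplied path with a
 * sane upper bound; @upath is a hypothetical user pointer.
 *
 *	char *path = strndup_user(upath, PATH_MAX);
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 */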

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
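
/*
 * Usage sketch (illustrative): a procfs-style write handler that wants a
 * NUL-terminated copy of the user buffer; all names are hypothetical.
 *
 *	static ssize_t foo_write(struct file *file, const char __user *buf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(buf, count);
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		... parse kbuf ...
 *		kfree(kbuf);
 *		return count;
 *	}
 */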

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32-bit? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
		!IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}
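
/*
 * Worked example (illustration only, assuming the default STACK_RND_MASK
 * above, i.e. 8 MiB of stack randomization, and a 1 MiB stack_guard_gap):
 * with RLIMIT_STACK = 8 MiB, gap = 8M + 1M + 8M = 17 MiB, which is below
 * MIN_GAP, so it is clamped to 128 MiB and the result is
 * PAGE_ALIGN(STACK_TOP - SZ_128M - rnd). Architectures with a larger
 * STACK_RND_MASK land between MIN_GAP and MAX_GAP instead.
 */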

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
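
/*
 * Usage sketch (illustrative): a driver pinning @npages pages charges
 * them before pinning and uncharges them on teardown; error handling
 * beyond the accounting call is elided.
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;
 *	...
 *	account_locked_vm(current->mm, npages, false);
 */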

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
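
/*
 * Usage sketch (illustrative): mapping the first @size bytes of a file
 * from kernel code; the returned address doubles as an error code.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 */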

static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
{
	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks, and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback would. However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings, as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		flags |= __GFP_NOWARN;

		if (!(flags & __GFP_RETRY_MAYFAIL))
			flags |= __GFP_NORETRY;

		/* The nofail semantic is implemented by the vmalloc fallback */
		flags &= ~__GFP_NOFAIL;
	}

	return flags;
}

/**
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @b: which set of kmalloc buckets to allocate from.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, nor is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
	void *ret;

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
				    kmalloc_gfp_adjust(flags, size),
				    node);
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);
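
/*
 * Usage sketch (illustrative): a possibly large table where physical
 * contiguity is not required, in a sleepable (GFP_KERNEL) context;
 * kvmalloc_array() tries kmalloc first and transparently falls back to
 * vmalloc. @nr is a hypothetical element count.
 *
 *	struct entry *tbl = kvmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */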

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);
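
/*
 * Usage sketch (illustrative): freeing a buffer that held key material;
 * the contents are explicitly cleared before the memory is returned.
 *
 *	void *payload = kvmalloc(payload_len, GFP_KERNEL);
 *	...
 *	kvfree_sensitive(payload, payload_len);
 */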

/**
 * kvrealloc - reallocate memory; contents remain unchanged
 * @p: object to reallocate memory for
 * @size: the size to reallocate
 * @flags: the flags for the page level allocator
 *
 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
 * and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * This function must not be called concurrently with itself or kvfree() for the
 * same memory allocation.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
	void *n;

	if (is_vmalloc_addr(p))
		return vrealloc_noprof(p, size, flags);

	n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
	if (!n) {
		/* We failed to krealloc(), fall back to kvmalloc(). */
		n = kvmalloc_noprof(size, flags);
		if (!n)
			return NULL;

		if (p) {
			/* We already know that `p` is not a vmalloc address. */
			kasan_disable_current();
			memcpy(n, kasan_reset_tag(p), ksize(p));
			kasan_enable_current();

			kfree(p);
		}
	}

	return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);
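
/*
 * Usage sketch (illustrative): growing a kvmalloc'ed buffer. On failure
 * the old allocation is left untouched, so assign through a temporary
 * rather than overwriting the only pointer to it.
 *
 *	void *tmp = kvrealloc(buf, new_size, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 */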

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);
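
/*
 * Usage sketch (illustrative): a zeroed, virtually contiguous array of
 * per-item counters; @nr_items is a hypothetical count.
 *
 *	u64 *counters = vcalloc(nr_items, sizeof(*counters));
 *
 *	if (!counters)
 *		return -ENOMEM;
 *	...
 *	vfree(counters);
 */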

struct anon_vma *folio_anon_vma(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int folio_mc_copy(struct folio *dst, struct folio *src)
{
	long nr = folio_nr_pages(src);
	long i = 0;

	for (;;) {
		if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
			return -EHWPOISON;
		if (++i == nr)
			break;
		cond_resched();
	}

	return 0;
}
EXPORT_SYMBOL(folio_mc_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with a loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing the policy to
	 * the strict OVERCOMMIT_NEVER, we need to reduce the deviation to
	 * comply with the strict "NEVER", and to avoid a possible race
	 * condition (even though users usually won't switch to
	 * OVERCOMMIT_NEVER frequently), the switch is done in the following
	 * order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
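
/*
 * Worked example (illustration only, with hypothetical sizes): 4 GiB of
 * RAM (1048576 pages of 4 KiB), no hugetlb pages, the default
 * overcommit_ratio of 50 and 2 GiB of swap give
 * 1048576 * 50 / 100 + 524288 = 1048576 pages, i.e. a 4 GiB commit limit
 * under OVERCOMMIT_NEVER.
 */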

/*
 * Make sure vm_committed_as is in its own cacheline, not sharing one with
 * other variables. It can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case, where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
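
/*
 * Usage sketch (illustrative): the caller prints the preamble itself
 * because mem_dump_obj() continues the same line with pr_cont().
 *
 *	pr_info("unexpected object %px:", obj);
 *	mem_dump_obj(obj);
 */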

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allow a subsystem to
 * synchronize with such drivers, ensuring that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
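
/*
 * Usage sketch (illustrative): a PFN walker reads page content only while
 * the page cannot concurrently become PageOffline().
 *
 *	page_offline_freeze();
 *	if (!PageOffline(page))
 *		... read page content ...
 *	page_offline_thaw();
 */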

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif