// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>
#include <linux/fsnotify.h>

#include <linux/uaccess.h>

#include <kunit/visibility.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated.
 * @s: The data to copy
 * @len: The size of the data, not including the NUL terminator
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	/* '+1' for the NUL terminator */
	buf = kmalloc_track_caller(len + 1, gfp);
	if (!buf)
		return NULL;

	memcpy(buf, s, len);
	/* Ensure the buf is always NUL-terminated, regardless of @s. */
	buf[len] = '\0';
	return buf;
}

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
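 * Example (illustrative only, @attr stands for a hypothetical caller's
 * structure)::
 *
 *	attr->name = kstrdup_const(name, GFP_KERNEL);
 *	if (!attr->name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(attr->name);
 *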
 * Return: source string if it is in .rodata section otherwise
 * fallback to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup_noprof);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, len, gfp) : NULL;
}
EXPORT_SYMBOL(kmemdup_nul);

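/*
 * Dedicated set of kmalloc buckets for the memdup_user() family below.
 * Giving these user-controlled-size allocations their own caches, separate
 * from the shared kmalloc caches, is a hardening measure; see
 * kmem_buckets_create() for the meaning of the parameters used here.
 */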
static kmem_buckets *user_buckets __ro_after_init;

static int __init init_user_buckets(void)
{
	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);

	return 0;
}
subsys_initcall(init_user_buckets);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
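 * Example (illustrative only, @uarg and @size are a hypothetical caller's
 * values)::
 *
 *	buf = memdup_user(uarg, size);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 *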
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

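/*
 * randomize_stack_top - randomize the stack top (typically called from the
 * ELF loader). Only applies when the task has PF_RANDOMIZE set; otherwise
 * @stack_top is simply page-aligned. The random offset is at most
 * STACK_RND_MASK pages and is applied below @stack_top, or above it when the
 * stack grows upwards.
 */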
unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start: The smallest acceptable address the caller will take.
 * @range: The size of the area, starting at @start, within which the
 *         random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

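/*
 * Generic mmap base randomization: return a page-aligned random offset with
 * mmap_rnd_bits bits of entropy (mmap_rnd_compat_bits for compat tasks).
 */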
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
	    !IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0 on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

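/*
 * Like vm_mmap(), but takes a page offset rather than a byte offset and does
 * no overflow or alignment checking of its own. Performs the security and
 * fsnotify permission checks, takes the mmap write lock around do_mmap(),
 * completes any pending userfaultfd unmap events and populates the mapping
 * when do_mmap() asks for it (e.g. MAP_POPULATE or MAP_LOCKED).
 */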
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret)
		ret = fsnotify_mmap_perm(file, prot, pgoff >> PAGE_SHIFT, len);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

/*
 * Perform a userland memory mapping into the current process address space. See
 * the comment for do_mmap() for more details on this operation in general.
 *
 * This differs from do_mmap() in that:
 *
 * a. An offset parameter is provided rather than pgoff, which is both checked
 *    for overflow and page alignment.
 * b. mmap locking is performed on the caller's behalf.
 * c. Userfaultfd unmap events and memory population are handled.
 *
 * This means that this function performs essentially the same work as if
 * userland were invoking mmap(2).
 *
 * Returns either an error, or the address at which the requested mapping has
 * been performed.
 */
unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
{
	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback would. However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		flags |= __GFP_NOWARN;

		if (!(flags & __GFP_RETRY_MAYFAIL))
			flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		flags &= ~__GFP_NOFAIL;
	}

	return flags;
}

/**
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @b: which set of kmalloc buckets to allocate from.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
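 * Example (illustrative only, @count and @buf are a hypothetical caller's
 * variables)::
 *
 *	buf = kvmalloc_array(count, sizeof(*buf), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvfree(buf);
 *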
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
	void *ret;

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
				    kmalloc_gfp_adjust(flags, size),
				    node);
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

/**
 * kvrealloc - reallocate memory; contents remain unchanged
 * @p: object to reallocate memory for
 * @size: the size to reallocate
 * @flags: the flags for the page level allocator
 *
 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
 * and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * This function must not be called concurrently with itself or kvfree() for the
 * same memory allocation.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
	void *n;

	if (is_vmalloc_addr(p))
		return vrealloc_noprof(p, size, flags);

	n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
	if (!n) {
		/* We failed to krealloc(), fall back to kvmalloc(). */
		n = kvmalloc_noprof(size, flags);
		if (!n)
			return NULL;

		if (p) {
			/* We already know that `p` is not a vmalloc address. */
			kasan_disable_current();
			memcpy(n, kasan_reset_tag(p), ksize(p));
			kasan_enable_current();

			kfree(p);
		}
	}

	return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);

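/*
 * folio_anon_vma - return the anon_vma an anonymous folio is associated with.
 *
 * Returns NULL unless the folio's ->mapping carries exactly the
 * PAGE_MAPPING_ANON flag, so non-anonymous folios (and KSM folios, which set
 * additional mapping flag bits) also return NULL.
 */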
struct anon_vma *folio_anon_vma(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

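/**
 * folio_mc_copy - Machine-check-aware copy of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * Like folio_copy(), but copies each page with copy_mc_highpage() so that a
 * hardware memory error in the source is reported instead of consumed.
 *
 * Return: 0 on success, -EHWPOISON if poisoned data was encountered while
 * copying.
 */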
int folio_mc_copy(struct folio *dst, struct folio *src)
{
	long nr = folio_nr_pages(src);
	long i = 0;

	for (;;) {
		if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
			return -EHWPOISON;
		if (++i == nr)
			break;
		cond_resched();
	}

	return 0;
}
EXPORT_SYMBOL(folio_mc_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

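/*
 * sysctl handlers for the overcommit knobs. vm.overcommit_ratio and
 * vm.overcommit_kbytes are mutually exclusive: writing one clears the other,
 * so only the most recently set limit is in effect.
 */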
int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
			     size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
			      size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policies
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing to the strict
	 * OVERCOMMIT_NEVER policy, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users usually won't switch to OVERCOMMIT_NEVER very
	 * frequently), the switch is done in the following order:
	 *	1. change the batch
	 *	2. sync the percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
			      size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in one cacheline and does not share a
 * cacheline with other variables. It can be updated by several CPUs
 * frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

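/*
 * Compare the contents of two pages byte by byte, with memcmp() semantics
 * (0 means identical). Declared __weak so that architectures can provide a
 * more efficient implementation.
 */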
int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
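/*
 * Generic fallback for architectures that do not provide their own
 * flush_dcache_folio(): flush each page of the folio individually.
 */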
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif