// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <kunit/visibility.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated.
 * @s: The data to copy
 * @len: The size of the data, not including the NUL terminator
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	/* '+1' for the NUL terminator */
	buf = kmalloc_track_caller(len + 1, gfp);
	if (!buf)
		return NULL;

	memcpy(buf, s, len);
	/* Ensure the buf is always NUL-terminated, regardless of @s. */
	buf[len] = '\0';
	return buf;
}

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise
 * falls back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL;
}
EXPORT_SYMBOL(kstrndup);

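/*
 * Illustrative usage sketch (hypothetical caller, not compiled here):
 * kstrdup_const() avoids the copy when the source is a string literal in
 * .rodata, and the result must be released with kfree_const(), never
 * passed to krealloc():
 *
 *	obj->name = kstrdup_const(name, GFP_KERNEL);
 *	if (!obj->name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(obj->name);
 */
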
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup_noprof);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, len, gfp) : NULL;
}
EXPORT_SYMBOL(kmemdup_nul);

static kmem_buckets *user_buckets __ro_after_init;

static int __init init_user_buckets(void)
{
	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);

	return 0;
}
subsys_initcall(init_user_buckets);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

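/*
 * Illustrative sketch (hypothetical ioctl-style caller, not part of this
 * file): memdup_user() returns an ERR_PTR() rather than NULL, so callers
 * check with IS_ERR()/PTR_ERR() and free with kfree(); for large or
 * size-unbounded requests, vmemdup_user()/kvfree() is the usual choice:
 *
 *	buf = memdup_user(uarg, len);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */
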
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start: The smallest acceptable address the caller will take.
 * @range: The size of the area, starting at @start, within which the
 *         random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

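/*
 * Worked example (illustrative, assuming 4K pages and the generic
 * STACK_RND_MASK above): the offset applied by randomize_stack_top() is
 * (random & 0x7ff) << 12, i.e. up to 8MB - 4KB below (or above, with
 * CONFIG_STACK_GROWSUP) the page-aligned stack top. Similarly,
 * arch_randomize_brk() below places the brk within SZ_32M or SZ_1G of
 * mm->brk via randomize_page().
 */
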
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
	    !IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

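/*
 * Worked example (illustrative, assuming 4K pages and a stack_guard_gap of
 * 256 pages): with a stack rlimit of 8MB the gap is 8MB + 1MB of guard
 * (plus up to 8MB of randomization pad with PF_RANDOMIZE), which is below
 * MIN_GAP and therefore clamped to 128MB, so mmap_base ends up at
 * PAGE_ALIGN(STACK_TOP - SZ_128M - rnd).
 */
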
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

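/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * caller that pins user pages typically charges them against
 * RLIMIT_MEMLOCK before pinning and uncharges them again on release:
 *
 *	ret = account_locked_vm(current->mm, nr_pages, true);
 *	if (ret)
 *		return ret;
 *	...
 *	account_locked_vm(current->mm, nr_pages, false);
 */
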
static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
{
	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		flags |= __GFP_NOWARN;

		if (!(flags & __GFP_RETRY_MAYFAIL))
			flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		flags &= ~__GFP_NOFAIL;
	}

	return flags;
}

/**
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @b: which set of kmalloc buckets to allocate from.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
	void *ret;

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
				    kmalloc_gfp_adjust(flags, size),
				    node);
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

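/*
 * Illustrative sketch (hypothetical caller): kvmalloc() tries kmalloc()
 * first and transparently falls back to vmalloc() for larger sizes, so the
 * buffer may or may not be physically contiguous and must be freed with
 * kvfree():
 *
 *	tbl = kvmalloc(size, GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */
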
/**
 * kvrealloc - reallocate memory; contents remain unchanged
 * @p: object to reallocate memory for
 * @size: the size to reallocate
 * @flags: the flags for the page level allocator
 *
 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
 * and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * This function must not be called concurrently with itself or kvfree() for the
 * same memory allocation.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
	void *n;

	if (is_vmalloc_addr(p))
		return vrealloc_noprof(p, size, flags);

	n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
	if (!n) {
		/* We failed to krealloc(), fall back to kvmalloc(). */
		n = kvmalloc_noprof(size, flags);
		if (!n)
			return NULL;

		if (p) {
			/* We already know that `p` is not a vmalloc address. */
			kasan_disable_current();
			memcpy(n, kasan_reset_tag(p), ksize(p));
			kasan_enable_current();

			kfree(p);
		}
	}

	return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);

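/*
 * Illustrative sketch (hypothetical caller): vcalloc()/vmalloc_array()
 * multiply @n and @size with overflow checking, so callers pass the element
 * count and size separately instead of open-coding n * size:
 *
 *	entries = vcalloc(nr_entries, sizeof(*entries));
 *	if (!entries)
 *		return -ENOMEM;
 *	...
 *	vfree(entries);
 */
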
struct anon_vma *folio_anon_vma(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int folio_mc_copy(struct folio *dst, struct folio *src)
{
	long nr = folio_nr_pages(src);
	long i = 0;

	for (;;) {
		if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
			return -EHWPOISON;
		if (++i == nr)
			break;
		cond_resched();
	}

	return 0;
}
EXPORT_SYMBOL(folio_mc_copy);

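/*
 * Illustrative sketch (hypothetical caller): when the source folio may
 * contain hardware-poisoned memory, use the machine-check-aware copy and
 * propagate the error instead of calling folio_copy():
 *
 *	rc = folio_mc_copy(dst, src);
 *	if (rc)
 *		return rc;	(-EHWPOISON)
 */
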
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policies
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to the
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users usually won't switch to OVERCOMMIT_NEVER very
	 * frequently), the switch is done in the following order:
	 *	1. change the batch
	 *	2. sync the percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

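/*
 * Worked example (illustrative): with sysctl_overcommit_kbytes == 0,
 * sysctl_overcommit_ratio == 50, 16GB of non-hugetlb RAM and 4GB of swap,
 * the commit limit is 16GB * 50 / 100 + 4GB = 12GB worth of pages.
 */
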
/*
 * Make sure vm_committed_as is in one cacheline and not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

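/*
 * Illustrative sketch (hypothetical caller): the copy made by get_cmdline()
 * is not guaranteed to be NUL-terminated, so terminate it explicitly:
 *
 *	char buf[256];
 *	int len = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[len] = '\0';
 */
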
int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allow a subsystem to
 * synchronize with such drivers, ensuring that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif
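
/*
 * Illustrative sketch (hypothetical PFN walker, not part of this file):
 * users of the page_offline_freeze()/page_offline_thaw() helpers above
 * bracket their inspection of potentially offline pages so that no page
 * can newly become PageOffline() while it is being read:
 *
 *	page_offline_freeze();
 *	if (!PageOffline(page))
 *		... read page content ...
 *	page_offline_thaw();
 */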