// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>
#include <linux/fsnotify.h>

#include <linux/uaccess.h>

#include <kunit/visibility.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * This function calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated.
 * @s: The data to copy
 * @len: The size of the data, not including the NUL terminator
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	/* '+1' for the NUL terminator */
	buf = kmalloc_track_caller(len + 1, gfp);
	if (!buf)
		return NULL;

	memcpy(buf, s, len);
	/* Ensure the buf is always NUL-terminated, regardless of @s. */
	buf[len] = '\0';
	return buf;
}

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const() should be freed by kfree_const()
 * and must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise a copy
 * made by falling back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL;
}
EXPORT_SYMBOL(kstrndup);
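
/*
 * Illustrative sketch (not part of the upstream file): a typical pattern for
 * the helpers above is storing a possibly-constant name with kstrdup_const()
 * and releasing it with kfree_const().  The struct and function names below
 * are hypothetical, chosen only for illustration.
 */
struct util_example_obj {
	const char *name;
};

static int __maybe_unused util_example_set_name(struct util_example_obj *obj,
						const char *name)
{
	/* Shares the pointer if @name lives in .rodata, copies it otherwise. */
	obj->name = kstrdup_const(name, GFP_KERNEL);
	if (!obj->name)
		return -ENOMEM;
	return 0;
}

static void __maybe_unused util_example_put_name(struct util_example_obj *obj)
{
	/* Only frees memory that was actually allocated by kstrdup_const(). */
	kfree_const(obj->name);
	obj->name = NULL;
}
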
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup_noprof);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	return s ? __kmemdup_nul(s, len, gfp) : NULL;
}
EXPORT_SYMBOL(kmemdup_nul);

static kmem_buckets *user_buckets __ro_after_init;

static int __init init_user_buckets(void)
{
	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);

	return 0;
}
subsys_initcall(init_user_buckets);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
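
/*
 * Illustrative sketch (not part of the upstream file): memdup_user() and
 * vmemdup_user() return an ERR_PTR() rather than NULL, so callers check the
 * result with IS_ERR()/PTR_ERR() and free with kfree()/kvfree() respectively.
 * The function name and parameters below are hypothetical.
 */
static int __maybe_unused util_example_copy_blob(const void __user *ubuf,
						 size_t len)
{
	void *blob;

	blob = memdup_user(ubuf, len);	/* physically contiguous copy */
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	/* ... use the kernel copy of the user buffer here ... */

	kfree(blob);
	return 0;
}
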
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
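
/*
 * Illustrative sketch (not part of the upstream file): memdup_user_nul() is
 * the usual way for a ->write() style handler to obtain a NUL-terminated
 * kernel copy of a user buffer before parsing it.  The function name below
 * is hypothetical.
 */
static ssize_t __maybe_unused util_example_parse_write(const char __user *ubuf,
						       size_t count)
{
	char *kbuf;

	kbuf = memdup_user_nul(ubuf, count);	/* always NUL-terminated */
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* ... parse kbuf (e.g. with kstrtoul()) here ... */

	kfree(kbuf);
	return count;
}
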
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/*
	 * On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout.
	 */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
	    !IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task.  mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret)
		ret = fsnotify_mmap_perm(file, prot, pgoff >> PAGE_SHIFT, len);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

/*
 * Perform a userland memory mapping into the current process address space. See
 * the comment for do_mmap() for more details on this operation in general.
 *
 * This differs from do_mmap() in that:
 *
 * a. An offset parameter is provided rather than pgoff, which is checked both
 *    for overflow and for page alignment.
 * b. mmap locking is performed on the caller's behalf.
 * c. Userfaultfd unmap events and memory population are handled.
 *
 * This means that this function performs essentially the same work as if
 * userland were invoking mmap(2).
 *
 * Returns either an error, or the address at which the requested mapping has
 * been performed.
 */
unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
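
/*
 * Illustrative sketch (not part of the upstream file): in-kernel users such
 * as binfmt loaders map a file with vm_mmap() and check the result with
 * IS_ERR_VALUE(), since errors come back as negative values cast to
 * unsigned long.  The function name below is hypothetical.
 */
static unsigned long __maybe_unused util_example_map_file(struct file *file,
							   unsigned long size)
{
	unsigned long addr;

	addr = vm_mmap(file, 0, size, PROT_READ, MAP_PRIVATE, 0);
	if (IS_ERR_VALUE(addr))
		return addr;	/* negative errno encoded in the value */

	/* ... the mapping at @addr is now visible to the current process ... */
	return addr;
}
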
/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);
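
/*
 * Illustrative sketch (not part of the upstream file): vmalloc_array() and
 * vcalloc() guard against multiplication overflow, so callers pass the
 * element count and size separately instead of open-coding n * size.  The
 * struct and function names below are hypothetical.
 */
struct util_example_entry {
	u64 key;
	u64 value;
};

static __maybe_unused struct util_example_entry *
util_example_alloc_table(size_t nr_entries)
{
	/* Zeroed, virtually contiguous allocation; free with vfree(). */
	return vcalloc(nr_entries, sizeof(struct util_example_entry));
}
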
struct anon_vma *folio_anon_vma(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int folio_mc_copy(struct folio *dst, struct folio *src)
{
	long nr = folio_nr_pages(src);
	long i = 0;

	for (;;) {
		if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
			return -EHWPOISON;
		if (++i == nr)
			break;
		cond_resched();
	}

	return 0;
}
EXPORT_SYMBOL(folio_mc_copy);
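
/*
 * Illustrative sketch (not part of the upstream file): folio_mc_copy() is the
 * machine-check-safe variant of folio_copy(), so a caller that may read from
 * potentially poisoned memory checks its return value and backs out on
 * -EHWPOISON instead of consuming the poison.  The function name below is
 * hypothetical.
 */
static int __maybe_unused util_example_copy_folio(struct folio *dst,
						  struct folio *src)
{
	int ret;

	ret = folio_mc_copy(dst, src);	/* -EHWPOISON on machine-check error */
	if (ret)
		return ret;	/* leave @dst alone, let the caller bail out */

	/* ... continue with, e.g., migration bookkeeping on success ... */
	return 0;
}
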
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users won't usually switch to OVERCOMMIT_NEVER very
	 * frequently), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables.  It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and on a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}
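
/*
 * Illustrative sketch (not part of the upstream file): as the comment above
 * notes, __vm_enough_memory() is meant to be called from an LSM's
 * vm_enough_memory hook.  This hypothetical helper simply forwards to it,
 * treating CAP_SYS_ADMIN as the "admin privileges" flag; real LSMs may
 * determine cap_sys_admin differently (e.g. without generating audit events).
 */
static int __maybe_unused util_example_vm_enough_memory(struct mm_struct *mm,
							 long pages)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN) ? 1 : 0;

	return __vm_enough_memory(mm, pages, cap_sys_admin);
}
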
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
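
/*
 * Illustrative sketch (not part of the upstream file): because mem_dump_obj()
 * continues the current log line with pr_cont(), callers print a preamble
 * without a trailing newline first.  The function name and message text below
 * are hypothetical.
 */
static void __maybe_unused util_example_report_object(void *object)
{
	pr_info("suspect object %px:", object);	/* no newline here */
	mem_dump_obj(object);			/* appends the provenance info */
}
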
/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- e.g., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif
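
/*
 * Illustrative sketch (not part of the upstream file): a PFN walker in the
 * spirit of the comment above brackets its scan with page_offline_freeze()/
 * page_offline_thaw() and skips PageOffline() pages, so no page can newly
 * become PageOffline() while it is being inspected.  The function name and
 * loop bounds are hypothetical.
 */
static void __maybe_unused util_example_walk_pfns(unsigned long start_pfn,
						  unsigned long nr_pages)
{
	unsigned long pfn;

	page_offline_freeze();
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		struct page *page = pfn_valid(pfn) ? pfn_to_page(pfn) : NULL;

		if (!page || PageOffline(page))
			continue;	/* content may be inaccessible */

		/* ... safely read or inspect the page content here ... */
	}
	page_offline_thaw();
}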