// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise fall back
 * to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

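/*
 * Example (illustrative sketch, not a caller in this file): the const
 * variants pair up so that strings living in .rodata are neither copied
 * nor freed; "src" and the error style below are hypothetical.
 *
 *	const char *name = kstrdup_const(src, GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 */
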
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

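/*
 * Example (illustrative sketch, not a caller in this file): duplicating
 * a fixed-size argument from user space, e.g. in an ioctl handler; the
 * struct name and "uarg" are hypothetical.
 *
 *	struct foo_params *p = memdup_user(uarg, sizeof(*p));
 *
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	...
 *	kfree(p);
 */
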
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

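/*
 * Worked example for arch_mmap_rnd() (illustrative, assuming 4 KiB
 * pages and mmap_rnd_bits == 28, a common 64-bit default): the mask
 * keeps 28 random bits and the shift scales them to page granularity,
 * so the mmap base can be displaced by up to
 * (1UL << 28) * 4 KiB == 1 TiB.
 */
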
/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

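/*
 * Example (illustrative sketch, not a caller in this file): a driver
 * that pins user pages and already holds mmap_lock for writing could
 * account them as follows; "npages" and the label are hypothetical.
 *
 *	ret = __account_locked_vm(mm, npages, true, current,
 *				  capable(CAP_IPC_LOCK));
 *	if (ret)
 *		goto unpin;
 */
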
/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0 on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, and neither is the
 * __GFP_NORETRY modifier. __GFP_RETRY_MAYFAIL is supported, and it should be
 * used only if kmalloc is preferable to the vmalloc fallback, due to visible
 * performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	return __vmalloc_node(size, 1, flags, node,
			      __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

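/*
 * Example (illustrative sketch, not a caller in this file): sizing a
 * table from user-controlled input, transparently falling back to
 * vmalloc for large requests; "nr" and the struct are hypothetical.
 *
 *	struct foo_entry *tbl = kvmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */
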
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	return folio_raw_mapping(page_folio(page));
}

/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
bool folio_mapped(struct folio *folio)
{
	long i, nr;

	if (!folio_test_large(folio))
		return atomic_read(&folio->_mapcount) >= 0;
	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
		return true;
	if (folio_test_hugetlb(folio))
		return false;

	nr = folio_nr_pages(folio);
	for (i = 0; i < nr; i++) {
		if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(folio_mapped);

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

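/*
 * Illustration for folio_anon_vma() above (not code in this file): an
 * anonymous folio stores its anon_vma pointer in folio->mapping with
 * the PAGE_MAPPING_ANON bit set, i.e. conceptually
 *
 *	folio->mapping = (void *)((unsigned long)anon_vma | PAGE_MAPPING_ANON);
 *
 * so subtracting (or masking off) the flag bits recovers the pointer.
 */
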
/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio_swap_entry(folio));

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(folio_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

/**
 * folio_mapcount() - Calculate the number of mappings of this folio.
 * @folio: The folio.
 *
 * A large folio tracks both how many times the entire folio is mapped,
 * and how many times each individual page in the folio is mapped.
 * This function calculates the total number of times the folio is
 * mapped.
 *
 * Return: The number of times this folio is mapped.
 */
int folio_mapcount(struct folio *folio)
{
	int i, compound, nr, ret;

	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) + 1;

	compound = folio_entire_mapcount(folio);
	nr = folio_nr_pages(folio);
	if (folio_test_hugetlb(folio))
		return compound;
	ret = compound;
	for (i = 0; i < nr; i++)
		ret += atomic_read(&folio_page(folio, i)->_mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!folio_test_anon(folio))
		return ret - compound * nr;
	if (folio_test_double_map(folio))
		ret -= nr;
	return ret;
}

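/*
 * Worked example for folio_mapcount() above (illustrative): an
 * anonymous THP of nr == 512 pages mapped by exactly one PMD has an
 * entire mapcount of 1 while every subpage's _mapcount reads -1, so
 * each loop iteration adds 0 and the function returns 1.
 */
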
/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with a loose
	 * policy like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing
	 * policy to the strict OVERCOMMIT_NEVER, we need to reduce the
	 * deviation to comply with the strict "NEVER", and to avoid a
	 * possible race condition (even though users usually won't switch
	 * to OVERCOMMIT_NEVER frequently), the switch is done in the
	 * following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as sits in its own cacheline and is not shared
 * with other variables. It can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

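/*
 * Worked example for vm_commit_limit() above (illustrative): with
 * overcommit_kbytes unset, overcommit_ratio == 50, 16 GiB of
 * non-hugetlb RAM and 4 GiB of swap, the commit limit is
 * 16 GiB * 50 / 100 + 4 GiB == 12 GiB under OVERCOMMIT_NEVER.
 */
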
/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

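/*
 * Worked example for the OVERCOMMIT_GUESS branch above (illustrative):
 * on a machine with 16 GiB of RAM and 4 GiB of swap, a single 32 GiB
 * reservation is refused outright, while many smaller reservations may
 * each succeed and overcommit the machine - GUESS only rejects requests
 * that could never be satisfied on their own.
 */
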
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allow a subsystem to
 * synchronize with such drivers, ensuring that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif

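/*
 * Example (illustrative sketch, not a caller in this file): writing to
 * a folio through a temporary kernel mapping and flushing afterwards;
 * "folio", "src" and "len" are hypothetical, and the write is assumed
 * to stay within the first page of the folio.
 *
 *	void *kaddr = kmap_local_folio(folio, 0);
 *
 *	memcpy(kaddr, src, len);
 *	kunmap_local(kaddr);
 *	flush_dcache_folio(folio);
 */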