// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"
/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise fall back
 * to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
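/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * kstrdup_const() with kfree_const() for strings that are frequently
 * string literals. The struct and function names here are hypothetical.
 *
 *	struct attr {
 *		const char *name;
 *	};
 *
 *	static int attr_set_name(struct attr *a, const char *name)
 *	{
 *		const char *copy = kstrdup_const(name, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree_const(a->name);
 *		a->name = copy;
 *		return 0;
 *	}
 */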
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
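/*
 * Illustrative sketch (not part of this file): duplicating a small,
 * fixed-size object with kmemdup(); the names are hypothetical.
 *
 *	struct config {
 *		u32 flags;
 *		u32 timeout_ms;
 *	};
 *
 *	static struct config *config_clone(const struct config *src)
 *	{
 *		return kmemdup(src, sizeof(*src), GFP_KERNEL);
 *	}
 *
 * The copy is physically contiguous and is released with kfree().
 */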
/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
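/*
 * Illustrative sketch (not part of this file): the ERR_PTR() convention
 * callers of memdup_user() are expected to follow. The ioctl-style
 * helper and its names are hypothetical.
 *
 *	static int demo_ioctl_set(void __user *uptr, size_t len)
 *	{
 *		void *buf = memdup_user(uptr, len);
 *
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *		// ... consume buf ...
 *		kfree(buf);
 *		return 0;
 *	}
 */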
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
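/*
 * Illustrative note (not part of this file): vmemdup_user() is the
 * variant of choice when @len is user-controlled and may be large,
 * since the kvmalloc() backing can fall back to vmalloc. A hypothetical
 * sketch:
 *
 *	void *buf = vmemdup_user(uptr, len);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	// ...
 *	kvfree(buf);
 */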
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
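/*
 * Worked example (added for illustration): with 4KiB pages
 * (PAGE_SHIFT == 12), the default STACK_RND_MASK is 0x7ff, so the stack
 * base is shifted by up to 0x7ff << 12 = 0x7ff000 bytes, i.e. just
 * under 8MiB of randomization, in page-sized steps.
 */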
/**
 * randomize_page - Generate a random, page aligned address
 * @start: The smallest acceptable address the caller will take.
 * @range: The size of the area, starting at @start, within which the
 *         random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
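/*
 * Worked example (added for illustration): with mmap_rnd_bits == 28 and
 * 4KiB pages, arch_mmap_rnd() keeps 28 random bits and shifts them left
 * by PAGE_SHIFT, so the mmap base moves within a 2^28 * 4KiB = 1TiB
 * window, always at a page-aligned offset.
 */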
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
	    !IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
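/*
 * Worked example (added for illustration, assuming the common default
 * stack_guard_gap of 256 pages): with a stack rlimit of 8MiB and 4KiB
 * pages, gap starts at 8MiB, gains ~1MiB of guard-gap padding plus up
 * to ~8MiB of STACK_RND_MASK padding, and is then clamped up to
 * MIN_GAP (128MiB), so the top-down mmap base ends up at least 128MiB
 * below STACK_TOP.
 */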
/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);
/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
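/*
 * Illustrative sketch (not part of this file): the charge/uncharge
 * pairing a driver that pins user pages would use; the variable names
 * are hypothetical.
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;	// RLIMIT_MEMLOCK would be exceeded
 *	// ... pin the pages ...
 *	// and on teardown:
 *	account_locked_vm(current->mm, npages, false);
 */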
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as
	 * we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
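/*
 * Illustrative sketch (not part of this file): the usual
 * kvmalloc()/kvfree() pairing for a table whose size scales with a
 * user-tunable count; the names are hypothetical.
 *
 *	struct entry *table;
 *
 *	table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	// small counts come from kmalloc, large ones from vmalloc
 *	kvfree(table);
 */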
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);
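/*
 * Illustrative sketch (not part of this file): freeing key material so
 * that the clearing cannot be optimized away; the names are
 * hypothetical.
 *
 *	u8 *key = kvmalloc(key_len, GFP_KERNEL);
 *
 *	// ... use the key ...
 *	kvfree_sensitive(key, key_len);	// zeroes, then frees
 */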
/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);
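/*
 * Illustrative note (not part of this file): the point of these helpers
 * is the check_mul_overflow() guard. An open-coded vmalloc(n * size)
 * silently wraps when n and size are attacker-influenced; e.g. with a
 * 32-bit size_t, n = 0x10000001 and size = 0x10 multiply to 0x10 after
 * wrap-around, while vmalloc_array(n, size) returns NULL instead.
 */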
struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}
int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users rarely switch to OVERCOMMIT_NEVER), the switch
	 * is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
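/*
 * Worked example (added for illustration): with the default
 * overcommit_ratio of 50, 8GiB of RAM, no hugetlb pages and 2GiB of
 * swap, vm_commit_limit() allows 8GiB * 50 / 100 + 2GiB = 6GiB of
 * committed address space (expressed in pages). Setting
 * overcommit_kbytes makes the first term an absolute size instead of
 * a percentage of RAM.
 */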
/*
 * Make sure vm_committed_as is in one cacheline and is not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	pr_warn_ratelimited("%s: pid: %d, comm: %s, not enough memory for the allocation\n",
			    __func__, current->pid, current->comm);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
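/*
 * Illustrative sketch (not part of this file): how a caller pairs its
 * own preamble with mem_dump_obj(), which continues the same line via
 * pr_cont():
 *
 *	pr_info("freeing object %px:", obj);
 *	mem_dump_obj(obj);
 */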
/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif