// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section, otherwise fall back
 * to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
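
/*
 * Illustrative (hypothetical) pairing of kstrdup_const() and kfree_const():
 * callers that are usually handed string literals avoid a copy, while heap
 * strings are still duplicated.  A minimal sketch:
 *
 *	struct foo {
 *		const char *name;
 *	};
 *
 *	static int foo_set_name(struct foo *f, const char *name)
 *	{
 *		f->name = kstrdup_const(name, GFP_KERNEL);
 *		return f->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_release(struct foo *f)
 *	{
 *		kfree_const(f->name);	// frees only if actually copied
 *	}
 *
 * struct foo and its helpers are invented for this sketch; the point is
 * that kstrdup_const() results must go through kfree_const().
 */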
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @element_size: size of each element of array.
 * @count: number of elements to duplicate from array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);
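
/*
 * Illustrative (hypothetical) use of kmemdup_array(): duplicating a table
 * without open-coding the size multiplication, so that an overflowing
 * element_size * count saturates via size_mul() and the oversized
 * allocation fails instead of being silently truncated.  A sketch:
 *
 *	static u32 *dup_ids(const u32 *ids, size_t nr)
 *	{
 *		return kmemdup_array(ids, sizeof(*ids), nr, GFP_KERNEL);
 *	}
 *
 * dup_ids() is made up for this example; free the result with kfree().
 */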
/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
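
/*
 * Illustrative (hypothetical) memdup_user() caller: unlike the k*dup
 * helpers above, failure is reported with ERR_PTR(), not NULL, so the
 * result must be tested with IS_ERR().  A minimal ioctl-style sketch:
 *
 *	static int handle_blob(const void __user *uptr, size_t len)
 *	{
 *		void *buf = memdup_user(uptr, len);
 *
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *		// ... use buf ...
 *		kfree(buf);
 *		return 0;
 *	}
 *
 * handle_blob() is invented for the sketch; the IS_ERR()/PTR_ERR()
 * pattern is the point.
 */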
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
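
/*
 * Illustrative (hypothetical) strndup_user() caller: @n bounds the copy
 * including the trailing NUL, and a longer user string fails with
 * -EINVAL rather than being truncated.  A sketch:
 *
 *	static int set_label(const char __user *ustr)
 *	{
 *		char *label = strndup_user(ustr, PAGE_SIZE);
 *
 *		if (IS_ERR(label))
 *			return PTR_ERR(label);
 *		// ... use label ...
 *		kfree(label);
 *		return 0;
 *	}
 *
 * set_label() and the PAGE_SIZE bound are made up for this example.
 */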
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
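
/*
 * Worked example for the default STACK_RND_MASK above (an assumption;
 * many architectures override it): with 4K pages, PAGE_SHIFT == 12, so
 * the mask is 0x7ff and the random offset is
 *
 *	(get_random_long() & 0x7ff) << 12
 *
 * i.e. up to 0x7ff000 bytes, just under 8MB of VA, subtracted from (or,
 * with CONFIG_STACK_GROWSUP, added to) the page-aligned stack top.
 */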
/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
	    !IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}
/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);
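
/*
 * Illustrative (hypothetical) caller of __account_locked_vm(): a driver
 * that already holds mmap_lock for writing and lets CAP_IPC_LOCK bypass
 * the rlimit, mirroring what account_locked_vm() does below.  A sketch:
 *
 *	mmap_write_lock(mm);
 *	ret = __account_locked_vm(mm, npages, true, current,
 *				  capable(CAP_IPC_LOCK));
 *	if (!ret)
 *		ret = pin_and_map_pages();	// made-up driver work
 *	mmap_write_unlock(mm);
 *
 * pin_and_map_pages() is invented; the locking and bypass_rlim handling
 * follow the function's documented contract.
 */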
/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
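
/*
 * Illustrative (hypothetical) vm_mmap() use: mapping a file read-only
 * into the current task from kernel code.  Errors come back as a
 * negative errno encoded in the returned address, so IS_ERR_VALUE()
 * is the usual check.  A sketch:
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(filp, 0, size, PROT_READ, MAP_PRIVATE, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 *
 * filp and size are placeholders; the IS_ERR_VALUE() convention is
 * the point.
 */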
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long term fragmentation than the vmalloc
	 * fallback. However make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as
	 * we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
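
/*
 * Illustrative (hypothetical) kvmalloc_node() use via the kvmalloc()
 * wrapper: a size-dependent allocation that should succeed both for a
 * handful of entries (kmalloc path) and for huge tables (vmalloc
 * fallback), always paired with kvfree().  A sketch:
 *
 *	struct entry *tbl;
 *
 *	tbl = kvmalloc(array_size(nr, sizeof(*tbl)), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	// ... tbl may or may not be physically contiguous ...
 *	kvfree(tbl);
 *
 * struct entry and nr are placeholders; array_size() guards the size
 * multiplication against overflow.
 */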
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);
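
/*
 * Illustrative (hypothetical) kvrealloc() use: growing a buffer.  Note
 * the contract visible in the code above - shrinking returns the old
 * pointer unchanged, and on allocation failure the old buffer is NOT
 * freed, so keep a second pointer.  A sketch:
 *
 *	new = kvrealloc(buf, old_len, new_len, GFP_KERNEL);
 *	if (!new) {
 *		kvfree(buf);	// old buffer is still valid here
 *		return -ENOMEM;
 *	}
 *	buf = new;
 *
 * buf, old_len and new_len are placeholders for this sketch.
 */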
/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);
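
/*
 * Illustrative (hypothetical) folio_mapping() caller: distinguishing
 * page-cache folios from anonymous or slab folios, for which NULL is
 * returned.  A sketch:
 *
 *	struct address_space *mapping = folio_mapping(folio);
 *
 *	if (mapping && mapping->host)
 *		pr_debug("folio belongs to inode %lu\n",
 *			 mapping->host->i_ino);
 *
 * The pr_debug() line is invented; mapping->host is the backing inode
 * for page-cache mappings.
 */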
/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}
int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though the user usually won't switch to OVERCOMMIT_NEVER too
	 * frequently), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
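
/*
 * Worked example for vm_commit_limit() (assumed numbers): with 4K pages,
 * 8GB of RAM, no hugetlb pages, 2GB of swap and the default
 * overcommit_ratio of 50, the OVERCOMMIT_NEVER limit in pages is
 *
 *	(2097152 - 0) * 50 / 100 + 524288 = 1572864
 *
 * i.e. 4GB of RAM-backed commit plus all of swap, 6GB in total.
 */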
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
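
/*
 * Illustrative (hypothetical) mem_dump_obj() call site: because the
 * function prints with pr_cont(), the caller emits the preamble first.
 * A sketch:
 *
 *	pr_info("unexpected object %px:", obj);
 *	mem_dump_obj(obj);
 *
 * This is modelled on the documented pr_cont() contract above; the
 * preamble text is invented.
 */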
#endif

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif