Lines matching "non-contiguous"
1 // SPDX-License-Identifier: GPL-2.0-only
20 #include <linux/elf-randomize.h>
36 * kfree_const - conditionally free memory
49 * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated.
54 * Return: newly allocated copy of @s with NUL-termination or %NULL in
67 /* Ensure the buffer is always NUL-terminated, regardless of @s. */ in __kmemdup_nul()
73 * kstrdup - allocate space for and copy an existing string
87 * kstrdup_const - conditionally duplicate an existing const string
107 * kstrndup - allocate space for and copy an existing string
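The kstrdup() family above pairs with kfree(), except kstrdup_const(), which may return the source pointer itself when it lives in .rodata and therefore must be released with kfree_const(). A minimal sketch, assuming a hypothetical struct tag whose name is frequently a string literal:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical object whose name is frequently a string literal. */
struct tag {
	const char *name;
};

static int tag_set_name(struct tag *t, const char *name)
{
	/* No copy is made if @name is in .rodata; see kfree_const() above. */
	t->name = kstrdup_const(name, GFP_KERNEL);
	return t->name ? 0 : -ENOMEM;
}

static void tag_release(struct tag *t)
{
	/* Must be kfree_const(), not kfree(), to match kstrdup_const(). */
	kfree_const(t->name);
}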
123 * kmemdup - duplicate region of memory
130 * result is physically contiguous. Use kfree() to free.
144 * kmemdup_array - duplicate a given array.
152 * result is physically contiguous. Use kfree() to free.
161 * kvmemdup - duplicate region of memory
168 * result may not be physically contiguous. Use kvfree() to free.
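The free-function pairing in the two comments above is the essential contract: kmemdup() results take kfree(), kvmemdup() results take kvfree(). A sketch, with the helper name and the decision to always use the kv variant being illustrative:

#include <linux/slab.h>
#include <linux/string.h>

static void *dup_blob(const void *src, size_t len)
{
	/*
	 * kvmemdup() may place large copies in vmalloc space, so the
	 * result must go to kvfree(); kvfree() also accepts kmalloc'ed
	 * pointers, keeping the caller's release path uniform.
	 */
	return kvmemdup(src, len, GFP_KERNEL);
}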
182 * kmemdup_nul - Create a NUL-terminated string from unterminated data
187 * Return: newly allocated copy of @s with NUL-termination or %NULL in
207 * memdup_user - duplicate memory region from user space
213 * contiguous, to be freed by kfree().
221 return ERR_PTR(-ENOMEM); in memdup_user()
225 return ERR_PTR(-EFAULT); in memdup_user()
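Unlike the plain allocators, memdup_user() reports failure as an ERR_PTR() (-ENOMEM or -EFAULT, per the lines above), never NULL, so callers check IS_ERR(). A sketch of the common ioctl-argument pattern; the handler shape is illustrative:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int demo_ioctl_copyin(void __user *uarg, size_t len)
{
	void *buf = memdup_user(uarg, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... operate on the kernel copy ... */
	kfree(buf);
	return 0;
}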
233 * vmemdup_user - duplicate memory region from user space
239 * physically contiguous. Use kvfree() to free.
247 return ERR_PTR(-ENOMEM); in vmemdup_user()
251 return ERR_PTR(-EFAULT); in vmemdup_user()
259 * strndup_user - duplicate an existing string from user space
273 return ERR_PTR(-EFAULT); in strndup_user()
276 return ERR_PTR(-EINVAL); in strndup_user()
283 p[length - 1] = '\0'; in strndup_user()
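strndup_user() bounds the copy at @n, rejects overlong strings with -EINVAL, and forces termination (the p[length - 1] = '\0' above). A sketch, with the bound and message purely illustrative:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_set_name(const char __user *uname)
{
	char *name = strndup_user(uname, PAGE_SIZE);

	if (IS_ERR(name))
		return PTR_ERR(name);	/* -EFAULT, -EINVAL or -ENOMEM */

	pr_info("name: %s\n", name);	/* guaranteed NUL-terminated */
	kfree(name);
	return 0;
}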
290 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
303 return ERR_PTR(-ENOMEM); in memdup_user_nul()
307 return ERR_PTR(-EFAULT); in memdup_user_nul()
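memdup_user_nul() is the usual choice for write() handlers that want to parse the user buffer as a C string. A sketch; the handler is illustrative, not from this file:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	char *kbuf = memdup_user_nul(ubuf, count);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* kbuf is NUL-terminated: safe for sscanf()/kstrto*() parsing. */
	kfree(kbuf);
	return count;
}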
320 return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); in vma_is_stack_for_current()
330 swap(vma->vm_file, file); in vma_set_file()
336 #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
343 if (current->flags & PF_RANDOMIZE) { in randomize_stack_top()
351 return PAGE_ALIGN(stack_top) - random_variable; in randomize_stack_top()
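Worked through for 4 KiB pages (PAGE_SHIFT == 12): the fallback STACK_RND_MASK above is 0x7ff >> 0 == 0x7ff, and randomize_stack_top() masks a random value with it and shifts left by PAGE_SHIFT before subtracting, so the stack top moves down by up to 0x7ff << 12 == 0x7ff000 bytes, just under 8 MiB, which is the "8MB of VA" in the define's comment.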
356 * randomize_page - Generate a random, page aligned address
372 range -= PAGE_ALIGN(start) - start; in randomize_page()
376 if (start > ULONG_MAX - range) in randomize_page()
377 range = ULONG_MAX - start; in randomize_page()
392 return randomize_page(mm->brk, SZ_32M); in arch_randomize_brk()
394 return randomize_page(mm->brk, SZ_1G); in arch_randomize_brk()
403 rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); in arch_mmap_rnd()
406 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); in arch_mmap_rnd()
413 if (current->personality & ADDR_COMPAT_LAYOUT) in mmap_is_legacy()
416 /* On parisc the stack always grows up - so an unlimited stack should in mmap_is_legacy()
418 if (rlim_stack->rlim_cur == RLIM_INFINITY && in mmap_is_legacy()
441 return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd); in mmap_base()
443 unsigned long gap = rlim_stack->rlim_cur; in mmap_base()
447 if (current->flags & PF_RANDOMIZE) in mmap_base()
459 return PAGE_ALIGN(STACK_TOP - gap - rnd); in mmap_base()
467 if (current->flags & PF_RANDOMIZE) in arch_pick_mmap_layout()
471 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; in arch_pick_mmap_layout()
472 clear_bit(MMF_TOPDOWN, &mm->flags); in arch_pick_mmap_layout()
474 mm->mmap_base = mmap_base(random_factor, rlim_stack); in arch_pick_mmap_layout()
475 set_bit(MMF_TOPDOWN, &mm->flags); in arch_pick_mmap_layout()
481 mm->mmap_base = TASK_UNMAPPED_BASE; in arch_pick_mmap_layout()
482 clear_bit(MMF_TOPDOWN, &mm->flags); in arch_pick_mmap_layout()
490 * __account_locked_vm - account locked pages to an mm's locked_vm
502 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
512 locked_vm = mm->locked_vm; in __account_locked_vm()
517 ret = -ENOMEM; in __account_locked_vm()
520 mm->locked_vm = locked_vm + pages; in __account_locked_vm()
523 mm->locked_vm = locked_vm - pages; in __account_locked_vm()
526 pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid, in __account_locked_vm()
527 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, in __account_locked_vm()
529 ret ? " - exceeded" : ""); in __account_locked_vm()
536 * account_locked_vm - account locked pages to an mm's locked_vm
541 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
545 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
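A sketch of the charge/uncharge pairing around a pinning operation; the helpers and the elided pin step are illustrative:

#include <linux/mm.h>

static int demo_pin_accounted(struct mm_struct *mm, unsigned long npages)
{
	int ret = account_locked_vm(mm, npages, true);	/* charge */

	if (ret)
		return ret;	/* -ENOMEM: RLIMIT_MEMLOCK would be exceeded */

	/* ... pin npages; on failure, undo the charge ... */

	return 0;
}

static void demo_unpin_accounted(struct mm_struct *mm, unsigned long npages)
{
	/* ... unpin first, then ... */
	account_locked_vm(mm, npages, false);	/* uncharge */
}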
568 struct mm_struct *mm = current->mm; in vm_mmap_pgoff()
577 return -EINTR; in vm_mmap_pgoff()
610 return -EINVAL; in vm_mmap()
612 return -EINVAL; in vm_mmap()
621 * We want to attempt a large physically contiguous block first because in kmalloc_gfp_adjust()
624 * However, make sure that larger requests are not too disruptive - no in kmalloc_gfp_adjust()
641 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
642 * failure, fall back to non-contiguous (vmalloc) allocation.
645 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
671 /* non-sleeping allocations are not supported by vmalloc */ in __kvmalloc_node_noprof()
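The non-sleeping restriction above is why kvmalloc() requires GFP_KERNEL-compatible flags: only the kmalloc attempt can honor atomic flags, and the vmalloc fallback cannot. A minimal sketch:

#include <linux/slab.h>

static void *demo_alloc_index(size_t nbytes)
{
	/* Tries kmalloc first; large sizes fall back to vmalloc. */
	void *idx = kvmalloc(nbytes, GFP_KERNEL);

	/* Release with kvfree(), which handles either backing store. */
	return idx;
}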
694 * kvfree() - Free memory.
701 * Context: Either preemptible task context or non-NMI interrupt.
713 * kvfree_sensitive - Free a data object containing sensitive information.
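kvfree_sensitive() zeroes the object (via memzero_explicit()) before handing it to kvfree(), so the sensitive contents do not linger in freed memory. A one-function sketch, names illustrative:

#include <linux/slab.h>

static void demo_drop_secret(void *secret, size_t len)
{
	kvfree_sensitive(secret, len);	/* wipe, then free */
}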
731 * kvrealloc - reallocate memory; contents remain unchanged
781 * __vmalloc_array - allocate memory for a virtually contiguous array.
797 * vmalloc_array - allocate memory for a virtually contiguous array.
808 * __vcalloc - allocate and zero memory for a virtually contiguous array.
820 * vcalloc - allocate and zero memory for a virtually contiguous array.
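These array variants mirror kcalloc()/kmalloc_array(): the n * size multiplication is overflow-checked, the memory is virtually (not physically) contiguous, and the result pairs with vfree(). A sketch with an illustrative element type:

#include <linux/types.h>
#include <linux/vmalloc.h>

static u64 *demo_alloc_counters(size_t nr)
{
	/* Overflow-checked nr * sizeof(u64), zeroed; free with vfree(). */
	return vcalloc(nr, sizeof(u64));
}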
832 unsigned long mapping = (unsigned long)folio->mapping; in folio_anon_vma()
836 return (void *)(mapping - PAGE_MAPPING_ANON); in folio_anon_vma()
840 * folio_mapping - Find the mapping where this folio is stored.
860 return swap_address_space(folio->swap); in folio_mapping()
862 mapping = folio->mapping; in folio_mapping()
871 * folio_copy - Copy the contents of one folio to another.
877 * Can be called in atomic context for order-0 folios, but if the folio is
901 return -EHWPOISON; in folio_mc_copy()
938 int new_policy = -1; in overcommit_policy_handler()
956 if (ret || new_policy == -1) in overcommit_policy_handler()
989 allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); in vm_commit_limit()
991 allowed = ((totalram_pages() - hugetlb_total_pages()) in vm_commit_limit()
1007 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
1026 * succeed and -ENOMEM implies there is not.
1029 * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst
1063 allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); in __vm_enough_memory()
1069 long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); in __vm_enough_memory()
1071 allowed -= min_t(long, mm->total_vm / 32, reserve); in __vm_enough_memory()
1079 __func__, current->pid, current->comm, bytes_failed); in __vm_enough_memory()
1082 return -ENOMEM; in __vm_enough_memory()
1086 * get_cmdline() - copy the cmdline value to a buffer.
1103 if (!mm->arg_end) in get_cmdline()
1106 spin_lock(&mm->arg_lock); in get_cmdline()
1107 arg_start = mm->arg_start; in get_cmdline()
1108 arg_end = mm->arg_end; in get_cmdline()
1109 env_start = mm->env_start; in get_cmdline()
1110 env_end = mm->env_end; in get_cmdline()
1111 spin_unlock(&mm->arg_lock); in get_cmdline()
1113 len = arg_end - arg_start; in get_cmdline()
1124 if (res > 0 && buffer[res-1] != '\0' && len < buflen) { in get_cmdline()
1129 len = env_end - env_start; in get_cmdline()
1130 if (len > buflen - res) in get_cmdline()
1131 len = buflen - res; in get_cmdline()
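get_cmdline() returns the number of bytes copied and does not guarantee termination (note the explicit buffer[res-1] != '\0' check above), so callers terminate the buffer themselves. A sketch; the buffer size is illustrative:

#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void demo_log_cmdline(struct task_struct *task)
{
	char buf[256];
	int n = get_cmdline(task, buf, sizeof(buf) - 1);

	buf[n] = '\0';
	/* Arguments are NUL-separated, so %s prints only argv[0]. */
	pr_info("cmdline: %s\n", buf);
}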
1159 * mem_dump_obj - Print available provenance information
1165 * For example, for a slab-cache object, the slab name is printed, and,
1182 type = "non-slab/vmalloc memory"; in mem_dump_obj()
1186 type = "zero-size pointer"; in mem_dump_obj()
1188 type = "non-paged memory"; in mem_dump_obj()
1196 * A driver might set a page logically offline -- PageOffline() -- and
1200 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random