Lines matching "memory - region" (free-text search; the matches below are from the kernel's mm/nommu.c)

1 // SPDX-License-Identifier: GPL-2.0-only
6 * have any form of memory management unit (thus no virtual memory).
8 * See Documentation/admin-guide/mm/nommu-mmap.rst
10 * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
11 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
12 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
14 * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
29 #include <linux/backing-dev.h>
67 * Return the total memory allocated for this pointer, not
94 * region. This test is intentionally done in reverse order,
96 * PAGE_SIZE for 0-order pages.
101 vma = find_vma(current->mm, (unsigned long)objp);
103 return vma->vm_end - vma->vm_start;
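The matches above are from kobjsize(): with no MMU there are no page tables to consult, so for a pointer that is neither slab- nor page-backed the reported size is simply the extent of the VMA that covers it. A minimal sketch of that fallback, using a hypothetical helper name and with read-locking added for the example (the in-tree helper's locking differs):

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: size of the mapping covering @objp, 0 if unmapped. */
static unsigned long mapping_size_of(const void *objp)
{
        struct vm_area_struct *vma;
        unsigned long size = 0;

        mmap_read_lock(current->mm);
        vma = find_vma(current->mm, (unsigned long)objp);
        if (vma && vma->vm_start <= (unsigned long)objp)
                size = vma->vm_end - vma->vm_start;
        mmap_read_unlock(current->mm);

        return size;
}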
156 mmap_write_lock(current->mm);
157 vma = find_vma(current->mm, (unsigned long)ret);
160 mmap_write_unlock(current->mm);
188 count = -(unsigned long) addr;
194 * vmalloc - allocate virtually contiguous memory
213 * vzalloc - allocate virtually contiguous memory with zero fill
219 * The memory allocated is set to zero.
231 * vmalloc_node - allocate memory on a specific node
248 * vzalloc_node - allocate memory on a specific node with zero fill
254 * The memory allocated is set to zero.
266 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
279 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
282 * The resulting memory area is 32bit addressable and zeroed so it can be
291 * We'll have to sort out the ZONE_DMA bits for 64-bit,
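The kernel-doc fragments above cover the vmalloc() family as implemented for !MMU, where a "virtually contiguous" allocation is in practice an ordinary physically contiguous kernel allocation, so large requests fail more readily than on MMU systems. A minimal, hedged usage sketch (generic vmalloc API only, nothing nommu-specific):

#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Illustrative only: allocate, use and release a zeroed buffer. */
static int example_big_buffer(unsigned long len)
{
        void *buf = vzalloc(len);

        if (!buf)
                return -ENOMEM;

        /* ... use buf ... */

        vfree(buf);
        return 0;
}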
338 return -EINVAL;
345 return -EINVAL;
352 return -EINVAL;
359 return -EINVAL;
366 * like trying to un-brk an area that has already been mapped
372 struct mm_struct *mm = current->mm;
374 if (brk < mm->start_brk || brk > mm->context.end_brk)
375 return mm->brk;
377 if (mm->brk == brk)
378 return mm->brk;
383 if (brk <= mm->brk) {
384 mm->brk = brk;
389 * Ok, looks good - let it rip.
391 flush_icache_user_range(mm->brk, brk);
392 return mm->brk = brk;
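The brk() fragment above shows the !MMU rule: the heap lives in a region reserved at exec time, so the break may only move between mm->start_brk and mm->context.end_brk, and shrinking merely lowers mm->brk without returning memory. A user-space sketch (not from the kernel source) that exercises this with sbrk(); on a !MMU target the grow step is expected to fail once the reserved window is exhausted:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        void *before = sbrk(0);                 /* current program break */

        if (sbrk(4096) == (void *)-1)           /* try to grow by one page */
                perror("sbrk");
        else
                printf("break moved from %p to %p\n", before, sbrk(0));
        return 0;
}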
396 * initialise the percpu counter for VM and region record slabs
408 * validate the region tree
409 * - the caller must hold the region lock
414 struct vm_region *region, *last;
422 BUG_ON(last->vm_end <= last->vm_start);
423 BUG_ON(last->vm_top < last->vm_end);
426 region = rb_entry(p, struct vm_region, vm_rb);
429 BUG_ON(region->vm_end <= region->vm_start);
430 BUG_ON(region->vm_top < region->vm_end);
431 BUG_ON(region->vm_start < last->vm_top);
443 * add a region into the global tree
445 static void add_nommu_region(struct vm_region *region)
457 if (region->vm_start < pregion->vm_start)
458 p = &(*p)->rb_left;
459 else if (region->vm_start > pregion->vm_start)
460 p = &(*p)->rb_right;
461 else if (pregion == region)
467 rb_link_node(&region->vm_rb, parent, p);
468 rb_insert_color(&region->vm_rb, &nommu_region_tree);
474 * delete a region from the global tree
476 static void delete_nommu_region(struct vm_region *region)
481 rb_erase(&region->vm_rb, &nommu_region_tree);
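add_nommu_region() links every region into the global nommu_region_tree, an rbtree keyed by vm_start, and delete_nommu_region() removes it with rb_erase(). For illustration only, an exact-match lookup over such a tree (a hypothetical helper, not one present in the file) would walk the same key:

#include <linux/rbtree.h>
#include <linux/mm_types.h>

/* Hypothetical: find the region starting exactly at @start, if any.
 * The caller is assumed to hold the region lock. */
static struct vm_region *lookup_nommu_region(struct rb_root *tree,
                                             unsigned long start)
{
        struct rb_node *n = tree->rb_node;

        while (n) {
                struct vm_region *region = rb_entry(n, struct vm_region, vm_rb);

                if (start < region->vm_start)
                        n = n->rb_left;
                else if (start > region->vm_start)
                        n = n->rb_right;
                else
                        return region;
        }
        return NULL;
}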
499 * release a reference to a region
500 * - the caller must hold the region semaphore for writing, which this releases
501 * - the region may not have been added to the tree yet, in which case vm_top
504 static void __put_nommu_region(struct vm_region *region)
509 if (--region->vm_usage == 0) {
510 if (region->vm_top > region->vm_start)
511 delete_nommu_region(region);
514 if (region->vm_file)
515 fput(region->vm_file);
517 /* IO memory and memory shared directly out of the pagecache
519 if (region->vm_flags & VM_MAPPED_COPY)
520 free_page_series(region->vm_start, region->vm_top);
521 kmem_cache_free(vm_region_jar, region);
528 * release a reference to a region
530 static void put_nommu_region(struct vm_region *region)
533 __put_nommu_region(region);
538 vma->vm_mm = mm;
541 if (vma->vm_file) {
542 struct address_space *mapping = vma->vm_file->f_mapping;
546 vma_interval_tree_insert(vma, &mapping->i_mmap);
554 vma->vm_mm->map_count--;
556 if (vma->vm_file) {
558 mapping = vma->vm_file->f_mapping;
562 vma_interval_tree_remove(vma, &mapping->i_mmap);
573 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
575 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
578 current->pid);
579 return -ENOMEM;
592 if (vma->vm_ops && vma->vm_ops->close)
593 vma->vm_ops->close(vma);
594 if (vma->vm_file)
595 fput(vma->vm_file);
596 put_nommu_region(vma->vm_region);
607 return mt_find(&mm->mm_mt, &index, end_addr - 1);
613 * - should be called with mm->mmap_lock at least held readlocked
641 * - not supported under NOMMU conditions
645 return -ENOMEM;
656 * - should be called with mm->mmap_lock at least held readlocked
669 if (vma->vm_start != addr)
671 if (vma->vm_end != end)
694 return -EINVAL;
698 return -EINVAL;
701 return -EINVAL;
706 return -ENOMEM;
710 return -EOVERFLOW;
714 if (!file->f_op->mmap)
715 return -ENODEV;
718 * - we support chardevs that provide their own "memory"
719 * - we support files/blockdevs that are memory backed
721 if (file->f_op->mmap_capabilities) {
722 capabilities = file->f_op->mmap_capabilities(file);
726 switch (file_inode(file)->i_mode & S_IFMT) {
740 return -EINVAL;
746 if (!file->f_op->get_unmapped_area)
748 if (!(file->f_mode & FMODE_CAN_READ))
752 if (!(file->f_mode & FMODE_READ))
753 return -EACCES;
758 !(file->f_mode & FMODE_WRITE))
759 return -EACCES;
762 (file->f_mode & FMODE_WRITE))
763 return -EACCES;
766 return -ENODEV;
771 /* we're going to read the file into private memory we
774 return -ENODEV;
790 return -EINVAL;
797 if (path_noexec(&file->f_path)) {
799 return -EPERM;
802 if (current->personality & READ_IMPLIES_EXEC) {
814 /* anonymous mappings are always memory backed and can be
821 (current->personality & READ_IMPLIES_EXEC))
861 if (!(prot & PROT_WRITE) && !current->ptrace)
864 * modify memory, especially also not via active ptrace
888 ret = call_mmap(vma->vm_file, vma);
890 vma->vm_region->vm_top = vma->vm_region->vm_end;
893 if (ret != -ENOSYS)
896 /* getting -ENOSYS indicates that direct mmap isn't possible (as
899 return -ENODEV;
906 struct vm_region *region,
916 * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
921 ret = call_mmap(vma->vm_file, vma);
923 if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
924 ret = -ENOSYS;
926 vma->vm_region->vm_top = vma->vm_region->vm_end;
929 if (ret != -ENOSYS)
938 /* allocate some memory to hold the mapping
939 * - note that this may not return a page-aligned address if the object
946 /* we don't want to allocate a power-of-2 sized page set */
947 if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
957 region->vm_flags = vma->vm_flags;
958 region->vm_start = (unsigned long) base;
959 region->vm_end = region->vm_start + len;
960 region->vm_top = region->vm_start + (total << PAGE_SHIFT);
962 vma->vm_start = region->vm_start;
963 vma->vm_end = region->vm_start + len;
965 if (vma->vm_file) {
969 fpos = vma->vm_pgoff;
972 ret = kernel_read(vma->vm_file, base, len, &fpos);
978 memset(base + ret, 0, len - ret);
987 free_page_series(region->vm_start, region->vm_top);
988 region->vm_start = vma->vm_start = 0;
989 region->vm_end = vma->vm_end = 0;
990 region->vm_top = 0;
995 len, current->pid, current->comm);
997 return -ENOMEM;
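do_mmap_private() above implements the copy fallback: when a file cannot be mapped directly, pages are allocated, the file contents are pulled in with kernel_read(), and any tail the read did not cover is zeroed. A self-contained sketch of just that copy step (hypothetical helper, simplified error handling):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical: return a zero-padded private copy of @len bytes of @file
 * starting at @pos, or NULL on failure. */
static void *read_private_copy(struct file *file, loff_t pos, size_t len)
{
        void *buf = kmalloc(len, GFP_KERNEL);
        ssize_t ret;

        if (!buf)
                return NULL;

        ret = kernel_read(file, buf, len, &pos);
        if (ret < 0) {
                kfree(buf);
                return NULL;
        }
        if ((size_t)ret < len)
                memset(buf + ret, 0, len - (size_t)ret);

        return buf;
}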
1014 struct vm_region *region;
1018 VMA_ITERATOR(vmi, current->mm, 0);
1039 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1040 if (!region)
1043 vma = vm_area_alloc(current->mm);
1047 region->vm_usage = 1;
1048 region->vm_flags = vm_flags;
1049 region->vm_pgoff = pgoff;
1052 vma->vm_pgoff = pgoff;
1055 region->vm_file = get_file(file);
1056 vma->vm_file = get_file(file);
1063 * - we can only share with a superset match on most regular files
1064 * - shared mappings on character devices and memory backed files are
1073 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1079 if (!is_nommu_shared_mapping(pregion->vm_flags))
1083 if (file_inode(pregion->vm_file) !=
1087 if (pregion->vm_pgoff >= pgend)
1090 rpglen = pregion->vm_end - pregion->vm_start;
1091 rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1092 rpgend = pregion->vm_pgoff + rpglen;
1098 if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1099 !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1100 /* new mapping is not a subset of the region */
1106 /* we've found a region we can share */
1107 pregion->vm_usage++;
1108 vma->vm_region = pregion;
1109 start = pregion->vm_start;
1110 start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1111 vma->vm_start = start;
1112 vma->vm_end = start + len;
1114 if (pregion->vm_flags & VM_MAPPED_COPY)
1119 vma->vm_region = NULL;
1120 vma->vm_start = 0;
1121 vma->vm_end = 0;
1122 pregion->vm_usage--;
1127 fput(region->vm_file);
1128 kmem_cache_free(vm_region_jar, region);
1129 region = pregion;
1135 * - this is the hook for quasi-memory character devices to
1139 addr = file->f_op->get_unmapped_area(file, addr, len,
1143 if (ret != -ENOSYS)
1149 ret = -ENODEV;
1155 vma->vm_start = region->vm_start = addr;
1156 vma->vm_end = region->vm_end = addr + len;
1161 vma->vm_region = region;
1164 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1166 if (file && vma->vm_flags & VM_SHARED)
1169 ret = do_mmap_private(vma, region, len, capabilities);
1172 add_nommu_region(region);
1175 if (!vma->vm_file &&
1178 memset((void *)region->vm_start, 0,
1179 region->vm_end - region->vm_start);
1182 result = vma->vm_start;
1184 current->mm->total_vm += len >> PAGE_SHIFT;
1187 BUG_ON(!vma->vm_region);
1188 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1192 setup_vma_to_mm(vma, current->mm);
1193 current->mm->map_count++;
1197 /* we flush the region from the icache only when the first executable
1199 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1200 flush_icache_user_range(region->vm_start, region->vm_end);
1201 region->vm_icache_flushed = true;
1212 if (region->vm_file)
1213 fput(region->vm_file);
1214 kmem_cache_free(vm_region_jar, region);
1215 if (vma->vm_file)
1216 fput(vma->vm_file);
1223 ret = -EINVAL;
1227 kmem_cache_free(vm_region_jar, region);
1229 len, current->pid);
1231 return -ENOMEM;
1234 pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1235 len, current->pid);
1237 return -ENOMEM;
1245 unsigned long retval = -EBADF;
1284 return -EFAULT;
1286 return -EINVAL;
1301 struct vm_region *region;
1306 * only a single usage on the region) */
1307 if (vma->vm_file)
1308 return -ENOMEM;
1310 mm = vma->vm_mm;
1311 if (mm->map_count >= sysctl_max_map_count)
1312 return -ENOMEM;
1314 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1315 if (!region)
1316 return -ENOMEM;
1323 *region = *vma->vm_region;
1324 new->vm_region = region;
1326 npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1329 region->vm_top = region->vm_end = new->vm_end = addr;
1331 region->vm_start = new->vm_start = addr;
1332 region->vm_pgoff = new->vm_pgoff += npages;
1335 vma_iter_config(vmi, new->vm_start, new->vm_end);
1338 current->pid);
1342 if (new->vm_ops && new->vm_ops->open)
1343 new->vm_ops->open(new);
1346 delete_nommu_region(vma->vm_region);
1348 vma->vm_region->vm_start = vma->vm_start = addr;
1349 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1351 vma->vm_region->vm_end = vma->vm_end = addr;
1352 vma->vm_region->vm_top = addr;
1354 add_nommu_region(vma->vm_region);
1355 add_nommu_region(new->vm_region);
1361 mm->map_count++;
1367 kmem_cache_free(vm_region_jar, region);
1368 return -ENOMEM;
1379 struct vm_region *region;
1383 if (from > vma->vm_start) {
1384 if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1385 return -ENOMEM;
1386 vma->vm_end = from;
1388 if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1389 return -ENOMEM;
1390 vma->vm_start = to;
1393 /* cut the backing region down to size */
1394 region = vma->vm_region;
1395 BUG_ON(region->vm_usage != 1);
1398 delete_nommu_region(region);
1399 if (from > region->vm_start) {
1400 to = region->vm_top;
1401 region->vm_top = region->vm_end = from;
1403 region->vm_start = to;
1405 add_nommu_region(region);
1414 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1426 return -EINVAL;
1435 pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1436 current->pid, current->comm,
1437 start, start + len - 1);
1440 return -EINVAL;
1443 /* we're allowed to split an anonymous VMA but not a file-backed one */
1444 if (vma->vm_file) {
1446 if (start > vma->vm_start)
1447 return -EINVAL;
1448 if (end == vma->vm_end)
1452 return -EINVAL;
1455 if (start == vma->vm_start && end == vma->vm_end)
1457 if (start < vma->vm_start || end > vma->vm_end)
1458 return -EINVAL;
1460 return -EINVAL;
1461 if (end != vma->vm_end && offset_in_page(end))
1462 return -EINVAL;
1463 if (start != vma->vm_start && end != vma->vm_end) {
1473 ret = -ENOMEM;
1481 struct mm_struct *mm = current->mm;
1507 mm->total_vm = 0;
1519 __mt_destroy(&mm->mm_mt);
1528 * as long as it stays within the region allocated by do_mmap_private() and the
1543 return (unsigned long) -EINVAL;
1546 return -EINVAL;
1549 return (unsigned long) -EINVAL;
1551 vma = find_vma_exact(current->mm, addr, old_len);
1553 return (unsigned long) -EINVAL;
1555 if (vma->vm_end != vma->vm_start + old_len)
1556 return (unsigned long) -EFAULT;
1558 if (is_nommu_shared_mapping(vma->vm_flags))
1559 return (unsigned long) -EPERM;
1561 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1562 return (unsigned long) -ENOMEM;
1564 /* all checks complete - do it */
1565 vma->vm_end = vma->vm_start + new_len;
1566 return vma->vm_start;
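The do_mremap() fragment shows the !MMU constraints: a mapping can only be resized in place, must stay within the region originally allocated by do_mmap_private(), and shared mappings may not be remapped at all. A user-space sketch (illustrative, not from the kernel) that stays within those limits by shrinking in place:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 8 * 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;

        /* shrink in place: permitted, the backing region is simply trimmed */
        if (mremap(p, len, len / 2, 0) == MAP_FAILED)
                perror("mremap");

        munmap(p, len / 2);
        return 0;
}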
1575 mmap_write_lock(current->mm);
1577 mmap_write_unlock(current->mm);
1585 return -EINVAL;
1595 unsigned long vm_len = vma->vm_end - vma->vm_start;
1597 pfn += vma->vm_pgoff;
1598 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1605 unsigned int size = vma->vm_end - vma->vm_start;
1607 if (!(vma->vm_flags & VM_USERMAP))
1608 return -EINVAL;
1610 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1611 vma->vm_end = vma->vm_start + size;
1645 if (addr + len >= vma->vm_end)
1646 len = vma->vm_end - addr;
1649 if (write && vma->vm_flags & VM_MAYWRITE)
1652 else if (!write && vma->vm_flags & VM_MAYREAD)
1667 * access_remote_vm - access another process' address space
1684 * - source/target buffer must be kernel space
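With a single shared address space there is no translation to do, so accessing another task's memory reduces to a bounds- and permission-checked memcpy against the target VMA, as the __access_remote_vm() lines above suggest. A hypothetical read-side sketch of that idea (not a helper in the file):

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical: copy up to @len bytes at @addr out of @vma into @buf. */
static ssize_t copy_from_vma(struct vm_area_struct *vma, unsigned long addr,
                             void *buf, size_t len)
{
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
        if (!(vma->vm_flags & VM_MAYREAD))
                return -EACCES;

        if (len > vma->vm_end - addr)
                len = vma->vm_end - addr;

        memcpy(buf, (void *)addr, len);
        return len;
}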
1706 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1720 struct vm_region *region;
1725 high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1728 i_mmap_lock_read(inode->i_mapping);
1731 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1732 /* found one - only interested if it's shared out of the page
1734 if (vma->vm_flags & VM_SHARED) {
1735 i_mmap_unlock_read(inode->i_mapping);
1737 return -ETXTBSY; /* not quite true, but near enough */
1741 /* reduce any regions that overlap the dead zone - if in existence,
1747 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1748 if (!(vma->vm_flags & VM_SHARED))
1751 region = vma->vm_region;
1752 r_size = region->vm_top - region->vm_start;
1753 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1756 region->vm_top -= r_top - newsize;
1757 if (region->vm_end > region->vm_top)
1758 region->vm_end = region->vm_top;
1762 i_mmap_unlock_read(inode->i_mapping);
1770 * This is intended to prevent a user from starting a single memory hogging
1774 * The default value is min(3% of free memory, 128MB)
1792 * to log in and kill a memory hogging process.