Lines matching "memory" and "mapping"

1 // SPDX-License-Identifier: GPL-2.0-only
6 * have any form of memory management unit (thus no virtual memory).
8 * See Documentation/admin-guide/mm/nommu-mmap.rst
10 * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
11 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
12 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
14 * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
29 #include <linux/backing-dev.h>
60 * Return the total memory allocated for this pointer, not
89 * PAGE_SIZE for 0-order folios. in kobjsize()
94 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
96 return vma->vm_end - vma->vm_start; in kobjsize()
150 mmap_write_lock(current->mm); in __vmalloc_user_flags()
151 vma = find_vma(current->mm, (unsigned long)ret); in __vmalloc_user_flags()
154 mmap_write_unlock(current->mm); in __vmalloc_user_flags()
182 count = -(unsigned long) addr; in vread_iter()
188 * vmalloc - allocate virtually contiguous memory
205 * vmalloc_huge_node - allocate virtually contiguous memory, on a node
223 * vzalloc - allocate virtually contiguous memory with zero fill
229 * The memory allocated is set to zero.
241 * vmalloc_node - allocate memory on a specific node
258 * vzalloc_node - allocate memory on a specific node with zero fill
264 * The memory allocated is set to zero.
276 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
289 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
292 * The resulting memory area is 32bit addressable and zeroed so it can be
301 * We'll have to sort out the ZONE_DMA bits for 64-bit, in vmalloc_32_user_noprof()
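/*
 * Illustrative sketch (not part of this file): how a caller might use the
 * vmalloc family documented above. The function name and buffer size are
 * hypothetical; vzalloc() and vfree() are used as described in the comments.
 */
static void *example_alloc_table(void)
{
	void *table = vzalloc(64 * 1024);	/* virtually contiguous and zeroed */

	if (!table)
		return NULL;
	return table;				/* release later with vfree(table) */
}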
348 return -EINVAL; in vm_insert_page()
355 return -EINVAL; in vm_insert_pages()
362 return -EINVAL; in vm_map_pages()
369 return -EINVAL; in vm_map_pages_zero()
376 * like trying to un-brk an area that has already been mapped
382 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1()
384 if (brk < mm->start_brk || brk > mm->context.end_brk) in SYSCALL_DEFINE1()
385 return mm->brk; in SYSCALL_DEFINE1()
387 if (mm->brk == brk) in SYSCALL_DEFINE1()
388 return mm->brk; in SYSCALL_DEFINE1()
393 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
394 mm->brk = brk; in SYSCALL_DEFINE1()
399 * Ok, looks good - let it rip. in SYSCALL_DEFINE1()
401 flush_icache_user_range(mm->brk, brk); in SYSCALL_DEFINE1()
402 return mm->brk = brk; in SYSCALL_DEFINE1()
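/*
 * Illustrative sketch (not part of this file): the brk() behaviour above as
 * seen from userspace on a no-MMU target. A request below start_brk or above
 * context.end_brk simply returns the old break, so the heap can only move
 * within that window. The growth size here is arbitrary.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *cur = sbrk(0);

	if (sbrk(1024 * 1024) == (void *)-1)	/* may exceed end_brk on no-MMU */
		perror("sbrk");
	else
		printf("heap grew from %p to %p\n", cur, sbrk(0));
	return 0;
}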
435 * - the caller must hold the region lock
448 BUG_ON(last->vm_end <= last->vm_start); in validate_nommu_regions()
449 BUG_ON(last->vm_top < last->vm_end); in validate_nommu_regions()
455 BUG_ON(region->vm_end <= region->vm_start); in validate_nommu_regions()
456 BUG_ON(region->vm_top < region->vm_end); in validate_nommu_regions()
457 BUG_ON(region->vm_start < last->vm_top); in validate_nommu_regions()
483 if (region->vm_start < pregion->vm_start) in add_nommu_region()
484 p = &(*p)->rb_left; in add_nommu_region()
485 else if (region->vm_start > pregion->vm_start) in add_nommu_region()
486 p = &(*p)->rb_right; in add_nommu_region()
493 rb_link_node(&region->vm_rb, parent, p); in add_nommu_region()
494 rb_insert_color(&region->vm_rb, &nommu_region_tree); in add_nommu_region()
507 rb_erase(&region->vm_rb, &nommu_region_tree); in delete_nommu_region()
526 * - the caller must hold the region semaphore for writing, which this releases
527 * - the region may not have been added to the tree yet, in which case vm_top
535 if (--region->vm_usage == 0) { in __put_nommu_region()
536 if (region->vm_top > region->vm_start) in __put_nommu_region()
540 if (region->vm_file) in __put_nommu_region()
541 fput(region->vm_file); in __put_nommu_region()
543 /* IO memory and memory shared directly out of the pagecache in __put_nommu_region()
545 if (region->vm_flags & VM_MAPPED_COPY) in __put_nommu_region()
546 free_page_series(region->vm_start, region->vm_top); in __put_nommu_region()
564 vma->vm_mm = mm; in setup_vma_to_mm()
566 /* add the VMA to the mapping */ in setup_vma_to_mm()
567 if (vma->vm_file) { in setup_vma_to_mm()
568 struct address_space *mapping = vma->vm_file->f_mapping; in setup_vma_to_mm()
570 i_mmap_lock_write(mapping); in setup_vma_to_mm()
571 flush_dcache_mmap_lock(mapping); in setup_vma_to_mm()
572 vma_interval_tree_insert(vma, &mapping->i_mmap); in setup_vma_to_mm()
573 flush_dcache_mmap_unlock(mapping); in setup_vma_to_mm()
574 i_mmap_unlock_write(mapping); in setup_vma_to_mm()
580 vma->vm_mm->map_count--; in cleanup_vma_from_mm()
581 /* remove the VMA from the mapping */ in cleanup_vma_from_mm()
582 if (vma->vm_file) { in cleanup_vma_from_mm()
583 struct address_space; in cleanup_vma_from_mm()
584 mapping = vma->vm_file->f_mapping; in cleanup_vma_from_mm()
586 i_mmap_lock_write(mapping); in cleanup_vma_from_mm()
587 flush_dcache_mmap_lock(mapping); in cleanup_vma_from_mm()
588 vma_interval_tree_remove(vma, &mapping->i_mmap); in cleanup_vma_from_mm()
589 flush_dcache_mmap_unlock(mapping); in cleanup_vma_from_mm()
590 i_mmap_unlock_write(mapping); in cleanup_vma_from_mm()
599 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start); in delete_vma_from_mm()
601 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); in delete_vma_from_mm()
604 current->pid); in delete_vma_from_mm()
605 return -ENOMEM; in delete_vma_from_mm()
619 if (vma->vm_file) in delete_vma()
620 fput(vma->vm_file); in delete_vma()
621 put_nommu_region(vma->vm_region); in delete_vma()
632 return mt_find(&mm->mm_mt, &index, end_addr - 1); in find_vma_intersection()
638 * - should be called with mm->mmap_lock at least held readlocked
650 * - not supported under NOMMU conditions
654 return -ENOMEM; in expand_stack_locked()
665 * - should be called with mm->mmap_lock at least held readlocked
678 if (vma->vm_start != addr) in find_vma_exact()
680 if (vma->vm_end != end) in find_vma_exact()
687 * determine whether a mapping should be permitted and, if so, what sort of
688 * mapping we're capable of supporting
703 return -EINVAL; in validate_mmap_request()
707 return -EINVAL; in validate_mmap_request()
710 return -EINVAL; in validate_mmap_request()
715 return -ENOMEM; in validate_mmap_request()
719 return -EOVERFLOW; in validate_mmap_request()
724 return -ENODEV; in validate_mmap_request()
727 * - we support chardevs that provide their own "memory" in validate_mmap_request()
728 * - we support files/blockdevs that are memory backed in validate_mmap_request()
730 if (file->f_op->mmap_capabilities) { in validate_mmap_request()
731 capabilities = file->f_op->mmap_capabilities(file); in validate_mmap_request()
735 switch (file_inode(file)->i_mode & S_IFMT) { in validate_mmap_request()
749 return -EINVAL; in validate_mmap_request()
755 if (!file->f_op->get_unmapped_area) in validate_mmap_request()
757 if (!(file->f_mode & FMODE_CAN_READ)) in validate_mmap_request()
761 if (!(file->f_mode & FMODE_READ)) in validate_mmap_request()
762 return -EACCES; in validate_mmap_request()
767 !(file->f_mode & FMODE_WRITE)) in validate_mmap_request()
768 return -EACCES; in validate_mmap_request()
771 (file->f_mode & FMODE_WRITE)) in validate_mmap_request()
772 return -EACCES; in validate_mmap_request()
775 return -ENODEV; in validate_mmap_request()
780 /* we're going to read the file into private memory we in validate_mmap_request()
783 return -ENODEV; in validate_mmap_request()
785 /* we don't permit a private writable mapping to be in validate_mmap_request()
799 return -EINVAL; in validate_mmap_request()
806 if (path_noexec(&file->f_path)) { in validate_mmap_request()
808 return -EPERM; in validate_mmap_request()
811 if (current->personality & READ_IMPLIES_EXEC) { in validate_mmap_request()
823 /* anonymous mappings are always memory backed and can be in validate_mmap_request()
830 (current->personality & READ_IMPLIES_EXEC)) in validate_mmap_request()
845 * we've determined that we can make the mapping, now translate what we
864 /* MAP_PRIVATE file mapping */ in determine_vm_flags()
870 if (!(prot & PROT_WRITE) && !current->ptrace) in determine_vm_flags()
872 * R/O private file mapping which cannot be used to in determine_vm_flags()
873 * modify memory, especially also not via active ptrace in determine_vm_flags()
876 * the file mapping, which will work e.g., on chardevs, in determine_vm_flags()
881 /* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */ in determine_vm_flags()
890 * set up a shared mapping on a file (the driver or filesystem provides and
897 ret = mmap_file(vma->vm_file, vma); in do_mmap_shared_file()
899 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_shared_file()
902 if (ret != -ENOSYS) in do_mmap_shared_file()
905 /* getting -ENOSYS indicates that direct mmap isn't possible (as in do_mmap_shared_file()
908 return -ENODEV; in do_mmap_shared_file()
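/*
 * Illustrative sketch (not part of this file): the do_mmap_shared_file() path
 * above as seen from userspace. On no-MMU, MAP_SHARED only works when the
 * driver or filesystem can hand out a directly mappable region; otherwise the
 * kernel returns -ENODEV as above and mmap() fails. The file path is a
 * hypothetical example.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_shared(const char *path, size_t len)
{
	int fd = open(path, O_RDWR);
	void *p;

	if (fd < 0)
		return NULL;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	return p == MAP_FAILED ? NULL : p;
}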
912 * set up a private mapping or an anonymous shared mapping
924 * Invoke the file's mapping function so that it can keep track of in do_mmap_private()
925 * shared mappings on devices or memory. VM_MAYOVERLAY will be set if in do_mmap_private()
930 ret = mmap_file(vma->vm_file, vma); in do_mmap_private()
932 if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags))) in do_mmap_private()
933 ret = -ENOSYS; in do_mmap_private()
935 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_private()
938 if (ret != -ENOSYS) in do_mmap_private()
947 /* allocate some memory to hold the mapping in do_mmap_private()
948 * - note that this may not return a page-aligned address if the object in do_mmap_private()
955 /* we don't want to allocate a power-of-2 sized page set */ in do_mmap_private()
956 if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) in do_mmap_private()
966 region->vm_flags = vma->vm_flags; in do_mmap_private()
967 region->vm_start = (unsigned long) base; in do_mmap_private()
968 region->vm_end = region->vm_start + len; in do_mmap_private()
969 region->vm_top = region->vm_start + (total << PAGE_SHIFT); in do_mmap_private()
971 vma->vm_start = region->vm_start; in do_mmap_private()
972 vma->vm_end = region->vm_start + len; in do_mmap_private()
974 if (vma->vm_file) { in do_mmap_private()
978 fpos = vma->vm_pgoff; in do_mmap_private()
981 ret = kernel_read(vma->vm_file, base, len, &fpos); in do_mmap_private()
987 memset(base + ret, 0, len - ret); in do_mmap_private()
996 free_page_series(region->vm_start, region->vm_top); in do_mmap_private()
997 region->vm_start = vma->vm_start = 0; in do_mmap_private()
998 region->vm_end = vma->vm_end = 0; in do_mmap_private()
999 region->vm_top = 0; in do_mmap_private()
1004 len, current->pid, current->comm); in do_mmap_private()
1006 return -ENOMEM; in do_mmap_private()
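/*
 * Illustrative sketch (not part of this file): a consequence of the
 * do_mmap_private() path above. A MAP_PRIVATE file mapping on no-MMU is
 * populated by copying the file into freshly allocated pages (kernel_read()
 * above, with the remainder zero-filled), so later changes to the file are
 * not reflected in the mapping. The path argument is hypothetical.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *private_copy_of(const char *path, size_t len)
{
	int fd = open(path, O_RDONLY);
	void *p;

	if (fd < 0)
		return NULL;
	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);	/* copied, not demand-paged */
	close(fd);
	return p == MAP_FAILED ? NULL : p;
}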
1010 * handle mapping creation for uClinux
1027 VMA_ITERATOR(vmi, current->mm, 0); in do_mmap()
1031 /* decide whether we should attempt the mapping, and if so what sort of in do_mmap()
1032 * mapping */ in do_mmap()
1042 /* we've determined that we can make the mapping, now translate what we in do_mmap()
1047 /* we're going to need to record the mapping */ in do_mmap()
1052 vma = vm_area_alloc(current->mm); in do_mmap()
1056 region->vm_usage = 1; in do_mmap()
1057 region->vm_flags = vm_flags; in do_mmap()
1058 region->vm_pgoff = pgoff; in do_mmap()
1061 vma->vm_pgoff = pgoff; in do_mmap()
1064 region->vm_file = get_file(file); in do_mmap()
1065 vma->vm_file = get_file(file); in do_mmap()
1071 * mmap() calls that overlap with our proposed mapping in do_mmap()
1072 * - we can only share with a superset match on most regular files in do_mmap()
1073 * - shared mappings on character devices and memory backed files are in do_mmap()
1082 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in do_mmap()
1088 if (!is_nommu_shared_mapping(pregion->vm_flags)) in do_mmap()
1092 if (file_inode(pregion->vm_file) != in do_mmap()
1096 if (pregion->vm_pgoff >= pgend) in do_mmap()
1099 rpglen = pregion->vm_end - pregion->vm_start; in do_mmap()
1100 rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; in do_mmap()
1101 rpgend = pregion->vm_pgoff + rpglen; in do_mmap()
1107 if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && in do_mmap()
1108 !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { in do_mmap()
1109 /* new mapping is not a subset of the region */ in do_mmap()
1116 pregion->vm_usage++; in do_mmap()
1117 vma->vm_region = pregion; in do_mmap()
1118 start = pregion->vm_start; in do_mmap()
1119 start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; in do_mmap()
1120 vma->vm_start = start; in do_mmap()
1121 vma->vm_end = start + len; in do_mmap()
1123 if (pregion->vm_flags & VM_MAPPED_COPY) in do_mmap()
1128 vma->vm_region = NULL; in do_mmap()
1129 vma->vm_start = 0; in do_mmap()
1130 vma->vm_end = 0; in do_mmap()
1131 pregion->vm_usage--; in do_mmap()
1136 fput(region->vm_file); in do_mmap()
1143 /* obtain the address at which to make a shared mapping in do_mmap()
1144 * - this is the hook for quasi-memory character devices to in do_mmap()
1145 * tell us the location of a shared mapping in do_mmap()
1148 addr = file->f_op->get_unmapped_area(file, addr, len, in do_mmap()
1152 if (ret != -ENOSYS) in do_mmap()
1156 * the mapping so we'll have to attempt to copy in do_mmap()
1158 ret = -ENODEV; in do_mmap()
1164 vma->vm_start = region->vm_start = addr; in do_mmap()
1165 vma->vm_end = region->vm_end = addr + len; in do_mmap()
1170 vma->vm_region = region; in do_mmap()
1172 /* set up the mapping in do_mmap()
1173 * - the region is filled in if NOMMU_MAP_DIRECT is still set in do_mmap()
1175 if (file && vma->vm_flags & VM_SHARED) in do_mmap()
1184 if (!vma->vm_file && in do_mmap()
1187 memset((void *)region->vm_start, 0, in do_mmap()
1188 region->vm_end - region->vm_start); in do_mmap()
1190 /* okay... we have a mapping; now we have to register it */ in do_mmap()
1191 result = vma->vm_start; in do_mmap()
1193 current->mm->total_vm += len >> PAGE_SHIFT; in do_mmap()
1196 BUG_ON(!vma->vm_region); in do_mmap()
1197 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); in do_mmap()
1201 setup_vma_to_mm(vma, current->mm); in do_mmap()
1202 current->mm->map_count++; in do_mmap()
1207 * mapping of it is made */ in do_mmap()
1208 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap()
1209 flush_icache_user_range(region->vm_start, region->vm_end); in do_mmap()
1210 region->vm_icache_flushed = true; in do_mmap()
1221 if (region->vm_file) in do_mmap()
1222 fput(region->vm_file); in do_mmap()
1224 if (vma->vm_file) in do_mmap()
1225 fput(vma->vm_file); in do_mmap()
1232 ret = -EINVAL; in do_mmap()
1238 len, current->pid); in do_mmap()
1240 return -ENOMEM; in do_mmap()
1244 len, current->pid); in do_mmap()
1246 return -ENOMEM; in do_mmap()
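/*
 * Illustrative sketch (not part of this file): the region-sharing logic in
 * do_mmap() above lets a second MAP_SHARED mapping of the same memory-backed
 * file reuse an existing region (pregion->vm_usage++), so both mappings see
 * the same storage. A memory-backed filesystem and the path are assumptions.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static int shared_mappings_alias(void)
{
	int fd = open("/tmp/shared.bin", O_RDWR);	/* memory-backed fs assumed */
	char *a, *b;

	if (fd < 0)
		return -1;
	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (a == MAP_FAILED || b == MAP_FAILED)
		return -1;
	a[0] = 'x';
	return b[0] == 'x' ? 0 : -1;	/* same backing storage */
}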
1254 unsigned long retval = -EBADF; in ksys_mmap_pgoff()
1293 return -EFAULT; in SYSCALL_DEFINE1()
1295 return -EINVAL; in SYSCALL_DEFINE1()
1316 if (vma->vm_file) in split_vma()
1317 return -ENOMEM; in split_vma()
1319 mm = vma->vm_mm; in split_vma()
1320 if (mm->map_count >= sysctl_max_map_count) in split_vma()
1321 return -ENOMEM; in split_vma()
1325 return -ENOMEM; in split_vma()
1332 *region = *vma->vm_region; in split_vma()
1333 new->vm_region = region; in split_vma()
1335 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1338 region->vm_top = region->vm_end = new->vm_end = addr; in split_vma()
1340 region->vm_start = new->vm_start = addr; in split_vma()
1341 region->vm_pgoff = new->vm_pgoff += npages; in split_vma()
1344 vma_iter_config(vmi, new->vm_start, new->vm_end); in split_vma()
1347 current->pid); in split_vma()
1351 if (new->vm_ops && new->vm_ops->open) in split_vma()
1352 new->vm_ops->open(new); in split_vma()
1355 delete_nommu_region(vma->vm_region); in split_vma()
1357 vma->vm_region->vm_start = vma->vm_start = addr; in split_vma()
1358 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
1360 vma->vm_region->vm_end = vma->vm_end = addr; in split_vma()
1361 vma->vm_region->vm_top = addr; in split_vma()
1363 add_nommu_region(vma->vm_region); in split_vma()
1364 add_nommu_region(new->vm_region); in split_vma()
1370 mm->map_count++; in split_vma()
1377 return -ENOMEM; in split_vma()
1392 if (from > vma->vm_start) { in vmi_shrink_vma()
1393 if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL)) in vmi_shrink_vma()
1394 return -ENOMEM; in vmi_shrink_vma()
1395 vma->vm_end = from; in vmi_shrink_vma()
1397 if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL)) in vmi_shrink_vma()
1398 return -ENOMEM; in vmi_shrink_vma()
1399 vma->vm_start = to; in vmi_shrink_vma()
1403 region = vma->vm_region; in vmi_shrink_vma()
1404 BUG_ON(region->vm_usage != 1); in vmi_shrink_vma()
1408 if (from > region->vm_start) { in vmi_shrink_vma()
1409 to = region->vm_top; in vmi_shrink_vma()
1410 region->vm_top = region->vm_end = from; in vmi_shrink_vma()
1412 region->vm_start = to; in vmi_shrink_vma()
1422 * release a mapping
1423 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1435 return -EINVAL; in do_munmap()
1444 pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n", in do_munmap()
1445 current->pid, current->comm, in do_munmap()
1446 start, start + len - 1); in do_munmap()
1449 return -EINVAL; in do_munmap()
1452 /* we're allowed to split an anonymous VMA but not a file-backed one */ in do_munmap()
1453 if (vma->vm_file) { in do_munmap()
1455 if (start > vma->vm_start) in do_munmap()
1456 return -EINVAL; in do_munmap()
1457 if (end == vma->vm_end) in do_munmap()
1461 return -EINVAL; in do_munmap()
1464 if (start == vma->vm_start && end == vma->vm_end) in do_munmap()
1466 if (start < vma->vm_start || end > vma->vm_end) in do_munmap()
1467 return -EINVAL; in do_munmap()
1469 return -EINVAL; in do_munmap()
1470 if (end != vma->vm_end && offset_in_page(end)) in do_munmap()
1471 return -EINVAL; in do_munmap()
1472 if (start != vma->vm_start && end != vma->vm_end) { in do_munmap()
1482 ret = -ENOMEM; in do_munmap()
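/*
 * Illustrative sketch (not part of this file): the do_munmap() rules above
 * mean a partial unmap of a file-backed mapping fails with EINVAL on no-MMU;
 * only anonymous VMAs may be split. 'map' and 'len' are assumed to describe
 * an existing file mapping of at least two pages.
 */
#include <errno.h>
#include <sys/mman.h>

static int partial_unmap(void *map, size_t len)
{
	if (munmap(map, len / 2) == -1 && errno == EINVAL)
		return munmap(map, len);	/* release the whole mapping instead */
	return 0;
}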
1490 struct mm_struct *mm = current->mm; in vm_munmap()
1516 mm->total_vm = 0; in exit_mmap()
1528 __mt_destroy(&mm->mm_mt); in exit_mmap()
1533 * expand (or shrink) an existing mapping, potentially moving it at the same
1536 * under NOMMU conditions, we only permit changing a mapping's size, and only
1552 return (unsigned long) -EINVAL; in do_mremap()
1555 return -EINVAL; in do_mremap()
1558 return (unsigned long) -EINVAL; in do_mremap()
1560 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1562 return (unsigned long) -EINVAL; in do_mremap()
1564 if (vma->vm_end != vma->vm_start + old_len) in do_mremap()
1565 return (unsigned long) -EFAULT; in do_mremap()
1567 if (is_nommu_shared_mapping(vma->vm_flags)) in do_mremap()
1568 return (unsigned long) -EPERM; in do_mremap()
1570 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) in do_mremap()
1571 return (unsigned long) -ENOMEM; in do_mremap()
1573 /* all checks complete - do it */ in do_mremap()
1574 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1575 return vma->vm_start; in do_mremap()
1584 mmap_write_lock(current->mm); in SYSCALL_DEFINE5()
1586 mmap_write_unlock(current->mm); in SYSCALL_DEFINE5()
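/*
 * Illustrative sketch (not part of this file): as the do_mremap() comment
 * above says, no-MMU mremap() can only grow or shrink a non-shared mapping in
 * place, within the region originally allocated. 'old' and 'old_len' are
 * assumed to describe such a mapping.
 */
#define _GNU_SOURCE
#include <sys/mman.h>

static void *shrink_in_place(void *old, size_t old_len, size_t new_len)
{
	/* no MREMAP_MAYMOVE: the mapping must stay where it is */
	void *p = mremap(old, old_len, new_len, 0);

	return p == MAP_FAILED ? NULL : p;
}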
1594 return -EINVAL; in remap_pfn_range()
1604 unsigned long vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1606 pfn += vma->vm_pgoff; in vm_iomap_memory()
1607 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
1614 unsigned int size = vma->vm_end - vma->vm_start; in remap_vmalloc_range()
1616 if (!(vma->vm_flags & VM_USERMAP)) in remap_vmalloc_range()
1617 return -EINVAL; in remap_vmalloc_range()
1619 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); in remap_vmalloc_range()
1620 vma->vm_end = vma->vm_start + size; in remap_vmalloc_range()
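/*
 * Illustrative sketch (not part of this file): a driver .mmap handler built on
 * remap_vmalloc_range() above. It only succeeds when the buffer carries
 * VM_USERMAP, i.e. it was allocated with vmalloc_user()/vmalloc_32_user() as
 * documented earlier. 'example_buf' is a hypothetical driver-global buffer.
 */
static void *example_buf;	/* allocated elsewhere with vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}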
1653 /* don't overrun this mapping */ in __access_remote_vm()
1654 if (addr + len >= vma->vm_end) in __access_remote_vm()
1655 len = vma->vm_end - addr; in __access_remote_vm()
1658 if (write && vma->vm_flags & VM_MAYWRITE) in __access_remote_vm()
1661 else if (!write && vma->vm_flags & VM_MAYREAD) in __access_remote_vm()
1676 * access_remote_vm - access another process' address space
1693 * - source/target buffer must be kernel space
1717 * If there is any error return -EFAULT.
1724 int ret = -EFAULT; in __copy_remote_vm_str()
1739 /* don't overrun this mapping */ in __copy_remote_vm_str()
1740 if (addr_end > vma->vm_end) in __copy_remote_vm_str()
1741 len = vma->vm_end - addr; in __copy_remote_vm_str()
1744 if (vma->vm_flags & VM_MAYREAD) { in __copy_remote_vm_str()
1747 ret = len - 1; in __copy_remote_vm_str()
1756 * copy_remote_vm_str - copy a string from another process's address space.
1766 * not including the trailing NUL. Always guaranteed to leave NUL-terminated
1767 * buffer. On any error, return -EFAULT.
1781 return -EFAULT; in copy_remote_vm_str()
1794 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1813 high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in nommu_shrink_inode_mappings()
1816 i_mmap_lock_read(inode->i_mapping); in nommu_shrink_inode_mappings()
1819 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { in nommu_shrink_inode_mappings()
1820 /* found one - only interested if it's shared out of the page in nommu_shrink_inode_mappings()
1822 if (vma->vm_flags & VM_SHARED) { in nommu_shrink_inode_mappings()
1823 i_mmap_unlock_read(inode->i_mapping); in nommu_shrink_inode_mappings()
1825 return -ETXTBSY; /* not quite true, but near enough */ in nommu_shrink_inode_mappings()
1829 /* reduce any regions that overlap the dead zone - if in existence, in nommu_shrink_inode_mappings()
1835 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { in nommu_shrink_inode_mappings()
1836 if (!(vma->vm_flags & VM_SHARED)) in nommu_shrink_inode_mappings()
1839 region = vma->vm_region; in nommu_shrink_inode_mappings()
1840 r_size = region->vm_top - region->vm_start; in nommu_shrink_inode_mappings()
1841 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; in nommu_shrink_inode_mappings()
1844 region->vm_top -= r_top - newsize; in nommu_shrink_inode_mappings()
1845 if (region->vm_end > region->vm_top) in nommu_shrink_inode_mappings()
1846 region->vm_end = region->vm_top; in nommu_shrink_inode_mappings()
1850 i_mmap_unlock_read(inode->i_mapping); in nommu_shrink_inode_mappings()
1858 * This is intended to prevent a user from starting a single memory hogging
1862 * The default value is min(3% of free memory, 128MB)
1880 * to log in and kill a memory hogging process.