/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@redhat.com>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);

/*
 * WARNING: the debugging will use recursive algorithms so never enable this
 * unless you know what you are doing.
 */
#undef DEBUG_MM_RB
/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);
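/*
 * Illustration of the table lookup above, assuming the generic VM_* bit
 * values (VM_READ 0x1, VM_WRITE 0x2, VM_EXEC 0x4, VM_SHARED 0x8):
 *
 *	vm_get_page_prot(VM_READ|VM_WRITE)		-> __P011
 *	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)	-> __S011
 *
 * The low three bits index within one half of the table, and VM_SHARED
 * selects between the private (__P) and shared (__S) halves.
 */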
int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio = 50;	/* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
atomic_t vm_committed_space = ATOMIC_INIT(0);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = global_page_state(NR_FILE_PAGES);
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * so only call it if we're about to fail.
		 */
		n = nr_free_pages();
		/*
		 * Leave reserved pages; they are not available for
		 * anonymous pages.
		 */
		if (n <= totalreserve_pages)
			goto error;
		else
			n -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = (totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	allowed -= current->mm->total_vm / 32;

	/*
	 * cast `allowed' as a signed long because vm_committed_space
	 * sometimes has a negative value
	 */
	if (atomic_read(&vm_committed_space) < (long)allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

EXPORT_SYMBOL(__vm_enough_memory);
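/*
 * For example, under the default OVERCOMMIT_GUESS policy above, the
 * "last 3% for root" reserve is approximated as a division by 32
 * (1/32 ~= 3.1%), so a borderline allocation may fail for an ordinary
 * task while still succeeding for one with cap_sys_admin.  The policy
 * and ratio are runtime tunables, e.g. from userspace:
 *
 *	echo 2  > /proc/sys/vm/overcommit_memory	(OVERCOMMIT_NEVER)
 *	echo 50 > /proc/sys/vm/overcommit_ratio		(percent of RAM)
 */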
/*
 * Requires inode->i_mapping->i_mmap_lock
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
	if (vma->vm_flags & VM_SHARED)
		mapping->i_mmap_writable--;

	flush_dcache_mmap_lock(mapping);
	if (unlikely(vma->vm_flags & VM_NONLINEAR))
		list_del_init(&vma->shared.vm_set.list);
	else
		vma_prio_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its prio_tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		__remove_shared_vm_struct(vma, file, mapping);
		spin_unlock(&mapping->i_mmap_lock);
	}
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_free(vma_policy(vma));
	kmem_cache_free(vm_area_cachep, vma);
	return next;
}

asmlinkage unsigned long sys_brk(unsigned long brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);

	if (brk < mm->end_code)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the
	 * test of oldbrk with newbrk then it can escape the test and let the
	 * data segment grow beyond its set limit in the case where the limit
	 * is not page aligned. -Ram Gupta
	 */
	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}
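/*
 * Typical flow, for illustration: a userspace malloc grows the heap via
 * brk()/sbrk(), which lands here.  Growing from oldbrk to newbrk is
 * just an anonymous do_brk() mapping; shrinking is a do_munmap() of the
 * tail.  Note that sys_brk never returns an error code: it always
 * returns the resulting break, and callers detect failure by comparing
 * that against the value they requested.
 */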
#ifdef DEBUG_MM_RB
static int browse_rb(struct rb_root *root)
{
	int i = 0, j;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev)
			printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
		if (vma->vm_start < pend)
			printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
		if (vma->vm_start > vma->vm_end)
			printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd)) {
		j++;
	}
	if (i != j)
		printk("backwards %d, forwards %d\n", j, i), i = 0;
	return i;
}

void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *tmp = mm->mmap;
	while (tmp) {
		tmp = tmp->vm_next;
		i++;
	}
	if (i != mm->map_count)
		printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
	i = browse_rb(&mm->mm_rb);
	if (i != mm->map_count)
		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
	BUG_ON(bug);
}
#else
#define validate_mm(mm) do { } while (0)
#endif

static struct vm_area_struct *
find_vma_prepare(struct mm_struct *mm, unsigned long addr,
		struct vm_area_struct **pprev, struct rb_node ***rb_link,
		struct rb_node **rb_parent)
{
	struct vm_area_struct *vma;
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;
	vma = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			vma = vma_tmp;
			if (vma_tmp->vm_start <= addr)
				return vma;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return vma;
}

static inline void
__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	if (prev) {
		vma->vm_next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			vma->vm_next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			vma->vm_next = NULL;
	}
}

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
}

static inline void __vma_link_file(struct vm_area_struct *vma)
{
	struct file *file;

	file = vma->vm_file;
	if (file) {
		struct address_space *mapping = file->f_mapping;

		if (vma->vm_flags & VM_DENYWRITE)
			atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
		if (vma->vm_flags & VM_SHARED)
			mapping->i_mmap_writable++;

		flush_dcache_mmap_lock(mapping);
		if (unlikely(vma->vm_flags & VM_NONLINEAR))
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		else
			vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}
}
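/*
 * A vma lives on up to three structures at once: the mm's address-
 * ordered singly-linked list (->vm_next), the mm's rbtree (->vm_rb)
 * for O(log n) lookup, and - for file mappings - the address_space's
 * i_mmap prio_tree used by rmap.  __vma_link below wires up the first
 * two plus the anon_vma list; __vma_link_file above handles the third.
 */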
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
	__anon_vma_link(vma);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file)
		mapping = vma->vm_file->f_mapping;

	if (mapping) {
		spin_lock(&mapping->i_mmap_lock);
		vma->vm_truncate_count = mapping->truncate_count;
	}
	anon_vma_lock(vma);

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	anon_vma_unlock(vma);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);

	mm->map_count++;
	validate_mm(mm);
}

/*
 * Helper for vma_adjust in the split_vma insert case:
 * insert vm structure into list and rbtree and anon_vma,
 * but it has already been inserted into prio_tree earlier.
 */
static void
__insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *__vma, *prev;
	struct rb_node **rb_link, *rb_parent;

	__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}

static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	prev->vm_next = vma->vm_next;
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	if (mm->mmap_cache == vma)
		mm->mmap_cache = prev;
}
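/*
 * Note that __vma_unlink above must also repoint mm->mmap_cache, the
 * one-entry cache consulted by find_vma(): leaving it aimed at a vma
 * that is about to be freed would hand out stale results.
 */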
/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary.  The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
void vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next;
	struct vm_area_struct *importer = NULL;
	struct address_space *mapping = NULL;
	struct prio_tree_root *root = NULL;
	struct file *file = vma->vm_file;
	struct anon_vma *anon_vma = NULL;
	long adjust_next = 0;
	int remove_next = 0;

	if (next && !insert) {
		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 */
again:			remove_next = 1 + (end > next->vm_end);
			end = next->vm_end;
			anon_vma = next->anon_vma;
			importer = vma;
		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
			anon_vma = next->anon_vma;
			importer = vma;
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
			anon_vma = next->anon_vma;
			importer = next;
		}
	}
	if (file) {
		mapping = file->f_mapping;
		if (!(vma->vm_flags & VM_NONLINEAR))
			root = &mapping->i_mmap;
		spin_lock(&mapping->i_mmap_lock);
		if (importer &&
		    vma->vm_truncate_count != next->vm_truncate_count) {
			/*
			 * unmap_mapping_range might be in progress:
			 * ensure that the expanding vma is rescanned.
			 */
			importer->vm_truncate_count = 0;
		}
		if (insert) {
			insert->vm_truncate_count = vma->vm_truncate_count;
			/*
			 * Put into prio_tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	/*
	 * When changing only vma->vm_end, we don't really need
	 * anon_vma lock: but is that case worth optimizing out?
	 */
	if (vma->anon_vma)
		anon_vma = vma->anon_vma;
	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (importer && !importer->anon_vma) {
			importer->anon_vma = anon_vma;
			__anon_vma_link(importer);
		}
	}

	if (root) {
		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, root);
		if (adjust_next)
			vma_prio_tree_remove(next, root);
	}

	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next << PAGE_SHIFT;
		next->vm_pgoff += adjust_next;
	}

	if (root) {
		if (adjust_next)
			vma_prio_tree_insert(next, root);
		vma_prio_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}
	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 */
		__vma_unlink(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
		if (next->anon_vma)
			__anon_vma_merge(vma, next);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	}

	if (anon_vma)
		spin_unlock(&anon_vma->lock);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);

	if (remove_next) {
		if (file)
			fput(file);
		mm->map_count--;
		mpol_free(vma_policy(next));
		kmem_cache_free(vm_area_cachep, next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		if (remove_next == 2) {
			next = vma->vm_next;
			goto again;
		}
	}

	validate_mm(mm);
}
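/*
 * A concrete walk-through of the remove_next == 2 path above, for
 * illustration: when mprotect makes three adjacent vmas mergeable at
 * once (case 6), vma_merge asks vma_adjust to extend prev up to
 * next->vm_end.  end then reaches past the first successor, so
 * remove_next becomes 2: after the first successor is unlinked and
 * freed, "goto again" comes back around to dispose of the second.
 */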
/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

static inline int is_mergeable_vma(struct vm_area_struct *vma,
			struct file *file, unsigned long vm_flags)
{
	if (vma->vm_flags != vm_flags)
		return 0;
	if (vma->vm_file != file)
		return 0;
	if (vma->vm_ops && vma->vm_ops->close)
		return 0;
	return 1;
}

static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2)
{
	return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmaps which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
		pgoff_t vm_pglen;
		vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return 1;
	}
	return 0;
}
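/*
 * Offset compatibility, by example (4k pages assumed): a vma covering
 * file pages 0-1 at [0x8000000, 0x8002000) has vm_pgoff 0 and
 * vm_pglen 2, so can_vma_merge_after accepts a request starting at
 * 0x8002000 only if it maps the file from page 2 - the merged vma must
 * remain a single linear window onto the file.
 */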
/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
		next = next->vm_next;

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
						anon_vma, file, pgoff)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma)) {
							/* cases 1, 6 */
			vma_adjust(prev, prev->vm_start,
				next->vm_end, prev->vm_pgoff, NULL);
		} else					/* cases 2, 5, 7 */
			vma_adjust(prev, prev->vm_start,
				end, prev->vm_pgoff, NULL);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			vma_adjust(prev, prev->vm_start,
				addr, prev->vm_pgoff, NULL);
		else					/* cases 3, 8 */
			vma_adjust(area, addr, next->vm_end,
				next->vm_pgoff - pglen, NULL);
		return area;
	}

	return NULL;
}
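/*
 * For illustration: if an anonymous area is unmapped and the hole is
 * later refilled by a new mapping with identical flags, prev, the new
 * range and next can all become compatible (case 1): a single
 * vma_adjust extends prev across the new range and next, and next is
 * then removed, leaving one vma where there were two plus a hole.
 */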
/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *near;
	unsigned long vm_flags;

	near = vma->vm_next;
	if (!near)
		goto try_prev;

	/*
	 * Since only mprotect tries to remerge vmas, match flags
	 * which might be mprotected into each other later on.
	 * Neither mlock nor madvise tries to remerge at present,
	 * so leave their flags as obstructing a merge.
	 */
	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);

	if (near->anon_vma && vma->vm_end == near->vm_start &&
			mpol_equal(vma_policy(vma), vma_policy(near)) &&
			can_vma_merge_before(near, vm_flags,
				NULL, vma->vm_file, vma->vm_pgoff +
				((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
		return near->anon_vma;
try_prev:
	/*
	 * It is potentially slow to have to call find_vma_prev here.
	 * But it's only on the first write fault on the vma, not
	 * every time, and we could devise a way to avoid it later
	 * (e.g. stash info in next's anon_vma_node when assigning
	 * an anon_vma, or when trying vma_merge).  Another time.
	 */
	BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
	if (!near)
		goto none;

	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);

	if (near->anon_vma && near->vm_end == vma->vm_start &&
			mpol_equal(vma_policy(near), vma_policy(vma)) &&
			can_vma_merge_after(near, vm_flags,
				NULL, vma->vm_file, vma->vm_pgoff))
		return near->anon_vma;
none:
	/*
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return NULL;
}
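/*
 * By example: mmap an anonymous region, mprotect part of it before it
 * is ever touched, then write to both pieces.  Without the check above
 * each piece would be given its own anon_vma on its first fault, and a
 * later mprotect restoring equal flags could never merge them again;
 * with it, the second piece adopts the first one's anon_vma.
 */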
#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
						struct file *file, long pages)
{
	const unsigned long stack_flags
		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);

	if (file) {
		mm->shared_vm += pages;
		if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
			mm->exec_vm += pages;
	} else if (flags & stack_flags)
		mm->stack_vm += pages;
	if (flags & (VM_RESERVED|VM_IO))
		mm->reserved_vm += pages;
}
#endif /* CONFIG_PROC_FS */

/*
 * The caller must hold down_write(current->mm->mmap_sem).
 */

unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct inode *inode;
	unsigned int vm_flags;
	int correct_wcount = 0;
	int error;
	struct rb_node **rb_link, *rb_parent;
	int accountable = 1;
	unsigned long charged = 0, reqprot = prot;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
			prot |= PROT_EXEC;

	if (!len)
		return -EINVAL;

	error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len || len > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)
		return addr;

	/* Do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED) {
		if (!can_do_mlock())
			return -EPERM;
		vm_flags |= VM_LOCKED;
	}
	/* mlock MCL_FUTURE? */
	if (vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
		lock_limit >>= PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}
	inode = file ? file->f_path.dentry->d_inode : NULL;

	if (file) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(inode))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}
			if (is_file_hugepages(file))
				accountable = 0;

			if (!file->f_op || !file->f_op->mmap)
				return -ENODEV;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}
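/*
 * At this point vm_flags is fully determined.  For illustration, on a
 * typical config mmap(NULL, len, PROT_READ|PROT_WRITE,
 * MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) arrives at the code below with
 * VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC (plus any
 * mm->def_flags), while the MAP_SHARED variant also carries
 * VM_SHARED|VM_MAYSHARE.
 */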
	error = security_file_mmap(file, reqprot, prot, flags);
	if (error)
		return error;

	/* Clear old maps */
	error = -ENOMEM;
munmap_back:
	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
	if (vma && vma->vm_start < addr + len) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/* Check against address space limit. */
	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (accountable && (!(flags & MAP_NORESERVE) ||
			    sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
		if (vm_flags & VM_SHARED) {
			/* Check memory availability in shmem_file_setup? */
			vm_flags |= VM_ACCOUNT;
		} else if (vm_flags & VM_WRITE) {
			/*
			 * Private writable mapping: check memory availability
			 */
			charged = len >> PAGE_SHIFT;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			vm_flags |= VM_ACCOUNT;
		}
	}

	/*
	 * Can we just expand an old private anonymous mapping?
	 * The VM_SHARED test is necessary because shmem_zero_setup
	 * will create the file object for a shared anonymous map below.
	 */
	if (!file && !(vm_flags & VM_SHARED) &&
	    vma_merge(mm, prev, addr, addr + len, vm_flags,
					NULL, NULL, pgoff, NULL))
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper.  The address has already been validated, and
	 * any old mappings overlapping it have been removed above, but
	 * it is not yet mapped.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
	vma->vm_pgoff = pgoff;

	if (file) {
		error = -EINVAL;
		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
			goto free_vma;
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
			correct_wcount = 1;
		}
		vma->vm_file = file;
		get_file(file);
		error = file->f_op->mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	}
11131da177e4SLinus Torvalds */ 11141da177e4SLinus Torvalds if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT)) 11151da177e4SLinus Torvalds vma->vm_flags &= ~VM_ACCOUNT; 11161da177e4SLinus Torvalds 11171da177e4SLinus Torvalds /* Can addr have changed?? 11181da177e4SLinus Torvalds * 11191da177e4SLinus Torvalds * Answer: Yes, several device drivers can do it in their 11201da177e4SLinus Torvalds * f_op->mmap method. -DaveM 11211da177e4SLinus Torvalds */ 11221da177e4SLinus Torvalds addr = vma->vm_start; 11231da177e4SLinus Torvalds pgoff = vma->vm_pgoff; 11241da177e4SLinus Torvalds vm_flags = vma->vm_flags; 11251da177e4SLinus Torvalds 1126d08b3851SPeter Zijlstra if (vma_wants_writenotify(vma)) 1127d08b3851SPeter Zijlstra vma->vm_page_prot = 1128d08b3851SPeter Zijlstra protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)]; 1129d08b3851SPeter Zijlstra 11301da177e4SLinus Torvalds if (!file || !vma_merge(mm, prev, addr, vma->vm_end, 11311da177e4SLinus Torvalds vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) { 11321da177e4SLinus Torvalds file = vma->vm_file; 11331da177e4SLinus Torvalds vma_link(mm, vma, prev, rb_link, rb_parent); 11341da177e4SLinus Torvalds if (correct_wcount) 11351da177e4SLinus Torvalds atomic_inc(&inode->i_writecount); 11361da177e4SLinus Torvalds } else { 11371da177e4SLinus Torvalds if (file) { 11381da177e4SLinus Torvalds if (correct_wcount) 11391da177e4SLinus Torvalds atomic_inc(&inode->i_writecount); 11401da177e4SLinus Torvalds fput(file); 11411da177e4SLinus Torvalds } 11421da177e4SLinus Torvalds mpol_free(vma_policy(vma)); 11431da177e4SLinus Torvalds kmem_cache_free(vm_area_cachep, vma); 11441da177e4SLinus Torvalds } 11451da177e4SLinus Torvalds out: 11461da177e4SLinus Torvalds mm->total_vm += len >> PAGE_SHIFT; 1147ab50b8edSHugh Dickins vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); 11481da177e4SLinus Torvalds if (vm_flags & VM_LOCKED) { 11491da177e4SLinus Torvalds mm->locked_vm += len >> PAGE_SHIFT; 11501da177e4SLinus Torvalds make_pages_present(addr, addr + len); 11511da177e4SLinus Torvalds } 11521da177e4SLinus Torvalds if (flags & MAP_POPULATE) { 11531da177e4SLinus Torvalds up_write(&mm->mmap_sem); 11541da177e4SLinus Torvalds sys_remap_file_pages(addr, len, 0, 11551da177e4SLinus Torvalds pgoff, flags & MAP_NONBLOCK); 11561da177e4SLinus Torvalds down_write(&mm->mmap_sem); 11571da177e4SLinus Torvalds } 11581da177e4SLinus Torvalds return addr; 11591da177e4SLinus Torvalds 11601da177e4SLinus Torvalds unmap_and_free_vma: 11611da177e4SLinus Torvalds if (correct_wcount) 11621da177e4SLinus Torvalds atomic_inc(&inode->i_writecount); 11631da177e4SLinus Torvalds vma->vm_file = NULL; 11641da177e4SLinus Torvalds fput(file); 11651da177e4SLinus Torvalds 11661da177e4SLinus Torvalds /* Undo any partial mapping done by a device driver. */ 1167e0da382cSHugh Dickins unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); 1168e0da382cSHugh Dickins charged = 0; 11691da177e4SLinus Torvalds free_vma: 11701da177e4SLinus Torvalds kmem_cache_free(vm_area_cachep, vma); 11711da177e4SLinus Torvalds unacct_error: 11721da177e4SLinus Torvalds if (charged) 11731da177e4SLinus Torvalds vm_unacct_memory(charged); 11741da177e4SLinus Torvalds return error; 11751da177e4SLinus Torvalds } 11761da177e4SLinus Torvalds 11771da177e4SLinus Torvalds EXPORT_SYMBOL(do_mmap_pgoff); 11781da177e4SLinus Torvalds 11791da177e4SLinus Torvalds /* Get an address range which is currently unmapped. 11801da177e4SLinus Torvalds * For shmat() with addr=0. 
11811da177e4SLinus Torvalds * 11821da177e4SLinus Torvalds * Ugly calling convention alert: 11831da177e4SLinus Torvalds * Return value with the low bits set means error value, 11841da177e4SLinus Torvalds * ie 11851da177e4SLinus Torvalds * if (ret & ~PAGE_MASK) 11861da177e4SLinus Torvalds * error = ret; 11871da177e4SLinus Torvalds * 11881da177e4SLinus Torvalds * This function "knows" that -ENOMEM has the bits set. 11891da177e4SLinus Torvalds */ 11901da177e4SLinus Torvalds #ifndef HAVE_ARCH_UNMAPPED_AREA 11911da177e4SLinus Torvalds unsigned long 11921da177e4SLinus Torvalds arch_get_unmapped_area(struct file *filp, unsigned long addr, 11931da177e4SLinus Torvalds unsigned long len, unsigned long pgoff, unsigned long flags) 11941da177e4SLinus Torvalds { 11951da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 11961da177e4SLinus Torvalds struct vm_area_struct *vma; 11971da177e4SLinus Torvalds unsigned long start_addr; 11981da177e4SLinus Torvalds 11991da177e4SLinus Torvalds if (len > TASK_SIZE) 12001da177e4SLinus Torvalds return -ENOMEM; 12011da177e4SLinus Torvalds 12021da177e4SLinus Torvalds if (addr) { 12031da177e4SLinus Torvalds addr = PAGE_ALIGN(addr); 12041da177e4SLinus Torvalds vma = find_vma(mm, addr); 12051da177e4SLinus Torvalds if (TASK_SIZE - len >= addr && 12061da177e4SLinus Torvalds (!vma || addr + len <= vma->vm_start)) 12071da177e4SLinus Torvalds return addr; 12081da177e4SLinus Torvalds } 12091363c3cdSWolfgang Wander if (len > mm->cached_hole_size) { 12101da177e4SLinus Torvalds start_addr = addr = mm->free_area_cache; 12111363c3cdSWolfgang Wander } else { 12121363c3cdSWolfgang Wander start_addr = addr = TASK_UNMAPPED_BASE; 12131363c3cdSWolfgang Wander mm->cached_hole_size = 0; 12141363c3cdSWolfgang Wander } 12151da177e4SLinus Torvalds 12161da177e4SLinus Torvalds full_search: 12171da177e4SLinus Torvalds for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { 12181da177e4SLinus Torvalds /* At this point: (!vma || addr < vma->vm_end). */ 12191da177e4SLinus Torvalds if (TASK_SIZE - len < addr) { 12201da177e4SLinus Torvalds /* 12211da177e4SLinus Torvalds * Start a new search - just in case we missed 12221da177e4SLinus Torvalds * some holes. 12231da177e4SLinus Torvalds */ 12241da177e4SLinus Torvalds if (start_addr != TASK_UNMAPPED_BASE) { 12251363c3cdSWolfgang Wander addr = TASK_UNMAPPED_BASE; 12261363c3cdSWolfgang Wander start_addr = addr; 12271363c3cdSWolfgang Wander mm->cached_hole_size = 0; 12281da177e4SLinus Torvalds goto full_search; 12291da177e4SLinus Torvalds } 12301da177e4SLinus Torvalds return -ENOMEM; 12311da177e4SLinus Torvalds } 12321da177e4SLinus Torvalds if (!vma || addr + len <= vma->vm_start) { 12331da177e4SLinus Torvalds /* 12341da177e4SLinus Torvalds * Remember the place where we stopped the search: 12351da177e4SLinus Torvalds */ 12361da177e4SLinus Torvalds mm->free_area_cache = addr + len; 12371da177e4SLinus Torvalds return addr; 12381da177e4SLinus Torvalds } 12391363c3cdSWolfgang Wander if (addr + mm->cached_hole_size < vma->vm_start) 12401363c3cdSWolfgang Wander mm->cached_hole_size = vma->vm_start - addr; 12411da177e4SLinus Torvalds addr = vma->vm_end; 12421da177e4SLinus Torvalds } 12431da177e4SLinus Torvalds } 12441da177e4SLinus Torvalds #endif 12451da177e4SLinus Torvalds 12461363c3cdSWolfgang Wander void arch_unmap_area(struct mm_struct *mm, unsigned long addr) 12471da177e4SLinus Torvalds { 12481da177e4SLinus Torvalds /* 12491da177e4SLinus Torvalds * Is this a new hole at the lowest possible address? 
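 * (If so, remember it: free_area_cache is where the next bottom-up
 * search will start, and cached_hole_size is reset to ~0UL so the size
 * check in arch_get_unmapped_area cannot skip over the new hole.)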
12501da177e4SLinus Torvalds */ 12511363c3cdSWolfgang Wander if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { 12521363c3cdSWolfgang Wander mm->free_area_cache = addr; 12531363c3cdSWolfgang Wander mm->cached_hole_size = ~0UL; 12541363c3cdSWolfgang Wander } 12551da177e4SLinus Torvalds } 12561da177e4SLinus Torvalds 12571da177e4SLinus Torvalds /* 12581da177e4SLinus Torvalds * This mmap-allocator allocates new areas top-down from below the 12591da177e4SLinus Torvalds * stack's low limit (the base): 12601da177e4SLinus Torvalds */ 12611da177e4SLinus Torvalds #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 12621da177e4SLinus Torvalds unsigned long 12631da177e4SLinus Torvalds arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, 12641da177e4SLinus Torvalds const unsigned long len, const unsigned long pgoff, 12651da177e4SLinus Torvalds const unsigned long flags) 12661da177e4SLinus Torvalds { 12671da177e4SLinus Torvalds struct vm_area_struct *vma; 12681da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 12691da177e4SLinus Torvalds unsigned long addr = addr0; 12701da177e4SLinus Torvalds 12711da177e4SLinus Torvalds /* requested length too big for entire address space */ 12721da177e4SLinus Torvalds if (len > TASK_SIZE) 12731da177e4SLinus Torvalds return -ENOMEM; 12741da177e4SLinus Torvalds 12751da177e4SLinus Torvalds /* requesting a specific address */ 12761da177e4SLinus Torvalds if (addr) { 12771da177e4SLinus Torvalds addr = PAGE_ALIGN(addr); 12781da177e4SLinus Torvalds vma = find_vma(mm, addr); 12791da177e4SLinus Torvalds if (TASK_SIZE - len >= addr && 12801da177e4SLinus Torvalds (!vma || addr + len <= vma->vm_start)) 12811da177e4SLinus Torvalds return addr; 12821da177e4SLinus Torvalds } 12831da177e4SLinus Torvalds 12841363c3cdSWolfgang Wander /* check if free_area_cache is useful for us */ 12851363c3cdSWolfgang Wander if (len <= mm->cached_hole_size) { 12861363c3cdSWolfgang Wander mm->cached_hole_size = 0; 12871363c3cdSWolfgang Wander mm->free_area_cache = mm->mmap_base; 12881363c3cdSWolfgang Wander } 12891363c3cdSWolfgang Wander 12901da177e4SLinus Torvalds /* either no address requested or can't fit in requested address hole */ 12911da177e4SLinus Torvalds addr = mm->free_area_cache; 12921da177e4SLinus Torvalds 12931da177e4SLinus Torvalds /* make sure it can fit in the remaining address space */ 129449a43876SLinus Torvalds if (addr > len) { 12951da177e4SLinus Torvalds vma = find_vma(mm, addr-len); 12961da177e4SLinus Torvalds if (!vma || addr <= vma->vm_start) 12971da177e4SLinus Torvalds /* remember the address as a hint for next time */ 12981da177e4SLinus Torvalds return (mm->free_area_cache = addr-len); 12991da177e4SLinus Torvalds } 13001da177e4SLinus Torvalds 130173219d17SChris Wright if (mm->mmap_base < len) 130273219d17SChris Wright goto bottomup; 130373219d17SChris Wright 13041da177e4SLinus Torvalds addr = mm->mmap_base-len; 13051da177e4SLinus Torvalds 13061da177e4SLinus Torvalds do { 13071da177e4SLinus Torvalds /* 13081da177e4SLinus Torvalds * Lookup failure means no vma is above this address, 13091da177e4SLinus Torvalds * else if new region fits below vma->vm_start, 13101da177e4SLinus Torvalds * return with success: 13111da177e4SLinus Torvalds */ 13121da177e4SLinus Torvalds vma = find_vma(mm, addr); 13131da177e4SLinus Torvalds if (!vma || addr+len <= vma->vm_start) 13141da177e4SLinus Torvalds /* remember the address as a hint for next time */ 13151da177e4SLinus Torvalds return (mm->free_area_cache = addr); 13161da177e4SLinus Torvalds 13171363c3cdSWolfgang 
Wander /* remember the largest hole we saw so far */ 13181363c3cdSWolfgang Wander if (addr + mm->cached_hole_size < vma->vm_start) 13191363c3cdSWolfgang Wander mm->cached_hole_size = vma->vm_start - addr; 13201363c3cdSWolfgang Wander 13211da177e4SLinus Torvalds /* try just below the current vma->vm_start */ 13221da177e4SLinus Torvalds addr = vma->vm_start-len; 132349a43876SLinus Torvalds } while (len < vma->vm_start); 13241da177e4SLinus Torvalds 132573219d17SChris Wright bottomup: 13261da177e4SLinus Torvalds /* 13271da177e4SLinus Torvalds * A failed mmap() very likely causes application failure, 13281da177e4SLinus Torvalds * so fall back to the bottom-up function here. This scenario 13291da177e4SLinus Torvalds * can happen with large stack limits and large mmap() 13301da177e4SLinus Torvalds * allocations. 13311da177e4SLinus Torvalds */ 13321363c3cdSWolfgang Wander mm->cached_hole_size = ~0UL; 13331da177e4SLinus Torvalds mm->free_area_cache = TASK_UNMAPPED_BASE; 13341da177e4SLinus Torvalds addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); 13351da177e4SLinus Torvalds /* 13361da177e4SLinus Torvalds * Restore the topdown base: 13371da177e4SLinus Torvalds */ 13381da177e4SLinus Torvalds mm->free_area_cache = mm->mmap_base; 13391363c3cdSWolfgang Wander mm->cached_hole_size = ~0UL; 13401da177e4SLinus Torvalds 13411da177e4SLinus Torvalds return addr; 13421da177e4SLinus Torvalds } 13431da177e4SLinus Torvalds #endif 13441da177e4SLinus Torvalds 13451363c3cdSWolfgang Wander void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) 13461da177e4SLinus Torvalds { 13471da177e4SLinus Torvalds /* 13481da177e4SLinus Torvalds * Is this a new hole at the highest possible address? 13491da177e4SLinus Torvalds */ 13501363c3cdSWolfgang Wander if (addr > mm->free_area_cache) 13511363c3cdSWolfgang Wander mm->free_area_cache = addr; 13521da177e4SLinus Torvalds 13531da177e4SLinus Torvalds /* don't allow allocations above current base */ 13541363c3cdSWolfgang Wander if (mm->free_area_cache > mm->mmap_base) 13551363c3cdSWolfgang Wander mm->free_area_cache = mm->mmap_base; 13561da177e4SLinus Torvalds } 13571da177e4SLinus Torvalds 13581da177e4SLinus Torvalds unsigned long 13591da177e4SLinus Torvalds get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, 13601da177e4SLinus Torvalds unsigned long pgoff, unsigned long flags) 13611da177e4SLinus Torvalds { 13621da177e4SLinus Torvalds unsigned long ret; 13631da177e4SLinus Torvalds 136407ab67c8SLinus Torvalds if (!(flags & MAP_FIXED)) { 136507ab67c8SLinus Torvalds unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 136607ab67c8SLinus Torvalds 136707ab67c8SLinus Torvalds get_area = current->mm->get_unmapped_area; 136807ab67c8SLinus Torvalds if (file && file->f_op && file->f_op->get_unmapped_area) 136907ab67c8SLinus Torvalds get_area = file->f_op->get_unmapped_area; 137007ab67c8SLinus Torvalds addr = get_area(file, addr, len, pgoff, flags); 137107ab67c8SLinus Torvalds if (IS_ERR_VALUE(addr)) 137207ab67c8SLinus Torvalds return addr; 137307ab67c8SLinus Torvalds } 137407ab67c8SLinus Torvalds 13751da177e4SLinus Torvalds if (addr > TASK_SIZE - len) 13761da177e4SLinus Torvalds return -ENOMEM; 13771da177e4SLinus Torvalds if (addr & ~PAGE_MASK) 13781da177e4SLinus Torvalds return -EINVAL; 13791da177e4SLinus Torvalds if (file && is_file_hugepages(file)) { 13801da177e4SLinus Torvalds /* 13811da177e4SLinus Torvalds * Check if the given range is hugepage aligned, and 13821da177e4SLinus Torvalds * can 
be made suitable for hugepages. 13831da177e4SLinus Torvalds */ 138468589bc3SHugh Dickins ret = prepare_hugepage_range(addr, len, pgoff); 13851da177e4SLinus Torvalds } else { 13861da177e4SLinus Torvalds /* 13871da177e4SLinus Torvalds * Ensure that a normal request is not falling in a 13881da177e4SLinus Torvalds * reserved hugepage range. For some archs like IA-64, 13891da177e4SLinus Torvalds * there is a separate region for hugepages. 13901da177e4SLinus Torvalds */ 13911da177e4SLinus Torvalds ret = is_hugepage_only_range(current->mm, addr, len); 13921da177e4SLinus Torvalds } 13931da177e4SLinus Torvalds if (ret) 13941da177e4SLinus Torvalds return -EINVAL; 13951da177e4SLinus Torvalds return addr; 13961da177e4SLinus Torvalds } 13971da177e4SLinus Torvalds 13981da177e4SLinus Torvalds EXPORT_SYMBOL(get_unmapped_area); 13991da177e4SLinus Torvalds 14001da177e4SLinus Torvalds /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 14011da177e4SLinus Torvalds struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr) 14021da177e4SLinus Torvalds { 14031da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 14041da177e4SLinus Torvalds 14051da177e4SLinus Torvalds if (mm) { 14061da177e4SLinus Torvalds /* Check the cache first. */ 14071da177e4SLinus Torvalds /* (Cache hit rate is typically around 35%.) */ 14081da177e4SLinus Torvalds vma = mm->mmap_cache; 14091da177e4SLinus Torvalds if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { 14101da177e4SLinus Torvalds struct rb_node * rb_node; 14111da177e4SLinus Torvalds 14121da177e4SLinus Torvalds rb_node = mm->mm_rb.rb_node; 14131da177e4SLinus Torvalds vma = NULL; 14141da177e4SLinus Torvalds 14151da177e4SLinus Torvalds while (rb_node) { 14161da177e4SLinus Torvalds struct vm_area_struct * vma_tmp; 14171da177e4SLinus Torvalds 14181da177e4SLinus Torvalds vma_tmp = rb_entry(rb_node, 14191da177e4SLinus Torvalds struct vm_area_struct, vm_rb); 14201da177e4SLinus Torvalds 14211da177e4SLinus Torvalds if (vma_tmp->vm_end > addr) { 14221da177e4SLinus Torvalds vma = vma_tmp; 14231da177e4SLinus Torvalds if (vma_tmp->vm_start <= addr) 14241da177e4SLinus Torvalds break; 14251da177e4SLinus Torvalds rb_node = rb_node->rb_left; 14261da177e4SLinus Torvalds } else 14271da177e4SLinus Torvalds rb_node = rb_node->rb_right; 14281da177e4SLinus Torvalds } 14291da177e4SLinus Torvalds if (vma) 14301da177e4SLinus Torvalds mm->mmap_cache = vma; 14311da177e4SLinus Torvalds } 14321da177e4SLinus Torvalds } 14331da177e4SLinus Torvalds return vma; 14341da177e4SLinus Torvalds } 14351da177e4SLinus Torvalds 14361da177e4SLinus Torvalds EXPORT_SYMBOL(find_vma); 14371da177e4SLinus Torvalds 14381da177e4SLinus Torvalds /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */ 14391da177e4SLinus Torvalds struct vm_area_struct * 14401da177e4SLinus Torvalds find_vma_prev(struct mm_struct *mm, unsigned long addr, 14411da177e4SLinus Torvalds struct vm_area_struct **pprev) 14421da177e4SLinus Torvalds { 14431da177e4SLinus Torvalds struct vm_area_struct *vma = NULL, *prev = NULL; 14441da177e4SLinus Torvalds struct rb_node * rb_node; 14451da177e4SLinus Torvalds if (!mm) 14461da177e4SLinus Torvalds goto out; 14471da177e4SLinus Torvalds 14481da177e4SLinus Torvalds /* Guard against addr being lower than the first VMA */ 14491da177e4SLinus Torvalds vma = mm->mmap; 14501da177e4SLinus Torvalds 14511da177e4SLinus Torvalds /* Go through the RB tree quickly. 
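 * (Find the last vma whose vm_end is at or below addr; that becomes
 * prev, and prev->vm_next is then the first vma ending above addr.)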
*/ 14521da177e4SLinus Torvalds rb_node = mm->mm_rb.rb_node; 14531da177e4SLinus Torvalds 14541da177e4SLinus Torvalds while (rb_node) { 14551da177e4SLinus Torvalds struct vm_area_struct *vma_tmp; 14561da177e4SLinus Torvalds vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); 14571da177e4SLinus Torvalds 14581da177e4SLinus Torvalds if (addr < vma_tmp->vm_end) { 14591da177e4SLinus Torvalds rb_node = rb_node->rb_left; 14601da177e4SLinus Torvalds } else { 14611da177e4SLinus Torvalds prev = vma_tmp; 14621da177e4SLinus Torvalds if (!prev->vm_next || (addr < prev->vm_next->vm_end)) 14631da177e4SLinus Torvalds break; 14641da177e4SLinus Torvalds rb_node = rb_node->rb_right; 14651da177e4SLinus Torvalds } 14661da177e4SLinus Torvalds } 14671da177e4SLinus Torvalds 14681da177e4SLinus Torvalds out: 14691da177e4SLinus Torvalds *pprev = prev; 14701da177e4SLinus Torvalds return prev ? prev->vm_next : vma; 14711da177e4SLinus Torvalds } 14721da177e4SLinus Torvalds 14731da177e4SLinus Torvalds /* 14741da177e4SLinus Torvalds * Verify that the stack growth is acceptable and 14751da177e4SLinus Torvalds * update accounting. This is shared with both the 14761da177e4SLinus Torvalds * grow-up and grow-down cases. 14771da177e4SLinus Torvalds */ 14781da177e4SLinus Torvalds static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow) 14791da177e4SLinus Torvalds { 14801da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 14811da177e4SLinus Torvalds struct rlimit *rlim = current->signal->rlim; 14820d59a01bSAdam Litke unsigned long new_start; 14831da177e4SLinus Torvalds 14841da177e4SLinus Torvalds /* address space limit tests */ 1485119f657cSakpm@osdl.org if (!may_expand_vm(mm, grow)) 14861da177e4SLinus Torvalds return -ENOMEM; 14871da177e4SLinus Torvalds 14881da177e4SLinus Torvalds /* Stack limit test */ 14891da177e4SLinus Torvalds if (size > rlim[RLIMIT_STACK].rlim_cur) 14901da177e4SLinus Torvalds return -ENOMEM; 14911da177e4SLinus Torvalds 14921da177e4SLinus Torvalds /* mlock limit tests */ 14931da177e4SLinus Torvalds if (vma->vm_flags & VM_LOCKED) { 14941da177e4SLinus Torvalds unsigned long locked; 14951da177e4SLinus Torvalds unsigned long limit; 14961da177e4SLinus Torvalds locked = mm->locked_vm + grow; 14971da177e4SLinus Torvalds limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; 14981da177e4SLinus Torvalds if (locked > limit && !capable(CAP_IPC_LOCK)) 14991da177e4SLinus Torvalds return -ENOMEM; 15001da177e4SLinus Torvalds } 15011da177e4SLinus Torvalds 15020d59a01bSAdam Litke /* Check to ensure the stack will not grow into a hugetlb-only region */ 15030d59a01bSAdam Litke new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : 15040d59a01bSAdam Litke vma->vm_end - size; 15050d59a01bSAdam Litke if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 15060d59a01bSAdam Litke return -EFAULT; 15070d59a01bSAdam Litke 15081da177e4SLinus Torvalds /* 15091da177e4SLinus Torvalds * Overcommit.. This must be the final test, as it will 15101da177e4SLinus Torvalds * update security statistics. 
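 * (On success security_vm_enough_memory() actually charges the pages
 * to the committed-memory pool, so every check that may still fail
 * has to come before it.)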
15111da177e4SLinus Torvalds */ 15121da177e4SLinus Torvalds if (security_vm_enough_memory(grow)) 15131da177e4SLinus Torvalds return -ENOMEM; 15141da177e4SLinus Torvalds 15151da177e4SLinus Torvalds /* Ok, everything looks good - let it rip */ 15161da177e4SLinus Torvalds mm->total_vm += grow; 15171da177e4SLinus Torvalds if (vma->vm_flags & VM_LOCKED) 15181da177e4SLinus Torvalds mm->locked_vm += grow; 1519ab50b8edSHugh Dickins vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); 15201da177e4SLinus Torvalds return 0; 15211da177e4SLinus Torvalds } 15221da177e4SLinus Torvalds 152346dea3d0SHugh Dickins #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) 15241da177e4SLinus Torvalds /* 152546dea3d0SHugh Dickins * PA-RISC uses this for its stack; IA64 for its Register Backing Store. 152646dea3d0SHugh Dickins * vma is the last one with address > vma->vm_end. Have to extend vma. 15271da177e4SLinus Torvalds */ 15289ab88515SMatthew Wilcox #ifndef CONFIG_IA64 152946dea3d0SHugh Dickins static inline 153046dea3d0SHugh Dickins #endif 153146dea3d0SHugh Dickins int expand_upwards(struct vm_area_struct *vma, unsigned long address) 15321da177e4SLinus Torvalds { 15331da177e4SLinus Torvalds int error; 15341da177e4SLinus Torvalds 15351da177e4SLinus Torvalds if (!(vma->vm_flags & VM_GROWSUP)) 15361da177e4SLinus Torvalds return -EFAULT; 15371da177e4SLinus Torvalds 15381da177e4SLinus Torvalds /* 15391da177e4SLinus Torvalds * We must make sure the anon_vma is allocated 15401da177e4SLinus Torvalds * so that the anon_vma locking is not a noop. 15411da177e4SLinus Torvalds */ 15421da177e4SLinus Torvalds if (unlikely(anon_vma_prepare(vma))) 15431da177e4SLinus Torvalds return -ENOMEM; 15441da177e4SLinus Torvalds anon_vma_lock(vma); 15451da177e4SLinus Torvalds 15461da177e4SLinus Torvalds /* 15471da177e4SLinus Torvalds * vma->vm_start/vm_end cannot change under us because the caller 15481da177e4SLinus Torvalds * is required to hold the mmap_sem in read mode. We need the 15491da177e4SLinus Torvalds * anon_vma lock to serialize against concurrent expand_stacks. 
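 * (Below, the faulting address plus a little slack, 4 bytes here, is
 * rounded up to the next page boundary before being compared against
 * vm_end.)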
15501da177e4SLinus Torvalds */ 15511da177e4SLinus Torvalds address += 4 + PAGE_SIZE - 1; 15521da177e4SLinus Torvalds address &= PAGE_MASK; 15531da177e4SLinus Torvalds error = 0; 15541da177e4SLinus Torvalds 15551da177e4SLinus Torvalds /* Somebody else might have raced and expanded it already */ 15561da177e4SLinus Torvalds if (address > vma->vm_end) { 15571da177e4SLinus Torvalds unsigned long size, grow; 15581da177e4SLinus Torvalds 15591da177e4SLinus Torvalds size = address - vma->vm_start; 15601da177e4SLinus Torvalds grow = (address - vma->vm_end) >> PAGE_SHIFT; 15611da177e4SLinus Torvalds 15621da177e4SLinus Torvalds error = acct_stack_growth(vma, size, grow); 15631da177e4SLinus Torvalds if (!error) 15641da177e4SLinus Torvalds vma->vm_end = address; 15651da177e4SLinus Torvalds } 15661da177e4SLinus Torvalds anon_vma_unlock(vma); 15671da177e4SLinus Torvalds return error; 15681da177e4SLinus Torvalds } 156946dea3d0SHugh Dickins #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ 157046dea3d0SHugh Dickins 157146dea3d0SHugh Dickins #ifdef CONFIG_STACK_GROWSUP 157246dea3d0SHugh Dickins int expand_stack(struct vm_area_struct *vma, unsigned long address) 157346dea3d0SHugh Dickins { 157446dea3d0SHugh Dickins return expand_upwards(vma, address); 157546dea3d0SHugh Dickins } 15761da177e4SLinus Torvalds 15771da177e4SLinus Torvalds struct vm_area_struct * 15781da177e4SLinus Torvalds find_extend_vma(struct mm_struct *mm, unsigned long addr) 15791da177e4SLinus Torvalds { 15801da177e4SLinus Torvalds struct vm_area_struct *vma, *prev; 15811da177e4SLinus Torvalds 15821da177e4SLinus Torvalds addr &= PAGE_MASK; 15831da177e4SLinus Torvalds vma = find_vma_prev(mm, addr, &prev); 15841da177e4SLinus Torvalds if (vma && (vma->vm_start <= addr)) 15851da177e4SLinus Torvalds return vma; 15861da177e4SLinus Torvalds if (!prev || expand_stack(prev, addr)) 15871da177e4SLinus Torvalds return NULL; 15881da177e4SLinus Torvalds if (prev->vm_flags & VM_LOCKED) { 15891da177e4SLinus Torvalds make_pages_present(addr, prev->vm_end); 15901da177e4SLinus Torvalds } 15911da177e4SLinus Torvalds return prev; 15921da177e4SLinus Torvalds } 15931da177e4SLinus Torvalds #else 15941da177e4SLinus Torvalds /* 15951da177e4SLinus Torvalds * vma is the first one with address < vma->vm_start. Have to extend vma. 15961da177e4SLinus Torvalds */ 15971da177e4SLinus Torvalds int expand_stack(struct vm_area_struct *vma, unsigned long address) 15981da177e4SLinus Torvalds { 15991da177e4SLinus Torvalds int error; 16001da177e4SLinus Torvalds 16011da177e4SLinus Torvalds /* 16021da177e4SLinus Torvalds * We must make sure the anon_vma is allocated 16031da177e4SLinus Torvalds * so that the anon_vma locking is not a noop. 16041da177e4SLinus Torvalds */ 16051da177e4SLinus Torvalds if (unlikely(anon_vma_prepare(vma))) 16061da177e4SLinus Torvalds return -ENOMEM; 16071da177e4SLinus Torvalds anon_vma_lock(vma); 16081da177e4SLinus Torvalds 16091da177e4SLinus Torvalds /* 16101da177e4SLinus Torvalds * vma->vm_start/vm_end cannot change under us because the caller 16111da177e4SLinus Torvalds * is required to hold the mmap_sem in read mode. We need the 16121da177e4SLinus Torvalds * anon_vma lock to serialize against concurrent expand_stacks. 
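 * (Unlike the grow-up case, growing down must move vm_start and
 * retreat vm_pgoff by the same number of pages, so the offset stays
 * in step with the new start address.)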
16131da177e4SLinus Torvalds */ 16141da177e4SLinus Torvalds address &= PAGE_MASK; 16151da177e4SLinus Torvalds error = 0; 16161da177e4SLinus Torvalds 16171da177e4SLinus Torvalds /* Somebody else might have raced and expanded it already */ 16181da177e4SLinus Torvalds if (address < vma->vm_start) { 16191da177e4SLinus Torvalds unsigned long size, grow; 16201da177e4SLinus Torvalds 16211da177e4SLinus Torvalds size = vma->vm_end - address; 16221da177e4SLinus Torvalds grow = (vma->vm_start - address) >> PAGE_SHIFT; 16231da177e4SLinus Torvalds 16241da177e4SLinus Torvalds error = acct_stack_growth(vma, size, grow); 16251da177e4SLinus Torvalds if (!error) { 16261da177e4SLinus Torvalds vma->vm_start = address; 16271da177e4SLinus Torvalds vma->vm_pgoff -= grow; 16281da177e4SLinus Torvalds } 16291da177e4SLinus Torvalds } 16301da177e4SLinus Torvalds anon_vma_unlock(vma); 16311da177e4SLinus Torvalds return error; 16321da177e4SLinus Torvalds } 16331da177e4SLinus Torvalds 16341da177e4SLinus Torvalds struct vm_area_struct * 16351da177e4SLinus Torvalds find_extend_vma(struct mm_struct * mm, unsigned long addr) 16361da177e4SLinus Torvalds { 16371da177e4SLinus Torvalds struct vm_area_struct * vma; 16381da177e4SLinus Torvalds unsigned long start; 16391da177e4SLinus Torvalds 16401da177e4SLinus Torvalds addr &= PAGE_MASK; 16411da177e4SLinus Torvalds vma = find_vma(mm,addr); 16421da177e4SLinus Torvalds if (!vma) 16431da177e4SLinus Torvalds return NULL; 16441da177e4SLinus Torvalds if (vma->vm_start <= addr) 16451da177e4SLinus Torvalds return vma; 16461da177e4SLinus Torvalds if (!(vma->vm_flags & VM_GROWSDOWN)) 16471da177e4SLinus Torvalds return NULL; 16481da177e4SLinus Torvalds start = vma->vm_start; 16491da177e4SLinus Torvalds if (expand_stack(vma, addr)) 16501da177e4SLinus Torvalds return NULL; 16511da177e4SLinus Torvalds if (vma->vm_flags & VM_LOCKED) { 16521da177e4SLinus Torvalds make_pages_present(addr, start); 16531da177e4SLinus Torvalds } 16541da177e4SLinus Torvalds return vma; 16551da177e4SLinus Torvalds } 16561da177e4SLinus Torvalds #endif 16571da177e4SLinus Torvalds 16582c0b3814SHugh Dickins /* 16592c0b3814SHugh Dickins * Ok - we have the memory areas we should free on the vma list, 16602c0b3814SHugh Dickins * so release them, and do the vma updates. 16611da177e4SLinus Torvalds * 16622c0b3814SHugh Dickins * Called with the mm semaphore held. 16631da177e4SLinus Torvalds */ 16642c0b3814SHugh Dickins static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) 16651da177e4SLinus Torvalds { 1666365e9c87SHugh Dickins /* Update high watermark before we lower total_vm */ 1667365e9c87SHugh Dickins update_hiwater_vm(mm); 16682c0b3814SHugh Dickins do { 1669ab50b8edSHugh Dickins long nrpages = vma_pages(vma); 16701da177e4SLinus Torvalds 1671ab50b8edSHugh Dickins mm->total_vm -= nrpages; 1672ab50b8edSHugh Dickins if (vma->vm_flags & VM_LOCKED) 1673ab50b8edSHugh Dickins mm->locked_vm -= nrpages; 1674ab50b8edSHugh Dickins vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); 1675a8fb5618SHugh Dickins vma = remove_vma(vma); 1676146425a3SHugh Dickins } while (vma); 16771da177e4SLinus Torvalds validate_mm(mm); 16781da177e4SLinus Torvalds } 16791da177e4SLinus Torvalds 16801da177e4SLinus Torvalds /* 16811da177e4SLinus Torvalds * Get rid of page table information in the indicated region. 16821da177e4SLinus Torvalds * 1683f10df686SPaolo 'Blaisorblade' Giarrusso * Called with the mm semaphore held. 
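 * (The sequence below: drain the per-cpu pagevecs, gather a TLB batch,
 * unmap the pages while accumulating the accounted page count, refund
 * that count to the commit pool, then free whatever page tables were
 * left covering the hole.)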
16841da177e4SLinus Torvalds */ 16851da177e4SLinus Torvalds static void unmap_region(struct mm_struct *mm, 1686e0da382cSHugh Dickins struct vm_area_struct *vma, struct vm_area_struct *prev, 1687e0da382cSHugh Dickins unsigned long start, unsigned long end) 16881da177e4SLinus Torvalds { 1689e0da382cSHugh Dickins struct vm_area_struct *next = prev? prev->vm_next: mm->mmap; 16901da177e4SLinus Torvalds struct mmu_gather *tlb; 16911da177e4SLinus Torvalds unsigned long nr_accounted = 0; 16921da177e4SLinus Torvalds 16931da177e4SLinus Torvalds lru_add_drain(); 16941da177e4SLinus Torvalds tlb = tlb_gather_mmu(mm, 0); 1695365e9c87SHugh Dickins update_hiwater_rss(mm); 1696508034a3SHugh Dickins unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); 16971da177e4SLinus Torvalds vm_unacct_memory(nr_accounted); 1698e2cdef8cSHugh Dickins free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS, 1699e0da382cSHugh Dickins next? next->vm_start: 0); 17001da177e4SLinus Torvalds tlb_finish_mmu(tlb, start, end); 17011da177e4SLinus Torvalds } 17021da177e4SLinus Torvalds 17031da177e4SLinus Torvalds /* 17041da177e4SLinus Torvalds * Create a list of vma's touched by the unmap, removing them from the mm's 17051da177e4SLinus Torvalds * vma list as we go. 17061da177e4SLinus Torvalds */ 17071da177e4SLinus Torvalds static void 17081da177e4SLinus Torvalds detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, 17091da177e4SLinus Torvalds struct vm_area_struct *prev, unsigned long end) 17101da177e4SLinus Torvalds { 17111da177e4SLinus Torvalds struct vm_area_struct **insertion_point; 17121da177e4SLinus Torvalds struct vm_area_struct *tail_vma = NULL; 17131363c3cdSWolfgang Wander unsigned long addr; 17141da177e4SLinus Torvalds 17151da177e4SLinus Torvalds insertion_point = (prev ? &prev->vm_next : &mm->mmap); 17161da177e4SLinus Torvalds do { 17171da177e4SLinus Torvalds rb_erase(&vma->vm_rb, &mm->mm_rb); 17181da177e4SLinus Torvalds mm->map_count--; 17191da177e4SLinus Torvalds tail_vma = vma; 17201da177e4SLinus Torvalds vma = vma->vm_next; 17211da177e4SLinus Torvalds } while (vma && vma->vm_start < end); 17221da177e4SLinus Torvalds *insertion_point = vma; 17231da177e4SLinus Torvalds tail_vma->vm_next = NULL; 17241363c3cdSWolfgang Wander if (mm->unmap_area == arch_unmap_area) 17251363c3cdSWolfgang Wander addr = prev ? prev->vm_end : mm->mmap_base; 17261363c3cdSWolfgang Wander else 17271363c3cdSWolfgang Wander addr = vma ? vma->vm_start : mm->mmap_base; 17281363c3cdSWolfgang Wander mm->unmap_area(mm, addr); 17291da177e4SLinus Torvalds mm->mmap_cache = NULL; /* Kill the cache. */ 17301da177e4SLinus Torvalds } 17311da177e4SLinus Torvalds 17321da177e4SLinus Torvalds /* 17331da177e4SLinus Torvalds * Split a vma into two pieces at address 'addr'; a new vma is allocated 17341da177e4SLinus Torvalds * for either the first part or the tail. 
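 * (new_below != 0 means the new vma is the lower piece, covering
 * [vm_start, addr); otherwise the new vma covers [addr, vm_end).)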
17351da177e4SLinus Torvalds */ 17361da177e4SLinus Torvalds int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, 17371da177e4SLinus Torvalds unsigned long addr, int new_below) 17381da177e4SLinus Torvalds { 17391da177e4SLinus Torvalds struct mempolicy *pol; 17401da177e4SLinus Torvalds struct vm_area_struct *new; 17411da177e4SLinus Torvalds 17421da177e4SLinus Torvalds if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK)) 17431da177e4SLinus Torvalds return -EINVAL; 17441da177e4SLinus Torvalds 17451da177e4SLinus Torvalds if (mm->map_count >= sysctl_max_map_count) 17461da177e4SLinus Torvalds return -ENOMEM; 17471da177e4SLinus Torvalds 1748e94b1766SChristoph Lameter new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 17491da177e4SLinus Torvalds if (!new) 17501da177e4SLinus Torvalds return -ENOMEM; 17511da177e4SLinus Torvalds 17521da177e4SLinus Torvalds /* most fields are the same, copy all, and then fixup */ 17531da177e4SLinus Torvalds *new = *vma; 17541da177e4SLinus Torvalds 17551da177e4SLinus Torvalds if (new_below) 17561da177e4SLinus Torvalds new->vm_end = addr; 17571da177e4SLinus Torvalds else { 17581da177e4SLinus Torvalds new->vm_start = addr; 17591da177e4SLinus Torvalds new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); 17601da177e4SLinus Torvalds } 17611da177e4SLinus Torvalds 17621da177e4SLinus Torvalds pol = mpol_copy(vma_policy(vma)); 17631da177e4SLinus Torvalds if (IS_ERR(pol)) { 17641da177e4SLinus Torvalds kmem_cache_free(vm_area_cachep, new); 17651da177e4SLinus Torvalds return PTR_ERR(pol); 17661da177e4SLinus Torvalds } 17671da177e4SLinus Torvalds vma_set_policy(new, pol); 17681da177e4SLinus Torvalds 17691da177e4SLinus Torvalds if (new->vm_file) 17701da177e4SLinus Torvalds get_file(new->vm_file); 17711da177e4SLinus Torvalds 17721da177e4SLinus Torvalds if (new->vm_ops && new->vm_ops->open) 17731da177e4SLinus Torvalds new->vm_ops->open(new); 17741da177e4SLinus Torvalds 17751da177e4SLinus Torvalds if (new_below) 17761da177e4SLinus Torvalds vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + 17771da177e4SLinus Torvalds ((addr - new->vm_start) >> PAGE_SHIFT), new); 17781da177e4SLinus Torvalds else 17791da177e4SLinus Torvalds vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); 17801da177e4SLinus Torvalds 17811da177e4SLinus Torvalds return 0; 17821da177e4SLinus Torvalds } 17831da177e4SLinus Torvalds 17841da177e4SLinus Torvalds /* Munmap is split into 2 main parts -- this part which finds 17851da177e4SLinus Torvalds * what needs doing, and the areas themselves, which do the 17861da177e4SLinus Torvalds * work. This now handles partial unmappings. 
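 * (The flow: split any vma straddling an end of the range, detach the
 * affected vmas from the mm, unmap their pages, then free the
 * structures.)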
17871da177e4SLinus Torvalds * Jeremy Fitzhardinge <jeremy@goop.org> 17881da177e4SLinus Torvalds */ 17891da177e4SLinus Torvalds int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) 17901da177e4SLinus Torvalds { 17911da177e4SLinus Torvalds unsigned long end; 1792146425a3SHugh Dickins struct vm_area_struct *vma, *prev, *last; 17931da177e4SLinus Torvalds 17941da177e4SLinus Torvalds if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) 17951da177e4SLinus Torvalds return -EINVAL; 17961da177e4SLinus Torvalds 17971da177e4SLinus Torvalds if ((len = PAGE_ALIGN(len)) == 0) 17981da177e4SLinus Torvalds return -EINVAL; 17991da177e4SLinus Torvalds 18001da177e4SLinus Torvalds /* Find the first overlapping VMA */ 1801146425a3SHugh Dickins vma = find_vma_prev(mm, start, &prev); 1802146425a3SHugh Dickins if (!vma) 18031da177e4SLinus Torvalds return 0; 1804146425a3SHugh Dickins /* we have start < vma->vm_end */ 18051da177e4SLinus Torvalds 18061da177e4SLinus Torvalds /* if it doesn't overlap, we have nothing.. */ 18071da177e4SLinus Torvalds end = start + len; 1808146425a3SHugh Dickins if (vma->vm_start >= end) 18091da177e4SLinus Torvalds return 0; 18101da177e4SLinus Torvalds 18111da177e4SLinus Torvalds /* 18121da177e4SLinus Torvalds * If we need to split any vma, do it now to save pain later. 18131da177e4SLinus Torvalds * 18141da177e4SLinus Torvalds * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially 18151da177e4SLinus Torvalds * unmapped vm_area_struct will remain in use: so lower split_vma 18161da177e4SLinus Torvalds * places tmp vma above, and higher split_vma places tmp vma below. 18171da177e4SLinus Torvalds */ 1818146425a3SHugh Dickins if (start > vma->vm_start) { 1819146425a3SHugh Dickins int error = split_vma(mm, vma, start, 0); 18201da177e4SLinus Torvalds if (error) 18211da177e4SLinus Torvalds return error; 1822146425a3SHugh Dickins prev = vma; 18231da177e4SLinus Torvalds } 18241da177e4SLinus Torvalds 18251da177e4SLinus Torvalds /* Does it split the last one? */ 18261da177e4SLinus Torvalds last = find_vma(mm, end); 18271da177e4SLinus Torvalds if (last && end > last->vm_start) { 18281da177e4SLinus Torvalds int error = split_vma(mm, last, end, 1); 18291da177e4SLinus Torvalds if (error) 18301da177e4SLinus Torvalds return error; 18311da177e4SLinus Torvalds } 1832146425a3SHugh Dickins vma = prev? 
prev->vm_next: mm->mmap; 18331da177e4SLinus Torvalds 18341da177e4SLinus Torvalds /* 18351da177e4SLinus Torvalds * Remove the vma's, and unmap the actual pages 18361da177e4SLinus Torvalds */ 1837146425a3SHugh Dickins detach_vmas_to_be_unmapped(mm, vma, prev, end); 1838146425a3SHugh Dickins unmap_region(mm, vma, prev, start, end); 18391da177e4SLinus Torvalds 18401da177e4SLinus Torvalds /* Fix up all other VM information */ 18412c0b3814SHugh Dickins remove_vma_list(mm, vma); 18421da177e4SLinus Torvalds 18431da177e4SLinus Torvalds return 0; 18441da177e4SLinus Torvalds } 18451da177e4SLinus Torvalds 18461da177e4SLinus Torvalds EXPORT_SYMBOL(do_munmap); 18471da177e4SLinus Torvalds 18481da177e4SLinus Torvalds asmlinkage long sys_munmap(unsigned long addr, size_t len) 18491da177e4SLinus Torvalds { 18501da177e4SLinus Torvalds int ret; 18511da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 18521da177e4SLinus Torvalds 18531da177e4SLinus Torvalds profile_munmap(addr); 18541da177e4SLinus Torvalds 18551da177e4SLinus Torvalds down_write(&mm->mmap_sem); 18561da177e4SLinus Torvalds ret = do_munmap(mm, addr, len); 18571da177e4SLinus Torvalds up_write(&mm->mmap_sem); 18581da177e4SLinus Torvalds return ret; 18591da177e4SLinus Torvalds } 18601da177e4SLinus Torvalds 18611da177e4SLinus Torvalds static inline void verify_mm_writelocked(struct mm_struct *mm) 18621da177e4SLinus Torvalds { 1863a241ec65SPaul E. McKenney #ifdef CONFIG_DEBUG_VM 18641da177e4SLinus Torvalds if (unlikely(down_read_trylock(&mm->mmap_sem))) { 18651da177e4SLinus Torvalds WARN_ON(1); 18661da177e4SLinus Torvalds up_read(&mm->mmap_sem); 18671da177e4SLinus Torvalds } 18681da177e4SLinus Torvalds #endif 18691da177e4SLinus Torvalds } 18701da177e4SLinus Torvalds 18711da177e4SLinus Torvalds /* 18721da177e4SLinus Torvalds * This is really a simplified "do_mmap". It only handles 18731da177e4SLinus Torvalds * anonymous maps. Eventually we may be able to do some 18741da177e4SLinus Torvalds * brk-specific accounting here. 18751da177e4SLinus Torvalds */ 18761da177e4SLinus Torvalds unsigned long do_brk(unsigned long addr, unsigned long len) 18771da177e4SLinus Torvalds { 18781da177e4SLinus Torvalds struct mm_struct * mm = current->mm; 18791da177e4SLinus Torvalds struct vm_area_struct * vma, * prev; 18801da177e4SLinus Torvalds unsigned long flags; 18811da177e4SLinus Torvalds struct rb_node ** rb_link, * rb_parent; 18821da177e4SLinus Torvalds pgoff_t pgoff = addr >> PAGE_SHIFT; 18833a459756SKirill Korotaev int error; 18841da177e4SLinus Torvalds 18851da177e4SLinus Torvalds len = PAGE_ALIGN(len); 18861da177e4SLinus Torvalds if (!len) 18871da177e4SLinus Torvalds return addr; 18881da177e4SLinus Torvalds 18891da177e4SLinus Torvalds if ((addr + len) > TASK_SIZE || (addr + len) < addr) 18901da177e4SLinus Torvalds return -EINVAL; 18911da177e4SLinus Torvalds 1892cd2579d7SHugh Dickins if (is_hugepage_only_range(mm, addr, len)) 1893cd2579d7SHugh Dickins return -EINVAL; 1894cb07c9a1SDavid Gibson 18953a459756SKirill Korotaev flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 18963a459756SKirill Korotaev 18973a459756SKirill Korotaev error = arch_mmap_check(addr, len, flags); 18983a459756SKirill Korotaev if (error) 18993a459756SKirill Korotaev return error; 19003a459756SKirill Korotaev 19011da177e4SLinus Torvalds /* 19021da177e4SLinus Torvalds * mlock MCL_FUTURE? 
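 * (mlockall(MCL_FUTURE) leaves VM_LOCKED set in mm->def_flags, so the
 * pages added by brk count against RLIMIT_MEMLOCK unless the caller
 * has CAP_IPC_LOCK.)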
19031da177e4SLinus Torvalds */ 19041da177e4SLinus Torvalds if (mm->def_flags & VM_LOCKED) { 19051da177e4SLinus Torvalds unsigned long locked, lock_limit; 190693ea1d0aSChris Wright locked = len >> PAGE_SHIFT; 190693ea1d0aSChris Wright locked += mm->locked_vm; 19081da177e4SLinus Torvalds lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; 190993ea1d0aSChris Wright lock_limit >>= PAGE_SHIFT; 19101da177e4SLinus Torvalds if (locked > lock_limit && !capable(CAP_IPC_LOCK)) 19111da177e4SLinus Torvalds return -EAGAIN; 19121da177e4SLinus Torvalds } 19131da177e4SLinus Torvalds 19141da177e4SLinus Torvalds /* 19151da177e4SLinus Torvalds * mm->mmap_sem is required to protect against another thread 19161da177e4SLinus Torvalds * changing the mappings in case we sleep. 19171da177e4SLinus Torvalds */ 19181da177e4SLinus Torvalds verify_mm_writelocked(mm); 19191da177e4SLinus Torvalds 19201da177e4SLinus Torvalds /* 19211da177e4SLinus Torvalds * Clear old maps. This also does some error checking for us 19221da177e4SLinus Torvalds */ 19231da177e4SLinus Torvalds munmap_back: 19241da177e4SLinus Torvalds vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); 19251da177e4SLinus Torvalds if (vma && vma->vm_start < addr + len) { 19261da177e4SLinus Torvalds if (do_munmap(mm, addr, len)) 19271da177e4SLinus Torvalds return -ENOMEM; 19281da177e4SLinus Torvalds goto munmap_back; 19291da177e4SLinus Torvalds } 19301da177e4SLinus Torvalds 19311da177e4SLinus Torvalds /* Check against address space limits *after* clearing old maps... */ 1932119f657cSakpm@osdl.org if (!may_expand_vm(mm, len >> PAGE_SHIFT)) 19331da177e4SLinus Torvalds return -ENOMEM; 19341da177e4SLinus Torvalds 19351da177e4SLinus Torvalds if (mm->map_count > sysctl_max_map_count) 19361da177e4SLinus Torvalds return -ENOMEM; 19371da177e4SLinus Torvalds 19381da177e4SLinus Torvalds if (security_vm_enough_memory(len >> PAGE_SHIFT)) 19391da177e4SLinus Torvalds return -ENOMEM; 19401da177e4SLinus Torvalds 19411da177e4SLinus Torvalds /* Can we just expand an old private anonymous mapping? 
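 * (vma_merge() is called with no file and no anon_vma: if the previous
 * vma is anonymous, abuts addr and has compatible flags, it is simply
 * extended and no new vma needs to be allocated.)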
*/ 19421da177e4SLinus Torvalds if (vma_merge(mm, prev, addr, addr + len, flags, 19431da177e4SLinus Torvalds NULL, NULL, pgoff, NULL)) 19441da177e4SLinus Torvalds goto out; 19451da177e4SLinus Torvalds 19461da177e4SLinus Torvalds /* 19471da177e4SLinus Torvalds * create a vma struct for an anonymous mapping 19481da177e4SLinus Torvalds */ 1949c5e3b83eSPekka Enberg vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 19501da177e4SLinus Torvalds if (!vma) { 19511da177e4SLinus Torvalds vm_unacct_memory(len >> PAGE_SHIFT); 19521da177e4SLinus Torvalds return -ENOMEM; 19531da177e4SLinus Torvalds } 19541da177e4SLinus Torvalds 19551da177e4SLinus Torvalds vma->vm_mm = mm; 19561da177e4SLinus Torvalds vma->vm_start = addr; 19571da177e4SLinus Torvalds vma->vm_end = addr + len; 19581da177e4SLinus Torvalds vma->vm_pgoff = pgoff; 19591da177e4SLinus Torvalds vma->vm_flags = flags; 19609637a5efSDavid Howells vma->vm_page_prot = protection_map[flags & 19619637a5efSDavid Howells (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; 19621da177e4SLinus Torvalds vma_link(mm, vma, prev, rb_link, rb_parent); 19631da177e4SLinus Torvalds out: 19641da177e4SLinus Torvalds mm->total_vm += len >> PAGE_SHIFT; 19651da177e4SLinus Torvalds if (flags & VM_LOCKED) { 19661da177e4SLinus Torvalds mm->locked_vm += len >> PAGE_SHIFT; 19671da177e4SLinus Torvalds make_pages_present(addr, addr + len); 19681da177e4SLinus Torvalds } 19691da177e4SLinus Torvalds return addr; 19701da177e4SLinus Torvalds } 19711da177e4SLinus Torvalds 19721da177e4SLinus Torvalds EXPORT_SYMBOL(do_brk); 19731da177e4SLinus Torvalds 19741da177e4SLinus Torvalds /* Release all mmaps. */ 19751da177e4SLinus Torvalds void exit_mmap(struct mm_struct *mm) 19761da177e4SLinus Torvalds { 19771da177e4SLinus Torvalds struct mmu_gather *tlb; 1978e0da382cSHugh Dickins struct vm_area_struct *vma = mm->mmap; 19791da177e4SLinus Torvalds unsigned long nr_accounted = 0; 1980ee39b37bSHugh Dickins unsigned long end; 19811da177e4SLinus Torvalds 19821da177e4SLinus Torvalds lru_add_drain(); 19831da177e4SLinus Torvalds flush_cache_mm(mm); 1984e0da382cSHugh Dickins tlb = tlb_gather_mmu(mm, 1); 1985365e9c87SHugh Dickins /* Don't update_hiwater_rss(mm) here, do_exit already did */ 1986e0da382cSHugh Dickins /* Use -1 here to ensure all VMAs in the mm are unmapped */ 1987508034a3SHugh Dickins end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); 19881da177e4SLinus Torvalds vm_unacct_memory(nr_accounted); 1989e2cdef8cSHugh Dickins free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); 1990ee39b37bSHugh Dickins tlb_finish_mmu(tlb, 0, end); 19911da177e4SLinus Torvalds 19921da177e4SLinus Torvalds /* 19938f4f8c16SHugh Dickins * Walk the list again, actually closing and freeing it, 19948f4f8c16SHugh Dickins * with preemption enabled, without holding any MM locks. 19951da177e4SLinus Torvalds */ 1996a8fb5618SHugh Dickins while (vma) 1997a8fb5618SHugh Dickins vma = remove_vma(vma); 1998e0da382cSHugh Dickins 1999e2cdef8cSHugh Dickins BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); 20001da177e4SLinus Torvalds } 20011da177e4SLinus Torvalds 20021da177e4SLinus Torvalds /* Insert vm structure into process list sorted by address 20031da177e4SLinus Torvalds * and into the inode's i_mmap tree. If vm_file is non-NULL 20041da177e4SLinus Torvalds * then i_mmap_lock is taken here. 
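 * (vma_link() takes the mapping's i_mmap_lock while inserting the vma
 * into the file's i_mmap tree.)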
20051da177e4SLinus Torvalds */ 20061da177e4SLinus Torvalds int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) 20071da177e4SLinus Torvalds { 20081da177e4SLinus Torvalds struct vm_area_struct * __vma, * prev; 20091da177e4SLinus Torvalds struct rb_node ** rb_link, * rb_parent; 20101da177e4SLinus Torvalds 20111da177e4SLinus Torvalds /* 20121da177e4SLinus Torvalds * The vm_pgoff of a purely anonymous vma should be irrelevant 20131da177e4SLinus Torvalds * until its first write fault, when the page's anon_vma and index 20141da177e4SLinus Torvalds * are set. But now set the vm_pgoff it will almost certainly 20151da177e4SLinus Torvalds * end up with (unless mremap moves it elsewhere before that 20161da177e4SLinus Torvalds * first write fault), so /proc/pid/maps tells a consistent story. 20171da177e4SLinus Torvalds * 20181da177e4SLinus Torvalds * By setting it to reflect the virtual start address of the 20191da177e4SLinus Torvalds * vma, merges and splits can happen in a seamless way, just 20201da177e4SLinus Torvalds * using the existing file pgoff checks and manipulations. 20211da177e4SLinus Torvalds * Similarly in do_mmap_pgoff and in do_brk. 20221da177e4SLinus Torvalds */ 20231da177e4SLinus Torvalds if (!vma->vm_file) { 20241da177e4SLinus Torvalds BUG_ON(vma->anon_vma); 20251da177e4SLinus Torvalds vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; 20261da177e4SLinus Torvalds } 20271da177e4SLinus Torvalds __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent); 20281da177e4SLinus Torvalds if (__vma && __vma->vm_start < vma->vm_end) 20291da177e4SLinus Torvalds return -ENOMEM; 20302fd4ef85SHugh Dickins if ((vma->vm_flags & VM_ACCOUNT) && 20312fd4ef85SHugh Dickins security_vm_enough_memory(vma_pages(vma))) 20322fd4ef85SHugh Dickins return -ENOMEM; 20331da177e4SLinus Torvalds vma_link(mm, vma, prev, rb_link, rb_parent); 20341da177e4SLinus Torvalds return 0; 20351da177e4SLinus Torvalds } 20361da177e4SLinus Torvalds 20371da177e4SLinus Torvalds /* 20381da177e4SLinus Torvalds * Copy the vma structure to a new location in the same mm, 20391da177e4SLinus Torvalds * prior to moving page table entries, to effect an mremap move. 20401da177e4SLinus Torvalds */ 20411da177e4SLinus Torvalds struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 20421da177e4SLinus Torvalds unsigned long addr, unsigned long len, pgoff_t pgoff) 20431da177e4SLinus Torvalds { 20441da177e4SLinus Torvalds struct vm_area_struct *vma = *vmap; 20451da177e4SLinus Torvalds unsigned long vma_start = vma->vm_start; 20461da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 20471da177e4SLinus Torvalds struct vm_area_struct *new_vma, *prev; 20481da177e4SLinus Torvalds struct rb_node **rb_link, *rb_parent; 20491da177e4SLinus Torvalds struct mempolicy *pol; 20501da177e4SLinus Torvalds 20511da177e4SLinus Torvalds /* 20521da177e4SLinus Torvalds * If the anonymous vma has not yet been faulted, update new pgoff 20531da177e4SLinus Torvalds * to match new location, to increase its chance of merging. 
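 * (This is only possible while the vma has neither a file nor an
 * anon_vma; once pages have been faulted in, vm_pgoff is fixed.)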
20541da177e4SLinus Torvalds */ 20551da177e4SLinus Torvalds if (!vma->vm_file && !vma->anon_vma) 20561da177e4SLinus Torvalds pgoff = addr >> PAGE_SHIFT; 20571da177e4SLinus Torvalds 20581da177e4SLinus Torvalds find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); 20591da177e4SLinus Torvalds new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, 20601da177e4SLinus Torvalds vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); 20611da177e4SLinus Torvalds if (new_vma) { 20621da177e4SLinus Torvalds /* 20631da177e4SLinus Torvalds * Source vma may have been merged into new_vma 20641da177e4SLinus Torvalds */ 20651da177e4SLinus Torvalds if (vma_start >= new_vma->vm_start && 20661da177e4SLinus Torvalds vma_start < new_vma->vm_end) 20671da177e4SLinus Torvalds *vmap = new_vma; 20681da177e4SLinus Torvalds } else { 2069e94b1766SChristoph Lameter new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 20701da177e4SLinus Torvalds if (new_vma) { 20711da177e4SLinus Torvalds *new_vma = *vma; 20721da177e4SLinus Torvalds pol = mpol_copy(vma_policy(vma)); 20731da177e4SLinus Torvalds if (IS_ERR(pol)) { 20741da177e4SLinus Torvalds kmem_cache_free(vm_area_cachep, new_vma); 20751da177e4SLinus Torvalds return NULL; 20761da177e4SLinus Torvalds } 20771da177e4SLinus Torvalds vma_set_policy(new_vma, pol); 20781da177e4SLinus Torvalds new_vma->vm_start = addr; 20791da177e4SLinus Torvalds new_vma->vm_end = addr + len; 20801da177e4SLinus Torvalds new_vma->vm_pgoff = pgoff; 20811da177e4SLinus Torvalds if (new_vma->vm_file) 20821da177e4SLinus Torvalds get_file(new_vma->vm_file); 20831da177e4SLinus Torvalds if (new_vma->vm_ops && new_vma->vm_ops->open) 20841da177e4SLinus Torvalds new_vma->vm_ops->open(new_vma); 20851da177e4SLinus Torvalds vma_link(mm, new_vma, prev, rb_link, rb_parent); 20861da177e4SLinus Torvalds } 20871da177e4SLinus Torvalds } 20881da177e4SLinus Torvalds return new_vma; 20891da177e4SLinus Torvalds } 2090119f657cSakpm@osdl.org 2091119f657cSakpm@osdl.org /* 2092119f657cSakpm@osdl.org * Return true if the calling process may expand its vm space by the passed 2093119f657cSakpm@osdl.org * number of pages 2094119f657cSakpm@osdl.org */ 2095119f657cSakpm@osdl.org int may_expand_vm(struct mm_struct *mm, unsigned long npages) 2096119f657cSakpm@osdl.org { 2097119f657cSakpm@osdl.org unsigned long cur = mm->total_vm; /* pages */ 2098119f657cSakpm@osdl.org unsigned long lim; 2099119f657cSakpm@osdl.org 2100119f657cSakpm@osdl.org lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; 2101119f657cSakpm@osdl.org 2102119f657cSakpm@osdl.org if (cur + npages > lim) 2103119f657cSakpm@osdl.org return 0; 2104119f657cSakpm@osdl.org return 1; 2105119f657cSakpm@osdl.org } 2106fa5dc22fSRoland McGrath 2107fa5dc22fSRoland McGrath 2108fa5dc22fSRoland McGrath static struct page *special_mapping_nopage(struct vm_area_struct *vma, 2109fa5dc22fSRoland McGrath unsigned long address, int *type) 2110fa5dc22fSRoland McGrath { 2111fa5dc22fSRoland McGrath struct page **pages; 2112fa5dc22fSRoland McGrath 2113fa5dc22fSRoland McGrath BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2114fa5dc22fSRoland McGrath 2115fa5dc22fSRoland McGrath address -= vma->vm_start; 2116fa5dc22fSRoland McGrath for (pages = vma->vm_private_data; address > 0 && *pages; ++pages) 2117fa5dc22fSRoland McGrath address -= PAGE_SIZE; 2118fa5dc22fSRoland McGrath 2119fa5dc22fSRoland McGrath if (*pages) { 2120fa5dc22fSRoland McGrath struct page *page = *pages; 2121fa5dc22fSRoland McGrath get_page(page); 2122fa5dc22fSRoland McGrath return page; 
2123fa5dc22fSRoland McGrath } 2124fa5dc22fSRoland McGrath 2125fa5dc22fSRoland McGrath return NOPAGE_SIGBUS; 2126fa5dc22fSRoland McGrath } 2127fa5dc22fSRoland McGrath 2128fa5dc22fSRoland McGrath /* 2129fa5dc22fSRoland McGrath * Having a close hook prevents vma merging regardless of flags. 2130fa5dc22fSRoland McGrath */ 2131fa5dc22fSRoland McGrath static void special_mapping_close(struct vm_area_struct *vma) 2132fa5dc22fSRoland McGrath { 2133fa5dc22fSRoland McGrath } 2134fa5dc22fSRoland McGrath 2135fa5dc22fSRoland McGrath static struct vm_operations_struct special_mapping_vmops = { 2136fa5dc22fSRoland McGrath .close = special_mapping_close, 2137fa5dc22fSRoland McGrath .nopage = special_mapping_nopage, 2138fa5dc22fSRoland McGrath }; 2139fa5dc22fSRoland McGrath 2140fa5dc22fSRoland McGrath /* 2141fa5dc22fSRoland McGrath * Called with mm->mmap_sem held for writing. 2142fa5dc22fSRoland McGrath * Insert a new vma covering the given region, with the given flags. 2143fa5dc22fSRoland McGrath * Its pages are supplied by the given array of struct page *. 2144fa5dc22fSRoland McGrath * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. 2145fa5dc22fSRoland McGrath * The region past the last page supplied will always produce SIGBUS. 2146fa5dc22fSRoland McGrath * The array pointer and the pages it points to are assumed to stay alive 2147fa5dc22fSRoland McGrath * for as long as this mapping might exist. 2148fa5dc22fSRoland McGrath */ 2149fa5dc22fSRoland McGrath int install_special_mapping(struct mm_struct *mm, 2150fa5dc22fSRoland McGrath unsigned long addr, unsigned long len, 2151fa5dc22fSRoland McGrath unsigned long vm_flags, struct page **pages) 2152fa5dc22fSRoland McGrath { 2153fa5dc22fSRoland McGrath struct vm_area_struct *vma; 2154fa5dc22fSRoland McGrath 2155fa5dc22fSRoland McGrath vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 2156fa5dc22fSRoland McGrath if (unlikely(vma == NULL)) 2157fa5dc22fSRoland McGrath return -ENOMEM; 2158fa5dc22fSRoland McGrath 2159fa5dc22fSRoland McGrath vma->vm_mm = mm; 2160fa5dc22fSRoland McGrath vma->vm_start = addr; 2161fa5dc22fSRoland McGrath vma->vm_end = addr + len; 2162fa5dc22fSRoland McGrath 2163fa5dc22fSRoland McGrath vma->vm_flags = vm_flags | mm->def_flags; 2164fa5dc22fSRoland McGrath vma->vm_page_prot = protection_map[vma->vm_flags & 7]; 2165fa5dc22fSRoland McGrath 2166fa5dc22fSRoland McGrath vma->vm_ops = &special_mapping_vmops; 2167fa5dc22fSRoland McGrath vma->vm_private_data = pages; 2168fa5dc22fSRoland McGrath 2169fa5dc22fSRoland McGrath if (unlikely(insert_vm_struct(mm, vma))) { 2170fa5dc22fSRoland McGrath kmem_cache_free(vm_area_cachep, vma); 2171fa5dc22fSRoland McGrath return -ENOMEM; 2172fa5dc22fSRoland McGrath } 2173fa5dc22fSRoland McGrath 2174fa5dc22fSRoland McGrath mm->total_vm += len >> PAGE_SHIFT; 2175fa5dc22fSRoland McGrath 2176fa5dc22fSRoland McGrath return 0; 2177fa5dc22fSRoland McGrath }
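
/*
 * Illustrative sketch, not part of this file: arch code typically uses
 * install_special_mapping() for something like a vDSO page.  The names
 * 'vdso_addr' and 'vdso_pages' below are hypothetical and the flags are
 * just one plausible choice; per the comment above, mmap_sem must be
 * held for writing and the page array must outlive the mapping:
 *
 *	down_write(&mm->mmap_sem);
 *	err = install_special_mapping(mm, vdso_addr, PAGE_SIZE,
 *				      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
 *				      vdso_pages);
 *	up_write(&mm->mmap_sem);
 */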