xref: /linux/mm/mmap.c (revision 592b5fad1677aa98a578ae50eb81d7383752c9c8)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * mm/mmap.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Written by obz.
61da177e4SLinus Torvalds  *
7046c6884SAlan Cox  * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
81da177e4SLinus Torvalds  */
91da177e4SLinus Torvalds 
10b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11b1de0d13SMitchel Humpherys 
12e8420a8eSCyril Hrubis #include <linux/kernel.h>
131da177e4SLinus Torvalds #include <linux/slab.h>
144af3c9ccSAlexey Dobriyan #include <linux/backing-dev.h>
151da177e4SLinus Torvalds #include <linux/mm.h>
1617fca131SArnd Bergmann #include <linux/mm_inline.h>
171da177e4SLinus Torvalds #include <linux/shm.h>
181da177e4SLinus Torvalds #include <linux/mman.h>
191da177e4SLinus Torvalds #include <linux/pagemap.h>
201da177e4SLinus Torvalds #include <linux/swap.h>
211da177e4SLinus Torvalds #include <linux/syscalls.h>
22c59ede7bSRandy.Dunlap #include <linux/capability.h>
231da177e4SLinus Torvalds #include <linux/init.h>
241da177e4SLinus Torvalds #include <linux/file.h>
251da177e4SLinus Torvalds #include <linux/fs.h>
261da177e4SLinus Torvalds #include <linux/personality.h>
271da177e4SLinus Torvalds #include <linux/security.h>
281da177e4SLinus Torvalds #include <linux/hugetlb.h>
29c01d5b30SHugh Dickins #include <linux/shmem_fs.h>
301da177e4SLinus Torvalds #include <linux/profile.h>
31b95f1b31SPaul Gortmaker #include <linux/export.h>
321da177e4SLinus Torvalds #include <linux/mount.h>
331da177e4SLinus Torvalds #include <linux/mempolicy.h>
341da177e4SLinus Torvalds #include <linux/rmap.h>
35cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
3682f71ae4SKonstantin Khlebnikov #include <linux/mmdebug.h>
37cdd6c482SIngo Molnar #include <linux/perf_event.h>
38120a795dSAl Viro #include <linux/audit.h>
39b15d00b6SAndrea Arcangeli #include <linux/khugepaged.h>
402b144498SSrikar Dronamraju #include <linux/uprobes.h>
411640879aSAndrew Shewmaker #include <linux/notifier.h>
421640879aSAndrew Shewmaker #include <linux/memory.h>
43b1de0d13SMitchel Humpherys #include <linux/printk.h>
4419a809afSAndrea Arcangeli #include <linux/userfaultfd_k.h>
45d977d56cSKonstantin Khlebnikov #include <linux/moduleparam.h>
4662b5f7d0SDave Hansen #include <linux/pkeys.h>
4721292580SAndrea Arcangeli #include <linux/oom.h>
4804f5866eSAndrea Arcangeli #include <linux/sched/mm.h>
49d7597f59SStefan Roesch #include <linux/ksm.h>
501da177e4SLinus Torvalds 
517c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
521da177e4SLinus Torvalds #include <asm/cacheflush.h>
531da177e4SLinus Torvalds #include <asm/tlb.h>
54d6dd61c8SJeremy Fitzhardinge #include <asm/mmu_context.h>
551da177e4SLinus Torvalds 
56df529cabSJaewon Kim #define CREATE_TRACE_POINTS
57df529cabSJaewon Kim #include <trace/events/mmap.h>
58df529cabSJaewon Kim 
5942b77728SJan Beulich #include "internal.h"
6042b77728SJan Beulich 
613a459756SKirill Korotaev #ifndef arch_mmap_check
623a459756SKirill Korotaev #define arch_mmap_check(addr, len, flags)	(0)
633a459756SKirill Korotaev #endif
643a459756SKirill Korotaev 
65d07e2259SDaniel Cashman #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
66d07e2259SDaniel Cashman const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
67d07e2259SDaniel Cashman const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
68d07e2259SDaniel Cashman int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
69d07e2259SDaniel Cashman #endif
70d07e2259SDaniel Cashman #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
71d07e2259SDaniel Cashman const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
72d07e2259SDaniel Cashman const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
73d07e2259SDaniel Cashman int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
74d07e2259SDaniel Cashman #endif
75d07e2259SDaniel Cashman 
76f4fcd558SKonstantin Khlebnikov static bool ignore_rlimit_data;
77d977d56cSKonstantin Khlebnikov core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
78d07e2259SDaniel Cashman 
79763ecb03SLiam R. Howlett static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
80e0da382cSHugh Dickins 		struct vm_area_struct *vma, struct vm_area_struct *prev,
81763ecb03SLiam R. Howlett 		struct vm_area_struct *next, unsigned long start,
8268f48381SSuren Baghdasaryan 		unsigned long end, bool mm_wr_locked);
83e0da382cSHugh Dickins 
8464e45507SPeter Feiner static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
8564e45507SPeter Feiner {
8664e45507SPeter Feiner 	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
8764e45507SPeter Feiner }
8864e45507SPeter Feiner 
8964e45507SPeter Feiner /* Update vma->vm_page_prot to reflect vma->vm_flags. */
9064e45507SPeter Feiner void vma_set_page_prot(struct vm_area_struct *vma)
9164e45507SPeter Feiner {
9264e45507SPeter Feiner 	unsigned long vm_flags = vma->vm_flags;
936d2329f8SAndrea Arcangeli 	pgprot_t vm_page_prot;
9464e45507SPeter Feiner 
956d2329f8SAndrea Arcangeli 	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
966d2329f8SAndrea Arcangeli 	if (vma_wants_writenotify(vma, vm_page_prot)) {
9764e45507SPeter Feiner 		vm_flags &= ~VM_SHARED;
986d2329f8SAndrea Arcangeli 		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
9964e45507SPeter Feiner 	}
100c1e8d7c6SMichel Lespinasse 	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
1016d2329f8SAndrea Arcangeli 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
10264e45507SPeter Feiner }
10364e45507SPeter Feiner 
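/*
 * Editor's note (illustrative, not part of the kernel source): for a
 * MAP_SHARED, PROT_READ|PROT_WRITE file mapping whose backing store wants
 * write notification, vma_wants_writenotify() is true, so the helper
 * above computes vm_page_prot with VM_SHARED cleared. The page tables are
 * therefore write-protected even though the VMA itself remains shared;
 * the first write to a page faults, letting the kernel see the write
 * before making the page-table entry writable.
 */
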
1041da177e4SLinus Torvalds /*
105c8c06efaSDavidlohr Bueso  * Requires inode->i_mapping->i_mmap_rwsem
1061da177e4SLinus Torvalds  */
1071da177e4SLinus Torvalds static void __remove_shared_vm_struct(struct vm_area_struct *vma,
1081da177e4SLinus Torvalds 		struct file *file, struct address_space *mapping)
1091da177e4SLinus Torvalds {
1101da177e4SLinus Torvalds 	if (vma->vm_flags & VM_SHARED)
1114bb5f5d9SDavid Herrmann 		mapping_unmap_writable(mapping);
1121da177e4SLinus Torvalds 
1131da177e4SLinus Torvalds 	flush_dcache_mmap_lock(mapping);
1146b2dbba8SMichel Lespinasse 	vma_interval_tree_remove(vma, &mapping->i_mmap);
1151da177e4SLinus Torvalds 	flush_dcache_mmap_unlock(mapping);
1161da177e4SLinus Torvalds }
1171da177e4SLinus Torvalds 
1181da177e4SLinus Torvalds /*
1196b2dbba8SMichel Lespinasse  * Unlink a file-based vm structure from its interval tree, to hide
120a8fb5618SHugh Dickins  * vma from rmap and vmtruncate before freeing its page tables.
1211da177e4SLinus Torvalds  */
122a8fb5618SHugh Dickins void unlink_file_vma(struct vm_area_struct *vma)
1231da177e4SLinus Torvalds {
1241da177e4SLinus Torvalds 	struct file *file = vma->vm_file;
1251da177e4SLinus Torvalds 
1261da177e4SLinus Torvalds 	if (file) {
1271da177e4SLinus Torvalds 		struct address_space *mapping = file->f_mapping;
12883cde9e8SDavidlohr Bueso 		i_mmap_lock_write(mapping);
1291da177e4SLinus Torvalds 		__remove_shared_vm_struct(vma, file, mapping);
13083cde9e8SDavidlohr Bueso 		i_mmap_unlock_write(mapping);
1311da177e4SLinus Torvalds 	}
132a8fb5618SHugh Dickins }
133a8fb5618SHugh Dickins 
134a8fb5618SHugh Dickins /*
135763ecb03SLiam R. Howlett  * Close a vm structure and free it.
136a8fb5618SHugh Dickins  */
1370d2ebf9cSSuren Baghdasaryan static void remove_vma(struct vm_area_struct *vma, bool unreachable)
138a8fb5618SHugh Dickins {
139a8fb5618SHugh Dickins 	might_sleep();
1401da177e4SLinus Torvalds 	if (vma->vm_ops && vma->vm_ops->close)
1411da177e4SLinus Torvalds 		vma->vm_ops->close(vma);
142e9714acfSKonstantin Khlebnikov 	if (vma->vm_file)
143a8fb5618SHugh Dickins 		fput(vma->vm_file);
144f0be3d32SLee Schermerhorn 	mpol_put(vma_policy(vma));
1450d2ebf9cSSuren Baghdasaryan 	if (unreachable)
1460d2ebf9cSSuren Baghdasaryan 		__vm_area_free(vma);
1470d2ebf9cSSuren Baghdasaryan 	else
1483928d4f5SLinus Torvalds 		vm_area_free(vma);
1491da177e4SLinus Torvalds }
1501da177e4SLinus Torvalds 
151b62b633eSLiam R. Howlett static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
152b62b633eSLiam R. Howlett 						    unsigned long min)
153b62b633eSLiam R. Howlett {
154b62b633eSLiam R. Howlett 	return mas_prev(&vmi->mas, min);
155b62b633eSLiam R. Howlett }
156b62b633eSLiam R. Howlett 
157b62b633eSLiam R. Howlett static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
158b62b633eSLiam R. Howlett 			unsigned long start, unsigned long end, gfp_t gfp)
159b62b633eSLiam R. Howlett {
160b62b633eSLiam R. Howlett 	vmi->mas.index = start;
161b62b633eSLiam R. Howlett 	vmi->mas.last = end - 1;
162b62b633eSLiam R. Howlett 	mas_store_gfp(&vmi->mas, NULL, gfp);
163b62b633eSLiam R. Howlett 	if (unlikely(mas_is_err(&vmi->mas)))
164b62b633eSLiam R. Howlett 		return -ENOMEM;
165b62b633eSLiam R. Howlett 
166b62b633eSLiam R. Howlett 	return 0;
167b62b633eSLiam R. Howlett }
168b62b633eSLiam R. Howlett 
1692e7ce7d3SLiam R. Howlett /*
1702e7ce7d3SLiam R. Howlett  * check_brk_limits() - Use the platform-specific range check and verify
1712e7ce7d3SLiam R. Howlett  * mlock limits.
1722e7ce7d3SLiam R. Howlett  * @addr: The address to check
1732e7ce7d3SLiam R. Howlett  * @len: The size of the increase.
1742e7ce7d3SLiam R. Howlett  *
1752e7ce7d3SLiam R. Howlett  * Return: 0 on success.
1762e7ce7d3SLiam R. Howlett  */
1772e7ce7d3SLiam R. Howlett static int check_brk_limits(unsigned long addr, unsigned long len)
1782e7ce7d3SLiam R. Howlett {
1792e7ce7d3SLiam R. Howlett 	unsigned long mapped_addr;
1802e7ce7d3SLiam R. Howlett 
1812e7ce7d3SLiam R. Howlett 	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
1822e7ce7d3SLiam R. Howlett 	if (IS_ERR_VALUE(mapped_addr))
1832e7ce7d3SLiam R. Howlett 		return mapped_addr;
1842e7ce7d3SLiam R. Howlett 
185b0cc5e89SAndrew Morton 	return mlock_future_ok(current->mm, current->mm->def_flags, len)
1863c54a298SLorenzo Stoakes 		? 0 : -EAGAIN;
1872e7ce7d3SLiam R. Howlett }
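/*
 * Editor's note (illustrative): the get_unmapped_area(NULL, addr, len, 0,
 * MAP_FIXED) call above creates no mapping; with MAP_FIXED it only runs
 * the generic and architecture-specific validity checks (arch_mmap_check,
 * TASK_SIZE limits, page alignment) on the exact [addr, addr + len)
 * range, returning addr on success or an error value otherwise.
 */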
18892fed820SLiam R. Howlett static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
189763ecb03SLiam R. Howlett 		unsigned long addr, unsigned long request, unsigned long flags);
1906a6160a7SHeiko Carstens SYSCALL_DEFINE1(brk, unsigned long, brk)
1911da177e4SLinus Torvalds {
1929bc8039eSYang Shi 	unsigned long newbrk, oldbrk, origbrk;
1931da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
1942e7ce7d3SLiam R. Howlett 	struct vm_area_struct *brkvma, *next = NULL;
195a5b4592cSJiri Kosina 	unsigned long min_brk;
196408579cdSLiam R. Howlett 	bool populate = false;
197897ab3e0SMike Rapoport 	LIST_HEAD(uf);
19892fed820SLiam R. Howlett 	struct vma_iterator vmi;
1991da177e4SLinus Torvalds 
200d8ed45c5SMichel Lespinasse 	if (mmap_write_lock_killable(mm))
201dc0ef0dfSMichal Hocko 		return -EINTR;
2021da177e4SLinus Torvalds 
2039bc8039eSYang Shi 	origbrk = mm->brk;
2049bc8039eSYang Shi 
205a5b4592cSJiri Kosina #ifdef CONFIG_COMPAT_BRK
2065520e894SJiri Kosina 	/*
2075520e894SJiri Kosina 	 * CONFIG_COMPAT_BRK can still be overridden by setting
2085520e894SJiri Kosina 	 * randomize_va_space to 2, which will still cause mm->start_brk
2095520e894SJiri Kosina 	 * to be arbitrarily shifted
2105520e894SJiri Kosina 	 */
2114471a675SJiri Kosina 	if (current->brk_randomized)
2125520e894SJiri Kosina 		min_brk = mm->start_brk;
2135520e894SJiri Kosina 	else
2145520e894SJiri Kosina 		min_brk = mm->end_data;
215a5b4592cSJiri Kosina #else
216a5b4592cSJiri Kosina 	min_brk = mm->start_brk;
217a5b4592cSJiri Kosina #endif
218a5b4592cSJiri Kosina 	if (brk < min_brk)
2191da177e4SLinus Torvalds 		goto out;
2201e624196SRam Gupta 
2211e624196SRam Gupta 	/*
2221e624196SRam Gupta 	 * Check against rlimit here. If this check is done later after the test
2231e624196SRam Gupta 	 * of oldbrk with newbrk then it can escape the test and let the data
2241e624196SRam Gupta 	 * segment grow beyond its set limit in the case where the limit is
2251e624196SRam Gupta 	 * not page aligned -Ram Gupta
2261e624196SRam Gupta 	 */
2278764b338SCyrill Gorcunov 	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
2288764b338SCyrill Gorcunov 			      mm->end_data, mm->start_data))
2291e624196SRam Gupta 		goto out;
2301e624196SRam Gupta 
2311da177e4SLinus Torvalds 	newbrk = PAGE_ALIGN(brk);
2321da177e4SLinus Torvalds 	oldbrk = PAGE_ALIGN(mm->brk);
2339bc8039eSYang Shi 	if (oldbrk == newbrk) {
2349bc8039eSYang Shi 		mm->brk = brk;
2359bc8039eSYang Shi 		goto success;
2369bc8039eSYang Shi 	}
2371da177e4SLinus Torvalds 
238408579cdSLiam R. Howlett 	/* Always allow shrinking brk. */
2391da177e4SLinus Torvalds 	if (brk <= mm->brk) {
2402e7ce7d3SLiam R. Howlett 		/* Search one past newbrk */
24192fed820SLiam R. Howlett 		vma_iter_init(&vmi, mm, newbrk);
24292fed820SLiam R. Howlett 		brkvma = vma_find(&vmi, oldbrk);
243f5ad5083SJason A. Donenfeld 		if (!brkvma || brkvma->vm_start >= oldbrk)
2442e7ce7d3SLiam R. Howlett 			goto out; /* mapping intersects with an existing non-brk vma. */
2459bc8039eSYang Shi 		/*
2462e7ce7d3SLiam R. Howlett 		 * mm->brk must be protected by write mmap_lock.
247408579cdSLiam R. Howlett 	 * do_vma_munmap() will drop the lock on success, so update it
24827b26701SLiam R. Howlett 		 * before calling do_vma_munmap().
2499bc8039eSYang Shi 		 */
2509bc8039eSYang Shi 		mm->brk = brk;
251408579cdSLiam R. Howlett 		if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
2521da177e4SLinus Torvalds 			goto out;
253408579cdSLiam R. Howlett 
254408579cdSLiam R. Howlett 		goto success_unlocked;
2551da177e4SLinus Torvalds 	}
2561da177e4SLinus Torvalds 
2572e7ce7d3SLiam R. Howlett 	if (check_brk_limits(oldbrk, newbrk - oldbrk))
2582e7ce7d3SLiam R. Howlett 		goto out;
2592e7ce7d3SLiam R. Howlett 
2602e7ce7d3SLiam R. Howlett 	/*
2612e7ce7d3SLiam R. Howlett 	 * Only check if the next VMA is within the stack_guard_gap of the
2622e7ce7d3SLiam R. Howlett 	 * expansion area
2632e7ce7d3SLiam R. Howlett 	 */
26492fed820SLiam R. Howlett 	vma_iter_init(&vmi, mm, oldbrk);
26592fed820SLiam R. Howlett 	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
2661be7107fSHugh Dickins 	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
2671da177e4SLinus Torvalds 		goto out;
2681da177e4SLinus Torvalds 
26992fed820SLiam R. Howlett 	brkvma = vma_prev_limit(&vmi, mm->start_brk);
2701da177e4SLinus Torvalds 	/* Ok, looks good - let it rip. */
27192fed820SLiam R. Howlett 	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
2721da177e4SLinus Torvalds 		goto out;
2732e7ce7d3SLiam R. Howlett 
2741da177e4SLinus Torvalds 	mm->brk = brk;
275408579cdSLiam R. Howlett 	if (mm->def_flags & VM_LOCKED)
276408579cdSLiam R. Howlett 		populate = true;
2779bc8039eSYang Shi 
2789bc8039eSYang Shi success:
279d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
280408579cdSLiam R. Howlett success_unlocked:
281897ab3e0SMike Rapoport 	userfaultfd_unmap_complete(mm, &uf);
282128557ffSMichel Lespinasse 	if (populate)
283128557ffSMichel Lespinasse 		mm_populate(oldbrk, newbrk - oldbrk);
284128557ffSMichel Lespinasse 	return brk;
285128557ffSMichel Lespinasse 
2861da177e4SLinus Torvalds out:
287408579cdSLiam R. Howlett 	mm->brk = origbrk;
288d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
289b7204006SAdrian Huang 	return origbrk;
2901da177e4SLinus Torvalds }
2911da177e4SLinus Torvalds 
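/*
 * Editor's sketch (not kernel source): because the syscall above returns
 * the new break on success and the previous break on failure, and a
 * request below the minimum break simply reports the current break, a C
 * library could implement sbrk() roughly along these lines:
 *
 *	void *sbrk(intptr_t increment)
 *	{
 *		unsigned long cur = syscall(SYS_brk, 0);	// query current brk
 *
 *		if (syscall(SYS_brk, cur + increment) != cur + increment)
 *			return (void *)-1;			// brk was not moved
 *		return (void *)cur;
 *	}
 */
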
292d4af56c5SLiam R. Howlett #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
293eafd4dc4SRashika Kheria static void validate_mm(struct mm_struct *mm)
2941da177e4SLinus Torvalds {
2951da177e4SLinus Torvalds 	int bug = 0;
2961da177e4SLinus Torvalds 	int i = 0;
297763ecb03SLiam R. Howlett 	struct vm_area_struct *vma;
298b50e195fSLiam R. Howlett 	VMA_ITERATOR(vmi, mm, 0);
299ff26f70fSAndrew Morton 
300b50e195fSLiam R. Howlett 	mt_validate(&mm->mm_mt);
301b50e195fSLiam R. Howlett 	for_each_vma(vmi, vma) {
302524e00b3SLiam R. Howlett #ifdef CONFIG_DEBUG_VM_RB
30312352d3cSKonstantin Khlebnikov 		struct anon_vma *anon_vma = vma->anon_vma;
304ed8ea815SMichel Lespinasse 		struct anon_vma_chain *avc;
305b50e195fSLiam R. Howlett #endif
306b50e195fSLiam R. Howlett 		unsigned long vmi_start, vmi_end;
307b50e195fSLiam R. Howlett 		bool warn = 0;
308ff26f70fSAndrew Morton 
309b50e195fSLiam R. Howlett 		vmi_start = vma_iter_addr(&vmi);
310b50e195fSLiam R. Howlett 		vmi_end = vma_iter_end(&vmi);
311b50e195fSLiam R. Howlett 		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
312b50e195fSLiam R. Howlett 			warn = 1;
313b50e195fSLiam R. Howlett 
314b50e195fSLiam R. Howlett 		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
315b50e195fSLiam R. Howlett 			warn = 1;
316b50e195fSLiam R. Howlett 
317b50e195fSLiam R. Howlett 		if (warn) {
318b50e195fSLiam R. Howlett 			pr_emerg("issue in %s\n", current->comm);
319b50e195fSLiam R. Howlett 			dump_stack();
320b50e195fSLiam R. Howlett 			dump_vma(vma);
321b50e195fSLiam R. Howlett 			pr_emerg("tree range: %px start %lx end %lx\n", vma,
322b50e195fSLiam R. Howlett 				 vmi_start, vmi_end - 1);
323b50e195fSLiam R. Howlett 			vma_iter_dump_tree(&vmi);
324b50e195fSLiam R. Howlett 		}
325b50e195fSLiam R. Howlett 
326b50e195fSLiam R. Howlett #ifdef CONFIG_DEBUG_VM_RB
32712352d3cSKonstantin Khlebnikov 		if (anon_vma) {
32812352d3cSKonstantin Khlebnikov 			anon_vma_lock_read(anon_vma);
329ed8ea815SMichel Lespinasse 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
330ed8ea815SMichel Lespinasse 				anon_vma_interval_tree_verify(avc);
33112352d3cSKonstantin Khlebnikov 			anon_vma_unlock_read(anon_vma);
33212352d3cSKonstantin Khlebnikov 		}
333524e00b3SLiam R. Howlett #endif
3341da177e4SLinus Torvalds 		i++;
3351da177e4SLinus Torvalds 	}
3365a0768f6SMichel Lespinasse 	if (i != mm->map_count) {
337b50e195fSLiam R. Howlett 		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
3385a0768f6SMichel Lespinasse 		bug = 1;
3395a0768f6SMichel Lespinasse 	}
34096dad67fSSasha Levin 	VM_BUG_ON_MM(bug, mm);
3411da177e4SLinus Torvalds }
342524e00b3SLiam R. Howlett 
343524e00b3SLiam R. Howlett #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
3441da177e4SLinus Torvalds #define validate_mm(mm) do { } while (0)
345524e00b3SLiam R. Howlett #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
346d3737187SMichel Lespinasse 
347bf181b9fSMichel Lespinasse /*
348bf181b9fSMichel Lespinasse  * vma has some anon_vma assigned, and is already inserted on that
349bf181b9fSMichel Lespinasse  * anon_vma's interval trees.
350bf181b9fSMichel Lespinasse  *
351bf181b9fSMichel Lespinasse  * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
352bf181b9fSMichel Lespinasse  * vma must be removed from the anon_vma's interval trees using
353bf181b9fSMichel Lespinasse  * anon_vma_interval_tree_pre_update_vma().
354bf181b9fSMichel Lespinasse  *
355bf181b9fSMichel Lespinasse  * After the update, the vma will be reinserted using
356bf181b9fSMichel Lespinasse  * anon_vma_interval_tree_post_update_vma().
357bf181b9fSMichel Lespinasse  *
358c1e8d7c6SMichel Lespinasse  * The entire update must be protected by exclusive mmap_lock and by
359bf181b9fSMichel Lespinasse  * the root anon_vma's mutex.
360bf181b9fSMichel Lespinasse  */
361bf181b9fSMichel Lespinasse static inline void
362bf181b9fSMichel Lespinasse anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
363bf181b9fSMichel Lespinasse {
364bf181b9fSMichel Lespinasse 	struct anon_vma_chain *avc;
365bf181b9fSMichel Lespinasse 
366bf181b9fSMichel Lespinasse 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
367bf181b9fSMichel Lespinasse 		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
368bf181b9fSMichel Lespinasse }
369bf181b9fSMichel Lespinasse 
370bf181b9fSMichel Lespinasse static inline void
371bf181b9fSMichel Lespinasse anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
372bf181b9fSMichel Lespinasse {
373bf181b9fSMichel Lespinasse 	struct anon_vma_chain *avc;
374bf181b9fSMichel Lespinasse 
375bf181b9fSMichel Lespinasse 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
376bf181b9fSMichel Lespinasse 		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
377bf181b9fSMichel Lespinasse }
378bf181b9fSMichel Lespinasse 
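/*
 * Editor's sketch (not kernel source): the two helpers above are always
 * used as a pair around a boundary update, with the anon_vma write lock
 * held across the whole sequence (see vma_prepare()/vma_complete() below):
 *
 *	anon_vma_lock_write(vma->anon_vma);
 *	anon_vma_interval_tree_pre_update_vma(vma);
 *	vma->vm_start = new_start;	// and/or vm_end, vm_pgoff
 *	anon_vma_interval_tree_post_update_vma(vma);
 *	anon_vma_unlock_write(vma->anon_vma);
 */
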
379e8420a8eSCyril Hrubis static unsigned long count_vma_pages_range(struct mm_struct *mm,
380e8420a8eSCyril Hrubis 		unsigned long addr, unsigned long end)
381e8420a8eSCyril Hrubis {
3822e3af1dbSMatthew Wilcox (Oracle) 	VMA_ITERATOR(vmi, mm, addr);
383e8420a8eSCyril Hrubis 	struct vm_area_struct *vma;
3842e3af1dbSMatthew Wilcox (Oracle) 	unsigned long nr_pages = 0;
385e8420a8eSCyril Hrubis 
3862e3af1dbSMatthew Wilcox (Oracle) 	for_each_vma_range(vmi, vma, end) {
3872e3af1dbSMatthew Wilcox (Oracle) 		unsigned long vm_start = max(addr, vma->vm_start);
3882e3af1dbSMatthew Wilcox (Oracle) 		unsigned long vm_end = min(end, vma->vm_end);
389e8420a8eSCyril Hrubis 
3902e3af1dbSMatthew Wilcox (Oracle) 		nr_pages += PHYS_PFN(vm_end - vm_start);
391e8420a8eSCyril Hrubis 	}
392e8420a8eSCyril Hrubis 
393e8420a8eSCyril Hrubis 	return nr_pages;
394e8420a8eSCyril Hrubis }
395e8420a8eSCyril Hrubis 
396c154124fSLiam R. Howlett static void __vma_link_file(struct vm_area_struct *vma,
397c154124fSLiam R. Howlett 			    struct address_space *mapping)
3981da177e4SLinus Torvalds {
3991da177e4SLinus Torvalds 	if (vma->vm_flags & VM_SHARED)
400cf508b58SMiaohe Lin 		mapping_allow_writable(mapping);
4011da177e4SLinus Torvalds 
4021da177e4SLinus Torvalds 	flush_dcache_mmap_lock(mapping);
4036b2dbba8SMichel Lespinasse 	vma_interval_tree_insert(vma, &mapping->i_mmap);
4041da177e4SLinus Torvalds 	flush_dcache_mmap_unlock(mapping);
4051da177e4SLinus Torvalds }
4061da177e4SLinus Torvalds 
407763ecb03SLiam R. Howlett static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
4081da177e4SLinus Torvalds {
40979e4f2caSLiam R. Howlett 	VMA_ITERATOR(vmi, mm, 0);
4101da177e4SLinus Torvalds 	struct address_space *mapping = NULL;
4111da177e4SLinus Torvalds 
41279e4f2caSLiam R. Howlett 	if (vma_iter_prealloc(&vmi))
413d4af56c5SLiam R. Howlett 		return -ENOMEM;
414d4af56c5SLiam R. Howlett 
41564ac4940SHuang Shijie 	if (vma->vm_file) {
4161da177e4SLinus Torvalds 		mapping = vma->vm_file->f_mapping;
41783cde9e8SDavidlohr Bueso 		i_mmap_lock_write(mapping);
41864ac4940SHuang Shijie 	}
4191da177e4SLinus Torvalds 
42079e4f2caSLiam R. Howlett 	vma_iter_store(&vmi, vma);
4211da177e4SLinus Torvalds 
422c154124fSLiam R. Howlett 	if (mapping) {
423c154124fSLiam R. Howlett 		__vma_link_file(vma, mapping);
42483cde9e8SDavidlohr Bueso 		i_mmap_unlock_write(mapping);
425c154124fSLiam R. Howlett 	}
4261da177e4SLinus Torvalds 
4271da177e4SLinus Torvalds 	mm->map_count++;
4281da177e4SLinus Torvalds 	validate_mm(mm);
429d4af56c5SLiam R. Howlett 	return 0;
4301da177e4SLinus Torvalds }
4311da177e4SLinus Torvalds 
4321da177e4SLinus Torvalds /*
43368cefec5SLiam R. Howlett  * init_multi_vma_prep() - Initializer for struct vma_prepare
43468cefec5SLiam R. Howlett  * @vp: The vma_prepare struct
43568cefec5SLiam R. Howlett  * @vma: The vma that will be altered once locked
43668cefec5SLiam R. Howlett  * @next: The next vma if it is to be adjusted
43768cefec5SLiam R. Howlett  * @remove: The first vma to be removed
43868cefec5SLiam R. Howlett  * @remove2: The second vma to be removed
43968cefec5SLiam R. Howlett  */
44068cefec5SLiam R. Howlett static inline void init_multi_vma_prep(struct vma_prepare *vp,
44168cefec5SLiam R. Howlett 		struct vm_area_struct *vma, struct vm_area_struct *next,
44268cefec5SLiam R. Howlett 		struct vm_area_struct *remove, struct vm_area_struct *remove2)
44368cefec5SLiam R. Howlett {
44468cefec5SLiam R. Howlett 	memset(vp, 0, sizeof(struct vma_prepare));
44568cefec5SLiam R. Howlett 	vp->vma = vma;
44668cefec5SLiam R. Howlett 	vp->anon_vma = vma->anon_vma;
44768cefec5SLiam R. Howlett 	vp->remove = remove;
44868cefec5SLiam R. Howlett 	vp->remove2 = remove2;
44968cefec5SLiam R. Howlett 	vp->adj_next = next;
45068cefec5SLiam R. Howlett 	if (!vp->anon_vma && next)
45168cefec5SLiam R. Howlett 		vp->anon_vma = next->anon_vma;
45268cefec5SLiam R. Howlett 
45368cefec5SLiam R. Howlett 	vp->file = vma->vm_file;
45468cefec5SLiam R. Howlett 	if (vp->file)
45568cefec5SLiam R. Howlett 		vp->mapping = vma->vm_file->f_mapping;
45668cefec5SLiam R. Howlett 
45768cefec5SLiam R. Howlett }
45868cefec5SLiam R. Howlett 
45968cefec5SLiam R. Howlett /*
46068cefec5SLiam R. Howlett  * init_vma_prep() - Initializer wrapper for vma_prepare struct
46168cefec5SLiam R. Howlett  * @vp: The vma_prepare struct
46268cefec5SLiam R. Howlett  * @vma: The vma that will be altered once locked
46368cefec5SLiam R. Howlett  */
46468cefec5SLiam R. Howlett static inline void init_vma_prep(struct vma_prepare *vp,
46568cefec5SLiam R. Howlett 				 struct vm_area_struct *vma)
46668cefec5SLiam R. Howlett {
46768cefec5SLiam R. Howlett 	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
46868cefec5SLiam R. Howlett }
46968cefec5SLiam R. Howlett 
47068cefec5SLiam R. Howlett 
47168cefec5SLiam R. Howlett /*
472440703e0SLiam R. Howlett  * vma_prepare() - Helper function for locking VMAs prior to altering them
473440703e0SLiam R. Howlett  * @vp: The initialized vma_prepare struct
474440703e0SLiam R. Howlett  */
475440703e0SLiam R. Howlett static inline void vma_prepare(struct vma_prepare *vp)
476440703e0SLiam R. Howlett {
47710fca64aSSuren Baghdasaryan 	vma_start_write(vp->vma);
47810fca64aSSuren Baghdasaryan 	if (vp->adj_next)
47910fca64aSSuren Baghdasaryan 		vma_start_write(vp->adj_next);
48010fca64aSSuren Baghdasaryan 	/* vp->insert is always a newly created VMA, no need for locking */
48110fca64aSSuren Baghdasaryan 	if (vp->remove)
48210fca64aSSuren Baghdasaryan 		vma_start_write(vp->remove);
48310fca64aSSuren Baghdasaryan 	if (vp->remove2)
48410fca64aSSuren Baghdasaryan 		vma_start_write(vp->remove2);
48510fca64aSSuren Baghdasaryan 
486440703e0SLiam R. Howlett 	if (vp->file) {
487440703e0SLiam R. Howlett 		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
488440703e0SLiam R. Howlett 
489440703e0SLiam R. Howlett 		if (vp->adj_next)
490440703e0SLiam R. Howlett 			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
491440703e0SLiam R. Howlett 				      vp->adj_next->vm_end);
492440703e0SLiam R. Howlett 
493440703e0SLiam R. Howlett 		i_mmap_lock_write(vp->mapping);
494440703e0SLiam R. Howlett 		if (vp->insert && vp->insert->vm_file) {
495440703e0SLiam R. Howlett 			/*
496440703e0SLiam R. Howlett 			 * Put into interval tree now, so instantiated pages
497440703e0SLiam R. Howlett 			 * are visible to arm/parisc __flush_dcache_page
498440703e0SLiam R. Howlett 			 * throughout; but we cannot insert into address
499440703e0SLiam R. Howlett 			 * space until vma start or end is updated.
500440703e0SLiam R. Howlett 			 */
501440703e0SLiam R. Howlett 			__vma_link_file(vp->insert,
502440703e0SLiam R. Howlett 					vp->insert->vm_file->f_mapping);
503440703e0SLiam R. Howlett 		}
504440703e0SLiam R. Howlett 	}
505440703e0SLiam R. Howlett 
506440703e0SLiam R. Howlett 	if (vp->anon_vma) {
507440703e0SLiam R. Howlett 		anon_vma_lock_write(vp->anon_vma);
508440703e0SLiam R. Howlett 		anon_vma_interval_tree_pre_update_vma(vp->vma);
509440703e0SLiam R. Howlett 		if (vp->adj_next)
510440703e0SLiam R. Howlett 			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
511440703e0SLiam R. Howlett 	}
512440703e0SLiam R. Howlett 
513440703e0SLiam R. Howlett 	if (vp->file) {
514440703e0SLiam R. Howlett 		flush_dcache_mmap_lock(vp->mapping);
515440703e0SLiam R. Howlett 		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
516440703e0SLiam R. Howlett 		if (vp->adj_next)
517440703e0SLiam R. Howlett 			vma_interval_tree_remove(vp->adj_next,
518440703e0SLiam R. Howlett 						 &vp->mapping->i_mmap);
519440703e0SLiam R. Howlett 	}
520440703e0SLiam R. Howlett 
521440703e0SLiam R. Howlett }
522440703e0SLiam R. Howlett 
523440703e0SLiam R. Howlett /*
524440703e0SLiam R. Howlett  * vma_complete() - Helper function for handling the unlocking after altering VMAs,
525440703e0SLiam R. Howlett  * or for inserting a VMA.
526440703e0SLiam R. Howlett  *
527440703e0SLiam R. Howlett  * @vp: The vma_prepare struct
528440703e0SLiam R. Howlett  * @vmi: The vma iterator
529440703e0SLiam R. Howlett  * @mm: The mm_struct
530440703e0SLiam R. Howlett  */
531440703e0SLiam R. Howlett static inline void vma_complete(struct vma_prepare *vp,
532440703e0SLiam R. Howlett 				struct vma_iterator *vmi, struct mm_struct *mm)
533440703e0SLiam R. Howlett {
534440703e0SLiam R. Howlett 	if (vp->file) {
535440703e0SLiam R. Howlett 		if (vp->adj_next)
536440703e0SLiam R. Howlett 			vma_interval_tree_insert(vp->adj_next,
537440703e0SLiam R. Howlett 						 &vp->mapping->i_mmap);
538440703e0SLiam R. Howlett 		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
539440703e0SLiam R. Howlett 		flush_dcache_mmap_unlock(vp->mapping);
540440703e0SLiam R. Howlett 	}
541440703e0SLiam R. Howlett 
542440703e0SLiam R. Howlett 	if (vp->remove && vp->file) {
543440703e0SLiam R. Howlett 		__remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
544440703e0SLiam R. Howlett 		if (vp->remove2)
545440703e0SLiam R. Howlett 			__remove_shared_vm_struct(vp->remove2, vp->file,
546440703e0SLiam R. Howlett 						  vp->mapping);
547440703e0SLiam R. Howlett 	} else if (vp->insert) {
548440703e0SLiam R. Howlett 		/*
549440703e0SLiam R. Howlett 		 * split_vma has split insert from vma, and needs
550440703e0SLiam R. Howlett 		 * us to insert it before dropping the locks
551440703e0SLiam R. Howlett 		 * (it may either follow vma or precede it).
552440703e0SLiam R. Howlett 		 */
553440703e0SLiam R. Howlett 		vma_iter_store(vmi, vp->insert);
554440703e0SLiam R. Howlett 		mm->map_count++;
555440703e0SLiam R. Howlett 	}
556440703e0SLiam R. Howlett 
557440703e0SLiam R. Howlett 	if (vp->anon_vma) {
558440703e0SLiam R. Howlett 		anon_vma_interval_tree_post_update_vma(vp->vma);
559440703e0SLiam R. Howlett 		if (vp->adj_next)
560440703e0SLiam R. Howlett 			anon_vma_interval_tree_post_update_vma(vp->adj_next);
561440703e0SLiam R. Howlett 		anon_vma_unlock_write(vp->anon_vma);
562440703e0SLiam R. Howlett 	}
563440703e0SLiam R. Howlett 
564440703e0SLiam R. Howlett 	if (vp->file) {
565440703e0SLiam R. Howlett 		i_mmap_unlock_write(vp->mapping);
566440703e0SLiam R. Howlett 		uprobe_mmap(vp->vma);
567440703e0SLiam R. Howlett 
568440703e0SLiam R. Howlett 		if (vp->adj_next)
569440703e0SLiam R. Howlett 			uprobe_mmap(vp->adj_next);
570440703e0SLiam R. Howlett 	}
571440703e0SLiam R. Howlett 
572440703e0SLiam R. Howlett 	if (vp->remove) {
573440703e0SLiam R. Howlett again:
574457f67beSSuren Baghdasaryan 		vma_mark_detached(vp->remove, true);
575440703e0SLiam R. Howlett 		if (vp->file) {
576440703e0SLiam R. Howlett 			uprobe_munmap(vp->remove, vp->remove->vm_start,
577440703e0SLiam R. Howlett 				      vp->remove->vm_end);
578440703e0SLiam R. Howlett 			fput(vp->file);
579440703e0SLiam R. Howlett 		}
580440703e0SLiam R. Howlett 		if (vp->remove->anon_vma)
581440703e0SLiam R. Howlett 			anon_vma_merge(vp->vma, vp->remove);
582440703e0SLiam R. Howlett 		mm->map_count--;
583440703e0SLiam R. Howlett 		mpol_put(vma_policy(vp->remove));
584440703e0SLiam R. Howlett 		if (!vp->remove2)
585440703e0SLiam R. Howlett 			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
586440703e0SLiam R. Howlett 		vm_area_free(vp->remove);
587440703e0SLiam R. Howlett 
588440703e0SLiam R. Howlett 		/*
589440703e0SLiam R. Howlett 		 * In mprotect's case 6 (see comments on vma_merge),
5905ff783f1SVlastimil Babka 		 * we are removing both mid and next vmas
591440703e0SLiam R. Howlett 		 */
592440703e0SLiam R. Howlett 		if (vp->remove2) {
593440703e0SLiam R. Howlett 			vp->remove = vp->remove2;
594440703e0SLiam R. Howlett 			vp->remove2 = NULL;
595440703e0SLiam R. Howlett 			goto again;
596440703e0SLiam R. Howlett 		}
597440703e0SLiam R. Howlett 	}
598440703e0SLiam R. Howlett 	if (vp->insert && vp->file)
599440703e0SLiam R. Howlett 		uprobe_mmap(vp->insert);
600440703e0SLiam R. Howlett }
601440703e0SLiam R. Howlett 
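/*
 * Editor's sketch (not kernel source): taken together, the helpers above
 * give VMA modifiers a common shape. vma_shrink() below is the simplest
 * user and follows this sequence under an exclusive mmap_lock:
 *
 *	struct vma_prepare vp;
 *
 *	if (vma_iter_prealloc(vmi))		// reserve maple tree nodes
 *		return -ENOMEM;
 *	init_vma_prep(&vp, vma);
 *	vma_prepare(&vp);			// write-lock VMAs, take rmap/file locks
 *	... update vm_start/vm_end/vm_pgoff and the maple tree ...
 *	vma_complete(&vp, vmi, vma->vm_mm);	// reinsert, unlock, free removed VMAs
 */
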
602440703e0SLiam R. Howlett /*
60304241ffeSLiam R. Howlett  * dup_anon_vma() - Helper function to duplicate anon_vma
60404241ffeSLiam R. Howlett  * @dst: The destination VMA
60504241ffeSLiam R. Howlett  * @src: The source VMA
60604241ffeSLiam R. Howlett  *
60704241ffeSLiam R. Howlett  * Returns: 0 on success.
60804241ffeSLiam R. Howlett  */
60904241ffeSLiam R. Howlett static inline int dup_anon_vma(struct vm_area_struct *dst,
61004241ffeSLiam R. Howlett 			       struct vm_area_struct *src)
61104241ffeSLiam R. Howlett {
61204241ffeSLiam R. Howlett 	/*
61304241ffeSLiam R. Howlett 	 * Easily overlooked: when mprotect shifts the boundary, make sure the
61404241ffeSLiam R. Howlett 	 * expanding vma has anon_vma set if the shrinking vma had, to cover any
61504241ffeSLiam R. Howlett 	 * anon pages imported.
61604241ffeSLiam R. Howlett 	 */
61704241ffeSLiam R. Howlett 	if (src->anon_vma && !dst->anon_vma) {
61804241ffeSLiam R. Howlett 		dst->anon_vma = src->anon_vma;
61904241ffeSLiam R. Howlett 		return anon_vma_clone(dst, src);
62004241ffeSLiam R. Howlett 	}
62104241ffeSLiam R. Howlett 
62204241ffeSLiam R. Howlett 	return 0;
62304241ffeSLiam R. Howlett }
62404241ffeSLiam R. Howlett 
62504241ffeSLiam R. Howlett /*
6269303d3e1SLiam R. Howlett  * vma_expand - Expand an existing VMA
6279303d3e1SLiam R. Howlett  *
6289303d3e1SLiam R. Howlett  * @vmi: The vma iterator
6299303d3e1SLiam R. Howlett  * @vma: The vma to expand
6309303d3e1SLiam R. Howlett  * @start: The start of the vma
6319303d3e1SLiam R. Howlett  * @end: The exclusive end of the vma
6329303d3e1SLiam R. Howlett  * @pgoff: The page offset of vma
6339303d3e1SLiam R. Howlett  * @next: The current or next vma.
6349303d3e1SLiam R. Howlett  *
6359303d3e1SLiam R. Howlett  * Expand @vma to @start and @end.  Can expand off the start and end.  Will
6369303d3e1SLiam R. Howlett  * expand over @next if it's different from @vma and @end == @next->vm_end.
6379303d3e1SLiam R. Howlett  * Checking if the @vma can expand and merge with @next needs to be handled by
6389303d3e1SLiam R. Howlett  * the caller.
6399303d3e1SLiam R. Howlett  *
6409303d3e1SLiam R. Howlett  * Returns: 0 on success
6419303d3e1SLiam R. Howlett  */
6427c9813e8SLiam R. Howlett int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
6439303d3e1SLiam R. Howlett 	       unsigned long start, unsigned long end, pgoff_t pgoff,
6449303d3e1SLiam R. Howlett 	       struct vm_area_struct *next)
6459303d3e1SLiam R. Howlett {
64668cefec5SLiam R. Howlett 	bool remove_next = false;
6479303d3e1SLiam R. Howlett 	struct vma_prepare vp;
6489303d3e1SLiam R. Howlett 
6499303d3e1SLiam R. Howlett 	if (next && (vma != next) && (end == next->vm_end)) {
65004241ffeSLiam R. Howlett 		int ret;
6519303d3e1SLiam R. Howlett 
65204241ffeSLiam R. Howlett 		remove_next = true;
65304241ffeSLiam R. Howlett 		ret = dup_anon_vma(vma, next);
65404241ffeSLiam R. Howlett 		if (ret)
65504241ffeSLiam R. Howlett 			return ret;
6569303d3e1SLiam R. Howlett 	}
6579303d3e1SLiam R. Howlett 
65868cefec5SLiam R. Howlett 	init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
6599303d3e1SLiam R. Howlett 	/* Not merging but overwriting any part of next is not handled. */
6609303d3e1SLiam R. Howlett 	VM_WARN_ON(next && !vp.remove &&
6619303d3e1SLiam R. Howlett 		  next != vma && end > next->vm_start);
6629303d3e1SLiam R. Howlett 	/* Only handles expanding */
6639303d3e1SLiam R. Howlett 	VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
6649303d3e1SLiam R. Howlett 
6659303d3e1SLiam R. Howlett 	if (vma_iter_prealloc(vmi))
6669303d3e1SLiam R. Howlett 		goto nomem;
6679303d3e1SLiam R. Howlett 
668ccf1d78dSSuren Baghdasaryan 	vma_prepare(&vp);
6699303d3e1SLiam R. Howlett 	vma_adjust_trans_huge(vma, start, end, 0);
6709303d3e1SLiam R. Howlett 	/* VMA iterator points to previous, so set to start if necessary */
6719303d3e1SLiam R. Howlett 	if (vma_iter_addr(vmi) != start)
6729303d3e1SLiam R. Howlett 		vma_iter_set(vmi, start);
6739303d3e1SLiam R. Howlett 
6749303d3e1SLiam R. Howlett 	vma->vm_start = start;
6759303d3e1SLiam R. Howlett 	vma->vm_end = end;
6769303d3e1SLiam R. Howlett 	vma->vm_pgoff = pgoff;
6779303d3e1SLiam R. Howlett 	/* Note: mas must be pointing to the expanding VMA */
6789303d3e1SLiam R. Howlett 	vma_iter_store(vmi, vma);
6799303d3e1SLiam R. Howlett 
6809303d3e1SLiam R. Howlett 	vma_complete(&vp, vmi, vma->vm_mm);
6819303d3e1SLiam R. Howlett 	validate_mm(vma->vm_mm);
6829303d3e1SLiam R. Howlett 	return 0;
6839303d3e1SLiam R. Howlett 
6849303d3e1SLiam R. Howlett nomem:
6859303d3e1SLiam R. Howlett 	return -ENOMEM;
6869303d3e1SLiam R. Howlett }
687cf51e86dSLiam R. Howlett 
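/*
 * Editor's note (illustrative): given vma = [0x1000, 0x2000) and
 * next = [0x2000, 0x3000), a caller that has already verified the two are
 * mergeable can call vma_expand(vmi, vma, 0x1000, 0x3000, pgoff, next);
 * since end == next->vm_end, remove_next is set, next's anon_vma state is
 * duplicated into vma via dup_anon_vma(), and next is removed by
 * vma_complete().
 */
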
688cf51e86dSLiam R. Howlett /*
689cf51e86dSLiam R. Howlett  * vma_shrink() - Reduce an existing VMAs memory area
690cf51e86dSLiam R. Howlett  * @vmi: The vma iterator
691cf51e86dSLiam R. Howlett  * @vma: The VMA to modify
692cf51e86dSLiam R. Howlett  * @start: The new start
693cf51e86dSLiam R. Howlett  * @end: The new end
694cf51e86dSLiam R. Howlett  *
695cf51e86dSLiam R. Howlett  * Returns: 0 on success, -ENOMEM otherwise
696cf51e86dSLiam R. Howlett  */
697cf51e86dSLiam R. Howlett int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
698cf51e86dSLiam R. Howlett 	       unsigned long start, unsigned long end, pgoff_t pgoff)
699cf51e86dSLiam R. Howlett {
700cf51e86dSLiam R. Howlett 	struct vma_prepare vp;
701cf51e86dSLiam R. Howlett 
702cf51e86dSLiam R. Howlett 	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
703cf51e86dSLiam R. Howlett 
704cf51e86dSLiam R. Howlett 	if (vma_iter_prealloc(vmi))
705cf51e86dSLiam R. Howlett 		return -ENOMEM;
706cf51e86dSLiam R. Howlett 
707cf51e86dSLiam R. Howlett 	init_vma_prep(&vp, vma);
708cf51e86dSLiam R. Howlett 	vma_prepare(&vp);
709ccf1d78dSSuren Baghdasaryan 	vma_adjust_trans_huge(vma, start, end, 0);
710cf51e86dSLiam R. Howlett 
711cf51e86dSLiam R. Howlett 	if (vma->vm_start < start)
712cf51e86dSLiam R. Howlett 		vma_iter_clear(vmi, vma->vm_start, start);
713cf51e86dSLiam R. Howlett 
714cf51e86dSLiam R. Howlett 	if (vma->vm_end > end)
715cf51e86dSLiam R. Howlett 		vma_iter_clear(vmi, end, vma->vm_end);
716cf51e86dSLiam R. Howlett 
717cf51e86dSLiam R. Howlett 	vma->vm_start = start;
718cf51e86dSLiam R. Howlett 	vma->vm_end = end;
719cf51e86dSLiam R. Howlett 	vma->vm_pgoff = pgoff;
720cf51e86dSLiam R. Howlett 	vma_complete(&vp, vmi, vma->vm_mm);
721cf51e86dSLiam R. Howlett 	validate_mm(vma->vm_mm);
722cf51e86dSLiam R. Howlett 	return 0;
723cf51e86dSLiam R. Howlett }
724cf51e86dSLiam R. Howlett 
7259303d3e1SLiam R. Howlett /*
7261da177e4SLinus Torvalds  * If the vma has a ->close operation then the driver probably needs to release
727714965caSVlastimil Babka  * per-vma resources, so we don't attempt to merge those if the caller indicates
728714965caSVlastimil Babka  * the current vma may be removed as part of the merge.
7291da177e4SLinus Torvalds  */
7302dbf4010SVlastimil Babka static inline bool is_mergeable_vma(struct vm_area_struct *vma,
73119a809afSAndrea Arcangeli 		struct file *file, unsigned long vm_flags,
7329a10064fSColin Cross 		struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
733714965caSVlastimil Babka 		struct anon_vma_name *anon_name, bool may_remove_vma)
7341da177e4SLinus Torvalds {
73534228d47SCyrill Gorcunov 	/*
73634228d47SCyrill Gorcunov 	 * VM_SOFTDIRTY should not prevent VMA merging: if the flags match
73734228d47SCyrill Gorcunov 	 * except for the dirty bit, the caller should mark the merged VMA
73834228d47SCyrill Gorcunov 	 * as dirty. If the dirty bit were not excluded from the
7398bb4e7a2SWei Yang 	 * comparison, we would increase pressure on the memory system by
74034228d47SCyrill Gorcunov 	 * forcing the kernel to generate new VMAs where old ones could be
74134228d47SCyrill Gorcunov 	 * extended instead.
74234228d47SCyrill Gorcunov 	 */
74334228d47SCyrill Gorcunov 	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
7442dbf4010SVlastimil Babka 		return false;
7451da177e4SLinus Torvalds 	if (vma->vm_file != file)
7462dbf4010SVlastimil Babka 		return false;
747714965caSVlastimil Babka 	if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
7482dbf4010SVlastimil Babka 		return false;
74919a809afSAndrea Arcangeli 	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
7502dbf4010SVlastimil Babka 		return false;
7515c26f6acSSuren Baghdasaryan 	if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
7522dbf4010SVlastimil Babka 		return false;
7532dbf4010SVlastimil Babka 	return true;
7541da177e4SLinus Torvalds }
7551da177e4SLinus Torvalds 
7562dbf4010SVlastimil Babka static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
7572dbf4010SVlastimil Babka 		 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
7581da177e4SLinus Torvalds {
759965f55deSShaohua Li 	/*
760965f55deSShaohua Li 	 * The list_is_singular() test is to avoid merging VMA cloned from
761965f55deSShaohua Li 	 * parents. This can improve scalability caused by anon_vma lock.
762965f55deSShaohua Li 	 */
763965f55deSShaohua Li 	if ((!anon_vma1 || !anon_vma2) && (!vma ||
764965f55deSShaohua Li 		list_is_singular(&vma->anon_vma_chain)))
7652dbf4010SVlastimil Babka 		return true;
766965f55deSShaohua Li 	return anon_vma1 == anon_vma2;
7671da177e4SLinus Torvalds }
7681da177e4SLinus Torvalds 
7691da177e4SLinus Torvalds /*
7701da177e4SLinus Torvalds  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
7711da177e4SLinus Torvalds  * in front of (at a lower virtual address and file offset than) the vma.
7721da177e4SLinus Torvalds  *
7731da177e4SLinus Torvalds  * We cannot merge two vmas if they have differently assigned (non-NULL)
7741da177e4SLinus Torvalds  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
7751da177e4SLinus Torvalds  *
7761da177e4SLinus Torvalds  * We don't check here for the merged mmap wrapping around the end of pagecache
77745e55300SPeter Collingbourne  * indices (16TB on ia32) because do_mmap() does not permit mmap's which
7781da177e4SLinus Torvalds  * wrap, nor mmaps which cover the final page at index -1UL.
779714965caSVlastimil Babka  *
780714965caSVlastimil Babka  * We assume the vma may be removed as part of the merge.
7811da177e4SLinus Torvalds  */
7822dbf4010SVlastimil Babka static bool
7831da177e4SLinus Torvalds can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
78419a809afSAndrea Arcangeli 		struct anon_vma *anon_vma, struct file *file,
7852dbf4010SVlastimil Babka 		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
7865c26f6acSSuren Baghdasaryan 		struct anon_vma_name *anon_name)
7871da177e4SLinus Torvalds {
788714965caSVlastimil Babka 	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
789965f55deSShaohua Li 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
7901da177e4SLinus Torvalds 		if (vma->vm_pgoff == vm_pgoff)
7912dbf4010SVlastimil Babka 			return true;
7921da177e4SLinus Torvalds 	}
7932dbf4010SVlastimil Babka 	return false;
7941da177e4SLinus Torvalds }
7951da177e4SLinus Torvalds 
7961da177e4SLinus Torvalds /*
7971da177e4SLinus Torvalds  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
7981da177e4SLinus Torvalds  * beyond (at a higher virtual address and file offset than) the vma.
7991da177e4SLinus Torvalds  *
8001da177e4SLinus Torvalds  * We cannot merge two vmas if they have differently assigned (non-NULL)
8011da177e4SLinus Torvalds  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
802714965caSVlastimil Babka  *
803714965caSVlastimil Babka  * We assume that vma is not removed as part of the merge.
8041da177e4SLinus Torvalds  */
8052dbf4010SVlastimil Babka static bool
8061da177e4SLinus Torvalds can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
80719a809afSAndrea Arcangeli 		struct anon_vma *anon_vma, struct file *file,
8082dbf4010SVlastimil Babka 		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
8095c26f6acSSuren Baghdasaryan 		struct anon_vma_name *anon_name)
8101da177e4SLinus Torvalds {
811714965caSVlastimil Babka 	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
812965f55deSShaohua Li 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
8131da177e4SLinus Torvalds 		pgoff_t vm_pglen;
814d6e93217SLibin 		vm_pglen = vma_pages(vma);
8151da177e4SLinus Torvalds 		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
8162dbf4010SVlastimil Babka 			return true;
8171da177e4SLinus Torvalds 	}
8182dbf4010SVlastimil Babka 	return false;
8191da177e4SLinus Torvalds }
8201da177e4SLinus Torvalds 
8211da177e4SLinus Torvalds /*
8229a10064fSColin Cross  * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
8239a10064fSColin Cross  * figure out whether that can be merged with its predecessor or its
8249a10064fSColin Cross  * successor.  Or both (it neatly fills a hole).
8251da177e4SLinus Torvalds  *
8261da177e4SLinus Torvalds  * In most cases - when called for mmap, brk or mremap - [addr,end) is
8271da177e4SLinus Torvalds  * certain not to be mapped by the time vma_merge is called; but when
8281da177e4SLinus Torvalds  * called for mprotect, it is certain to be already mapped (either at
8291da177e4SLinus Torvalds  * an offset within prev, or at the start of next), and the flags of
8301da177e4SLinus Torvalds  * this area are about to be changed to vm_flags - and the no-change
8311da177e4SLinus Torvalds  * case has already been eliminated.
8321da177e4SLinus Torvalds  *
833fcfccd91SLorenzo Stoakes  * The following mprotect cases have to be considered, where **** is
8341da177e4SLinus Torvalds  * the area passed down from mprotect_fixup, never extending beyond one
835fcfccd91SLorenzo Stoakes  * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
836fcfccd91SLorenzo Stoakes  * at the same address as **** and is of the same or larger span, and
837fcfccd91SLorenzo Stoakes  * NNNN the next vma after ****:
8381da177e4SLinus Torvalds  *
839fcfccd91SLorenzo Stoakes  *     ****             ****                   ****
840fcfccd91SLorenzo Stoakes  *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
8415d42ab29SWei Yang  *    cannot merge    might become       might become
842fcfccd91SLorenzo Stoakes  *                    PPNNNNNNNNNN       PPPPPPPPPPCC
8435d42ab29SWei Yang  *    mmap, brk or    case 4 below       case 5 below
8445d42ab29SWei Yang  *    mremap move:
845fcfccd91SLorenzo Stoakes  *                        ****               ****
846fcfccd91SLorenzo Stoakes  *                    PPPP    NNNN       PPPPCCCCNNNN
8475d42ab29SWei Yang  *                    might become       might become
8485d42ab29SWei Yang  *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
849fcfccd91SLorenzo Stoakes  *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
850fcfccd91SLorenzo Stoakes  *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
8511da177e4SLinus Torvalds  *
852fcfccd91SLorenzo Stoakes  * It is important for case 8 that the vma CCCC overlapping the
853fcfccd91SLorenzo Stoakes  * region **** is never going to extended over NNNN. Instead NNNN must
854fcfccd91SLorenzo Stoakes  * be extended in region **** and CCCC must be removed. This way in
8550503ea8fSLiam R. Howlett  * all cases where vma_merge succeeds, the moment vma_merge drops the
856e86f15eeSAndrea Arcangeli  * rmap_locks, the properties of the merged vma will be already
857e86f15eeSAndrea Arcangeli  * correct for the whole merged range. Some of those properties like
858e86f15eeSAndrea Arcangeli  * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
859e86f15eeSAndrea Arcangeli  * be correct for the whole merged range immediately after the
860fcfccd91SLorenzo Stoakes  * rmap_locks are released. Otherwise if NNNN would be removed and
861fcfccd91SLorenzo Stoakes  * CCCC would be extended over the NNNN range, remove_migration_ptes
862e86f15eeSAndrea Arcangeli  * or other rmap walkers (if working on addresses beyond the "end"
863fcfccd91SLorenzo Stoakes  * parameter) may establish ptes with the wrong permissions of CCCC
864fcfccd91SLorenzo Stoakes  * instead of the right permissions of NNNN.
8650503ea8fSLiam R. Howlett  *
8660503ea8fSLiam R. Howlett  * In the code below:
8670503ea8fSLiam R. Howlett  * PPPP is represented by *prev
868fcfccd91SLorenzo Stoakes  * CCCC is represented by *curr or not represented at all (NULL)
869fcfccd91SLorenzo Stoakes  * NNNN is represented by *next or not represented at all (NULL)
870fcfccd91SLorenzo Stoakes  * **** is not represented - it will be merged and the vma containing the
8719e8a39d2SVlastimil Babka  *      area is returned, or the function will return NULL
8721da177e4SLinus Torvalds  */
8739760ebffSLiam R. Howlett struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
8741da177e4SLinus Torvalds 			struct vm_area_struct *prev, unsigned long addr,
8751da177e4SLinus Torvalds 			unsigned long end, unsigned long vm_flags,
8761da177e4SLinus Torvalds 			struct anon_vma *anon_vma, struct file *file,
87719a809afSAndrea Arcangeli 			pgoff_t pgoff, struct mempolicy *policy,
8789a10064fSColin Cross 			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
8795c26f6acSSuren Baghdasaryan 			struct anon_vma_name *anon_name)
8801da177e4SLinus Torvalds {
881b0729ae0SLorenzo Stoakes 	struct vm_area_struct *curr, *next, *res;
8820503ea8fSLiam R. Howlett 	struct vm_area_struct *vma, *adjust, *remove, *remove2;
8830173db4fSLorenzo Stoakes 	struct vma_prepare vp;
8840173db4fSLorenzo Stoakes 	pgoff_t vma_pgoff;
8850173db4fSLorenzo Stoakes 	int err = 0;
886eef19944SJakub Matěna 	bool merge_prev = false;
887eef19944SJakub Matěna 	bool merge_next = false;
8880503ea8fSLiam R. Howlett 	bool vma_expanded = false;
8890503ea8fSLiam R. Howlett 	unsigned long vma_start = addr;
8900173db4fSLorenzo Stoakes 	unsigned long vma_end = end;
8910173db4fSLorenzo Stoakes 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
8920173db4fSLorenzo Stoakes 	long adj_start = 0;
8931da177e4SLinus Torvalds 
8940503ea8fSLiam R. Howlett 	validate_mm(mm);
8951da177e4SLinus Torvalds 	/*
8961da177e4SLinus Torvalds 	 * We later require that vma->vm_flags == vm_flags,
8971da177e4SLinus Torvalds 	 * so this tests vma->vm_flags & VM_SPECIAL, too.
8981da177e4SLinus Torvalds 	 */
8991da177e4SLinus Torvalds 	if (vm_flags & VM_SPECIAL)
9001da177e4SLinus Torvalds 		return NULL;
9011da177e4SLinus Torvalds 
90200cd00a6SLorenzo Stoakes 	/* Does the input range span an existing VMA? (cases 5 - 8) */
90300cd00a6SLorenzo Stoakes 	curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
9041da177e4SLinus Torvalds 
90500cd00a6SLorenzo Stoakes 	if (!curr ||			/* cases 1 - 4 */
90600cd00a6SLorenzo Stoakes 	    end == curr->vm_end)	/* cases 6 - 8, adjacent VMA */
90700cd00a6SLorenzo Stoakes 		next = vma_lookup(mm, end);
90800cd00a6SLorenzo Stoakes 	else
90900cd00a6SLorenzo Stoakes 		next = NULL;		/* case 5 */
910e86f15eeSAndrea Arcangeli 
9110503ea8fSLiam R. Howlett 	if (prev) {
9120503ea8fSLiam R. Howlett 		vma_start = prev->vm_start;
9130503ea8fSLiam R. Howlett 		vma_pgoff = prev->vm_pgoff;
9140173db4fSLorenzo Stoakes 
915eef19944SJakub Matěna 		/* Can we merge the predecessor? */
9160173db4fSLorenzo Stoakes 		if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
9170503ea8fSLiam R. Howlett 		    && can_vma_merge_after(prev, vm_flags, anon_vma, file,
9180503ea8fSLiam R. Howlett 					   pgoff, vm_userfaultfd_ctx, anon_name)) {
919eef19944SJakub Matěna 			merge_prev = true;
92018b098afSLiam R. Howlett 			vma_prev(vmi);
9211da177e4SLinus Torvalds 		}
9220503ea8fSLiam R. Howlett 	}
923b0729ae0SLorenzo Stoakes 
924eef19944SJakub Matěna 	/* Can we merge the successor? */
92500cd00a6SLorenzo Stoakes 	if (next && mpol_equal(policy, vma_policy(next)) &&
9260173db4fSLorenzo Stoakes 	    can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
9279a10064fSColin Cross 				 vm_userfaultfd_ctx, anon_name)) {
928eef19944SJakub Matěna 		merge_next = true;
929eef19944SJakub Matěna 	}
9300503ea8fSLiam R. Howlett 
93129417d29SLorenzo Stoakes 	/* Verify some invariants that must be enforced by the caller. */
93229417d29SLorenzo Stoakes 	VM_WARN_ON(prev && addr <= prev->vm_start);
93329417d29SLorenzo Stoakes 	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
93429417d29SLorenzo Stoakes 	VM_WARN_ON(addr >= end);
93529417d29SLorenzo Stoakes 
9360173db4fSLorenzo Stoakes 	if (!merge_prev && !merge_next)
9370173db4fSLorenzo Stoakes 		return NULL; /* Not mergeable. */
9380173db4fSLorenzo Stoakes 
9390173db4fSLorenzo Stoakes 	res = vma = prev;
9400503ea8fSLiam R. Howlett 	remove = remove2 = adjust = NULL;
9410173db4fSLorenzo Stoakes 
942eef19944SJakub Matěna 	/* Can we merge both the predecessor and the successor? */
943eef19944SJakub Matěna 	if (merge_prev && merge_next &&
9440503ea8fSLiam R. Howlett 	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
9455ff783f1SVlastimil Babka 		remove = next;				/* case 1 */
9460503ea8fSLiam R. Howlett 		vma_end = next->vm_end;
94750dac011SVlastimil Babka 		err = dup_anon_vma(prev, next);
948fcfccd91SLorenzo Stoakes 		if (curr) {				/* case 6 */
949fcfccd91SLorenzo Stoakes 			remove = curr;
9500503ea8fSLiam R. Howlett 			remove2 = next;
9515ff783f1SVlastimil Babka 			if (!next->anon_vma)
952fcfccd91SLorenzo Stoakes 				err = dup_anon_vma(prev, curr);
9530503ea8fSLiam R. Howlett 		}
9540173db4fSLorenzo Stoakes 	} else if (merge_prev) {			/* case 2 */
955fcfccd91SLorenzo Stoakes 		if (curr) {
956fcfccd91SLorenzo Stoakes 			err = dup_anon_vma(prev, curr);
957fcfccd91SLorenzo Stoakes 			if (end == curr->vm_end) {	/* case 7 */
958fcfccd91SLorenzo Stoakes 				remove = curr;
9590503ea8fSLiam R. Howlett 			} else {			/* case 5 */
960fcfccd91SLorenzo Stoakes 				adjust = curr;
961fcfccd91SLorenzo Stoakes 				adj_start = (end - curr->vm_start);
9620503ea8fSLiam R. Howlett 			}
9630503ea8fSLiam R. Howlett 		}
9640173db4fSLorenzo Stoakes 	} else { /* merge_next */
965eef19944SJakub Matěna 		res = next;
9660503ea8fSLiam R. Howlett 		if (prev && addr < prev->vm_end) {	/* case 4 */
9670503ea8fSLiam R. Howlett 			vma_end = addr;
968183b7a60SVlastimil Babka 			adjust = next;
9691e76454fSVlastimil Babka 			adj_start = -(prev->vm_end - addr);
970183b7a60SVlastimil Babka 			err = dup_anon_vma(next, prev);
9710503ea8fSLiam R. Howlett 		} else {
972b0729ae0SLorenzo Stoakes 			/*
973b0729ae0SLorenzo Stoakes 			 * Note that cases 3 and 8 are the ONLY ones where prev
974b0729ae0SLorenzo Stoakes 			 * is permitted to be (but is not necessarily) NULL.
975b0729ae0SLorenzo Stoakes 			 */
9760503ea8fSLiam R. Howlett 			vma = next;			/* case 3 */
9770503ea8fSLiam R. Howlett 			vma_start = addr;
9780503ea8fSLiam R. Howlett 			vma_end = next->vm_end;
9797e775787SVlastimil Babka 			vma_pgoff = next->vm_pgoff - pglen;
980fcfccd91SLorenzo Stoakes 			if (curr) {			/* case 8 */
981fcfccd91SLorenzo Stoakes 				vma_pgoff = curr->vm_pgoff;
982fcfccd91SLorenzo Stoakes 				remove = curr;
983fcfccd91SLorenzo Stoakes 				err = dup_anon_vma(next, curr);
9840503ea8fSLiam R. Howlett 			}
9850503ea8fSLiam R. Howlett 		}
9861da177e4SLinus Torvalds 	}
9871da177e4SLinus Torvalds 
9880173db4fSLorenzo Stoakes 	/* Error in anon_vma clone. */
989eef19944SJakub Matěna 	if (err)
9901da177e4SLinus Torvalds 		return NULL;
9910503ea8fSLiam R. Howlett 
9920503ea8fSLiam R. Howlett 	if (vma_iter_prealloc(vmi))
9930503ea8fSLiam R. Howlett 		return NULL;
9940503ea8fSLiam R. Howlett 
9950503ea8fSLiam R. Howlett 	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
9960503ea8fSLiam R. Howlett 	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
9970503ea8fSLiam R. Howlett 		   vp.anon_vma != adjust->anon_vma);
9980503ea8fSLiam R. Howlett 
9990503ea8fSLiam R. Howlett 	vma_prepare(&vp);
1000ccf1d78dSSuren Baghdasaryan 	vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
10010503ea8fSLiam R. Howlett 	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
10020503ea8fSLiam R. Howlett 		vma_expanded = true;
10030503ea8fSLiam R. Howlett 
10040503ea8fSLiam R. Howlett 	vma->vm_start = vma_start;
10050503ea8fSLiam R. Howlett 	vma->vm_end = vma_end;
10060503ea8fSLiam R. Howlett 	vma->vm_pgoff = vma_pgoff;
10070503ea8fSLiam R. Howlett 
10080503ea8fSLiam R. Howlett 	if (vma_expanded)
10090503ea8fSLiam R. Howlett 		vma_iter_store(vmi, vma);
10100503ea8fSLiam R. Howlett 
10111e76454fSVlastimil Babka 	if (adj_start) {
10121e76454fSVlastimil Babka 		adjust->vm_start += adj_start;
10131e76454fSVlastimil Babka 		adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
10141e76454fSVlastimil Babka 		if (adj_start < 0) {
10150503ea8fSLiam R. Howlett 			WARN_ON(vma_expanded);
10160503ea8fSLiam R. Howlett 			vma_iter_store(vmi, next);
10170503ea8fSLiam R. Howlett 		}
10180503ea8fSLiam R. Howlett 	}
10190503ea8fSLiam R. Howlett 
10200503ea8fSLiam R. Howlett 	vma_complete(&vp, vmi, mm);
10210503ea8fSLiam R. Howlett 	vma_iter_free(vmi);
10220503ea8fSLiam R. Howlett 	validate_mm(mm);
1023eef19944SJakub Matěna 	khugepaged_enter_vma(res, vm_flags);
10241da177e4SLinus Torvalds 
10259760ebffSLiam R. Howlett 	return res;
1026f2ebfe43SLiam R. Howlett }
1027f2ebfe43SLiam R. Howlett 
10281da177e4SLinus Torvalds /*
1029b4f315b4SEthon Paul  * Rough compatibility check to quickly see if it's even worth looking
1030d0e9fe17SLinus Torvalds  * at sharing an anon_vma.
1031d0e9fe17SLinus Torvalds  *
1032d0e9fe17SLinus Torvalds  * They need to have the same vm_file, and the flags can only differ
1033d0e9fe17SLinus Torvalds  * in things that mprotect may change.
1034d0e9fe17SLinus Torvalds  *
1035d0e9fe17SLinus Torvalds  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1036d0e9fe17SLinus Torvalds  * we can merge the two vma's. For example, we refuse to merge a vma if
1037d0e9fe17SLinus Torvalds  * there is a vm_ops->close() function, because that indicates that the
1038d0e9fe17SLinus Torvalds  * driver is doing some kind of reference counting. But that doesn't
1039d0e9fe17SLinus Torvalds  * really matter for the anon_vma sharing case.
1040d0e9fe17SLinus Torvalds  */
1041d0e9fe17SLinus Torvalds static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1042d0e9fe17SLinus Torvalds {
1043d0e9fe17SLinus Torvalds 	return a->vm_end == b->vm_start &&
1044d0e9fe17SLinus Torvalds 		mpol_equal(vma_policy(a), vma_policy(b)) &&
1045d0e9fe17SLinus Torvalds 		a->vm_file == b->vm_file &&
10466cb4d9a2SAnshuman Khandual 		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1047d0e9fe17SLinus Torvalds 		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1048d0e9fe17SLinus Torvalds }
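/*
 * Illustrative sketch (not from the kernel source): with 4KiB pages,
 * two adjacent anonymous VMAs pass the check above when their offsets
 * line up:
 *
 *	a->vm_start = 0x1000; a->vm_end = 0x2000; a->vm_pgoff = 1;
 *	b->vm_start = 0x2000; b->vm_end = 0x3000; b->vm_pgoff = 2;
 *
 * Here b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >>
 * PAGE_SHIFT) == 1 + 1, so - given the same vm_file and mempolicy, and
 * flags differing only in bits mprotect may change -
 * anon_vma_compatible() returns true.
 */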
1049d0e9fe17SLinus Torvalds 
1050d0e9fe17SLinus Torvalds /*
1051d0e9fe17SLinus Torvalds  * Do some basic sanity checking to see if we can re-use the anon_vma
1052d0e9fe17SLinus Torvalds  * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1053d0e9fe17SLinus Torvalds  * the same as 'old', the other will be the new one that is trying
1054d0e9fe17SLinus Torvalds  * to share the anon_vma.
1055d0e9fe17SLinus Torvalds  *
10565b449489SFlorian Rommel  * NOTE! This runs with mmap_lock held for reading, so it is possible that
1057d0e9fe17SLinus Torvalds  * the anon_vma of 'old' is concurrently in the process of being set up
1058d0e9fe17SLinus Torvalds  * by another page fault trying to merge _that_. But that's ok: if it
1059d0e9fe17SLinus Torvalds  * is being set up, that automatically means that it will be a singleton
1060d0e9fe17SLinus Torvalds  * acceptable for merging, so we can do all of this optimistically. But
10614db0c3c2SJason Low  * we do that READ_ONCE() to make sure that we never re-load the pointer.
1062d0e9fe17SLinus Torvalds  *
1063d0e9fe17SLinus Torvalds  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1064d0e9fe17SLinus Torvalds  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1065d0e9fe17SLinus Torvalds  * is to return an anon_vma that is "complex" due to having gone through
1066d0e9fe17SLinus Torvalds  * a fork).
1067d0e9fe17SLinus Torvalds  *
1068d0e9fe17SLinus Torvalds  * We also make sure that the two vma's are compatible (adjacent,
1069d0e9fe17SLinus Torvalds  * and with the same memory policies). That's all stable, even with just
10705b449489SFlorian Rommel  * a read lock on the mmap_lock.
1071d0e9fe17SLinus Torvalds  */
1072d0e9fe17SLinus Torvalds static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1073d0e9fe17SLinus Torvalds {
1074d0e9fe17SLinus Torvalds 	if (anon_vma_compatible(a, b)) {
10754db0c3c2SJason Low 		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1076d0e9fe17SLinus Torvalds 
1077d0e9fe17SLinus Torvalds 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
1078d0e9fe17SLinus Torvalds 			return anon_vma;
1079d0e9fe17SLinus Torvalds 	}
1080d0e9fe17SLinus Torvalds 	return NULL;
1081d0e9fe17SLinus Torvalds }
1082d0e9fe17SLinus Torvalds 
1083d0e9fe17SLinus Torvalds /*
10841da177e4SLinus Torvalds  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
10851da177e4SLinus Torvalds  * neighbouring vmas for a suitable anon_vma, before it goes off
10861da177e4SLinus Torvalds  * to allocate a new anon_vma.  It checks because a repetitive
10871da177e4SLinus Torvalds  * sequence of mprotects and faults may otherwise lead to distinct
10881da177e4SLinus Torvalds  * anon_vmas being allocated, preventing vma merge in subsequent
10891da177e4SLinus Torvalds  * mprotect.
10901da177e4SLinus Torvalds  */
10911da177e4SLinus Torvalds struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
10921da177e4SLinus Torvalds {
1093763ecb03SLiam R. Howlett 	MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
1094a67c8caaSMiaohe Lin 	struct anon_vma *anon_vma = NULL;
1095763ecb03SLiam R. Howlett 	struct vm_area_struct *prev, *next;
10961da177e4SLinus Torvalds 
1097a67c8caaSMiaohe Lin 	/* Try next first. */
1098763ecb03SLiam R. Howlett 	next = mas_walk(&mas);
1099763ecb03SLiam R. Howlett 	if (next) {
1100763ecb03SLiam R. Howlett 		anon_vma = reusable_anon_vma(next, vma, next);
1101d0e9fe17SLinus Torvalds 		if (anon_vma)
1102d0e9fe17SLinus Torvalds 			return anon_vma;
1103a67c8caaSMiaohe Lin 	}
11041da177e4SLinus Torvalds 
1105763ecb03SLiam R. Howlett 	prev = mas_prev(&mas, 0);
1106763ecb03SLiam R. Howlett 	VM_BUG_ON_VMA(prev != vma, vma);
1107763ecb03SLiam R. Howlett 	prev = mas_prev(&mas, 0);
1108a67c8caaSMiaohe Lin 	/* Then try prev. */
1109763ecb03SLiam R. Howlett 	if (prev)
1110763ecb03SLiam R. Howlett 		anon_vma = reusable_anon_vma(prev, prev, vma);
1111a67c8caaSMiaohe Lin 
11121da177e4SLinus Torvalds 	/*
1113a67c8caaSMiaohe Lin 	 * We might reach here with anon_vma == NULL if we can't find
1114a67c8caaSMiaohe Lin 	 * any reusable anon_vma.
11151da177e4SLinus Torvalds 	 * There's no absolute need to look only at touching neighbours:
11161da177e4SLinus Torvalds 	 * we could search further afield for "compatible" anon_vmas.
11171da177e4SLinus Torvalds 	 * But it would probably just be a waste of time searching,
11181da177e4SLinus Torvalds 	 * or lead to too many vmas hanging off the same anon_vma.
11191da177e4SLinus Torvalds 	 * We're trying to allow mprotect remerging later on,
11201da177e4SLinus Torvalds 	 * not trying to minimize memory used for anon_vmas.
11211da177e4SLinus Torvalds 	 */
1122a67c8caaSMiaohe Lin 	return anon_vma;
11231da177e4SLinus Torvalds }
11241da177e4SLinus Torvalds 
11251da177e4SLinus Torvalds /*
112640401530SAl Viro  * If a hint addr is less than mmap_min_addr, change the hint to be as
112740401530SAl Viro  * low as possible but still no less than mmap_min_addr.
112840401530SAl Viro  */
112940401530SAl Viro static inline unsigned long round_hint_to_min(unsigned long hint)
113040401530SAl Viro {
113140401530SAl Viro 	hint &= PAGE_MASK;
113240401530SAl Viro 	if (((void *)hint != NULL) &&
113340401530SAl Viro 	    (hint < mmap_min_addr))
113440401530SAl Viro 		return PAGE_ALIGN(mmap_min_addr);
113540401530SAl Viro 	return hint;
113640401530SAl Viro }
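/*
 * Worked example (hypothetical values): with PAGE_SIZE == 4096 and
 * mmap_min_addr == 0x10000, a hint of 0x4123 is first masked to
 * 0x4000; since that is below mmap_min_addr, the helper returns
 * PAGE_ALIGN(0x10000) == 0x10000.  A hint of 0 is returned unchanged
 * so the allocator stays free to choose any address.
 */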
113740401530SAl Viro 
1138b0cc5e89SAndrew Morton bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
11393c54a298SLorenzo Stoakes 			unsigned long bytes)
1140363ee17fSDavidlohr Bueso {
11413c54a298SLorenzo Stoakes 	unsigned long locked_pages, limit_pages;
1142363ee17fSDavidlohr Bueso 
11433c54a298SLorenzo Stoakes 	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
11443c54a298SLorenzo Stoakes 		return true;
11453c54a298SLorenzo Stoakes 
11463c54a298SLorenzo Stoakes 	locked_pages = bytes >> PAGE_SHIFT;
11473c54a298SLorenzo Stoakes 	locked_pages += mm->locked_vm;
11483c54a298SLorenzo Stoakes 
11493c54a298SLorenzo Stoakes 	limit_pages = rlimit(RLIMIT_MEMLOCK);
11503c54a298SLorenzo Stoakes 	limit_pages >>= PAGE_SHIFT;
11513c54a298SLorenzo Stoakes 
11523c54a298SLorenzo Stoakes 	return locked_pages <= limit_pages;
1153363ee17fSDavidlohr Bueso }
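/*
 * Worked example (hypothetical numbers): with RLIMIT_MEMLOCK == 64KiB
 * and 4KiB pages, limit_pages == 16.  A VM_LOCKED request for 32KiB
 * (8 pages) against an mm with mm->locked_vm == 10 yields 18 > 16, so
 * this returns false and do_mmap() below fails with -EAGAIN; a caller
 * with CAP_IPC_LOCK skips the check entirely.
 */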
1154363ee17fSDavidlohr Bueso 
1155be83bbf8SLinus Torvalds static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1156be83bbf8SLinus Torvalds {
1157be83bbf8SLinus Torvalds 	if (S_ISREG(inode->i_mode))
1158423913adSLinus Torvalds 		return MAX_LFS_FILESIZE;
1159be83bbf8SLinus Torvalds 
1160be83bbf8SLinus Torvalds 	if (S_ISBLK(inode->i_mode))
1161be83bbf8SLinus Torvalds 		return MAX_LFS_FILESIZE;
1162be83bbf8SLinus Torvalds 
116376f34950SIvan Khoronzhuk 	if (S_ISSOCK(inode->i_mode))
116476f34950SIvan Khoronzhuk 		return MAX_LFS_FILESIZE;
116576f34950SIvan Khoronzhuk 
1166be83bbf8SLinus Torvalds 	/* Special "we do even unsigned file positions" case */
1167be83bbf8SLinus Torvalds 	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1168be83bbf8SLinus Torvalds 		return 0;
1169be83bbf8SLinus Torvalds 
1170be83bbf8SLinus Torvalds 	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
1171be83bbf8SLinus Torvalds 	return ULONG_MAX;
1172be83bbf8SLinus Torvalds }
1173be83bbf8SLinus Torvalds 
1174be83bbf8SLinus Torvalds static inline bool file_mmap_ok(struct file *file, struct inode *inode,
1175be83bbf8SLinus Torvalds 				unsigned long pgoff, unsigned long len)
1176be83bbf8SLinus Torvalds {
1177be83bbf8SLinus Torvalds 	u64 maxsize = file_mmap_size_max(file, inode);
1178be83bbf8SLinus Torvalds 
1179be83bbf8SLinus Torvalds 	if (maxsize && len > maxsize)
1180be83bbf8SLinus Torvalds 		return false;
1181be83bbf8SLinus Torvalds 	maxsize -= len;
1182be83bbf8SLinus Torvalds 	if (pgoff > maxsize >> PAGE_SHIFT)
1183be83bbf8SLinus Torvalds 		return false;
1184be83bbf8SLinus Torvalds 	return true;
1185be83bbf8SLinus Torvalds }
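/*
 * Sketch of the rejected overflow (illustrative): mapping len == two
 * pages at pgoff == (MAX_LFS_FILESIZE >> PAGE_SHIFT) - 1 of a regular
 * file would extend past the maximum file size; after maxsize -= len,
 * pgoff exceeds maxsize >> PAGE_SHIFT and the helper returns false,
 * which do_mmap() turns into -EOVERFLOW.
 */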
1186be83bbf8SLinus Torvalds 
118740401530SAl Viro /*
11883e4e28c5SMichel Lespinasse  * The caller must write-lock current->mm->mmap_lock.
11891da177e4SLinus Torvalds  */
11901fcfd8dbSOleg Nesterov unsigned long do_mmap(struct file *file, unsigned long addr,
11911da177e4SLinus Torvalds 			unsigned long len, unsigned long prot,
1192*592b5fadSYu-cheng Yu 			unsigned long flags, vm_flags_t vm_flags,
1193*592b5fadSYu-cheng Yu 			unsigned long pgoff, unsigned long *populate,
1194*592b5fadSYu-cheng Yu 			struct list_head *uf)
11951da177e4SLinus Torvalds {
11961da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
119762b5f7d0SDave Hansen 	int pkey = 0;
11981da177e4SLinus Torvalds 
1199524e00b3SLiam R. Howlett 	validate_mm(mm);
120041badc15SMichel Lespinasse 	*populate = 0;
1201bebeb3d6SMichel Lespinasse 
1202e37609bbSPiotr Kwapulinski 	if (!len)
1203e37609bbSPiotr Kwapulinski 		return -EINVAL;
1204e37609bbSPiotr Kwapulinski 
12051da177e4SLinus Torvalds 	/*
12061da177e4SLinus Torvalds 	 * Does the application expect PROT_READ to imply PROT_EXEC?
12071da177e4SLinus Torvalds 	 *
12081da177e4SLinus Torvalds 	 * (the exception is when the underlying filesystem is noexec
12091da177e4SLinus Torvalds 	 *  mounted, in which case we don't add PROT_EXEC.)
12101da177e4SLinus Torvalds 	 */
12111da177e4SLinus Torvalds 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
121290f8572bSEric W. Biederman 		if (!(file && path_noexec(&file->f_path)))
12131da177e4SLinus Torvalds 			prot |= PROT_EXEC;
12141da177e4SLinus Torvalds 
1215a4ff8e86SMichal Hocko 	/* force arch specific MAP_FIXED handling in get_unmapped_area */
1216a4ff8e86SMichal Hocko 	if (flags & MAP_FIXED_NOREPLACE)
1217a4ff8e86SMichal Hocko 		flags |= MAP_FIXED;
1218a4ff8e86SMichal Hocko 
12197cd94146SEric Paris 	if (!(flags & MAP_FIXED))
12207cd94146SEric Paris 		addr = round_hint_to_min(addr);
12217cd94146SEric Paris 
12221da177e4SLinus Torvalds 	/* Careful about overflows.. */
12231da177e4SLinus Torvalds 	len = PAGE_ALIGN(len);
12249206de95SAl Viro 	if (!len)
12251da177e4SLinus Torvalds 		return -ENOMEM;
12261da177e4SLinus Torvalds 
12271da177e4SLinus Torvalds 	/* offset overflow? */
12281da177e4SLinus Torvalds 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
12291da177e4SLinus Torvalds 		return -EOVERFLOW;
12301da177e4SLinus Torvalds 
12311da177e4SLinus Torvalds 	/* Too many mappings? */
12321da177e4SLinus Torvalds 	if (mm->map_count > sysctl_max_map_count)
12331da177e4SLinus Torvalds 		return -ENOMEM;
12341da177e4SLinus Torvalds 
12351da177e4SLinus Torvalds 	/* Obtain the address to map to. We verify (or select) it and ensure
12361da177e4SLinus Torvalds 	 * that it represents a valid section of the address space.
12371da177e4SLinus Torvalds 	 */
12381da177e4SLinus Torvalds 	addr = get_unmapped_area(file, addr, len, pgoff, flags);
1239ff68dac6SGaowei Pu 	if (IS_ERR_VALUE(addr))
12401da177e4SLinus Torvalds 		return addr;
12411da177e4SLinus Torvalds 
1242a4ff8e86SMichal Hocko 	if (flags & MAP_FIXED_NOREPLACE) {
124335e43c5fSLiam Howlett 		if (find_vma_intersection(mm, addr, addr + len))
1244a4ff8e86SMichal Hocko 			return -EEXIST;
1245a4ff8e86SMichal Hocko 	}
1246a4ff8e86SMichal Hocko 
124762b5f7d0SDave Hansen 	if (prot == PROT_EXEC) {
124862b5f7d0SDave Hansen 		pkey = execute_only_pkey(mm);
124962b5f7d0SDave Hansen 		if (pkey < 0)
125062b5f7d0SDave Hansen 			pkey = 0;
125162b5f7d0SDave Hansen 	}
125262b5f7d0SDave Hansen 
12531da177e4SLinus Torvalds 	/* Do simple checking here so the lower-level routines won't have
12541da177e4SLinus Torvalds 	 * to. We assume access permissions have been handled by the open
12551da177e4SLinus Torvalds 	 * of the memory object, so we don't do any here.
12561da177e4SLinus Torvalds 	 */
1257*592b5fadSYu-cheng Yu 	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
12581da177e4SLinus Torvalds 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
12591da177e4SLinus Torvalds 
1260cdf7b341SHuang Shijie 	if (flags & MAP_LOCKED)
12611da177e4SLinus Torvalds 		if (!can_do_mlock())
12621da177e4SLinus Torvalds 			return -EPERM;
1263ba470de4SRik van Riel 
1264b0cc5e89SAndrew Morton 	if (!mlock_future_ok(mm, vm_flags, len))
12651da177e4SLinus Torvalds 		return -EAGAIN;
12661da177e4SLinus Torvalds 
12671da177e4SLinus Torvalds 	if (file) {
1268077bf22bSOleg Nesterov 		struct inode *inode = file_inode(file);
12691c972597SDan Williams 		unsigned long flags_mask;
12701c972597SDan Williams 
1271be83bbf8SLinus Torvalds 		if (!file_mmap_ok(file, inode, pgoff, len))
1272be83bbf8SLinus Torvalds 			return -EOVERFLOW;
1273be83bbf8SLinus Torvalds 
12741c972597SDan Williams 		flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
1275077bf22bSOleg Nesterov 
12761da177e4SLinus Torvalds 		switch (flags & MAP_TYPE) {
12771da177e4SLinus Torvalds 		case MAP_SHARED:
12781c972597SDan Williams 			/*
12791c972597SDan Williams 			 * Force use of MAP_SHARED_VALIDATE with non-legacy
12801c972597SDan Williams 			 * flags. E.g. MAP_SYNC is dangerous to use with
12811c972597SDan Williams 			 * MAP_SHARED as you don't know which consistency model
12821c972597SDan Williams 			 * you will get. We silently ignore unsupported flags
12831c972597SDan Williams 			 * with MAP_SHARED to preserve backward compatibility.
12841c972597SDan Williams 			 */
12851c972597SDan Williams 			flags &= LEGACY_MAP_MASK;
1286e4a9bc58SJoe Perches 			fallthrough;
12871c972597SDan Williams 		case MAP_SHARED_VALIDATE:
12881c972597SDan Williams 			if (flags & ~flags_mask)
12891c972597SDan Williams 				return -EOPNOTSUPP;
1290dc617f29SDarrick J. Wong 			if (prot & PROT_WRITE) {
1291dc617f29SDarrick J. Wong 				if (!(file->f_mode & FMODE_WRITE))
12921da177e4SLinus Torvalds 					return -EACCES;
1293dc617f29SDarrick J. Wong 				if (IS_SWAPFILE(file->f_mapping->host))
1294dc617f29SDarrick J. Wong 					return -ETXTBSY;
1295dc617f29SDarrick J. Wong 			}
12961da177e4SLinus Torvalds 
12971da177e4SLinus Torvalds 			/*
12981da177e4SLinus Torvalds 			 * Make sure we don't allow writing to an append-only
12991da177e4SLinus Torvalds 			 * file..
13001da177e4SLinus Torvalds 			 */
13011da177e4SLinus Torvalds 			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
13021da177e4SLinus Torvalds 				return -EACCES;
13031da177e4SLinus Torvalds 
13041da177e4SLinus Torvalds 			vm_flags |= VM_SHARED | VM_MAYSHARE;
13051da177e4SLinus Torvalds 			if (!(file->f_mode & FMODE_WRITE))
13061da177e4SLinus Torvalds 				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1307e4a9bc58SJoe Perches 			fallthrough;
13081da177e4SLinus Torvalds 		case MAP_PRIVATE:
13091da177e4SLinus Torvalds 			if (!(file->f_mode & FMODE_READ))
13101da177e4SLinus Torvalds 				return -EACCES;
131190f8572bSEric W. Biederman 			if (path_noexec(&file->f_path)) {
131280c5606cSLinus Torvalds 				if (vm_flags & VM_EXEC)
131380c5606cSLinus Torvalds 					return -EPERM;
131480c5606cSLinus Torvalds 				vm_flags &= ~VM_MAYEXEC;
131580c5606cSLinus Torvalds 			}
131680c5606cSLinus Torvalds 
131772c2d531SAl Viro 			if (!file->f_op->mmap)
131880c5606cSLinus Torvalds 				return -ENODEV;
1319b2c56e4fSOleg Nesterov 			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1320b2c56e4fSOleg Nesterov 				return -EINVAL;
13211da177e4SLinus Torvalds 			break;
13221da177e4SLinus Torvalds 
13231da177e4SLinus Torvalds 		default:
13241da177e4SLinus Torvalds 			return -EINVAL;
13251da177e4SLinus Torvalds 		}
13261da177e4SLinus Torvalds 	} else {
13271da177e4SLinus Torvalds 		switch (flags & MAP_TYPE) {
13281da177e4SLinus Torvalds 		case MAP_SHARED:
1329b2c56e4fSOleg Nesterov 			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1330b2c56e4fSOleg Nesterov 				return -EINVAL;
1331ce363942STejun Heo 			/*
1332ce363942STejun Heo 			 * Ignore pgoff.
1333ce363942STejun Heo 			 */
1334ce363942STejun Heo 			pgoff = 0;
13351da177e4SLinus Torvalds 			vm_flags |= VM_SHARED | VM_MAYSHARE;
13361da177e4SLinus Torvalds 			break;
13371da177e4SLinus Torvalds 		case MAP_PRIVATE:
13381da177e4SLinus Torvalds 			/*
13391da177e4SLinus Torvalds 			 * Set pgoff according to addr for anon_vma.
13401da177e4SLinus Torvalds 			 */
13411da177e4SLinus Torvalds 			pgoff = addr >> PAGE_SHIFT;
13421da177e4SLinus Torvalds 			break;
13431da177e4SLinus Torvalds 		default:
13441da177e4SLinus Torvalds 			return -EINVAL;
13451da177e4SLinus Torvalds 		}
13461da177e4SLinus Torvalds 	}
13471da177e4SLinus Torvalds 
1348c22c0d63SMichel Lespinasse 	/*
1349c22c0d63SMichel Lespinasse 	 * Set 'VM_NORESERVE' if we should not account for the
1350c22c0d63SMichel Lespinasse 	 * memory use of this mapping.
1351c22c0d63SMichel Lespinasse 	 */
1352c22c0d63SMichel Lespinasse 	if (flags & MAP_NORESERVE) {
1353c22c0d63SMichel Lespinasse 		/* We honor MAP_NORESERVE if allowed to overcommit */
1354c22c0d63SMichel Lespinasse 		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1355c22c0d63SMichel Lespinasse 			vm_flags |= VM_NORESERVE;
1356c22c0d63SMichel Lespinasse 
1357c22c0d63SMichel Lespinasse 		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
1358c22c0d63SMichel Lespinasse 		if (file && is_file_hugepages(file))
1359c22c0d63SMichel Lespinasse 			vm_flags |= VM_NORESERVE;
1360c22c0d63SMichel Lespinasse 	}
1361c22c0d63SMichel Lespinasse 
1362897ab3e0SMike Rapoport 	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
136309a9f1d2SMichel Lespinasse 	if (!IS_ERR_VALUE(addr) &&
136409a9f1d2SMichel Lespinasse 	    ((vm_flags & VM_LOCKED) ||
136509a9f1d2SMichel Lespinasse 	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
136641badc15SMichel Lespinasse 		*populate = len;
1367bebeb3d6SMichel Lespinasse 	return addr;
13680165ab44SMiklos Szeredi }
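/*
 * A minimal sketch of the usual call path into do_mmap() (cf.
 * vm_mmap_pgoff() in mm/util.c); error handling trimmed:
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff,
 *		      &populate, &uf);
 *	mmap_write_unlock(mm);
 *	if (populate)
 *		mm_populate(ret, populate);
 */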
13696be5ceb0SLinus Torvalds 
1370a90f590aSDominik Brodowski unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1371a90f590aSDominik Brodowski 			      unsigned long prot, unsigned long flags,
1372a90f590aSDominik Brodowski 			      unsigned long fd, unsigned long pgoff)
137366f0dc48SHugh Dickins {
137466f0dc48SHugh Dickins 	struct file *file = NULL;
13751e3ee14bSChen Gang 	unsigned long retval;
137666f0dc48SHugh Dickins 
137766f0dc48SHugh Dickins 	if (!(flags & MAP_ANONYMOUS)) {
1378120a795dSAl Viro 		audit_mmap_fd(fd, flags);
137966f0dc48SHugh Dickins 		file = fget(fd);
138066f0dc48SHugh Dickins 		if (!file)
13811e3ee14bSChen Gang 			return -EBADF;
13827bba8f0eSZhen Lei 		if (is_file_hugepages(file)) {
1383af73e4d9SNaoya Horiguchi 			len = ALIGN(len, huge_page_size(hstate_file(file)));
13847bba8f0eSZhen Lei 		} else if (unlikely(flags & MAP_HUGETLB)) {
1385493af578SJörn Engel 			retval = -EINVAL;
1386493af578SJörn Engel 			goto out_fput;
13877bba8f0eSZhen Lei 		}
138866f0dc48SHugh Dickins 	} else if (flags & MAP_HUGETLB) {
1389c103a4dcSAndrew Morton 		struct hstate *hs;
1390af73e4d9SNaoya Horiguchi 
139120ac2893SAnshuman Khandual 		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1392091d0d55SLi Zefan 		if (!hs)
1393091d0d55SLi Zefan 			return -EINVAL;
1394091d0d55SLi Zefan 
1395091d0d55SLi Zefan 		len = ALIGN(len, huge_page_size(hs));
139666f0dc48SHugh Dickins 		/*
139766f0dc48SHugh Dickins 		 * VM_NORESERVE is used because the reservations will be
139866f0dc48SHugh Dickins 		 * taken when vm_ops->mmap() is called
139966f0dc48SHugh Dickins 		 */
1400af73e4d9SNaoya Horiguchi 		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
140142d7395fSAndi Kleen 				VM_NORESERVE,
140283c1fd76Szhangyiru 				HUGETLB_ANONHUGE_INODE,
140342d7395fSAndi Kleen 				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
140466f0dc48SHugh Dickins 		if (IS_ERR(file))
140566f0dc48SHugh Dickins 			return PTR_ERR(file);
140666f0dc48SHugh Dickins 	}
140766f0dc48SHugh Dickins 
14089fbeb5abSMichal Hocko 	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1409493af578SJörn Engel out_fput:
141066f0dc48SHugh Dickins 	if (file)
141166f0dc48SHugh Dickins 		fput(file);
141266f0dc48SHugh Dickins 	return retval;
141366f0dc48SHugh Dickins }
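/*
 * Userspace reaches this through mmap(2).  An illustrative call that
 * takes the anonymous-hugetlb branch above (hypothetical sizes):
 *
 *	p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * len is then rounded up to the huge page size and the mapping is
 * backed by the anonymous hugetlbfs file set up via
 * hugetlb_file_setup() above.
 */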
141466f0dc48SHugh Dickins 
1415a90f590aSDominik Brodowski SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1416a90f590aSDominik Brodowski 		unsigned long, prot, unsigned long, flags,
1417a90f590aSDominik Brodowski 		unsigned long, fd, unsigned long, pgoff)
1418a90f590aSDominik Brodowski {
1419a90f590aSDominik Brodowski 	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1420a90f590aSDominik Brodowski }
1421a90f590aSDominik Brodowski 
1422a4679373SChristoph Hellwig #ifdef __ARCH_WANT_SYS_OLD_MMAP
1423a4679373SChristoph Hellwig struct mmap_arg_struct {
1424a4679373SChristoph Hellwig 	unsigned long addr;
1425a4679373SChristoph Hellwig 	unsigned long len;
1426a4679373SChristoph Hellwig 	unsigned long prot;
1427a4679373SChristoph Hellwig 	unsigned long flags;
1428a4679373SChristoph Hellwig 	unsigned long fd;
1429a4679373SChristoph Hellwig 	unsigned long offset;
1430a4679373SChristoph Hellwig };
1431a4679373SChristoph Hellwig 
1432a4679373SChristoph Hellwig SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1433a4679373SChristoph Hellwig {
1434a4679373SChristoph Hellwig 	struct mmap_arg_struct a;
1435a4679373SChristoph Hellwig 
1436a4679373SChristoph Hellwig 	if (copy_from_user(&a, arg, sizeof(a)))
1437a4679373SChristoph Hellwig 		return -EFAULT;
1438de1741a1SAlexander Kuleshov 	if (offset_in_page(a.offset))
1439a4679373SChristoph Hellwig 		return -EINVAL;
1440a4679373SChristoph Hellwig 
1441a90f590aSDominik Brodowski 	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1442a4679373SChristoph Hellwig 			       a.offset >> PAGE_SHIFT);
1443a4679373SChristoph Hellwig }
1444a4679373SChristoph Hellwig #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1445a4679373SChristoph Hellwig 
144654cbbbf3SLorenzo Stoakes static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
144754cbbbf3SLorenzo Stoakes {
144854cbbbf3SLorenzo Stoakes 	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
144954cbbbf3SLorenzo Stoakes }
145054cbbbf3SLorenzo Stoakes 
145154cbbbf3SLorenzo Stoakes static bool vma_is_shared_writable(struct vm_area_struct *vma)
145254cbbbf3SLorenzo Stoakes {
145354cbbbf3SLorenzo Stoakes 	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
145454cbbbf3SLorenzo Stoakes 		(VM_WRITE | VM_SHARED);
145554cbbbf3SLorenzo Stoakes }
145654cbbbf3SLorenzo Stoakes 
145754cbbbf3SLorenzo Stoakes static bool vma_fs_can_writeback(struct vm_area_struct *vma)
145854cbbbf3SLorenzo Stoakes {
145954cbbbf3SLorenzo Stoakes 	/* No managed pages to write back. */
146054cbbbf3SLorenzo Stoakes 	if (vma->vm_flags & VM_PFNMAP)
146154cbbbf3SLorenzo Stoakes 		return false;
146254cbbbf3SLorenzo Stoakes 
146354cbbbf3SLorenzo Stoakes 	return vma->vm_file && vma->vm_file->f_mapping &&
146454cbbbf3SLorenzo Stoakes 		mapping_can_writeback(vma->vm_file->f_mapping);
146554cbbbf3SLorenzo Stoakes }
146654cbbbf3SLorenzo Stoakes 
146754cbbbf3SLorenzo Stoakes /*
146854cbbbf3SLorenzo Stoakes  * Does this VMA require the underlying folios to have their dirty state
146954cbbbf3SLorenzo Stoakes  * tracked?
147054cbbbf3SLorenzo Stoakes  */
147154cbbbf3SLorenzo Stoakes bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
147254cbbbf3SLorenzo Stoakes {
147354cbbbf3SLorenzo Stoakes 	/* Only shared, writable VMAs require dirty tracking. */
147454cbbbf3SLorenzo Stoakes 	if (!vma_is_shared_writable(vma))
147554cbbbf3SLorenzo Stoakes 		return false;
147654cbbbf3SLorenzo Stoakes 
147754cbbbf3SLorenzo Stoakes 	/* Does the filesystem need to be notified? */
147854cbbbf3SLorenzo Stoakes 	if (vm_ops_needs_writenotify(vma->vm_ops))
147954cbbbf3SLorenzo Stoakes 		return true;
148054cbbbf3SLorenzo Stoakes 
148154cbbbf3SLorenzo Stoakes 	/*
148254cbbbf3SLorenzo Stoakes 	 * Even if the filesystem doesn't indicate a need for writenotify, if it
148354cbbbf3SLorenzo Stoakes 	 * can writeback, dirty tracking is still required.
148454cbbbf3SLorenzo Stoakes 	 */
148554cbbbf3SLorenzo Stoakes 	return vma_fs_can_writeback(vma);
148654cbbbf3SLorenzo Stoakes }
148754cbbbf3SLorenzo Stoakes 
14884e950f6fSAlexey Dobriyan /*
14898bb4e7a2SWei Yang  * Some shared mappings will want the pages marked read-only
14904e950f6fSAlexey Dobriyan  * to track write events. If so, we'll downgrade vm_page_prot
14914e950f6fSAlexey Dobriyan  * to the private version (using protection_map[] without the
14924e950f6fSAlexey Dobriyan  * VM_SHARED bit).
14934e950f6fSAlexey Dobriyan  */
14946d2329f8SAndrea Arcangeli int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
14954e950f6fSAlexey Dobriyan {
14964e950f6fSAlexey Dobriyan 	/* If it was private or non-writable, the write bit is already clear */
149754cbbbf3SLorenzo Stoakes 	if (!vma_is_shared_writable(vma))
14984e950f6fSAlexey Dobriyan 		return 0;
14994e950f6fSAlexey Dobriyan 
15004e950f6fSAlexey Dobriyan 	/* The backer wishes to know when pages are first written to? */
150154cbbbf3SLorenzo Stoakes 	if (vm_ops_needs_writenotify(vma->vm_ops))
15024e950f6fSAlexey Dobriyan 		return 1;
15034e950f6fSAlexey Dobriyan 
150464e45507SPeter Feiner 	/* The open routine did something to the protections that pgprot_modify
150564e45507SPeter Feiner 	 * won't preserve? */
15066d2329f8SAndrea Arcangeli 	if (pgprot_val(vm_page_prot) !=
150754cbbbf3SLorenzo Stoakes 	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
15084e950f6fSAlexey Dobriyan 		return 0;
15094e950f6fSAlexey Dobriyan 
1510f96f7a40SDavid Hildenbrand 	/*
1511f96f7a40SDavid Hildenbrand 	 * Do we need to track softdirty? hugetlb does not support softdirty
1512f96f7a40SDavid Hildenbrand 	 * tracking yet.
1513f96f7a40SDavid Hildenbrand 	 */
1514f96f7a40SDavid Hildenbrand 	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
151564e45507SPeter Feiner 		return 1;
151664e45507SPeter Feiner 
151751d3d5ebSDavid Hildenbrand 	/* Do we need write faults for uffd-wp tracking? */
151851d3d5ebSDavid Hildenbrand 	if (userfaultfd_wp(vma))
151951d3d5ebSDavid Hildenbrand 		return 1;
152051d3d5ebSDavid Hildenbrand 
15214e950f6fSAlexey Dobriyan 	/* Can the mapping track the dirty pages? */
152254cbbbf3SLorenzo Stoakes 	return vma_fs_can_writeback(vma);
15234e950f6fSAlexey Dobriyan }
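/*
 * Sketch of how the result is consumed (see vma_set_page_prot()): when
 * this returns 1, vma->vm_page_prot is computed without the VM_SHARED
 * bit, so shared pages are initially mapped read-only and the first
 * write traps into the fault handler, delivering the notification.
 */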
15244e950f6fSAlexey Dobriyan 
1525fc8744adSLinus Torvalds /*
1526fc8744adSLinus Torvalds  * We account for memory if it's a private writeable mapping,
15275a6fe125SMel Gorman  * not hugepages and VM_NORESERVE wasn't set.
1528fc8744adSLinus Torvalds  */
1529ca16d140SKOSAKI Motohiro static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1530fc8744adSLinus Torvalds {
15315a6fe125SMel Gorman 	/*
15325a6fe125SMel Gorman 	 * hugetlb has its own accounting separate from the core VM
15335a6fe125SMel Gorman 	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
15345a6fe125SMel Gorman 	 */
15355a6fe125SMel Gorman 	if (file && is_file_hugepages(file))
15365a6fe125SMel Gorman 		return 0;
15375a6fe125SMel Gorman 
1538fc8744adSLinus Torvalds 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1539fc8744adSLinus Torvalds }
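/*
 * Illustrative cases: a MAP_PRIVATE | PROT_WRITE anonymous mapping has
 * VM_WRITE set with VM_SHARED and VM_NORESERVE clear, so it is charged
 * to the overcommit accounting; shared, read-only and MAP_NORESERVE
 * mappings (and hugetlb files, filtered above) are not.
 */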
1540fc8744adSLinus Torvalds 
15413499a131SLiam R. Howlett /**
15423499a131SLiam R. Howlett  * unmapped_area() - Find an area between the low_limit and the high_limit with
15433499a131SLiam R. Howlett  * the correct alignment and offset, all from @info. Note: current->mm is used
15443499a131SLiam R. Howlett  * for the search.
15453499a131SLiam R. Howlett  *
154782b24936SVernon Yang  * @info: The unmapped area information including the range [low_limit,
154782b24936SVernon Yang  * high_limit), the alignment offset and mask.
15483499a131SLiam R. Howlett  *
15493499a131SLiam R. Howlett  * Return: A memory address or -ENOMEM.
15503499a131SLiam R. Howlett  */
1551baceaf1cSJaewon Kim static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1552db4fbfb9SMichel Lespinasse {
15536b008640SLinus Torvalds 	unsigned long length, gap;
15546b008640SLinus Torvalds 	unsigned long low_limit, high_limit;
155558c5d0d6SLiam R. Howlett 	struct vm_area_struct *tmp;
1556db4fbfb9SMichel Lespinasse 
15573499a131SLiam R. Howlett 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1558db4fbfb9SMichel Lespinasse 
1559db4fbfb9SMichel Lespinasse 	/* Adjust search length to account for worst case alignment overhead */
1560db4fbfb9SMichel Lespinasse 	length = info->length + info->align_mask;
1561db4fbfb9SMichel Lespinasse 	if (length < info->length)
1562db4fbfb9SMichel Lespinasse 		return -ENOMEM;
1563db4fbfb9SMichel Lespinasse 
156458c5d0d6SLiam R. Howlett 	low_limit = info->low_limit;
15656b008640SLinus Torvalds 	if (low_limit < mmap_min_addr)
15666b008640SLinus Torvalds 		low_limit = mmap_min_addr;
15676b008640SLinus Torvalds 	high_limit = info->high_limit;
156858c5d0d6SLiam R. Howlett retry:
15696b008640SLinus Torvalds 	if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
15703499a131SLiam R. Howlett 		return -ENOMEM;
15713499a131SLiam R. Howlett 
1572d4af56c5SLiam R. Howlett 	gap = mas.index;
1573d4af56c5SLiam R. Howlett 	gap += (info->align_offset - gap) & info->align_mask;
157458c5d0d6SLiam R. Howlett 	tmp = mas_next(&mas, ULONG_MAX);
157558c5d0d6SLiam R. Howlett 	if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
157658c5d0d6SLiam R. Howlett 		if (vm_start_gap(tmp) < gap + length - 1) {
157758c5d0d6SLiam R. Howlett 			low_limit = tmp->vm_end;
157858c5d0d6SLiam R. Howlett 			mas_reset(&mas);
157958c5d0d6SLiam R. Howlett 			goto retry;
158058c5d0d6SLiam R. Howlett 		}
158158c5d0d6SLiam R. Howlett 	} else {
158258c5d0d6SLiam R. Howlett 		tmp = mas_prev(&mas, 0);
158358c5d0d6SLiam R. Howlett 		if (tmp && vm_end_gap(tmp) > gap) {
158458c5d0d6SLiam R. Howlett 			low_limit = vm_end_gap(tmp);
158558c5d0d6SLiam R. Howlett 			mas_reset(&mas);
158658c5d0d6SLiam R. Howlett 			goto retry;
158758c5d0d6SLiam R. Howlett 		}
158858c5d0d6SLiam R. Howlett 	}
158958c5d0d6SLiam R. Howlett 
15903499a131SLiam R. Howlett 	return gap;
1591db4fbfb9SMichel Lespinasse }
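/*
 * Alignment sketch (hypothetical values): with align_offset == 0 and
 * align_mask == 0x1ffff (128KiB - 1), a gap found at mas.index ==
 * 0x12345000 is rounded up by gap += (0 - gap) & 0x1ffff to
 * 0x12360000, the next 128KiB boundary, before the neighbouring VMAs
 * are re-checked against the adjusted range.
 */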
1592db4fbfb9SMichel Lespinasse 
15933499a131SLiam R. Howlett /**
15943499a131SLiam R. Howlett  * unmapped_area_topdown() - Find an area between the low_limit and the
159582b24936SVernon Yang  * high_limit with the correct alignment and offset at the highest available
15963499a131SLiam R. Howlett  * address, all from @info. Note: current->mm is used for the search.
15973499a131SLiam R. Howlett  *
159882b24936SVernon Yang  * @info: The unmapped area information including the range [low_limit,
159982b24936SVernon Yang  * high_limit), the alignment offset and mask.
16003499a131SLiam R. Howlett  *
16013499a131SLiam R. Howlett  * Return: A memory address or -ENOMEM.
16023499a131SLiam R. Howlett  */
1603baceaf1cSJaewon Kim static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1604db4fbfb9SMichel Lespinasse {
16056b008640SLinus Torvalds 	unsigned long length, gap, gap_end;
16066b008640SLinus Torvalds 	unsigned long low_limit, high_limit;
160758c5d0d6SLiam R. Howlett 	struct vm_area_struct *tmp;
1608d4af56c5SLiam R. Howlett 
16093499a131SLiam R. Howlett 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1610db4fbfb9SMichel Lespinasse 	/* Adjust search length to account for worst case alignment overhead */
1611db4fbfb9SMichel Lespinasse 	length = info->length + info->align_mask;
1612db4fbfb9SMichel Lespinasse 	if (length < info->length)
1613db4fbfb9SMichel Lespinasse 		return -ENOMEM;
1614db4fbfb9SMichel Lespinasse 
16156b008640SLinus Torvalds 	low_limit = info->low_limit;
16166b008640SLinus Torvalds 	if (low_limit < mmap_min_addr)
16176b008640SLinus Torvalds 		low_limit = mmap_min_addr;
161858c5d0d6SLiam R. Howlett 	high_limit = info->high_limit;
161958c5d0d6SLiam R. Howlett retry:
16206b008640SLinus Torvalds 	if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
16213499a131SLiam R. Howlett 		return -ENOMEM;
16223499a131SLiam R. Howlett 
1623d4af56c5SLiam R. Howlett 	gap = mas.last + 1 - info->length;
1624d4af56c5SLiam R. Howlett 	gap -= (gap - info->align_offset) & info->align_mask;
162558c5d0d6SLiam R. Howlett 	gap_end = mas.last;
162658c5d0d6SLiam R. Howlett 	tmp = mas_next(&mas, ULONG_MAX);
162758c5d0d6SLiam R. Howlett 	if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
162858c5d0d6SLiam R. Howlett 		if (vm_start_gap(tmp) <= gap_end) {
162958c5d0d6SLiam R. Howlett 			high_limit = vm_start_gap(tmp);
163058c5d0d6SLiam R. Howlett 			mas_reset(&mas);
163158c5d0d6SLiam R. Howlett 			goto retry;
163258c5d0d6SLiam R. Howlett 		}
163358c5d0d6SLiam R. Howlett 	} else {
163458c5d0d6SLiam R. Howlett 		tmp = mas_prev(&mas, 0);
163558c5d0d6SLiam R. Howlett 		if (tmp && vm_end_gap(tmp) > gap) {
163658c5d0d6SLiam R. Howlett 			high_limit = tmp->vm_start;
163758c5d0d6SLiam R. Howlett 			mas_reset(&mas);
163858c5d0d6SLiam R. Howlett 			goto retry;
163958c5d0d6SLiam R. Howlett 		}
164058c5d0d6SLiam R. Howlett 	}
164158c5d0d6SLiam R. Howlett 
16423499a131SLiam R. Howlett 	return gap;
1643db4fbfb9SMichel Lespinasse }
1644db4fbfb9SMichel Lespinasse 
1645baceaf1cSJaewon Kim /*
1646baceaf1cSJaewon Kim  * Search for an unmapped address range.
1647baceaf1cSJaewon Kim  *
1648baceaf1cSJaewon Kim  * We are looking for a range that:
1649baceaf1cSJaewon Kim  * - does not intersect with any VMA;
1650baceaf1cSJaewon Kim  * - is contained within the [low_limit, high_limit) interval;
1651baceaf1cSJaewon Kim  * - is at least the desired size;
1652baceaf1cSJaewon Kim  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
1653baceaf1cSJaewon Kim  */
1654baceaf1cSJaewon Kim unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
1655baceaf1cSJaewon Kim {
1656df529cabSJaewon Kim 	unsigned long addr;
1657df529cabSJaewon Kim 
1658baceaf1cSJaewon Kim 	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
1659df529cabSJaewon Kim 		addr = unmapped_area_topdown(info);
1660baceaf1cSJaewon Kim 	else
1661df529cabSJaewon Kim 		addr = unmapped_area(info);
1662df529cabSJaewon Kim 
1663df529cabSJaewon Kim 	trace_vm_unmapped_area(addr, info);
1664df529cabSJaewon Kim 	return addr;
1665baceaf1cSJaewon Kim }
1666f6795053SSteve Capper 
16671da177e4SLinus Torvalds /* Get an address range which is currently unmapped.
16681da177e4SLinus Torvalds  * For shmat() with addr=0.
16691da177e4SLinus Torvalds  *
16701da177e4SLinus Torvalds  * Ugly calling convention alert:
16711da177e4SLinus Torvalds  * Return value with the low bits set means error value,
16721da177e4SLinus Torvalds  * i.e.
16731da177e4SLinus Torvalds  *	if (ret & ~PAGE_MASK)
16741da177e4SLinus Torvalds  *		error = ret;
16751da177e4SLinus Torvalds  *
16761da177e4SLinus Torvalds  * This function "knows" that -ENOMEM has the bits set.
16771da177e4SLinus Torvalds  */
16781da177e4SLinus Torvalds unsigned long
16794b439e25SChristophe Leroy generic_get_unmapped_area(struct file *filp, unsigned long addr,
16804b439e25SChristophe Leroy 			  unsigned long len, unsigned long pgoff,
16814b439e25SChristophe Leroy 			  unsigned long flags)
16821da177e4SLinus Torvalds {
16831da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
16841be7107fSHugh Dickins 	struct vm_area_struct *vma, *prev;
1685db4fbfb9SMichel Lespinasse 	struct vm_unmapped_area_info info;
16862cb4de08SChristophe Leroy 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
16871da177e4SLinus Torvalds 
1688f6795053SSteve Capper 	if (len > mmap_end - mmap_min_addr)
16891da177e4SLinus Torvalds 		return -ENOMEM;
16901da177e4SLinus Torvalds 
169106abdfb4SBenjamin Herrenschmidt 	if (flags & MAP_FIXED)
169206abdfb4SBenjamin Herrenschmidt 		return addr;
169306abdfb4SBenjamin Herrenschmidt 
16941da177e4SLinus Torvalds 	if (addr) {
16951da177e4SLinus Torvalds 		addr = PAGE_ALIGN(addr);
16961be7107fSHugh Dickins 		vma = find_vma_prev(mm, addr, &prev);
1697f6795053SSteve Capper 		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
16981be7107fSHugh Dickins 		    (!vma || addr + len <= vm_start_gap(vma)) &&
16991be7107fSHugh Dickins 		    (!prev || addr >= vm_end_gap(prev)))
17001da177e4SLinus Torvalds 			return addr;
17011da177e4SLinus Torvalds 	}
17021da177e4SLinus Torvalds 
1703db4fbfb9SMichel Lespinasse 	info.flags = 0;
1704db4fbfb9SMichel Lespinasse 	info.length = len;
17054e99b021SHeiko Carstens 	info.low_limit = mm->mmap_base;
1706f6795053SSteve Capper 	info.high_limit = mmap_end;
1707db4fbfb9SMichel Lespinasse 	info.align_mask = 0;
170809ef5283SJaewon Kim 	info.align_offset = 0;
1709db4fbfb9SMichel Lespinasse 	return vm_unmapped_area(&info);
17101da177e4SLinus Torvalds }
17114b439e25SChristophe Leroy 
17124b439e25SChristophe Leroy #ifndef HAVE_ARCH_UNMAPPED_AREA
17134b439e25SChristophe Leroy unsigned long
17144b439e25SChristophe Leroy arch_get_unmapped_area(struct file *filp, unsigned long addr,
17154b439e25SChristophe Leroy 		       unsigned long len, unsigned long pgoff,
17164b439e25SChristophe Leroy 		       unsigned long flags)
17174b439e25SChristophe Leroy {
17184b439e25SChristophe Leroy 	return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
17194b439e25SChristophe Leroy }
17201da177e4SLinus Torvalds #endif
17211da177e4SLinus Torvalds 
17221da177e4SLinus Torvalds /*
17231da177e4SLinus Torvalds  * This mmap-allocator allocates new areas top-down from below the
17241da177e4SLinus Torvalds  * stack's low limit (the base):
17251da177e4SLinus Torvalds  */
17261da177e4SLinus Torvalds unsigned long
17274b439e25SChristophe Leroy generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
172843cca0b1SYang Fan 				  unsigned long len, unsigned long pgoff,
172943cca0b1SYang Fan 				  unsigned long flags)
17301da177e4SLinus Torvalds {
17311be7107fSHugh Dickins 	struct vm_area_struct *vma, *prev;
17321da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
1733db4fbfb9SMichel Lespinasse 	struct vm_unmapped_area_info info;
17342cb4de08SChristophe Leroy 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
17351da177e4SLinus Torvalds 
17361da177e4SLinus Torvalds 	/* requested length too big for entire address space */
1737f6795053SSteve Capper 	if (len > mmap_end - mmap_min_addr)
17381da177e4SLinus Torvalds 		return -ENOMEM;
17391da177e4SLinus Torvalds 
174006abdfb4SBenjamin Herrenschmidt 	if (flags & MAP_FIXED)
174106abdfb4SBenjamin Herrenschmidt 		return addr;
174206abdfb4SBenjamin Herrenschmidt 
17431da177e4SLinus Torvalds 	/* requesting a specific address */
17441da177e4SLinus Torvalds 	if (addr) {
17451da177e4SLinus Torvalds 		addr = PAGE_ALIGN(addr);
17461be7107fSHugh Dickins 		vma = find_vma_prev(mm, addr, &prev);
1747f6795053SSteve Capper 		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
17481be7107fSHugh Dickins 				(!vma || addr + len <= vm_start_gap(vma)) &&
17491be7107fSHugh Dickins 				(!prev || addr >= vm_end_gap(prev)))
17501da177e4SLinus Torvalds 			return addr;
17511da177e4SLinus Torvalds 	}
17521da177e4SLinus Torvalds 
1753db4fbfb9SMichel Lespinasse 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1754db4fbfb9SMichel Lespinasse 	info.length = len;
17556b008640SLinus Torvalds 	info.low_limit = PAGE_SIZE;
1756f6795053SSteve Capper 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
1757db4fbfb9SMichel Lespinasse 	info.align_mask = 0;
175809ef5283SJaewon Kim 	info.align_offset = 0;
1759db4fbfb9SMichel Lespinasse 	addr = vm_unmapped_area(&info);
1760b716ad95SXiao Guangrong 
17611da177e4SLinus Torvalds 	/*
17621da177e4SLinus Torvalds 	 * A failed mmap() very likely causes application failure,
17631da177e4SLinus Torvalds 	 * so fall back to the bottom-up function here. This scenario
17641da177e4SLinus Torvalds 	 * can happen with large stack limits and large mmap()
17651da177e4SLinus Torvalds 	 * allocations.
17661da177e4SLinus Torvalds 	 */
1767de1741a1SAlexander Kuleshov 	if (offset_in_page(addr)) {
1768db4fbfb9SMichel Lespinasse 		VM_BUG_ON(addr != -ENOMEM);
1769db4fbfb9SMichel Lespinasse 		info.flags = 0;
1770db4fbfb9SMichel Lespinasse 		info.low_limit = TASK_UNMAPPED_BASE;
1771f6795053SSteve Capper 		info.high_limit = mmap_end;
1772db4fbfb9SMichel Lespinasse 		addr = vm_unmapped_area(&info);
1773db4fbfb9SMichel Lespinasse 	}
17741da177e4SLinus Torvalds 
17751da177e4SLinus Torvalds 	return addr;
17761da177e4SLinus Torvalds }
17774b439e25SChristophe Leroy 
17784b439e25SChristophe Leroy #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
17794b439e25SChristophe Leroy unsigned long
17804b439e25SChristophe Leroy arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
17814b439e25SChristophe Leroy 			       unsigned long len, unsigned long pgoff,
17824b439e25SChristophe Leroy 			       unsigned long flags)
17834b439e25SChristophe Leroy {
17844b439e25SChristophe Leroy 	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
17854b439e25SChristophe Leroy }
17861da177e4SLinus Torvalds #endif
17871da177e4SLinus Torvalds 
17881da177e4SLinus Torvalds unsigned long
17891da177e4SLinus Torvalds get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
17901da177e4SLinus Torvalds 		unsigned long pgoff, unsigned long flags)
17911da177e4SLinus Torvalds {
179206abdfb4SBenjamin Herrenschmidt 	unsigned long (*get_area)(struct file *, unsigned long,
179306abdfb4SBenjamin Herrenschmidt 				  unsigned long, unsigned long, unsigned long);
179407ab67c8SLinus Torvalds 
17959206de95SAl Viro 	unsigned long error = arch_mmap_check(addr, len, flags);
17969206de95SAl Viro 	if (error)
17979206de95SAl Viro 		return error;
17989206de95SAl Viro 
17999206de95SAl Viro 	/* Careful about overflows.. */
18009206de95SAl Viro 	if (len > TASK_SIZE)
18019206de95SAl Viro 		return -ENOMEM;
18029206de95SAl Viro 
180307ab67c8SLinus Torvalds 	get_area = current->mm->get_unmapped_area;
1804c01d5b30SHugh Dickins 	if (file) {
1805c01d5b30SHugh Dickins 		if (file->f_op->get_unmapped_area)
180607ab67c8SLinus Torvalds 			get_area = file->f_op->get_unmapped_area;
1807c01d5b30SHugh Dickins 	} else if (flags & MAP_SHARED) {
1808c01d5b30SHugh Dickins 		/*
1809c01d5b30SHugh Dickins 		 * mmap_region() will call shmem_zero_setup() to create a file,
1810c01d5b30SHugh Dickins 		 * so use shmem's get_unmapped_area in case it can be huge.
181145e55300SPeter Collingbourne 		 * do_mmap() will clear pgoff, so match alignment.
1812c01d5b30SHugh Dickins 		 */
1813c01d5b30SHugh Dickins 		pgoff = 0;
1814c01d5b30SHugh Dickins 		get_area = shmem_get_unmapped_area;
1815c01d5b30SHugh Dickins 	}
1816c01d5b30SHugh Dickins 
181707ab67c8SLinus Torvalds 	addr = get_area(file, addr, len, pgoff, flags);
181807ab67c8SLinus Torvalds 	if (IS_ERR_VALUE(addr))
181907ab67c8SLinus Torvalds 		return addr;
182007ab67c8SLinus Torvalds 
18211da177e4SLinus Torvalds 	if (addr > TASK_SIZE - len)
18221da177e4SLinus Torvalds 		return -ENOMEM;
1823de1741a1SAlexander Kuleshov 	if (offset_in_page(addr))
18241da177e4SLinus Torvalds 		return -EINVAL;
182506abdfb4SBenjamin Herrenschmidt 
18269ac4ed4bSAl Viro 	error = security_mmap_addr(addr);
18279ac4ed4bSAl Viro 	return error ? error : addr;
18281da177e4SLinus Torvalds }
18291da177e4SLinus Torvalds 
18301da177e4SLinus Torvalds EXPORT_SYMBOL(get_unmapped_area);
18311da177e4SLinus Torvalds 
1832be8432e7SLiam R. Howlett /**
1833abdba2ddSLiam R. Howlett  * find_vma_intersection() - Look up the first VMA which intersects the interval
1834abdba2ddSLiam R. Howlett  * @mm: The process address space.
1835abdba2ddSLiam R. Howlett  * @start_addr: The inclusive start user address.
1836abdba2ddSLiam R. Howlett  * @end_addr: The exclusive end user address.
1837abdba2ddSLiam R. Howlett  *
1838abdba2ddSLiam R. Howlett  * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
1839abdba2ddSLiam R. Howlett  * start_addr < end_addr.
1840abdba2ddSLiam R. Howlett  */
1841abdba2ddSLiam R. Howlett struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
1842abdba2ddSLiam R. Howlett 					     unsigned long start_addr,
1843abdba2ddSLiam R. Howlett 					     unsigned long end_addr)
1844abdba2ddSLiam R. Howlett {
1845abdba2ddSLiam R. Howlett 	unsigned long index = start_addr;
1846abdba2ddSLiam R. Howlett 
1847abdba2ddSLiam R. Howlett 	mmap_assert_locked(mm);
18487964cf8cSLiam R. Howlett 	return mt_find(&mm->mm_mt, &index, end_addr - 1);
1849abdba2ddSLiam R. Howlett }
1850abdba2ddSLiam R. Howlett EXPORT_SYMBOL(find_vma_intersection);
1851abdba2ddSLiam R. Howlett 
1852abdba2ddSLiam R. Howlett /**
1853be8432e7SLiam R. Howlett  * find_vma() - Find the VMA for a given address, or the next VMA.
1854be8432e7SLiam R. Howlett  * @mm: The mm_struct to check
1855be8432e7SLiam R. Howlett  * @addr: The address
1856be8432e7SLiam R. Howlett  *
1857be8432e7SLiam R. Howlett  * Returns: The VMA associated with addr, or the next VMA.
1858be8432e7SLiam R. Howlett  * May return %NULL in the case of no VMA at addr or above.
1859be8432e7SLiam R. Howlett  */
18601da177e4SLinus Torvalds struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
18611da177e4SLinus Torvalds {
1862be8432e7SLiam R. Howlett 	unsigned long index = addr;
18631da177e4SLinus Torvalds 
18645b78ed24SLuigi Rizzo 	mmap_assert_locked(mm);
18657964cf8cSLiam R. Howlett 	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
18661da177e4SLinus Torvalds }
18671da177e4SLinus Torvalds EXPORT_SYMBOL(find_vma);
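/*
 * Illustrative lookups (caller holds mmap_lock): given VMAs
 * [0x1000, 0x2000) and [0x5000, 0x6000), find_vma(mm, 0x1500) returns
 * the first VMA, find_vma(mm, 0x3000) returns the second (the next
 * VMA above the address), and find_vma(mm, 0x7000) returns NULL.
 */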
18681da177e4SLinus Torvalds 
18697fdbd37dSLiam R. Howlett /**
18707fdbd37dSLiam R. Howlett  * find_vma_prev() - Find the VMA for a given address, or the next VMA, and
18717fdbd37dSLiam R. Howlett  * set %pprev to the previous VMA, if any.
18727fdbd37dSLiam R. Howlett  * @mm: The mm_struct to check
18737fdbd37dSLiam R. Howlett  * @addr: The address
18747fdbd37dSLiam R. Howlett  * @pprev: The pointer to set to the previous VMA
18757fdbd37dSLiam R. Howlett  *
18767fdbd37dSLiam R. Howlett  * Note that the RCU read lock is not taken here; the externally held
18777fdbd37dSLiam R. Howlett  * mmap_lock provides the needed protection instead.
18787fdbd37dSLiam R. Howlett  *
18797fdbd37dSLiam R. Howlett  * Returns: The VMA associated with @addr, or the next VMA.
18807fdbd37dSLiam R. Howlett  * May return %NULL in the case of no VMA at @addr or above.
18816bd4837dSKOSAKI Motohiro  */
18821da177e4SLinus Torvalds struct vm_area_struct *
18831da177e4SLinus Torvalds find_vma_prev(struct mm_struct *mm, unsigned long addr,
18841da177e4SLinus Torvalds 			struct vm_area_struct **pprev)
18851da177e4SLinus Torvalds {
18866bd4837dSKOSAKI Motohiro 	struct vm_area_struct *vma;
18877fdbd37dSLiam R. Howlett 	MA_STATE(mas, &mm->mm_mt, addr, addr);
18881da177e4SLinus Torvalds 
18897fdbd37dSLiam R. Howlett 	vma = mas_walk(&mas);
18907fdbd37dSLiam R. Howlett 	*pprev = mas_prev(&mas, 0);
18917fdbd37dSLiam R. Howlett 	if (!vma)
18927fdbd37dSLiam R. Howlett 		vma = mas_next(&mas, ULONG_MAX);
18936bd4837dSKOSAKI Motohiro 	return vma;
18941da177e4SLinus Torvalds }
18951da177e4SLinus Torvalds 
18961da177e4SLinus Torvalds /*
18971da177e4SLinus Torvalds  * Verify that the stack growth is acceptable and
18981da177e4SLinus Torvalds  * update accounting. This is shared with both the
18991da177e4SLinus Torvalds  * grow-up and grow-down cases.
19001da177e4SLinus Torvalds  */
19011be7107fSHugh Dickins static int acct_stack_growth(struct vm_area_struct *vma,
19021be7107fSHugh Dickins 			     unsigned long size, unsigned long grow)
19031da177e4SLinus Torvalds {
19041da177e4SLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
19051be7107fSHugh Dickins 	unsigned long new_start;
19061da177e4SLinus Torvalds 
19071da177e4SLinus Torvalds 	/* address space limit tests */
190884638335SKonstantin Khlebnikov 	if (!may_expand_vm(mm, vma->vm_flags, grow))
19091da177e4SLinus Torvalds 		return -ENOMEM;
19101da177e4SLinus Torvalds 
19111da177e4SLinus Torvalds 	/* Stack limit test */
191224c79d8eSKrzysztof Opasiak 	if (size > rlimit(RLIMIT_STACK))
19131da177e4SLinus Torvalds 		return -ENOMEM;
19141da177e4SLinus Torvalds 
19151da177e4SLinus Torvalds 	/* mlock limit tests */
1916b0cc5e89SAndrew Morton 	if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
19171da177e4SLinus Torvalds 		return -ENOMEM;
19181da177e4SLinus Torvalds 
19190d59a01bSAdam Litke 	/* Check to ensure the stack will not grow into a hugetlb-only region */
19200d59a01bSAdam Litke 	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
19210d59a01bSAdam Litke 			vma->vm_end - size;
19220d59a01bSAdam Litke 	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
19230d59a01bSAdam Litke 		return -EFAULT;
19240d59a01bSAdam Litke 
19251da177e4SLinus Torvalds 	/*
19261da177e4SLinus Torvalds 	 * Overcommit..  This must be the final test, as it will
19271da177e4SLinus Torvalds 	 * update security statistics.
19281da177e4SLinus Torvalds 	 */
192905fa199dSHugh Dickins 	if (security_vm_enough_memory_mm(mm, grow))
19301da177e4SLinus Torvalds 		return -ENOMEM;
19311da177e4SLinus Torvalds 
19321da177e4SLinus Torvalds 	return 0;
19331da177e4SLinus Torvalds }
19341da177e4SLinus Torvalds 
193546dea3d0SHugh Dickins #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
19361da177e4SLinus Torvalds /*
193746dea3d0SHugh Dickins  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
193846dea3d0SHugh Dickins  * vma is the last one with address > vma->vm_end.  Have to extend vma.
19391da177e4SLinus Torvalds  */
19408d7071afSLinus Torvalds static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
19411da177e4SLinus Torvalds {
194209357814SOleg Nesterov 	struct mm_struct *mm = vma->vm_mm;
19431be7107fSHugh Dickins 	struct vm_area_struct *next;
19441be7107fSHugh Dickins 	unsigned long gap_addr;
194512352d3cSKonstantin Khlebnikov 	int error = 0;
1946d4af56c5SLiam R. Howlett 	MA_STATE(mas, &mm->mm_mt, 0, 0);
19471da177e4SLinus Torvalds 
19481da177e4SLinus Torvalds 	if (!(vma->vm_flags & VM_GROWSUP))
19491da177e4SLinus Torvalds 		return -EFAULT;
19501da177e4SLinus Torvalds 
1951bd726c90SHelge Deller 	/* Guard against exceeding limits of the address space. */
19521be7107fSHugh Dickins 	address &= PAGE_MASK;
195337511fb5SHelge Deller 	if (address >= (TASK_SIZE & PAGE_MASK))
195412352d3cSKonstantin Khlebnikov 		return -ENOMEM;
1955bd726c90SHelge Deller 	address += PAGE_SIZE;
195612352d3cSKonstantin Khlebnikov 
19571be7107fSHugh Dickins 	/* Enforce stack_guard_gap */
19581be7107fSHugh Dickins 	gap_addr = address + stack_guard_gap;
1959bd726c90SHelge Deller 
1960bd726c90SHelge Deller 	/* Guard against overflow */
1961bd726c90SHelge Deller 	if (gap_addr < address || gap_addr > TASK_SIZE)
1962bd726c90SHelge Deller 		gap_addr = TASK_SIZE;
1963bd726c90SHelge Deller 
1964763ecb03SLiam R. Howlett 	next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1965763ecb03SLiam R. Howlett 	if (next && vma_is_accessible(next)) {
19661be7107fSHugh Dickins 		if (!(next->vm_flags & VM_GROWSUP))
19671be7107fSHugh Dickins 			return -ENOMEM;
19681be7107fSHugh Dickins 		/* Check that both stack segments have the same anon_vma? */
19691be7107fSHugh Dickins 	}
19701be7107fSHugh Dickins 
1971c5d5546eSVernon Yang 	if (mas_preallocate(&mas, GFP_KERNEL))
19721da177e4SLinus Torvalds 		return -ENOMEM;
19731da177e4SLinus Torvalds 
1974d4af56c5SLiam R. Howlett 	/* We must make sure the anon_vma is allocated. */
1975d4af56c5SLiam R. Howlett 	if (unlikely(anon_vma_prepare(vma))) {
1976d4af56c5SLiam R. Howlett 		mas_destroy(&mas);
1977d4af56c5SLiam R. Howlett 		return -ENOMEM;
1978d4af56c5SLiam R. Howlett 	}
1979d4af56c5SLiam R. Howlett 
1980c137381fSSuren Baghdasaryan 	/* Lock the VMA before expanding to prevent concurrent page faults */
1981c137381fSSuren Baghdasaryan 	vma_start_write(vma);
19821da177e4SLinus Torvalds 	/*
19831da177e4SLinus Torvalds 	 * vma->vm_start/vm_end cannot change under us because the caller
1984c1e8d7c6SMichel Lespinasse 	 * is required to hold the mmap_lock in read mode.  We need the
19851da177e4SLinus Torvalds 	 * anon_vma lock to serialize against concurrent expand_stacks.
19861da177e4SLinus Torvalds 	 */
198712352d3cSKonstantin Khlebnikov 	anon_vma_lock_write(vma->anon_vma);
19881da177e4SLinus Torvalds 
19891da177e4SLinus Torvalds 	/* Somebody else might have raced and expanded it already */
19901da177e4SLinus Torvalds 	if (address > vma->vm_end) {
19911da177e4SLinus Torvalds 		unsigned long size, grow;
19921da177e4SLinus Torvalds 
19931da177e4SLinus Torvalds 		size = address - vma->vm_start;
19941da177e4SLinus Torvalds 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
19951da177e4SLinus Torvalds 
199642c36f63SHugh Dickins 		error = -ENOMEM;
199742c36f63SHugh Dickins 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
19981da177e4SLinus Torvalds 			error = acct_stack_growth(vma, size, grow);
19993af9e859SEric B Munson 			if (!error) {
20004128997bSMichel Lespinasse 				/*
2001524e00b3SLiam R. Howlett 				 * We only hold a shared mmap_lock lock here, so
2002524e00b3SLiam R. Howlett 				 * we need to protect against concurrent vma
2003524e00b3SLiam R. Howlett 				 * expansions.  anon_vma_lock_write() doesn't
2004524e00b3SLiam R. Howlett 				 * help here, as we don't guarantee that all
2005524e00b3SLiam R. Howlett 				 * growable vmas in a mm share the same root
2006524e00b3SLiam R. Howlett 				 * anon vma.  So, we reuse mm->page_table_lock
2007524e00b3SLiam R. Howlett 				 * to guard against concurrent vma expansions.
20084128997bSMichel Lespinasse 				 */
200909357814SOleg Nesterov 				spin_lock(&mm->page_table_lock);
201087e8827bSOleg Nesterov 				if (vma->vm_flags & VM_LOCKED)
201109357814SOleg Nesterov 					mm->locked_vm += grow;
201284638335SKonstantin Khlebnikov 				vm_stat_account(mm, vma->vm_flags, grow);
2013bf181b9fSMichel Lespinasse 				anon_vma_interval_tree_pre_update_vma(vma);
20141da177e4SLinus Torvalds 				vma->vm_end = address;
2015d4af56c5SLiam R. Howlett 				/* Overwrite old entry in mtree. */
2016fbcc3104SLiam R. Howlett 				mas_set_range(&mas, vma->vm_start, address - 1);
2017fbcc3104SLiam R. Howlett 				mas_store_prealloc(&mas, vma);
2018bf181b9fSMichel Lespinasse 				anon_vma_interval_tree_post_update_vma(vma);
201909357814SOleg Nesterov 				spin_unlock(&mm->page_table_lock);
20204128997bSMichel Lespinasse 
20213af9e859SEric B Munson 				perf_event_mmap(vma);
20223af9e859SEric B Munson 			}
20231da177e4SLinus Torvalds 		}
202442c36f63SHugh Dickins 	}
202512352d3cSKonstantin Khlebnikov 	anon_vma_unlock_write(vma->anon_vma);
2026c791576cSYang Shi 	khugepaged_enter_vma(vma, vma->vm_flags);
2027d4af56c5SLiam R. Howlett 	mas_destroy(&mas);
20281da177e4SLinus Torvalds 	return error;
20291da177e4SLinus Torvalds }
203046dea3d0SHugh Dickins #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
203146dea3d0SHugh Dickins 
20321da177e4SLinus Torvalds /*
20331da177e4SLinus Torvalds  * vma is the first one with address < vma->vm_start.  Have to extend vma.
20348d7071afSLinus Torvalds  * mmap_lock held for writing.
20351da177e4SLinus Torvalds  */
2036524e00b3SLiam R. Howlett int expand_downwards(struct vm_area_struct *vma, unsigned long address)
20371da177e4SLinus Torvalds {
203809357814SOleg Nesterov 	struct mm_struct *mm = vma->vm_mm;
2039763ecb03SLiam R. Howlett 	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
20401be7107fSHugh Dickins 	struct vm_area_struct *prev;
20410a1d5299SJann Horn 	int error = 0;
20421da177e4SLinus Torvalds 
20438d7071afSLinus Torvalds 	if (!(vma->vm_flags & VM_GROWSDOWN))
20448d7071afSLinus Torvalds 		return -EFAULT;
20458d7071afSLinus Torvalds 
20468869477aSEric Paris 	address &= PAGE_MASK;
20478b35ca3eSBen Hutchings 	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
20480a1d5299SJann Horn 		return -EPERM;
20498869477aSEric Paris 
20501be7107fSHugh Dickins 	/* Enforce stack_guard_gap */
2051763ecb03SLiam R. Howlett 	prev = mas_prev(&mas, 0);
20521be7107fSHugh Dickins 	/* Check that both stack segments have the same anon_vma? */
2053f440fa1aSLiam R. Howlett 	if (prev) {
2054f440fa1aSLiam R. Howlett 		if (!(prev->vm_flags & VM_GROWSDOWN) &&
2055f440fa1aSLiam R. Howlett 		    vma_is_accessible(prev) &&
2056f440fa1aSLiam R. Howlett 		    (address - prev->vm_end < stack_guard_gap))
205732e4e6d5SOleg Nesterov 			return -ENOMEM;
20581be7107fSHugh Dickins 	}
20591be7107fSHugh Dickins 
2060c5d5546eSVernon Yang 	if (mas_preallocate(&mas, GFP_KERNEL))
206112352d3cSKonstantin Khlebnikov 		return -ENOMEM;
20621da177e4SLinus Torvalds 
2063d4af56c5SLiam R. Howlett 	/* We must make sure the anon_vma is allocated. */
2064d4af56c5SLiam R. Howlett 	if (unlikely(anon_vma_prepare(vma))) {
2065d4af56c5SLiam R. Howlett 		mas_destroy(&mas);
2066d4af56c5SLiam R. Howlett 		return -ENOMEM;
2067d4af56c5SLiam R. Howlett 	}
2068d4af56c5SLiam R. Howlett 
2069c137381fSSuren Baghdasaryan 	/* Lock the VMA before expanding to prevent concurrent page faults */
2070c137381fSSuren Baghdasaryan 	vma_start_write(vma);
20711da177e4SLinus Torvalds 	/*
20721da177e4SLinus Torvalds 	 * vma->vm_start/vm_end cannot change under us because the caller
2073c1e8d7c6SMichel Lespinasse 	 * is required to hold the mmap_lock in write mode.  We need the
20741da177e4SLinus Torvalds 	 * anon_vma lock to serialize against concurrent expand_stacks.
20751da177e4SLinus Torvalds 	 */
207612352d3cSKonstantin Khlebnikov 	anon_vma_lock_write(vma->anon_vma);
20771da177e4SLinus Torvalds 
20781da177e4SLinus Torvalds 	/* Somebody else might have raced and expanded it already */
20791da177e4SLinus Torvalds 	if (address < vma->vm_start) {
20801da177e4SLinus Torvalds 		unsigned long size, grow;
20811da177e4SLinus Torvalds 
20821da177e4SLinus Torvalds 		size = vma->vm_end - address;
20831da177e4SLinus Torvalds 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
20841da177e4SLinus Torvalds 
2085a626ca6aSLinus Torvalds 		error = -ENOMEM;
2086a626ca6aSLinus Torvalds 		if (grow <= vma->vm_pgoff) {
20871da177e4SLinus Torvalds 			error = acct_stack_growth(vma, size, grow);
20881da177e4SLinus Torvalds 			if (!error) {
20894128997bSMichel Lespinasse 				/*
2090524e00b3SLiam R. Howlett 				 * We only hold a shared mmap_lock here, so
2091524e00b3SLiam R. Howlett 				 * we need to protect against concurrent vma
2092524e00b3SLiam R. Howlett 				 * expansions.  anon_vma_lock_write() doesn't
2093524e00b3SLiam R. Howlett 				 * help here, as we don't guarantee that all
2094524e00b3SLiam R. Howlett 				 * growable vmas in a mm share the same root
2095524e00b3SLiam R. Howlett 				 * anon vma.  So, we reuse mm->page_table_lock
2096524e00b3SLiam R. Howlett 				 * to guard against concurrent vma expansions.
20974128997bSMichel Lespinasse 				 */
209809357814SOleg Nesterov 				spin_lock(&mm->page_table_lock);
209987e8827bSOleg Nesterov 				if (vma->vm_flags & VM_LOCKED)
210009357814SOleg Nesterov 					mm->locked_vm += grow;
210184638335SKonstantin Khlebnikov 				vm_stat_account(mm, vma->vm_flags, grow);
2102bf181b9fSMichel Lespinasse 				anon_vma_interval_tree_pre_update_vma(vma);
21031da177e4SLinus Torvalds 				vma->vm_start = address;
21041da177e4SLinus Torvalds 				vma->vm_pgoff -= grow;
2105d4af56c5SLiam R. Howlett 				/* Overwrite old entry in mtree. */
2106fbcc3104SLiam R. Howlett 				mas_set_range(&mas, address, vma->vm_end - 1);
2107fbcc3104SLiam R. Howlett 				mas_store_prealloc(&mas, vma);
2108bf181b9fSMichel Lespinasse 				anon_vma_interval_tree_post_update_vma(vma);
210909357814SOleg Nesterov 				spin_unlock(&mm->page_table_lock);
21104128997bSMichel Lespinasse 
21113af9e859SEric B Munson 				perf_event_mmap(vma);
21121da177e4SLinus Torvalds 			}
21131da177e4SLinus Torvalds 		}
2114a626ca6aSLinus Torvalds 	}
211512352d3cSKonstantin Khlebnikov 	anon_vma_unlock_write(vma->anon_vma);
2116c791576cSYang Shi 	khugepaged_enter_vma(vma, vma->vm_flags);
2117d4af56c5SLiam R. Howlett 	mas_destroy(&mas);
21181da177e4SLinus Torvalds 	return error;
21191da177e4SLinus Torvalds }
21201da177e4SLinus Torvalds 
21211be7107fSHugh Dickins /* Enforced gap between the expanding stack and other mappings. */
21221be7107fSHugh Dickins unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;
21231be7107fSHugh Dickins 
21241be7107fSHugh Dickins static int __init cmdline_parse_stack_guard_gap(char *p)
21251be7107fSHugh Dickins {
21261be7107fSHugh Dickins 	unsigned long val;
21271be7107fSHugh Dickins 	char *endptr;
21281be7107fSHugh Dickins 
21291be7107fSHugh Dickins 	val = simple_strtoul(p, &endptr, 10);
21301be7107fSHugh Dickins 	if (!*endptr)
21311be7107fSHugh Dickins 		stack_guard_gap = val << PAGE_SHIFT;
21321be7107fSHugh Dickins 
2133e6d09493SRandy Dunlap 	return 1;
21341be7107fSHugh Dickins }
21351be7107fSHugh Dickins __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
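
/*
 * Worked example (editor's illustration, not kernel text): with 4 KiB
 * pages the default gap above is 256 << 12 = 1 MiB.  Booting with
 * "stack_guard_gap=512" doubles that to 512 << 12 = 2 MiB, and
 * "stack_guard_gap=1" shrinks it to a single page.  expand_downwards()
 * then refuses to grow a stack to within this distance of the
 * preceding mapping.
 */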
21361be7107fSHugh Dickins 
2137b6a2fea3SOllie Wild #ifdef CONFIG_STACK_GROWSUP
21388d7071afSLinus Torvalds int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2139b6a2fea3SOllie Wild {
2140b6a2fea3SOllie Wild 	return expand_upwards(vma, address);
2141b6a2fea3SOllie Wild }
2142b6a2fea3SOllie Wild 
21438d7071afSLinus Torvalds struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2144b6a2fea3SOllie Wild {
2145b6a2fea3SOllie Wild 	struct vm_area_struct *vma, *prev;
2146b6a2fea3SOllie Wild 
2147b6a2fea3SOllie Wild 	addr &= PAGE_MASK;
2148b6a2fea3SOllie Wild 	vma = find_vma_prev(mm, addr, &prev);
2149b6a2fea3SOllie Wild 	if (vma && (vma->vm_start <= addr))
2150b6a2fea3SOllie Wild 		return vma;
2151f440fa1aSLiam R. Howlett 	if (!prev)
2152f440fa1aSLiam R. Howlett 		return NULL;
21538d7071afSLinus Torvalds 	if (expand_stack_locked(prev, addr))
2154b6a2fea3SOllie Wild 		return NULL;
2155cea10a19SMichel Lespinasse 	if (prev->vm_flags & VM_LOCKED)
2156fc05f566SKirill A. Shutemov 		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2157b6a2fea3SOllie Wild 	return prev;
2158b6a2fea3SOllie Wild }
2159b6a2fea3SOllie Wild #else
21608d7071afSLinus Torvalds int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2161b6a2fea3SOllie Wild {
2162f440fa1aSLiam R. Howlett 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
2163f440fa1aSLiam R. Howlett 		return -EINVAL;
2164b6a2fea3SOllie Wild 	return expand_downwards(vma, address);
2165b6a2fea3SOllie Wild }
2166b6a2fea3SOllie Wild 
21678d7071afSLinus Torvalds struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
21681da177e4SLinus Torvalds {
21691da177e4SLinus Torvalds 	struct vm_area_struct *vma;
21701da177e4SLinus Torvalds 	unsigned long start;
21711da177e4SLinus Torvalds 
21721da177e4SLinus Torvalds 	addr &= PAGE_MASK;
21731da177e4SLinus Torvalds 	vma = find_vma(mm, addr);
21741da177e4SLinus Torvalds 	if (!vma)
21751da177e4SLinus Torvalds 		return NULL;
21761da177e4SLinus Torvalds 	if (vma->vm_start <= addr)
21771da177e4SLinus Torvalds 		return vma;
21781da177e4SLinus Torvalds 	start = vma->vm_start;
21798d7071afSLinus Torvalds 	if (expand_stack_locked(vma, addr))
21801da177e4SLinus Torvalds 		return NULL;
2181cea10a19SMichel Lespinasse 	if (vma->vm_flags & VM_LOCKED)
2182fc05f566SKirill A. Shutemov 		populate_vma_page_range(vma, addr, start, NULL);
21831da177e4SLinus Torvalds 	return vma;
21841da177e4SLinus Torvalds }
21851da177e4SLinus Torvalds #endif
21861da177e4SLinus Torvalds 
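/*
 * Hedged usage sketch (editor's illustration; get_stack_vma() is a
 * hypothetical helper, not part of this file): callers of
 * find_extend_vma_locked() must already hold the mmap_lock for
 * writing, since growing the stack may modify the vma tree.
 */
static struct vm_area_struct *get_stack_vma(struct mm_struct *mm,
					    unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_assert_write_locked(mm);
	vma = find_extend_vma_locked(mm, addr);
	/* NULL: no vma covers addr and no stack vma could be expanded. */
	return vma;
}
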
21878d7071afSLinus Torvalds /*
21888d7071afSLinus Torvalds  * IA64 has some horrid mapping rules: it can expand both up and down,
21898d7071afSLinus Torvalds  * but with various special constraints.
21908d7071afSLinus Torvalds  *
21918d7071afSLinus Torvalds  * We'll get rid of this architecture eventually, so the ugliness is
21928d7071afSLinus Torvalds  * temporary.
21938d7071afSLinus Torvalds  */
21948d7071afSLinus Torvalds #ifdef CONFIG_IA64
21958d7071afSLinus Torvalds static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
2196f440fa1aSLiam R. Howlett {
21978d7071afSLinus Torvalds 	return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
21988d7071afSLinus Torvalds 		REGION_OFFSET(addr) < RGN_MAP_LIMIT;
2199f440fa1aSLiam R. Howlett }
22008d7071afSLinus Torvalds 
22018d7071afSLinus Torvalds /*
22028d7071afSLinus Torvalds  * IA64 stacks grow down, but there's a special register backing store
22038d7071afSLinus Torvalds  * that can grow up. Only sequentially, though, so the new address must
22048d7071afSLinus Torvalds  * match vm_end.
22058d7071afSLinus Torvalds  */
22068d7071afSLinus Torvalds static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
22078d7071afSLinus Torvalds {
22088d7071afSLinus Torvalds 	if (!vma_expand_ok(vma, addr))
22098d7071afSLinus Torvalds 		return -EFAULT;
22108d7071afSLinus Torvalds 	if (vma->vm_end != (addr & PAGE_MASK))
22118d7071afSLinus Torvalds 		return -EFAULT;
22128d7071afSLinus Torvalds 	return expand_upwards(vma, addr);
22138d7071afSLinus Torvalds }
22148d7071afSLinus Torvalds 
22158d7071afSLinus Torvalds static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
22168d7071afSLinus Torvalds {
22178d7071afSLinus Torvalds 	if (!vma_expand_ok(vma, addr))
22188d7071afSLinus Torvalds 		return -EFAULT;
22198d7071afSLinus Torvalds 	return expand_downwards(vma, addr);
22208d7071afSLinus Torvalds }
22218d7071afSLinus Torvalds 
22228d7071afSLinus Torvalds #elif defined(CONFIG_STACK_GROWSUP)
22238d7071afSLinus Torvalds 
22248d7071afSLinus Torvalds #define vma_expand_up(vma, addr) expand_upwards(vma, addr)
22258d7071afSLinus Torvalds #define vma_expand_down(vma, addr) (-EFAULT)
22268d7071afSLinus Torvalds 
22278d7071afSLinus Torvalds #else
22288d7071afSLinus Torvalds 
22298d7071afSLinus Torvalds #define vma_expand_up(vma, addr) (-EFAULT)
22308d7071afSLinus Torvalds #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
22318d7071afSLinus Torvalds 
22328d7071afSLinus Torvalds #endif
22338d7071afSLinus Torvalds 
22348d7071afSLinus Torvalds /*
22358d7071afSLinus Torvalds  * expand_stack(): legacy interface for page faulting. Don't use unless
22368d7071afSLinus Torvalds  * you have to.
22378d7071afSLinus Torvalds  *
22388d7071afSLinus Torvalds  * This is called with the mm locked for reading, drops the lock, takes
22398d7071afSLinus Torvalds  * the lock for writing, tries to look up a vma again, expands it if
22408d7071afSLinus Torvalds  * necessary, and downgrades the lock to reading again.
22418d7071afSLinus Torvalds  *
22428d7071afSLinus Torvalds  * If no vma is found or it can't be expanded, it returns NULL and has
22438d7071afSLinus Torvalds  * dropped the lock.
22448d7071afSLinus Torvalds  */
22458d7071afSLinus Torvalds struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
22468d7071afSLinus Torvalds {
22478d7071afSLinus Torvalds 	struct vm_area_struct *vma, *prev;
22488d7071afSLinus Torvalds 
22498d7071afSLinus Torvalds 	mmap_read_unlock(mm);
22508d7071afSLinus Torvalds 	if (mmap_write_lock_killable(mm))
22518d7071afSLinus Torvalds 		return NULL;
22528d7071afSLinus Torvalds 
22538d7071afSLinus Torvalds 	vma = find_vma_prev(mm, addr, &prev);
22548d7071afSLinus Torvalds 	if (vma && vma->vm_start <= addr)
22558d7071afSLinus Torvalds 		goto success;
22568d7071afSLinus Torvalds 
22578d7071afSLinus Torvalds 	if (prev && !vma_expand_up(prev, addr)) {
22588d7071afSLinus Torvalds 		vma = prev;
22598d7071afSLinus Torvalds 		goto success;
22608d7071afSLinus Torvalds 	}
22618d7071afSLinus Torvalds 
22628d7071afSLinus Torvalds 	if (vma && !vma_expand_down(vma, addr))
22638d7071afSLinus Torvalds 		goto success;
22648d7071afSLinus Torvalds 
22658d7071afSLinus Torvalds 	mmap_write_unlock(mm);
22668d7071afSLinus Torvalds 	return NULL;
22678d7071afSLinus Torvalds 
22688d7071afSLinus Torvalds success:
22698d7071afSLinus Torvalds 	mmap_write_downgrade(mm);
22708d7071afSLinus Torvalds 	return vma;
22718d7071afSLinus Torvalds }
2272e1d6d01aSJesse Barnes 
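/*
 * Hedged usage sketch (editor's illustration; fault_lookup() is a
 * hypothetical helper): the legacy fault path that expand_stack()
 * serves.  Note the lock choreography: on failure the mmap_lock has
 * already been dropped, on success it is held for reading again.
 */
static struct vm_area_struct *fault_lookup(struct mm_struct *mm,
					   unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (likely(vma && vma->vm_start <= addr))
		return vma;		/* mmap_lock still held for reading */

	/* Drops, retakes and downgrades the lock; NULL means it is dropped. */
	return expand_stack(mm, addr);
}
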
22732c0b3814SHugh Dickins /*
2274763ecb03SLiam R. Howlett  * OK - the memory areas we should free are on a maple tree, so release
2275763ecb03SLiam R. Howlett  * them and do the vma updates.
22761da177e4SLinus Torvalds  *
22772c0b3814SHugh Dickins  * Called with the mm semaphore held.
22781da177e4SLinus Torvalds  */
2279763ecb03SLiam R. Howlett static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
22801da177e4SLinus Torvalds {
22814f74d2c8SLinus Torvalds 	unsigned long nr_accounted = 0;
2282763ecb03SLiam R. Howlett 	struct vm_area_struct *vma;
22834f74d2c8SLinus Torvalds 
2284365e9c87SHugh Dickins 	/* Update high watermark before we lower total_vm */
2285365e9c87SHugh Dickins 	update_hiwater_vm(mm);
2286763ecb03SLiam R. Howlett 	mas_for_each(mas, vma, ULONG_MAX) {
2287ab50b8edSHugh Dickins 		long nrpages = vma_pages(vma);
22881da177e4SLinus Torvalds 
22894f74d2c8SLinus Torvalds 		if (vma->vm_flags & VM_ACCOUNT)
22904f74d2c8SLinus Torvalds 			nr_accounted += nrpages;
229184638335SKonstantin Khlebnikov 		vm_stat_account(mm, vma->vm_flags, -nrpages);
22920d2ebf9cSSuren Baghdasaryan 		remove_vma(vma, false);
2293763ecb03SLiam R. Howlett 	}
22944f74d2c8SLinus Torvalds 	vm_unacct_memory(nr_accounted);
22951da177e4SLinus Torvalds 	validate_mm(mm);
22961da177e4SLinus Torvalds }
22971da177e4SLinus Torvalds 
22981da177e4SLinus Torvalds /*
22991da177e4SLinus Torvalds  * Get rid of page table information in the indicated region.
23001da177e4SLinus Torvalds  *
2301f10df686SPaolo 'Blaisorblade' Giarrusso  * Called with the mm semaphore held.
23021da177e4SLinus Torvalds  */
2303763ecb03SLiam R. Howlett static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
2304e0da382cSHugh Dickins 		struct vm_area_struct *vma, struct vm_area_struct *prev,
2305763ecb03SLiam R. Howlett 		struct vm_area_struct *next,
230668f48381SSuren Baghdasaryan 		unsigned long start, unsigned long end, bool mm_wr_locked)
23071da177e4SLinus Torvalds {
2308d16dfc55SPeter Zijlstra 	struct mmu_gather tlb;
23091da177e4SLinus Torvalds 
23101da177e4SLinus Torvalds 	lru_add_drain();
2311a72afd87SWill Deacon 	tlb_gather_mmu(&tlb, mm);
2312365e9c87SHugh Dickins 	update_hiwater_rss(mm);
231368f48381SSuren Baghdasaryan 	unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked);
2314763ecb03SLiam R. Howlett 	free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
231598e51a22SSuren Baghdasaryan 				 next ? next->vm_start : USER_PGTABLES_CEILING,
231698e51a22SSuren Baghdasaryan 				 mm_wr_locked);
2317ae8eba8bSWill Deacon 	tlb_finish_mmu(&tlb);
23181da177e4SLinus Torvalds }
23191da177e4SLinus Torvalds 
23201da177e4SLinus Torvalds /*
2321def5efe0SDavid Rientjes  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
2322def5efe0SDavid Rientjes  * has already been checked, or where failing doesn't make sense.
23230fd5a9e2SLiam R. Howlett  * VMA Iterator will point to the end VMA.
23241da177e4SLinus Torvalds  */
23259760ebffSLiam R. Howlett int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
23261da177e4SLinus Torvalds 		unsigned long addr, int new_below)
23271da177e4SLinus Torvalds {
2328b2b3b886SLiam R. Howlett 	struct vma_prepare vp;
23291da177e4SLinus Torvalds 	struct vm_area_struct *new;
2330e3975891SChen Gang 	int err;
23319760ebffSLiam R. Howlett 
2332b50e195fSLiam R. Howlett 	validate_mm(vma->vm_mm);
23331da177e4SLinus Torvalds 
2334b2b3b886SLiam R. Howlett 	WARN_ON(vma->vm_start >= addr);
2335b2b3b886SLiam R. Howlett 	WARN_ON(vma->vm_end <= addr);
2336b2b3b886SLiam R. Howlett 
2337dd3b614fSDmitry Safonov 	if (vma->vm_ops && vma->vm_ops->may_split) {
2338dd3b614fSDmitry Safonov 		err = vma->vm_ops->may_split(vma, addr);
233931383c68SDan Williams 		if (err)
234031383c68SDan Williams 			return err;
234131383c68SDan Williams 	}
23421da177e4SLinus Torvalds 
23433928d4f5SLinus Torvalds 	new = vm_area_dup(vma);
23441da177e4SLinus Torvalds 	if (!new)
2345e3975891SChen Gang 		return -ENOMEM;
23461da177e4SLinus Torvalds 
2347b2b3b886SLiam R. Howlett 	err = -ENOMEM;
2348b2b3b886SLiam R. Howlett 	if (vma_iter_prealloc(vmi))
2349b2b3b886SLiam R. Howlett 		goto out_free_vma;
2350b2b3b886SLiam R. Howlett 
2351b2b3b886SLiam R. Howlett 	if (new_below) {
23521da177e4SLinus Torvalds 		new->vm_end = addr;
2353b2b3b886SLiam R. Howlett 	} else {
23541da177e4SLinus Torvalds 		new->vm_start = addr;
23551da177e4SLinus Torvalds 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
23561da177e4SLinus Torvalds 	}
23571da177e4SLinus Torvalds 
2358ef0855d3SOleg Nesterov 	err = vma_dup_policy(vma, new);
2359ef0855d3SOleg Nesterov 	if (err)
2360b2b3b886SLiam R. Howlett 		goto out_free_vmi;
23611da177e4SLinus Torvalds 
2362c4ea95d7SDaniel Forrest 	err = anon_vma_clone(new, vma);
2363c4ea95d7SDaniel Forrest 	if (err)
23645beb4930SRik van Riel 		goto out_free_mpol;
23655beb4930SRik van Riel 
2366e9714acfSKonstantin Khlebnikov 	if (new->vm_file)
23671da177e4SLinus Torvalds 		get_file(new->vm_file);
23681da177e4SLinus Torvalds 
23691da177e4SLinus Torvalds 	if (new->vm_ops && new->vm_ops->open)
23701da177e4SLinus Torvalds 		new->vm_ops->open(new);
23711da177e4SLinus Torvalds 
2372b2b3b886SLiam R. Howlett 	init_vma_prep(&vp, vma);
2373b2b3b886SLiam R. Howlett 	vp.insert = new;
2374b2b3b886SLiam R. Howlett 	vma_prepare(&vp);
2375ccf1d78dSSuren Baghdasaryan 	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
23761da177e4SLinus Torvalds 
2377b2b3b886SLiam R. Howlett 	if (new_below) {
2378b2b3b886SLiam R. Howlett 		vma->vm_start = addr;
2379b2b3b886SLiam R. Howlett 		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
2380b2b3b886SLiam R. Howlett 	} else {
2381b2b3b886SLiam R. Howlett 		vma->vm_end = addr;
23829760ebffSLiam R. Howlett 	}
23835beb4930SRik van Riel 
2384b2b3b886SLiam R. Howlett 	/* vma_complete stores the new vma */
2385b2b3b886SLiam R. Howlett 	vma_complete(&vp, vmi, vma->vm_mm);
2386b2b3b886SLiam R. Howlett 
2387b2b3b886SLiam R. Howlett 	/* Success. */
2388b2b3b886SLiam R. Howlett 	if (new_below)
2389b2b3b886SLiam R. Howlett 		vma_next(vmi);
2390b50e195fSLiam R. Howlett 	validate_mm(vma->vm_mm);
2391b2b3b886SLiam R. Howlett 	return 0;
2392b2b3b886SLiam R. Howlett 
23935beb4930SRik van Riel out_free_mpol:
2394ef0855d3SOleg Nesterov 	mpol_put(vma_policy(new));
2395b2b3b886SLiam R. Howlett out_free_vmi:
2396b2b3b886SLiam R. Howlett 	vma_iter_free(vmi);
23975beb4930SRik van Riel out_free_vma:
23983928d4f5SLinus Torvalds 	vm_area_free(new);
2399b50e195fSLiam R. Howlett 	validate_mm(vma->vm_mm);
24005beb4930SRik van Riel 	return err;
24011da177e4SLinus Torvalds }
24021da177e4SLinus Torvalds 
2403659ace58SKOSAKI Motohiro /*
2404659ace58SKOSAKI Motohiro  * Split a vma into two pieces at address 'addr'; a new vma is allocated
2405659ace58SKOSAKI Motohiro  * either for the first part or the tail.
2406659ace58SKOSAKI Motohiro  */
24079760ebffSLiam R. Howlett int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2408659ace58SKOSAKI Motohiro 	      unsigned long addr, int new_below)
2409659ace58SKOSAKI Motohiro {
24109760ebffSLiam R. Howlett 	if (vma->vm_mm->map_count >= sysctl_max_map_count)
2411659ace58SKOSAKI Motohiro 		return -ENOMEM;
2412659ace58SKOSAKI Motohiro 
24139760ebffSLiam R. Howlett 	return __split_vma(vmi, vma, addr, new_below);
2414f2ebfe43SLiam R. Howlett }
2415f2ebfe43SLiam R. Howlett 
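/*
 * Illustrative sketch (editor's addition; isolate_range() is a
 * hypothetical helper, not part of this file): carving [start, end)
 * into its own vma(s) takes at most two splits, the same pattern
 * do_vmi_align_munmap() uses below.  Per __split_vma() above,
 * new_below == 0 leaves the passed vma as the piece below @addr and
 * inserts the new vma above it; new_below == 1 does the opposite.
 */
static int isolate_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	int err = 0;

	if (start > vma->vm_start) {
		err = split_vma(vmi, vma, start, 0);
		if (err)
			return err;
		vma = vma_iter_load(vmi);	/* the vma starting at start */
	}
	if (end < vma->vm_end)
		err = split_vma(vmi, vma, end, 0);	/* vma now ends at end */
	return err;
}
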
241611f9a21aSLiam R. Howlett /*
2417183654ceSLiam R. Howlett  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
2418183654ceSLiam R. Howlett  * @vmi: The vma iterator
241911f9a21aSLiam R. Howlett  * @vma: The starting vm_area_struct
242011f9a21aSLiam R. Howlett  * @mm: The mm_struct
242111f9a21aSLiam R. Howlett  * @start: The aligned start address to munmap.
242211f9a21aSLiam R. Howlett  * @end: The aligned end address to munmap.
242311f9a21aSLiam R. Howlett  * @uf: The userfaultfd list_head
2424408579cdSLiam R. Howlett  * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
2425408579cdSLiam R. Howlett  * success.
242611f9a21aSLiam R. Howlett  *
2427408579cdSLiam R. Howlett  * Return: 0 on success, dropping the lock if so directed; otherwise an error
2428408579cdSLiam R. Howlett  * with the lock still held.
242911f9a21aSLiam R. Howlett  */
243011f9a21aSLiam R. Howlett static int
2431183654ceSLiam R. Howlett do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
243211f9a21aSLiam R. Howlett 		    struct mm_struct *mm, unsigned long start,
2433408579cdSLiam R. Howlett 		    unsigned long end, struct list_head *uf, bool unlock)
243411f9a21aSLiam R. Howlett {
2435763ecb03SLiam R. Howlett 	struct vm_area_struct *prev, *next = NULL;
2436763ecb03SLiam R. Howlett 	struct maple_tree mt_detach;
2437763ecb03SLiam R. Howlett 	int count = 0;
2438d4af56c5SLiam R. Howlett 	int error = -ENOMEM;
2439606c812eSLiam R. Howlett 	unsigned long locked_vm = 0;
2440763ecb03SLiam R. Howlett 	MA_STATE(mas_detach, &mt_detach, 0, 0);
24413dd44325SLiam R. Howlett 	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2442763ecb03SLiam R. Howlett 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
2443524e00b3SLiam R. Howlett 
24441da177e4SLinus Torvalds 	/*
24451da177e4SLinus Torvalds 	 * If we need to split any vma, do it now to save pain later.
24461da177e4SLinus Torvalds 	 *
24471da177e4SLinus Torvalds 	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
24481da177e4SLinus Torvalds 	 * unmapped vm_area_struct will remain in use: so lower split_vma
24491da177e4SLinus Torvalds 	 * places tmp vma above, and higher split_vma places tmp vma below.
24501da177e4SLinus Torvalds 	 */
2451763ecb03SLiam R. Howlett 
2452763ecb03SLiam R. Howlett 	/* Does it split the first one? */
2453146425a3SHugh Dickins 	if (start > vma->vm_start) {
2454659ace58SKOSAKI Motohiro 
2455659ace58SKOSAKI Motohiro 		/*
2456659ace58SKOSAKI Motohiro 		 * Make sure that map_count on return from munmap() will
2457659ace58SKOSAKI Motohiro 		 * not exceed its limit; but let map_count go just above
2458659ace58SKOSAKI Motohiro 		 * its limit temporarily, to help free resources as expected.
2459659ace58SKOSAKI Motohiro 		 */
2460659ace58SKOSAKI Motohiro 		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2461d4af56c5SLiam R. Howlett 			goto map_count_exceeded;
2462659ace58SKOSAKI Motohiro 
24639760ebffSLiam R. Howlett 		error = __split_vma(vmi, vma, start, 0);
24641da177e4SLinus Torvalds 		if (error)
2465763ecb03SLiam R. Howlett 			goto start_split_failed;
246611f9a21aSLiam R. Howlett 
24670fd5a9e2SLiam R. Howlett 		vma = vma_iter_load(vmi);
24681da177e4SLinus Torvalds 	}
24691da177e4SLinus Torvalds 
2470183654ceSLiam R. Howlett 	prev = vma_prev(vmi);
2471763ecb03SLiam R. Howlett 	if (unlikely(!prev))
2472183654ceSLiam R. Howlett 		vma_iter_set(vmi, start);
247311f9a21aSLiam R. Howlett 
2474763ecb03SLiam R. Howlett 	/*
2475763ecb03SLiam R. Howlett 	 * Detach a range of VMAs from the mm, using next as a temporary
2476763ecb03SLiam R. Howlett 	 * variable since it is always overwritten.
2477763ecb03SLiam R. Howlett 	 */
2478183654ceSLiam R. Howlett 	for_each_vma_range(*vmi, next, end) {
2479763ecb03SLiam R. Howlett 		/* Does it split the end? */
2480763ecb03SLiam R. Howlett 		if (next->vm_end > end) {
24816b73cff2SLiam R. Howlett 			error = __split_vma(vmi, next, end, 0);
24821da177e4SLinus Torvalds 			if (error)
2483763ecb03SLiam R. Howlett 				goto end_split_failed;
248411f9a21aSLiam R. Howlett 		}
2485606c812eSLiam R. Howlett 		vma_start_write(next);
2486606c812eSLiam R. Howlett 		mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
24876c26bd43SDavid Woodhouse 		error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
24886c26bd43SDavid Woodhouse 		if (error)
2489606c812eSLiam R. Howlett 			goto munmap_gather_failed;
2490606c812eSLiam R. Howlett 		vma_mark_detached(next, true);
2491606c812eSLiam R. Howlett 		if (next->vm_flags & VM_LOCKED)
2492606c812eSLiam R. Howlett 			locked_vm += vma_pages(next);
2493763ecb03SLiam R. Howlett 
2494763ecb03SLiam R. Howlett 		count++;
24952376dd7cSAndrea Arcangeli 		if (unlikely(uf)) {
24962376dd7cSAndrea Arcangeli 			/*
24972376dd7cSAndrea Arcangeli 			 * If userfaultfd_unmap_prep returns an error, the vmas
2498f0953a1bSIngo Molnar 			 * will remain split, but userland will get a
24992376dd7cSAndrea Arcangeli 			 * highly unexpected error anyway. This is no
25002376dd7cSAndrea Arcangeli 			 * different than the case where the first of the two
25012376dd7cSAndrea Arcangeli 			 * __split_vma calls fails: we don't undo the first
25022376dd7cSAndrea Arcangeli 			 * split, though we could. This failure is unlikely
25032376dd7cSAndrea Arcangeli 			 * enough that it's not worth optimizing for.
25042376dd7cSAndrea Arcangeli 			 */
250565ac1320SLiam R. Howlett 			error = userfaultfd_unmap_prep(next, start, end, uf);
250611f9a21aSLiam R. Howlett 
25072376dd7cSAndrea Arcangeli 			if (error)
2508d4af56c5SLiam R. Howlett 				goto userfaultfd_error;
25092376dd7cSAndrea Arcangeli 		}
2510ba470de4SRik van Riel #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
2511ba470de4SRik van Riel 		BUG_ON(next->vm_start < start);
2512ba470de4SRik van Riel 		BUG_ON(next->vm_start > end);
25131da177e4SLinus Torvalds #endif
25141da177e4SLinus Torvalds 	}
2515146425a3SHugh Dickins 
251615c0c60bSLiam R. Howlett 	if (vma_iter_end(vmi) > end)
251715c0c60bSLiam R. Howlett 		next = vma_iter_load(vmi);
251815c0c60bSLiam R. Howlett 
251915c0c60bSLiam R. Howlett 	if (!next)
25201da177e4SLinus Torvalds 		next = vma_next(vmi);
25212376dd7cSAndrea Arcangeli 
2522763ecb03SLiam R. Howlett #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
2523763ecb03SLiam R. Howlett 	/* Make sure no VMAs are about to be lost. */
2524763ecb03SLiam R. Howlett 	{
2525763ecb03SLiam R. Howlett 		MA_STATE(test, &mt_detach, start, end - 1);
2526763ecb03SLiam R. Howlett 		struct vm_area_struct *vma_mas, *vma_test;
2527763ecb03SLiam R. Howlett 		int test_count = 0;
2528763ecb03SLiam R. Howlett 
2529183654ceSLiam R. Howlett 		vma_iter_set(vmi, start);
2530763ecb03SLiam R. Howlett 		rcu_read_lock();
2531763ecb03SLiam R. Howlett 		vma_test = mas_find(&test, end - 1);
2532183654ceSLiam R. Howlett 		for_each_vma_range(*vmi, vma_mas, end) {
2533763ecb03SLiam R. Howlett 			BUG_ON(vma_mas != vma_test);
2534763ecb03SLiam R. Howlett 			test_count++;
2535763ecb03SLiam R. Howlett 			vma_test = mas_next(&test, end - 1);
2536763ecb03SLiam R. Howlett 		}
2537763ecb03SLiam R. Howlett 		rcu_read_unlock();
2538763ecb03SLiam R. Howlett 		BUG_ON(count != test_count);
2539763ecb03SLiam R. Howlett 	}
2540763ecb03SLiam R. Howlett #endif
2541183654ceSLiam R. Howlett 	vma_iter_set(vmi, start);
25426c26bd43SDavid Woodhouse 	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
25436c26bd43SDavid Woodhouse 	if (error)
2544606c812eSLiam R. Howlett 		goto clear_tree_failed;
25450378c0a0SLiam R. Howlett 
25466c26bd43SDavid Woodhouse 	/* Point of no return */
2547606c812eSLiam R. Howlett 	mm->locked_vm -= locked_vm;
2548763ecb03SLiam R. Howlett 	mm->map_count -= count;
2549408579cdSLiam R. Howlett 	if (unlock)
2550d8ed45c5SMichel Lespinasse 		mmap_write_downgrade(mm);
2551dd2283f2SYang Shi 
255268f48381SSuren Baghdasaryan 	/*
255368f48381SSuren Baghdasaryan 	 * We can free page tables without write-locking mmap_lock because VMAs
255468f48381SSuren Baghdasaryan 	 * were isolated before we downgraded mmap_lock.
255568f48381SSuren Baghdasaryan 	 */
2556408579cdSLiam R. Howlett 	unmap_region(mm, &mt_detach, vma, prev, next, start, end, !unlock);
2557763ecb03SLiam R. Howlett 	/* Statistics and freeing VMAs */
2558763ecb03SLiam R. Howlett 	mas_set(&mas_detach, start);
2559763ecb03SLiam R. Howlett 	remove_mt(mm, &mas_detach);
2560763ecb03SLiam R. Howlett 	__mt_destroy(&mt_detach);
2561ae80b404SLinus Torvalds 	validate_mm(mm);
2562408579cdSLiam R. Howlett 	if (unlock)
2563408579cdSLiam R. Howlett 		mmap_read_unlock(mm);
25641da177e4SLinus Torvalds 
2565408579cdSLiam R. Howlett 	return 0;
2566d4af56c5SLiam R. Howlett 
2567606c812eSLiam R. Howlett clear_tree_failed:
2568d4af56c5SLiam R. Howlett userfaultfd_error:
2569606c812eSLiam R. Howlett munmap_gather_failed:
2570763ecb03SLiam R. Howlett end_split_failed:
2571606c812eSLiam R. Howlett 	mas_set(&mas_detach, 0);
2572606c812eSLiam R. Howlett 	mas_for_each(&mas_detach, next, end)
2573606c812eSLiam R. Howlett 		vma_mark_detached(next, false);
2574606c812eSLiam R. Howlett 
2575763ecb03SLiam R. Howlett 	__mt_destroy(&mt_detach);
2576763ecb03SLiam R. Howlett start_split_failed:
2577763ecb03SLiam R. Howlett map_count_exceeded:
2578b5641a5dSLinus Torvalds 	validate_mm(mm);
2579d4af56c5SLiam R. Howlett 	return error;
25801da177e4SLinus Torvalds }
25811da177e4SLinus Torvalds 
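/*
 * Worked example (editor's illustration): unmapping a range [start, end)
 * that begins inside vma A and ends inside vma C first splits A at start
 * and C at end, then moves every vma covering [start, end) from the mm's
 * maple tree into the local mt_detach tree.  Only after that "point of
 * no return" are the page tables torn down and the detached vmas freed.
 */
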
258211f9a21aSLiam R. Howlett /*
2583183654ceSLiam R. Howlett  * do_vmi_munmap() - munmap a given range.
2584183654ceSLiam R. Howlett  * @vmi: The vma iterator
258511f9a21aSLiam R. Howlett  * @mm: The mm_struct
258611f9a21aSLiam R. Howlett  * @start: The start address to munmap
258711f9a21aSLiam R. Howlett  * @len: The length of the range to munmap
258811f9a21aSLiam R. Howlett  * @uf: The userfaultfd list_head
2589408579cdSLiam R. Howlett  * @unlock: set to true if the user wants to drop the mmap_lock on success
259011f9a21aSLiam R. Howlett  *
259211f9a21aSLiam R. Howlett  * This function takes a @vmi that is either pointing to the previous VMA or set
259211f9a21aSLiam R. Howlett  * to MA_START and sets it up to remove the mapping(s).  The @len will be
259411f9a21aSLiam R. Howlett  * aligned and any arch_unmap work will be performed.
259411f9a21aSLiam R. Howlett  *
2595408579cdSLiam R. Howlett  * Return: 0 on success, dropping the lock if so directed; otherwise an error
2596408579cdSLiam R. Howlett  * with the lock still held.
259711f9a21aSLiam R. Howlett  */
2598183654ceSLiam R. Howlett int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
259911f9a21aSLiam R. Howlett 		  unsigned long start, size_t len, struct list_head *uf,
2600408579cdSLiam R. Howlett 		  bool unlock)
260111f9a21aSLiam R. Howlett {
260211f9a21aSLiam R. Howlett 	unsigned long end;
260311f9a21aSLiam R. Howlett 	struct vm_area_struct *vma;
260411f9a21aSLiam R. Howlett 
260511f9a21aSLiam R. Howlett 	if (offset_in_page(start) || start > TASK_SIZE || len > TASK_SIZE - start)
260611f9a21aSLiam R. Howlett 		return -EINVAL;
260711f9a21aSLiam R. Howlett 
260811f9a21aSLiam R. Howlett 	end = start + PAGE_ALIGN(len);
260911f9a21aSLiam R. Howlett 	if (end == start)
261011f9a21aSLiam R. Howlett 		return -EINVAL;
261111f9a21aSLiam R. Howlett 
261211f9a21aSLiam R. Howlett 	/* arch_unmap() might do unmaps itself.  */
261311f9a21aSLiam R. Howlett 	arch_unmap(mm, start, end);
261411f9a21aSLiam R. Howlett 
261511f9a21aSLiam R. Howlett 	/* Find the first overlapping VMA */
2616183654ceSLiam R. Howlett 	vma = vma_find(vmi, end);
2617408579cdSLiam R. Howlett 	if (!vma) {
2618408579cdSLiam R. Howlett 		if (unlock)
2619408579cdSLiam R. Howlett 			mmap_write_unlock(mm);
262011f9a21aSLiam R. Howlett 		return 0;
2621408579cdSLiam R. Howlett 	}
262211f9a21aSLiam R. Howlett 
2623408579cdSLiam R. Howlett 	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
262411f9a21aSLiam R. Howlett }
262511f9a21aSLiam R. Howlett 
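/*
 * Worked example (editor's illustration): on a 4 KiB-page system,
 * do_vmi_munmap() with start = 0x10000 and len = 0x1001 computes
 * end = 0x10000 + PAGE_ALIGN(0x1001) = 0x12000 and unmaps two whole
 * pages, while an unaligned start such as 0x10100 fails the
 * offset_in_page() check with -EINVAL.
 */
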
262611f9a21aSLiam R. Howlett /* do_munmap() - Wrapper function for callers that are not maple-tree aware.
262711f9a21aSLiam R. Howlett  * @mm: The mm_struct
262811f9a21aSLiam R. Howlett  * @start: The start address to munmap
262911f9a21aSLiam R. Howlett  * @len: The length to be munmapped.
263011f9a21aSLiam R. Howlett  * @uf: The userfaultfd list_head
2631408579cdSLiam R. Howlett  *
2632408579cdSLiam R. Howlett  * Return: 0 on success, error otherwise.
263311f9a21aSLiam R. Howlett  */
2634dd2283f2SYang Shi int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2635dd2283f2SYang Shi 	      struct list_head *uf)
2636dd2283f2SYang Shi {
2637183654ceSLiam R. Howlett 	VMA_ITERATOR(vmi, mm, start);
263811f9a21aSLiam R. Howlett 
2639183654ceSLiam R. Howlett 	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
2640dd2283f2SYang Shi }
2641dd2283f2SYang Shi 
2642e99668a5SLiam R. Howlett unsigned long mmap_region(struct file *file, unsigned long addr,
2643e99668a5SLiam R. Howlett 		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2644e99668a5SLiam R. Howlett 		struct list_head *uf)
2645e99668a5SLiam R. Howlett {
2646e99668a5SLiam R. Howlett 	struct mm_struct *mm = current->mm;
2647e99668a5SLiam R. Howlett 	struct vm_area_struct *vma = NULL;
2648e99668a5SLiam R. Howlett 	struct vm_area_struct *next, *prev, *merge;
2649e99668a5SLiam R. Howlett 	pgoff_t pglen = len >> PAGE_SHIFT;
2650e99668a5SLiam R. Howlett 	unsigned long charged = 0;
2651e99668a5SLiam R. Howlett 	unsigned long end = addr + len;
2652e99668a5SLiam R. Howlett 	unsigned long merge_start = addr, merge_end = end;
2653e99668a5SLiam R. Howlett 	pgoff_t vm_pgoff;
2654e99668a5SLiam R. Howlett 	int error;
2655183654ceSLiam R. Howlett 	VMA_ITERATOR(vmi, mm, addr);
2656e99668a5SLiam R. Howlett 
2657e99668a5SLiam R. Howlett 	/* Check against address space limit. */
2658e99668a5SLiam R. Howlett 	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
2659e99668a5SLiam R. Howlett 		unsigned long nr_pages;
2660e99668a5SLiam R. Howlett 
2661e99668a5SLiam R. Howlett 		/*
2662e99668a5SLiam R. Howlett 		 * MAP_FIXED may remove pages of mappings that intersect with
2663e99668a5SLiam R. Howlett 		 * the requested mapping. Account for the pages it would unmap.
2664e99668a5SLiam R. Howlett 		 */
2665e99668a5SLiam R. Howlett 		nr_pages = count_vma_pages_range(mm, addr, end);
2666e99668a5SLiam R. Howlett 
2667e99668a5SLiam R. Howlett 		if (!may_expand_vm(mm, vm_flags,
2668e99668a5SLiam R. Howlett 					(len >> PAGE_SHIFT) - nr_pages))
2669e99668a5SLiam R. Howlett 			return -ENOMEM;
2670e99668a5SLiam R. Howlett 	}
2671e99668a5SLiam R. Howlett 
2672e99668a5SLiam R. Howlett 	/* Unmap any existing mapping in the area */
2673183654ceSLiam R. Howlett 	if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
2674e99668a5SLiam R. Howlett 		return -ENOMEM;
2675e99668a5SLiam R. Howlett 
2676e99668a5SLiam R. Howlett 	/*
2677e99668a5SLiam R. Howlett 	 * Private writable mapping: check memory availability
2678e99668a5SLiam R. Howlett 	 */
2679e99668a5SLiam R. Howlett 	if (accountable_mapping(file, vm_flags)) {
2680e99668a5SLiam R. Howlett 		charged = len >> PAGE_SHIFT;
2681e99668a5SLiam R. Howlett 		if (security_vm_enough_memory_mm(mm, charged))
2682e99668a5SLiam R. Howlett 			return -ENOMEM;
2683e99668a5SLiam R. Howlett 		vm_flags |= VM_ACCOUNT;
2684e99668a5SLiam R. Howlett 	}
2685e99668a5SLiam R. Howlett 
2686183654ceSLiam R. Howlett 	next = vma_next(&vmi);
2687183654ceSLiam R. Howlett 	prev = vma_prev(&vmi);
2688e99668a5SLiam R. Howlett 	if (vm_flags & VM_SPECIAL)
2689e99668a5SLiam R. Howlett 		goto cannot_expand;
2690e99668a5SLiam R. Howlett 
2691e99668a5SLiam R. Howlett 	/* Attempt to expand an old mapping */
2692e99668a5SLiam R. Howlett 	/* Check next */
2693e99668a5SLiam R. Howlett 	if (next && next->vm_start == end && !vma_policy(next) &&
2694e99668a5SLiam R. Howlett 	    can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
2695e99668a5SLiam R. Howlett 				 NULL_VM_UFFD_CTX, NULL)) {
2696e99668a5SLiam R. Howlett 		merge_end = next->vm_end;
2697e99668a5SLiam R. Howlett 		vma = next;
2698e99668a5SLiam R. Howlett 		vm_pgoff = next->vm_pgoff - pglen;
2699e99668a5SLiam R. Howlett 	}
2700e99668a5SLiam R. Howlett 
2701e99668a5SLiam R. Howlett 	/* Check prev */
2702e99668a5SLiam R. Howlett 	if (prev && prev->vm_end == addr && !vma_policy(prev) &&
2703e99668a5SLiam R. Howlett 	    (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
2704e99668a5SLiam R. Howlett 				       pgoff, vma->vm_userfaultfd_ctx, NULL) :
2705e99668a5SLiam R. Howlett 		   can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
2706e99668a5SLiam R. Howlett 				       NULL_VM_UFFD_CTX, NULL))) {
2707e99668a5SLiam R. Howlett 		merge_start = prev->vm_start;
2708e99668a5SLiam R. Howlett 		vma = prev;
2709e99668a5SLiam R. Howlett 		vm_pgoff = prev->vm_pgoff;
2710e99668a5SLiam R. Howlett 	}
2711e99668a5SLiam R. Howlett 
2713e99668a5SLiam R. Howlett 	/* Actually expand, if possible */
2714e99668a5SLiam R. Howlett 	if (vma &&
27153c441ab7SLiam R. Howlett 	    !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
2716e99668a5SLiam R. Howlett 		khugepaged_enter_vma(vma, vm_flags);
2717e99668a5SLiam R. Howlett 		goto expanded;
2718e99668a5SLiam R. Howlett 	}
2719e99668a5SLiam R. Howlett 
2720e99668a5SLiam R. Howlett cannot_expand:
27215c1c03deSLiam R. Howlett 	if (prev)
27225c1c03deSLiam R. Howlett 		vma_iter_next_range(&vmi);
27235c1c03deSLiam R. Howlett 
2724e99668a5SLiam R. Howlett 	/*
2725e99668a5SLiam R. Howlett 	 * Determine the object being mapped and call the appropriate
2726e99668a5SLiam R. Howlett 	 * specific mapper. The address has already been validated and any
2727e99668a5SLiam R. Howlett 	 * existing mappings in the range have already been removed.
2728e99668a5SLiam R. Howlett 	 */
2729e99668a5SLiam R. Howlett 	vma = vm_area_alloc(mm);
2730e99668a5SLiam R. Howlett 	if (!vma) {
2731e99668a5SLiam R. Howlett 		error = -ENOMEM;
2732e99668a5SLiam R. Howlett 		goto unacct_error;
2733e99668a5SLiam R. Howlett 	}
2734e99668a5SLiam R. Howlett 
27350fd5a9e2SLiam R. Howlett 	vma_iter_set(&vmi, addr);
2736e99668a5SLiam R. Howlett 	vma->vm_start = addr;
2737e99668a5SLiam R. Howlett 	vma->vm_end = end;
27381c71222eSSuren Baghdasaryan 	vm_flags_init(vma, vm_flags);
2739e99668a5SLiam R. Howlett 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
2740e99668a5SLiam R. Howlett 	vma->vm_pgoff = pgoff;
2741e99668a5SLiam R. Howlett 
2742e99668a5SLiam R. Howlett 	if (file) {
2743e99668a5SLiam R. Howlett 		if (vm_flags & VM_SHARED) {
2744e99668a5SLiam R. Howlett 			error = mapping_map_writable(file->f_mapping);
2745e99668a5SLiam R. Howlett 			if (error)
2746e99668a5SLiam R. Howlett 				goto free_vma;
2747e99668a5SLiam R. Howlett 		}
2748e99668a5SLiam R. Howlett 
2749e99668a5SLiam R. Howlett 		vma->vm_file = get_file(file);
2750e99668a5SLiam R. Howlett 		error = call_mmap(file, vma);
2751e99668a5SLiam R. Howlett 		if (error)
2752e99668a5SLiam R. Howlett 			goto unmap_and_free_vma;
2753e99668a5SLiam R. Howlett 
2754a57b7051SLiam Howlett 		/*
2755a57b7051SLiam Howlett 		 * Expansion is handled above, merging is handled below.
2756a57b7051SLiam Howlett 		 * Drivers should not alter the address of the VMA.
2757e99668a5SLiam R. Howlett 		 */
2758a57b7051SLiam Howlett 		error = -EINVAL;
2759cc8d1b09SLiam R. Howlett 		if (WARN_ON((addr != vma->vm_start)))
2760a57b7051SLiam Howlett 			goto close_and_free_vma;
2761e99668a5SLiam R. Howlett 
2762cc8d1b09SLiam R. Howlett 		vma_iter_set(&vmi, addr);
2763e99668a5SLiam R. Howlett 		/*
2764e99668a5SLiam R. Howlett 		 * If vm_flags changed after call_mmap(), we should try to merge
2765e99668a5SLiam R. Howlett 		 * the vma again, as we may succeed this time.
2766e99668a5SLiam R. Howlett 		 */
2767e99668a5SLiam R. Howlett 		if (unlikely(vm_flags != vma->vm_flags && prev)) {
27689760ebffSLiam R. Howlett 			merge = vma_merge(&vmi, mm, prev, vma->vm_start,
27699760ebffSLiam R. Howlett 				    vma->vm_end, vma->vm_flags, NULL,
27709760ebffSLiam R. Howlett 				    vma->vm_file, vma->vm_pgoff, NULL,
27719760ebffSLiam R. Howlett 				    NULL_VM_UFFD_CTX, NULL);
2772e99668a5SLiam R. Howlett 			if (merge) {
2773e99668a5SLiam R. Howlett 				/*
2774e99668a5SLiam R. Howlett 				 * ->mmap() can change vma->vm_file and fput
2775e99668a5SLiam R. Howlett 				 * the original file. So fput the vma->vm_file
2776e99668a5SLiam R. Howlett 				 * here, or we would add an extra fput for the
2777e99668a5SLiam R. Howlett 				 * file and ultimately cause a general
2778e99668a5SLiam R. Howlett 				 * protection fault.
2779e99668a5SLiam R. Howlett 				 */
2780e99668a5SLiam R. Howlett 				fput(vma->vm_file);
2781e99668a5SLiam R. Howlett 				vm_area_free(vma);
2782e99668a5SLiam R. Howlett 				vma = merge;
2783e99668a5SLiam R. Howlett 				/* Update vm_flags to pick up the change. */
2784e99668a5SLiam R. Howlett 				vm_flags = vma->vm_flags;
2785e99668a5SLiam R. Howlett 				goto unmap_writable;
2786e99668a5SLiam R. Howlett 			}
2787e99668a5SLiam R. Howlett 		}
2788e99668a5SLiam R. Howlett 
2789e99668a5SLiam R. Howlett 		vm_flags = vma->vm_flags;
2790e99668a5SLiam R. Howlett 	} else if (vm_flags & VM_SHARED) {
2791e99668a5SLiam R. Howlett 		error = shmem_zero_setup(vma);
2792e99668a5SLiam R. Howlett 		if (error)
2793e99668a5SLiam R. Howlett 			goto free_vma;
2794e99668a5SLiam R. Howlett 	} else {
2795e99668a5SLiam R. Howlett 		vma_set_anonymous(vma);
2796e99668a5SLiam R. Howlett 	}
2797e99668a5SLiam R. Howlett 
2798b507808eSJoey Gouly 	if (map_deny_write_exec(vma, vma->vm_flags)) {
2799b507808eSJoey Gouly 		error = -EACCES;
2800b507808eSJoey Gouly 		goto close_and_free_vma;
2801b507808eSJoey Gouly 	}
2802b507808eSJoey Gouly 
2803e99668a5SLiam R. Howlett 	/* Allow architectures to sanity-check the vm_flags */
2804e99668a5SLiam R. Howlett 	error = -EINVAL;
2805cc8d1b09SLiam R. Howlett 	if (!arch_validate_flags(vma->vm_flags))
2806deb0f656SCarlos Llamas 		goto close_and_free_vma;
2807e99668a5SLiam R. Howlett 
2808e99668a5SLiam R. Howlett 	error = -ENOMEM;
2809cc8d1b09SLiam R. Howlett 	if (vma_iter_prealloc(&vmi))
28105789151eSMike Kravetz 		goto close_and_free_vma;
2811e99668a5SLiam R. Howlett 
28121c7873e3SHugh Dickins 	/* Lock the VMA since it is modified after insertion into VMA tree */
28131c7873e3SHugh Dickins 	vma_start_write(vma);
2814e99668a5SLiam R. Howlett 	if (vma->vm_file)
2815e99668a5SLiam R. Howlett 		i_mmap_lock_write(vma->vm_file->f_mapping);
2816e99668a5SLiam R. Howlett 
2817183654ceSLiam R. Howlett 	vma_iter_store(&vmi, vma);
2818e99668a5SLiam R. Howlett 	mm->map_count++;
2819e99668a5SLiam R. Howlett 	if (vma->vm_file) {
2820e99668a5SLiam R. Howlett 		if (vma->vm_flags & VM_SHARED)
2821e99668a5SLiam R. Howlett 			mapping_allow_writable(vma->vm_file->f_mapping);
2822e99668a5SLiam R. Howlett 
2823e99668a5SLiam R. Howlett 		flush_dcache_mmap_lock(vma->vm_file->f_mapping);
2824e99668a5SLiam R. Howlett 		vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
2825e99668a5SLiam R. Howlett 		flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
2826e99668a5SLiam R. Howlett 		i_mmap_unlock_write(vma->vm_file->f_mapping);
2827e99668a5SLiam R. Howlett 	}
2828e99668a5SLiam R. Howlett 
2829e99668a5SLiam R. Howlett 	/*
2830e99668a5SLiam R. Howlett 	 * vma_merge() calls khugepaged_enter_vma() as well; the call
2831e99668a5SLiam R. Howlett 	 * below covers the non-merge case.
2832e99668a5SLiam R. Howlett 	 */
2833e99668a5SLiam R. Howlett 	khugepaged_enter_vma(vma, vma->vm_flags);
2834e99668a5SLiam R. Howlett 
2835e99668a5SLiam R. Howlett 	/* Once vma denies write, undo our temporary denial count */
2836e99668a5SLiam R. Howlett unmap_writable:
2837e99668a5SLiam R. Howlett 	if (file && vm_flags & VM_SHARED)
2838e99668a5SLiam R. Howlett 		mapping_unmap_writable(file->f_mapping);
2839e99668a5SLiam R. Howlett 	file = vma->vm_file;
2840d7597f59SStefan Roesch 	ksm_add_vma(vma);
2841e99668a5SLiam R. Howlett expanded:
2842e99668a5SLiam R. Howlett 	perf_event_mmap(vma);
2843e99668a5SLiam R. Howlett 
2844e99668a5SLiam R. Howlett 	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
2845e99668a5SLiam R. Howlett 	if (vm_flags & VM_LOCKED) {
2846e99668a5SLiam R. Howlett 		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2847e99668a5SLiam R. Howlett 					is_vm_hugetlb_page(vma) ||
2848e99668a5SLiam R. Howlett 					vma == get_gate_vma(current->mm))
2849e430a95aSSuren Baghdasaryan 			vm_flags_clear(vma, VM_LOCKED_MASK);
2850e99668a5SLiam R. Howlett 		else
2851e99668a5SLiam R. Howlett 			mm->locked_vm += (len >> PAGE_SHIFT);
2852e99668a5SLiam R. Howlett 	}
2853e99668a5SLiam R. Howlett 
2854e99668a5SLiam R. Howlett 	if (file)
2855e99668a5SLiam R. Howlett 		uprobe_mmap(vma);
2856e99668a5SLiam R. Howlett 
2857e99668a5SLiam R. Howlett 	/*
2858e99668a5SLiam R. Howlett 	 * A new (or expanded) vma always gets soft-dirty status.
2859e99668a5SLiam R. Howlett 	 * Otherwise the user-space soft-dirty page tracker would be
2860e99668a5SLiam R. Howlett 	 * unable to distinguish a vma that was unmapped and then
2861e99668a5SLiam R. Howlett 	 * mapped again in place (which must be treated as
2862e99668a5SLiam R. Howlett 	 * a completely new data area).
2863e99668a5SLiam R. Howlett 	 */
28641c71222eSSuren Baghdasaryan 	vm_flags_set(vma, VM_SOFTDIRTY);
2865e99668a5SLiam R. Howlett 
2866e99668a5SLiam R. Howlett 	vma_set_page_prot(vma);
2867e99668a5SLiam R. Howlett 
2868e99668a5SLiam R. Howlett 	validate_mm(mm);
2869e99668a5SLiam R. Howlett 	return addr;
2870e99668a5SLiam R. Howlett 
2871deb0f656SCarlos Llamas close_and_free_vma:
2872cc8d1b09SLiam R. Howlett 	if (file && vma->vm_ops && vma->vm_ops->close)
2873deb0f656SCarlos Llamas 		vma->vm_ops->close(vma);
2874cc8d1b09SLiam R. Howlett 
2875cc8d1b09SLiam R. Howlett 	if (file || vma->vm_file) {
2876e99668a5SLiam R. Howlett unmap_and_free_vma:
2877e99668a5SLiam R. Howlett 		fput(vma->vm_file);
2878e99668a5SLiam R. Howlett 		vma->vm_file = NULL;
2879e99668a5SLiam R. Howlett 
2880e99668a5SLiam R. Howlett 		/* Undo any partial mapping done by a device driver. */
2881cc8d1b09SLiam R. Howlett 		unmap_region(mm, &mm->mm_mt, vma, prev, next, vma->vm_start,
288268f48381SSuren Baghdasaryan 			     vma->vm_end, true);
2883cc8d1b09SLiam R. Howlett 	}
2884cc674ab3SLi Zetao 	if (file && (vm_flags & VM_SHARED))
2885e99668a5SLiam R. Howlett 		mapping_unmap_writable(file->f_mapping);
2886e99668a5SLiam R. Howlett free_vma:
2887e99668a5SLiam R. Howlett 	vm_area_free(vma);
2888e99668a5SLiam R. Howlett unacct_error:
2889e99668a5SLiam R. Howlett 	if (charged)
2890e99668a5SLiam R. Howlett 		vm_unacct_memory(charged);
2891e99668a5SLiam R. Howlett 	validate_mm(mm);
2892e99668a5SLiam R. Howlett 	return error;
2893e99668a5SLiam R. Howlett }
2894e99668a5SLiam R. Howlett 
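/*
 * Userspace illustration (editor's sketch; typical behaviour, not an
 * ABI guarantee): two adjacent anonymous mappings with identical
 * protections are normally merged by the expand/vma_merge() logic in
 * mmap_region() above, showing up as a single line in /proc/self/maps.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 16 * page;
	char *a, *b;

	a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (a == MAP_FAILED)
		return 1;
	/* MAP_FIXED immediately after @a makes the regions adjacent. */
	b = mmap(a + len, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (b == MAP_FAILED)
		return 1;
	printf("inspect /proc/%d/maps: usually one vma\n", getpid());
	pause();	/* keep the process alive for inspection */
	return 0;
}
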
2895408579cdSLiam R. Howlett static int __vm_munmap(unsigned long start, size_t len, bool unlock)
2896a46ef99dSLinus Torvalds {
2897a46ef99dSLinus Torvalds 	int ret;
2898bfce281cSAl Viro 	struct mm_struct *mm = current->mm;
2899897ab3e0SMike Rapoport 	LIST_HEAD(uf);
2900183654ceSLiam R. Howlett 	VMA_ITERATOR(vmi, mm, start);
2901a46ef99dSLinus Torvalds 
2902d8ed45c5SMichel Lespinasse 	if (mmap_write_lock_killable(mm))
2903ae798783SMichal Hocko 		return -EINTR;
2904ae798783SMichal Hocko 
2905408579cdSLiam R. Howlett 	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2906408579cdSLiam R. Howlett 	if (ret || !unlock)
2907d8ed45c5SMichel Lespinasse 		mmap_write_unlock(mm);
2908dd2283f2SYang Shi 
2909897ab3e0SMike Rapoport 	userfaultfd_unmap_complete(mm, &uf);
2910a46ef99dSLinus Torvalds 	return ret;
2911a46ef99dSLinus Torvalds }
2912dd2283f2SYang Shi 
2913dd2283f2SYang Shi int vm_munmap(unsigned long start, size_t len)
2914dd2283f2SYang Shi {
2915dd2283f2SYang Shi 	return __vm_munmap(start, len, false);
2916dd2283f2SYang Shi }
2917a46ef99dSLinus Torvalds EXPORT_SYMBOL(vm_munmap);
2918a46ef99dSLinus Torvalds 
29196a6160a7SHeiko Carstens SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
29201da177e4SLinus Torvalds {
2921ce18d171SCatalin Marinas 	addr = untagged_addr(addr);
2922dd2283f2SYang Shi 	return __vm_munmap(addr, len, true);
29231da177e4SLinus Torvalds }
29241da177e4SLinus Torvalds 
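/*
 * Userspace illustration (editor's sketch, not kernel text): a partial
 * munmap() in the middle of a mapping exercises __split_vma() above
 * and leaves two vmas behind.
 */
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* Unmap the middle page: [p, p+page) and [p+2*page, p+3*page) remain. */
	return munmap(p + page, page);
}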
2925c8d78c18SKirill A. Shutemov 
2926c8d78c18SKirill A. Shutemov /*
2927c8d78c18SKirill A. Shutemov  * Emulation of deprecated remap_file_pages() syscall.
2928c8d78c18SKirill A. Shutemov  */
2929c8d78c18SKirill A. Shutemov SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2930c8d78c18SKirill A. Shutemov 		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2931c8d78c18SKirill A. Shutemov {
2933c8d78c18SKirill A. Shutemov 	struct mm_struct *mm = current->mm;
2934c8d78c18SKirill A. Shutemov 	struct vm_area_struct *vma;
2935c8d78c18SKirill A. Shutemov 	unsigned long populate = 0;
2936c8d78c18SKirill A. Shutemov 	unsigned long ret = -EINVAL;
2937c8d78c18SKirill A. Shutemov 	struct file *file;
2938c8d78c18SKirill A. Shutemov 
2939ee65728eSMike Rapoport 	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
2940c8d78c18SKirill A. Shutemov 		     current->comm, current->pid);
2941c8d78c18SKirill A. Shutemov 
2942c8d78c18SKirill A. Shutemov 	if (prot)
2943c8d78c18SKirill A. Shutemov 		return ret;
2944c8d78c18SKirill A. Shutemov 	start = start & PAGE_MASK;
2945c8d78c18SKirill A. Shutemov 	size = size & PAGE_MASK;
2946c8d78c18SKirill A. Shutemov 
2947c8d78c18SKirill A. Shutemov 	if (start + size <= start)
2948c8d78c18SKirill A. Shutemov 		return ret;
2949c8d78c18SKirill A. Shutemov 
2950c8d78c18SKirill A. Shutemov 	/* Does pgoff wrap? */
2951c8d78c18SKirill A. Shutemov 	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2952c8d78c18SKirill A. Shutemov 		return ret;
2953c8d78c18SKirill A. Shutemov 
2954d8ed45c5SMichel Lespinasse 	if (mmap_write_lock_killable(mm))
2955dc0ef0dfSMichal Hocko 		return -EINTR;
2956dc0ef0dfSMichal Hocko 
29579b593cb2SLiam R. Howlett 	vma = vma_lookup(mm, start);
2958c8d78c18SKirill A. Shutemov 
2959c8d78c18SKirill A. Shutemov 	if (!vma || !(vma->vm_flags & VM_SHARED))
2960c8d78c18SKirill A. Shutemov 		goto out;
2961c8d78c18SKirill A. Shutemov 
296248f7df32SKirill A. Shutemov 	if (start + size > vma->vm_end) {
2963763ecb03SLiam R. Howlett 		VMA_ITERATOR(vmi, mm, vma->vm_end);
2964763ecb03SLiam R. Howlett 		struct vm_area_struct *next, *prev = vma;
296548f7df32SKirill A. Shutemov 
2966763ecb03SLiam R. Howlett 		for_each_vma_range(vmi, next, start + size) {
296748f7df32SKirill A. Shutemov 			/* hole between vmas? */
2968763ecb03SLiam R. Howlett 			if (next->vm_start != prev->vm_end)
296948f7df32SKirill A. Shutemov 				goto out;
297048f7df32SKirill A. Shutemov 
297148f7df32SKirill A. Shutemov 			if (next->vm_file != vma->vm_file)
297248f7df32SKirill A. Shutemov 				goto out;
297348f7df32SKirill A. Shutemov 
297448f7df32SKirill A. Shutemov 			if (next->vm_flags != vma->vm_flags)
297548f7df32SKirill A. Shutemov 				goto out;
297648f7df32SKirill A. Shutemov 
29771db43d3fSLiam Howlett 			if (start + size <= next->vm_end)
29781db43d3fSLiam Howlett 				break;
29791db43d3fSLiam Howlett 
2980763ecb03SLiam R. Howlett 			prev = next;
298148f7df32SKirill A. Shutemov 		}
298248f7df32SKirill A. Shutemov 
298348f7df32SKirill A. Shutemov 		if (!next)
2984c8d78c18SKirill A. Shutemov 			goto out;
2985c8d78c18SKirill A. Shutemov 	}
2986c8d78c18SKirill A. Shutemov 
2987c8d78c18SKirill A. Shutemov 	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
2988c8d78c18SKirill A. Shutemov 	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
2989c8d78c18SKirill A. Shutemov 	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
2990c8d78c18SKirill A. Shutemov 
2991c8d78c18SKirill A. Shutemov 	flags &= MAP_NONBLOCK;
2992c8d78c18SKirill A. Shutemov 	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
2993fce000b1SLiam Howlett 	if (vma->vm_flags & VM_LOCKED)
2994c8d78c18SKirill A. Shutemov 		flags |= MAP_LOCKED;
299548f7df32SKirill A. Shutemov 
2996c8d78c18SKirill A. Shutemov 	file = get_file(vma->vm_file);
299745e55300SPeter Collingbourne 	ret = do_mmap(vma->vm_file, start, size,
2998*592b5fadSYu-cheng Yu 			prot, flags, 0, pgoff, &populate, NULL);
2999c8d78c18SKirill A. Shutemov 	fput(file);
3000c8d78c18SKirill A. Shutemov out:
3001d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
3002c8d78c18SKirill A. Shutemov 	if (populate)
3003c8d78c18SKirill A. Shutemov 		mm_populate(ret, populate);
3004c8d78c18SKirill A. Shutemov 	if (!IS_ERR_VALUE(ret))
3005c8d78c18SKirill A. Shutemov 		ret = 0;
3006c8d78c18SKirill A. Shutemov 	return ret;
3007c8d78c18SKirill A. Shutemov }
3008c8d78c18SKirill A. Shutemov 
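/*
 * Userspace illustration (editor's sketch; the file name is an
 * assumption): the deprecated call this emulates.  Rebinding the first
 * page of a shared file mapping to file page 3 is now serviced by the
 * MAP_FIXED do_mmap() seen above.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("data.bin", O_RDWR);	/* assumed >= 4 pages long */
	char *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	if (fd < 0 || p == MAP_FAILED)
		return 1;
	/* Page 0 of the window now shows file page 3. */
	return remap_file_pages(p, page, 0, 3, 0);
}
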
30091da177e4SLinus Torvalds /*
301027b26701SLiam R. Howlett  * do_vma_munmap() - Unmap a full or partial vma.
301127b26701SLiam R. Howlett  * @vmi: The vma iterator pointing at the vma
301227b26701SLiam R. Howlett  * @vma: The first vma to be munmapped
301327b26701SLiam R. Howlett  * @start: The start of the address range to unmap
301427b26701SLiam R. Howlett  * @end: The end of the address range to unmap
30152e7ce7d3SLiam R. Howlett  * @uf: The userfaultfd list_head
3016408579cdSLiam R. Howlett  * @unlock: Drop the lock on success
30172e7ce7d3SLiam R. Howlett  *
301827b26701SLiam R. Howlett  * Unmaps a VMA mapping when the vma iterator is already in position.
301927b26701SLiam R. Howlett  * Does not handle alignment.
3020408579cdSLiam R. Howlett  *
3021408579cdSLiam R. Howlett  * Return: 0 on success, dropping the lock if so directed; on failure, an
3022408579cdSLiam R. Howlett  * error is returned and the lock is still held.
30231da177e4SLinus Torvalds  */
302427b26701SLiam R. Howlett int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3025408579cdSLiam R. Howlett 		unsigned long start, unsigned long end, struct list_head *uf,
3026408579cdSLiam R. Howlett 		bool unlock)
30272e7ce7d3SLiam R. Howlett {
30282e7ce7d3SLiam R. Howlett 	struct mm_struct *mm = vma->vm_mm;
30292e7ce7d3SLiam R. Howlett 
303027b26701SLiam R. Howlett 	arch_unmap(mm, start, end);
3031b5641a5dSLinus Torvalds 	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
30322e7ce7d3SLiam R. Howlett }
30332e7ce7d3SLiam R. Howlett 
30342e7ce7d3SLiam R. Howlett /*
30352e7ce7d3SLiam R. Howlett  * do_brk_flags() - Increase the brk vma if the flags match.
303692fed820SLiam R. Howlett  * @vmi: The vma iterator
30372e7ce7d3SLiam R. Howlett  * @vma: The vma to extend, or NULL
30382e7ce7d3SLiam R. Howlett  * @addr: The start address
30392e7ce7d3SLiam R. Howlett  * @len: The length of the increase
30402e7ce7d3SLiam R. Howlett  * @flags: The VMA Flags
30412e7ce7d3SLiam R. Howlett  *
30422e7ce7d3SLiam R. Howlett  * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
30432e7ce7d3SLiam R. Howlett  * do not match then create a new anonymous VMA.  Eventually we may be able to
30442e7ce7d3SLiam R. Howlett  * do some brk-specific accounting here.
30452e7ce7d3SLiam R. Howlett  */
304692fed820SLiam R. Howlett static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
3047763ecb03SLiam R. Howlett 		unsigned long addr, unsigned long len, unsigned long flags)
30481da177e4SLinus Torvalds {
30491da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
3050287051b1SLiam R. Howlett 	struct vma_prepare vp;
30512e7ce7d3SLiam R. Howlett 
3052b50e195fSLiam R. Howlett 	validate_mm(mm);
30532e7ce7d3SLiam R. Howlett 	/*
30542e7ce7d3SLiam R. Howlett 	 * Check the changed size against address space limits.
30552e7ce7d3SLiam R. Howlett 	 * Note: This happens *after* clearing old mappings in some code paths.
30562e7ce7d3SLiam R. Howlett 	 */
305716e72e9bSDenys Vlasenko 	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
305884638335SKonstantin Khlebnikov 	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
30591da177e4SLinus Torvalds 		return -ENOMEM;
30601da177e4SLinus Torvalds 
30611da177e4SLinus Torvalds 	if (mm->map_count > sysctl_max_map_count)
30621da177e4SLinus Torvalds 		return -ENOMEM;
30631da177e4SLinus Torvalds 
3064191c5424SAl Viro 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
30651da177e4SLinus Torvalds 		return -ENOMEM;
30661da177e4SLinus Torvalds 
30671da177e4SLinus Torvalds 	/*
30682e7ce7d3SLiam R. Howlett 	 * Expand the existing vma if possible; Note that singular lists do not
30692e7ce7d3SLiam R. Howlett 	 * occur after forking, so the expand will only happen on new VMAs.
30701da177e4SLinus Torvalds 	 */
30716c28ca64SLiam Howlett 	if (vma && vma->vm_end == addr && !vma_policy(vma) &&
30726c28ca64SLiam Howlett 	    can_vma_merge_after(vma, flags, NULL, NULL,
30736c28ca64SLiam Howlett 				addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
307492fed820SLiam R. Howlett 		if (vma_iter_prealloc(vmi))
3075675eaca1SAlistair Popple 			goto unacct_fail;
307628c5609fSLiam Howlett 
3077287051b1SLiam R. Howlett 		init_vma_prep(&vp, vma);
3078287051b1SLiam R. Howlett 		vma_prepare(&vp);
3079ccf1d78dSSuren Baghdasaryan 		vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
30802e7ce7d3SLiam R. Howlett 		vma->vm_end = addr + len;
30811c71222eSSuren Baghdasaryan 		vm_flags_set(vma, VM_SOFTDIRTY);
308292fed820SLiam R. Howlett 		vma_iter_store(vmi, vma);
30832e7ce7d3SLiam R. Howlett 
3084287051b1SLiam R. Howlett 		vma_complete(&vp, vmi, mm);
30852e7ce7d3SLiam R. Howlett 		khugepaged_enter_vma(vma, flags);
30862e7ce7d3SLiam R. Howlett 		goto out;
30872e7ce7d3SLiam R. Howlett 	}
30882e7ce7d3SLiam R. Howlett 
30892e7ce7d3SLiam R. Howlett 	/* create a vma struct for an anonymous mapping */
30902e7ce7d3SLiam R. Howlett 	vma = vm_area_alloc(mm);
30912e7ce7d3SLiam R. Howlett 	if (!vma)
3092675eaca1SAlistair Popple 		goto unacct_fail;
30931da177e4SLinus Torvalds 
3094bfd40eafSKirill A. Shutemov 	vma_set_anonymous(vma);
30951da177e4SLinus Torvalds 	vma->vm_start = addr;
30961da177e4SLinus Torvalds 	vma->vm_end = addr + len;
30972e7ce7d3SLiam R. Howlett 	vma->vm_pgoff = addr >> PAGE_SHIFT;
30981c71222eSSuren Baghdasaryan 	vm_flags_init(vma, flags);
30993ed75eb8SColy Li 	vma->vm_page_prot = vm_get_page_prot(flags);
310092fed820SLiam R. Howlett 	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
31012e7ce7d3SLiam R. Howlett 		goto mas_store_fail;
3102d4af56c5SLiam R. Howlett 
31032e7ce7d3SLiam R. Howlett 	mm->map_count++;
3104d7597f59SStefan Roesch 	ksm_add_vma(vma);
31051da177e4SLinus Torvalds out:
31063af9e859SEric B Munson 	perf_event_mmap(vma);
31071da177e4SLinus Torvalds 	mm->total_vm += len >> PAGE_SHIFT;
310884638335SKonstantin Khlebnikov 	mm->data_vm += len >> PAGE_SHIFT;
3109128557ffSMichel Lespinasse 	if (flags & VM_LOCKED)
3110ba470de4SRik van Riel 		mm->locked_vm += (len >> PAGE_SHIFT);
31111c71222eSSuren Baghdasaryan 	vm_flags_set(vma, VM_SOFTDIRTY);
3112763ecb03SLiam R. Howlett 	validate_mm(mm);
31135d22fc25SLinus Torvalds 	return 0;
3114d4af56c5SLiam R. Howlett 
31152e7ce7d3SLiam R. Howlett mas_store_fail:
3116d4af56c5SLiam R. Howlett 	vm_area_free(vma);
3117675eaca1SAlistair Popple unacct_fail:
31182e7ce7d3SLiam R. Howlett 	vm_unacct_memory(len >> PAGE_SHIFT);
31192e7ce7d3SLiam R. Howlett 	return -ENOMEM;
31201da177e4SLinus Torvalds }
31211da177e4SLinus Torvalds 
3122bb177a73SMichal Hocko int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3123e4eb1ff6SLinus Torvalds {
3124e4eb1ff6SLinus Torvalds 	struct mm_struct *mm = current->mm;
31252e7ce7d3SLiam R. Howlett 	struct vm_area_struct *vma = NULL;
3126bb177a73SMichal Hocko 	unsigned long len;
31275d22fc25SLinus Torvalds 	int ret;
3128128557ffSMichel Lespinasse 	bool populate;
3129897ab3e0SMike Rapoport 	LIST_HEAD(uf);
313092fed820SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, addr);
3131e4eb1ff6SLinus Torvalds 
3132bb177a73SMichal Hocko 	len = PAGE_ALIGN(request);
3133bb177a73SMichal Hocko 	if (len < request)
3134bb177a73SMichal Hocko 		return -ENOMEM;
3135bb177a73SMichal Hocko 	if (!len)
3136bb177a73SMichal Hocko 		return 0;
3137bb177a73SMichal Hocko 
31412e7ce7d3SLiam R. Howlett 	/* Until we need other flags, refuse anything except VM_EXEC. */
31422e7ce7d3SLiam R. Howlett 	if ((flags & (~VM_EXEC)) != 0)
31432e7ce7d3SLiam R. Howlett 		return -EINVAL;
31402d6c9282SMichal Hocko 
3138d8ed45c5SMichel Lespinasse 	if (mmap_write_lock_killable(mm))
31392d6c9282SMichal Hocko 		return -EINTR;
31442e7ce7d3SLiam R. Howlett 
31452e7ce7d3SLiam R. Howlett 	ret = check_brk_limits(addr, len);
31462e7ce7d3SLiam R. Howlett 	if (ret)
31472e7ce7d3SLiam R. Howlett 		goto limits_failed;
31482e7ce7d3SLiam R. Howlett 
3149183654ceSLiam R. Howlett 	ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
31502e7ce7d3SLiam R. Howlett 	if (ret)
31512e7ce7d3SLiam R. Howlett 		goto munmap_failed;
31522e7ce7d3SLiam R. Howlett 
315392fed820SLiam R. Howlett 	vma = vma_prev(&vmi);
315492fed820SLiam R. Howlett 	ret = do_brk_flags(&vmi, vma, addr, len, flags);
3155128557ffSMichel Lespinasse 	populate = ((mm->def_flags & VM_LOCKED) != 0);
3156d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
3157897ab3e0SMike Rapoport 	userfaultfd_unmap_complete(mm, &uf);
31585d22fc25SLinus Torvalds 	if (populate && !ret)
3159128557ffSMichel Lespinasse 		mm_populate(addr, len);
3160e4eb1ff6SLinus Torvalds 	return ret;
31612e7ce7d3SLiam R. Howlett 
31622e7ce7d3SLiam R. Howlett munmap_failed:
31632e7ce7d3SLiam R. Howlett limits_failed:
31642e7ce7d3SLiam R. Howlett 	mmap_write_unlock(mm);
31652e7ce7d3SLiam R. Howlett 	return ret;
3166e4eb1ff6SLinus Torvalds }
316716e72e9bSDenys Vlasenko EXPORT_SYMBOL(vm_brk_flags);
316816e72e9bSDenys Vlasenko 
316916e72e9bSDenys Vlasenko int vm_brk(unsigned long addr, unsigned long len)
317016e72e9bSDenys Vlasenko {
317116e72e9bSDenys Vlasenko 	return vm_brk_flags(addr, len, 0);
317216e72e9bSDenys Vlasenko }
3173e4eb1ff6SLinus Torvalds EXPORT_SYMBOL(vm_brk);
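
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * how a binary loader might use vm_brk_flags() to set up a zero-filled
 * region such as a bss segment.  The name and parameters are assumptions
 * for illustration only.
 */
static int __maybe_unused example_map_bss(unsigned long bss_start,
					  unsigned long bss_end)
{
	unsigned long len = bss_end - bss_start;

	if (!len)
		return 0;

	/* Anonymous, non-executable mapping; 0 requests no extra VM flags. */
	return vm_brk_flags(bss_start, len, 0);
}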
31741da177e4SLinus Torvalds 
31751da177e4SLinus Torvalds /* Release all mmaps. */
31761da177e4SLinus Torvalds void exit_mmap(struct mm_struct *mm)
31771da177e4SLinus Torvalds {
3178d16dfc55SPeter Zijlstra 	struct mmu_gather tlb;
3179ba470de4SRik van Riel 	struct vm_area_struct *vma;
31801da177e4SLinus Torvalds 	unsigned long nr_accounted = 0;
3181763ecb03SLiam R. Howlett 	MA_STATE(mas, &mm->mm_mt, 0, 0);
3182763ecb03SLiam R. Howlett 	int count = 0;
31831da177e4SLinus Torvalds 
3184d6dd61c8SJeremy Fitzhardinge 	/* mm's last user has gone, and it's about to be pulled down */
3185cddb8a5cSAndrea Arcangeli 	mmu_notifier_release(mm);
3186d6dd61c8SJeremy Fitzhardinge 
3187bf3980c8SSuren Baghdasaryan 	mmap_read_lock(mm);
31889480c53eSJeremy Fitzhardinge 	arch_exit_mmap(mm);
31899480c53eSJeremy Fitzhardinge 
3190763ecb03SLiam R. Howlett 	vma = mas_find(&mas, ULONG_MAX);
319164591e86SSuren Baghdasaryan 	if (!vma) {
319264591e86SSuren Baghdasaryan 		/* Can happen if dup_mmap() received an OOM */
3193bf3980c8SSuren Baghdasaryan 		mmap_read_unlock(mm);
31949480c53eSJeremy Fitzhardinge 		return;
319564591e86SSuren Baghdasaryan 	}
31969480c53eSJeremy Fitzhardinge 
31971da177e4SLinus Torvalds 	lru_add_drain();
31981da177e4SLinus Torvalds 	flush_cache_mm(mm);
3199d8b45053SWill Deacon 	tlb_gather_mmu_fullmm(&tlb, mm);
3200901608d9SOleg Nesterov 	/* update_hiwater_rss(mm) here? but nobody should be looking */
3201763ecb03SLiam R. Howlett 	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
320268f48381SSuren Baghdasaryan 	unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX, false);
3203bf3980c8SSuren Baghdasaryan 	mmap_read_unlock(mm);
3204bf3980c8SSuren Baghdasaryan 
3205bf3980c8SSuren Baghdasaryan 	/*
3206bf3980c8SSuren Baghdasaryan 	 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
3207b3541d91SSuren Baghdasaryan 	 * because the memory has already been freed.
3208bf3980c8SSuren Baghdasaryan 	 */
3209bf3980c8SSuren Baghdasaryan 	set_bit(MMF_OOM_SKIP, &mm->flags);
3210bf3980c8SSuren Baghdasaryan 	mmap_write_lock(mm);
32113dd44325SLiam R. Howlett 	mt_clear_in_rcu(&mm->mm_mt);
3212763ecb03SLiam R. Howlett 	free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
321398e51a22SSuren Baghdasaryan 		      USER_PGTABLES_CEILING, true);
3214ae8eba8bSWill Deacon 	tlb_finish_mmu(&tlb);
32151da177e4SLinus Torvalds 
3216763ecb03SLiam R. Howlett 	/*
3217763ecb03SLiam R. Howlett 	 * Walk the list again, actually closing and freeing it, with preemption
3218763ecb03SLiam R. Howlett 	 * enabled, without holding any MM locks besides the unreachable
3219763ecb03SLiam R. Howlett 	 * mmap_write_lock.
3220763ecb03SLiam R. Howlett 	 */
3221763ecb03SLiam R. Howlett 	do {
32224f74d2c8SLinus Torvalds 		if (vma->vm_flags & VM_ACCOUNT)
32234f74d2c8SLinus Torvalds 			nr_accounted += vma_pages(vma);
32240d2ebf9cSSuren Baghdasaryan 		remove_vma(vma, true);
3225763ecb03SLiam R. Howlett 		count++;
32260a3b3c25SPaul E. McKenney 		cond_resched();
3227763ecb03SLiam R. Howlett 	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
3228763ecb03SLiam R. Howlett 
3229763ecb03SLiam R. Howlett 	BUG_ON(count != mm->map_count);
3230d4af56c5SLiam R. Howlett 
3231d4af56c5SLiam R. Howlett 	trace_exit_mmap(mm);
3232d4af56c5SLiam R. Howlett 	__mt_destroy(&mm->mm_mt);
323364591e86SSuren Baghdasaryan 	mmap_write_unlock(mm);
32344f74d2c8SLinus Torvalds 	vm_unacct_memory(nr_accounted);
32351da177e4SLinus Torvalds }
32361da177e4SLinus Torvalds 
32371da177e4SLinus Torvalds /*
32371da177e4SLinus Torvalds  * Insert vm structure into process list sorted by address
32381da177e4SLinus Torvalds  * and into the inode's i_mmap tree.  If vm_file is non-NULL
3239c8c06efaSDavidlohr Bueso  * then i_mmap_rwsem is taken here.
32401da177e4SLinus Torvalds  */
32411da177e4SLinus Torvalds int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
32421da177e4SLinus Torvalds {
3243d4af56c5SLiam R. Howlett 	unsigned long charged = vma_pages(vma);
32441da177e4SLinus Torvalds 
3245d4af56c5SLiam R. Howlett 
3246d0601a50SLiam R. Howlett 	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3247c9d13f5fSChen Gang 		return -ENOMEM;
3248d4af56c5SLiam R. Howlett 
3249c9d13f5fSChen Gang 	if ((vma->vm_flags & VM_ACCOUNT) &&
3250d4af56c5SLiam R. Howlett 	     security_vm_enough_memory_mm(mm, charged))
3251c9d13f5fSChen Gang 		return -ENOMEM;
3252c9d13f5fSChen Gang 
32531da177e4SLinus Torvalds 	/*
32541da177e4SLinus Torvalds 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
32551da177e4SLinus Torvalds 	 * until its first write fault, when the page's anon_vma and index
32561da177e4SLinus Torvalds 	 * are set.  But now set the vm_pgoff it will almost certainly
32571da177e4SLinus Torvalds 	 * end up with (unless mremap moves it elsewhere before that
32581da177e4SLinus Torvalds 	 * first write fault), so /proc/pid/maps tells a consistent story.
32591da177e4SLinus Torvalds 	 *
32601da177e4SLinus Torvalds 	 * By setting it to reflect the virtual start address of the
32611da177e4SLinus Torvalds 	 * vma, merges and splits can happen in a seamless way, just
32621da177e4SLinus Torvalds 	 * using the existing file pgoff checks and manipulations.
32638332326eSLiao Pingfang 	 * Similarly in do_mmap and in do_brk_flags.
32641da177e4SLinus Torvalds 	 */
32658a9cc3b5SOleg Nesterov 	if (vma_is_anonymous(vma)) {
32661da177e4SLinus Torvalds 		BUG_ON(vma->anon_vma);
32671da177e4SLinus Torvalds 		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
32681da177e4SLinus Torvalds 	}
32692b144498SSrikar Dronamraju 
3270763ecb03SLiam R. Howlett 	if (vma_link(mm, vma)) {
3271d4af56c5SLiam R. Howlett 		vm_unacct_memory(charged);
3272d4af56c5SLiam R. Howlett 		return -ENOMEM;
3273d4af56c5SLiam R. Howlett 	}
3274d4af56c5SLiam R. Howlett 
32751da177e4SLinus Torvalds 	return 0;
32761da177e4SLinus Torvalds }
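
/*
 * Illustrative sketch (hypothetical helper, assuming mmap_lock is held for
 * writing): the minimal steps to build an anonymous vma and hand it to
 * insert_vm_struct().  __install_special_mapping() below is the in-tree
 * user of this pattern.
 */
static int __maybe_unused example_insert_anon_vma(struct mm_struct *mm,
						  unsigned long addr,
						  unsigned long len)
{
	struct vm_area_struct *vma = vm_area_alloc(mm);
	int ret;

	if (!vma)
		return -ENOMEM;

	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vm_flags_init(vma, VM_READ | VM_WRITE | mm->def_flags);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	ret = insert_vm_struct(mm, vma);
	if (ret)
		vm_area_free(vma);
	return ret;
}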
32771da177e4SLinus Torvalds 
32781da177e4SLinus Torvalds /*
32791da177e4SLinus Torvalds  * Copy the vma structure to a new location in the same mm,
32801da177e4SLinus Torvalds  * prior to moving page table entries, to effect an mremap move.
32811da177e4SLinus Torvalds  */
32821da177e4SLinus Torvalds struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
328338a76013SMichel Lespinasse 	unsigned long addr, unsigned long len, pgoff_t pgoff,
328438a76013SMichel Lespinasse 	bool *need_rmap_locks)
32851da177e4SLinus Torvalds {
32861da177e4SLinus Torvalds 	struct vm_area_struct *vma = *vmap;
32871da177e4SLinus Torvalds 	unsigned long vma_start = vma->vm_start;
32881da177e4SLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
32891da177e4SLinus Torvalds 	struct vm_area_struct *new_vma, *prev;
3290948f017bSAndrea Arcangeli 	bool faulted_in_anon_vma = true;
3291076f16bfSLiam R. Howlett 	VMA_ITERATOR(vmi, mm, addr);
32921da177e4SLinus Torvalds 
3293b50e195fSLiam R. Howlett 	validate_mm(mm);
32941da177e4SLinus Torvalds 	/*
32951da177e4SLinus Torvalds 	 * If anonymous vma has not yet been faulted, update new pgoff
32961da177e4SLinus Torvalds 	 * to match new location, to increase its chance of merging.
32971da177e4SLinus Torvalds 	 */
3298ce75799bSOleg Nesterov 	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
32991da177e4SLinus Torvalds 		pgoff = addr >> PAGE_SHIFT;
3300948f017bSAndrea Arcangeli 		faulted_in_anon_vma = false;
3301948f017bSAndrea Arcangeli 	}
33021da177e4SLinus Torvalds 
3303763ecb03SLiam R. Howlett 	new_vma = find_vma_prev(mm, addr, &prev);
3304763ecb03SLiam R. Howlett 	if (new_vma && new_vma->vm_start < addr + len)
33056597d783SHugh Dickins 		return NULL;	/* should never get here */
3306524e00b3SLiam R. Howlett 
33079760ebffSLiam R. Howlett 	new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags,
330819a809afSAndrea Arcangeli 			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
33095c26f6acSSuren Baghdasaryan 			    vma->vm_userfaultfd_ctx, anon_vma_name(vma));
33101da177e4SLinus Torvalds 	if (new_vma) {
33111da177e4SLinus Torvalds 		/*
33121da177e4SLinus Torvalds 		 * Source vma may have been merged into new_vma
33131da177e4SLinus Torvalds 		 */
3314948f017bSAndrea Arcangeli 		if (unlikely(vma_start >= new_vma->vm_start &&
3315948f017bSAndrea Arcangeli 			     vma_start < new_vma->vm_end)) {
3316948f017bSAndrea Arcangeli 			/*
3317948f017bSAndrea Arcangeli 			 * The only way we can get a vma_merge with
3318948f017bSAndrea Arcangeli 			 * self during an mremap is if the vma hasn't
3319948f017bSAndrea Arcangeli 			 * been faulted in yet and we were allowed to
3320948f017bSAndrea Arcangeli 			 * reset the dst vma->vm_pgoff to the
3321948f017bSAndrea Arcangeli 			 * destination address of the mremap to allow
3322948f017bSAndrea Arcangeli 			 * the merge to happen. mremap must change the
3323948f017bSAndrea Arcangeli 			 * vm_pgoff linearity between src and dst vmas
3324948f017bSAndrea Arcangeli 			 * (in turn preventing a vma_merge) to be
3325948f017bSAndrea Arcangeli 			 * safe. It is only safe to keep the vm_pgoff
3326948f017bSAndrea Arcangeli 			 * linear if there are no pages mapped yet.
3327948f017bSAndrea Arcangeli 			 */
332881d1b09cSSasha Levin 			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
332938a76013SMichel Lespinasse 			*vmap = vma = new_vma;
3330108d6642SMichel Lespinasse 		}
333138a76013SMichel Lespinasse 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
33321da177e4SLinus Torvalds 	} else {
33333928d4f5SLinus Torvalds 		new_vma = vm_area_dup(vma);
3334e3975891SChen Gang 		if (!new_vma)
3335e3975891SChen Gang 			goto out;
33361da177e4SLinus Torvalds 		new_vma->vm_start = addr;
33371da177e4SLinus Torvalds 		new_vma->vm_end = addr + len;
33381da177e4SLinus Torvalds 		new_vma->vm_pgoff = pgoff;
3339ef0855d3SOleg Nesterov 		if (vma_dup_policy(vma, new_vma))
3340523d4e20SMichel Lespinasse 			goto out_free_vma;
3341523d4e20SMichel Lespinasse 		if (anon_vma_clone(new_vma, vma))
3342523d4e20SMichel Lespinasse 			goto out_free_mempol;
3343e9714acfSKonstantin Khlebnikov 		if (new_vma->vm_file)
33441da177e4SLinus Torvalds 			get_file(new_vma->vm_file);
33451da177e4SLinus Torvalds 		if (new_vma->vm_ops && new_vma->vm_ops->open)
33461da177e4SLinus Torvalds 			new_vma->vm_ops->open(new_vma);
3347d6ac235dSSuren Baghdasaryan 		vma_start_write(new_vma);
3348763ecb03SLiam R. Howlett 		if (vma_link(mm, new_vma))
3349524e00b3SLiam R. Howlett 			goto out_vma_link;
335038a76013SMichel Lespinasse 		*need_rmap_locks = false;
33511da177e4SLinus Torvalds 	}
3352b50e195fSLiam R. Howlett 	validate_mm(mm);
33531da177e4SLinus Torvalds 	return new_vma;
33545beb4930SRik van Riel 
3355524e00b3SLiam R. Howlett out_vma_link:
3356524e00b3SLiam R. Howlett 	if (new_vma->vm_ops && new_vma->vm_ops->close)
3357524e00b3SLiam R. Howlett 		new_vma->vm_ops->close(new_vma);
335892b73996SLiam Howlett 
335992b73996SLiam Howlett 	if (new_vma->vm_file)
336092b73996SLiam Howlett 		fput(new_vma->vm_file);
336192b73996SLiam Howlett 
336292b73996SLiam Howlett 	unlink_anon_vmas(new_vma);
33635beb4930SRik van Riel out_free_mempol:
3364ef0855d3SOleg Nesterov 	mpol_put(vma_policy(new_vma));
33655beb4930SRik van Riel out_free_vma:
33663928d4f5SLinus Torvalds 	vm_area_free(new_vma);
3367e3975891SChen Gang out:
3368b50e195fSLiam R. Howlett 	validate_mm(mm);
33695beb4930SRik van Riel 	return NULL;
33701da177e4SLinus Torvalds }
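
/*
 * Illustrative sketch (simplified; names hypothetical): mremap's move_vma()
 * pairs copy_vma() with move_page_tables(), and the need_rmap_locks result
 * tells the latter whether the rmap locks must be held while page table
 * entries are in flight.  Accounting and error handling are omitted here.
 */
static unsigned long __maybe_unused example_move(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr,
		unsigned long len)
{
	pgoff_t pgoff = vma->vm_pgoff +
			((old_addr - vma->vm_start) >> PAGE_SHIFT);
	struct vm_area_struct *new_vma;
	bool need_rmap_locks;

	new_vma = copy_vma(&vma, new_addr, len, pgoff, &need_rmap_locks);
	if (!new_vma)
		return 0;	/* nothing was moved */

	/* Returns the number of bytes whose page tables were moved. */
	return move_page_tables(vma, old_addr, new_vma, new_addr, len,
				need_rmap_locks);
}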
3371119f657cSakpm@osdl.org 
3372119f657cSakpm@osdl.org /*
3373119f657cSakpm@osdl.org  * Return true if the calling process may expand its vm space by the passed
3374119f657cSakpm@osdl.org  * number of pages
3375119f657cSakpm@osdl.org  */
337684638335SKonstantin Khlebnikov bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3377119f657cSakpm@osdl.org {
337884638335SKonstantin Khlebnikov 	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
337984638335SKonstantin Khlebnikov 		return false;
3380119f657cSakpm@osdl.org 
3381d977d56cSKonstantin Khlebnikov 	if (is_data_mapping(flags) &&
3382d977d56cSKonstantin Khlebnikov 	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3383f4fcd558SKonstantin Khlebnikov 		/* Workaround for Valgrind */
3384f4fcd558SKonstantin Khlebnikov 		if (rlimit(RLIMIT_DATA) == 0 &&
3385f4fcd558SKonstantin Khlebnikov 		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3386f4fcd558SKonstantin Khlebnikov 			return true;
338757a7702bSDavid Woodhouse 
338857a7702bSDavid Woodhouse 		pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
3389d977d56cSKonstantin Khlebnikov 			     current->comm, current->pid,
3390d977d56cSKonstantin Khlebnikov 			     (mm->data_vm + npages) << PAGE_SHIFT,
339157a7702bSDavid Woodhouse 			     rlimit(RLIMIT_DATA),
339257a7702bSDavid Woodhouse 			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
339357a7702bSDavid Woodhouse 
339457a7702bSDavid Woodhouse 		if (!ignore_rlimit_data)
3395d977d56cSKonstantin Khlebnikov 			return false;
3396d977d56cSKonstantin Khlebnikov 	}
3397119f657cSakpm@osdl.org 
339884638335SKonstantin Khlebnikov 	return true;
339984638335SKonstantin Khlebnikov }
340084638335SKonstantin Khlebnikov 
340184638335SKonstantin Khlebnikov void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
340284638335SKonstantin Khlebnikov {
34037866076bSPeng Liu 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
340484638335SKonstantin Khlebnikov 
3405d977d56cSKonstantin Khlebnikov 	if (is_exec_mapping(flags))
340684638335SKonstantin Khlebnikov 		mm->exec_vm += npages;
3407d977d56cSKonstantin Khlebnikov 	else if (is_stack_mapping(flags))
340884638335SKonstantin Khlebnikov 		mm->stack_vm += npages;
3409d977d56cSKonstantin Khlebnikov 	else if (is_data_mapping(flags))
341084638335SKonstantin Khlebnikov 		mm->data_vm += npages;
3411119f657cSakpm@osdl.org }
3412fa5dc22fSRoland McGrath 
3413b3ec9f33SSouptick Joarder static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3414a62c34bdSAndy Lutomirski 
3415a62c34bdSAndy Lutomirski /*
3416a62c34bdSAndy Lutomirski  * Having a close hook prevents vma merging regardless of flags.
3417a62c34bdSAndy Lutomirski  */
3418a62c34bdSAndy Lutomirski static void special_mapping_close(struct vm_area_struct *vma)
3419a62c34bdSAndy Lutomirski {
3420a62c34bdSAndy Lutomirski }
3421a62c34bdSAndy Lutomirski 
3422a62c34bdSAndy Lutomirski static const char *special_mapping_name(struct vm_area_struct *vma)
3423a62c34bdSAndy Lutomirski {
3424a62c34bdSAndy Lutomirski 	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3425a62c34bdSAndy Lutomirski }
3426a62c34bdSAndy Lutomirski 
342714d07113SBrian Geffon static int special_mapping_mremap(struct vm_area_struct *new_vma)
3428b059a453SDmitry Safonov {
3429b059a453SDmitry Safonov 	struct vm_special_mapping *sm = new_vma->vm_private_data;
3430b059a453SDmitry Safonov 
3431280e87e9SDmitry Safonov 	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3432280e87e9SDmitry Safonov 		return -EFAULT;
3433280e87e9SDmitry Safonov 
3434b059a453SDmitry Safonov 	if (sm->mremap)
3435b059a453SDmitry Safonov 		return sm->mremap(sm, new_vma);
3436280e87e9SDmitry Safonov 
3437b059a453SDmitry Safonov 	return 0;
3438b059a453SDmitry Safonov }
3439b059a453SDmitry Safonov 
3440871402e0SDmitry Safonov static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
3441871402e0SDmitry Safonov {
3442871402e0SDmitry Safonov 	/*
3443871402e0SDmitry Safonov 	 * Forbid splitting special mappings - the kernel has expectations
3444871402e0SDmitry Safonov 	 * about the number of pages in the mapping. Together with
3445871402e0SDmitry Safonov 	 * VM_DONTEXPAND, the size of the vma should stay the same over the
3446871402e0SDmitry Safonov 	 * special mapping's lifetime.
3447871402e0SDmitry Safonov 	 */
3448871402e0SDmitry Safonov 	return -EINVAL;
3449871402e0SDmitry Safonov }
3450871402e0SDmitry Safonov 
3451a62c34bdSAndy Lutomirski static const struct vm_operations_struct special_mapping_vmops = {
3452a62c34bdSAndy Lutomirski 	.close = special_mapping_close,
3453a62c34bdSAndy Lutomirski 	.fault = special_mapping_fault,
3454b059a453SDmitry Safonov 	.mremap = special_mapping_mremap,
3455a62c34bdSAndy Lutomirski 	.name = special_mapping_name,
3456af34ebebSDmitry Safonov 	/* vDSO code relies on VVAR not being accessible remotely */
3457af34ebebSDmitry Safonov 	.access = NULL,
3458871402e0SDmitry Safonov 	.may_split = special_mapping_split,
3459a62c34bdSAndy Lutomirski };
3460a62c34bdSAndy Lutomirski 
3461a62c34bdSAndy Lutomirski static const struct vm_operations_struct legacy_special_mapping_vmops = {
3462a62c34bdSAndy Lutomirski 	.close = special_mapping_close,
3463a62c34bdSAndy Lutomirski 	.fault = special_mapping_fault,
3464a62c34bdSAndy Lutomirski };
3465fa5dc22fSRoland McGrath 
3466b3ec9f33SSouptick Joarder static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
3467fa5dc22fSRoland McGrath {
346811bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
3469b1d0e4f5SNick Piggin 	pgoff_t pgoff;
3470fa5dc22fSRoland McGrath 	struct page **pages;
3471fa5dc22fSRoland McGrath 
3472f872f540SAndy Lutomirski 	if (vma->vm_ops == &legacy_special_mapping_vmops) {
3473a62c34bdSAndy Lutomirski 		pages = vma->vm_private_data;
3474f872f540SAndy Lutomirski 	} else {
3475f872f540SAndy Lutomirski 		struct vm_special_mapping *sm = vma->vm_private_data;
3476f872f540SAndy Lutomirski 
3477f872f540SAndy Lutomirski 		if (sm->fault)
347811bac800SDave Jiang 			return sm->fault(sm, vmf->vma, vmf);
3479f872f540SAndy Lutomirski 
3480f872f540SAndy Lutomirski 		pages = sm->pages;
3481f872f540SAndy Lutomirski 	}
3482a62c34bdSAndy Lutomirski 
34838a9cc3b5SOleg Nesterov 	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
3484b1d0e4f5SNick Piggin 		pgoff--;
3485fa5dc22fSRoland McGrath 
3486fa5dc22fSRoland McGrath 	if (*pages) {
3487fa5dc22fSRoland McGrath 		struct page *page = *pages;
3488fa5dc22fSRoland McGrath 		get_page(page);
3489b1d0e4f5SNick Piggin 		vmf->page = page;
3490b1d0e4f5SNick Piggin 		return 0;
3491fa5dc22fSRoland McGrath 	}
3492fa5dc22fSRoland McGrath 
3493b1d0e4f5SNick Piggin 	return VM_FAULT_SIGBUS;
3494fa5dc22fSRoland McGrath }
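
/*
 * Worked example of the lookup above (illustrative): with a page array
 * {p0, p1, NULL}, a fault at pgoff 1 advances one step and returns p1
 * with a reference held; a fault at pgoff 2 walks onto the NULL
 * terminator and returns VM_FAULT_SIGBUS.
 */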
3495fa5dc22fSRoland McGrath 
3496a62c34bdSAndy Lutomirski static struct vm_area_struct *__install_special_mapping(
3497a62c34bdSAndy Lutomirski 	struct mm_struct *mm,
3498fa5dc22fSRoland McGrath 	unsigned long addr, unsigned long len,
349927f28b97SChen Gang 	unsigned long vm_flags, void *priv,
350027f28b97SChen Gang 	const struct vm_operations_struct *ops)
3501fa5dc22fSRoland McGrath {
3502462e635eSTavis Ormandy 	int ret;
3503fa5dc22fSRoland McGrath 	struct vm_area_struct *vma;
3504fa5dc22fSRoland McGrath 
3505b50e195fSLiam R. Howlett 	validate_mm(mm);
3506490fc053SLinus Torvalds 	vma = vm_area_alloc(mm);
3507fa5dc22fSRoland McGrath 	if (unlikely(vma == NULL))
35083935ed6aSStefani Seibold 		return ERR_PTR(-ENOMEM);
3509fa5dc22fSRoland McGrath 
3510fa5dc22fSRoland McGrath 	vma->vm_start = addr;
3511fa5dc22fSRoland McGrath 	vma->vm_end = addr + len;
3512fa5dc22fSRoland McGrath 
3513e430a95aSSuren Baghdasaryan 	vm_flags_init(vma, (vm_flags | mm->def_flags |
3514e430a95aSSuren Baghdasaryan 		      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
35153ed75eb8SColy Li 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3516fa5dc22fSRoland McGrath 
3517a62c34bdSAndy Lutomirski 	vma->vm_ops = ops;
3518a62c34bdSAndy Lutomirski 	vma->vm_private_data = priv;
3519fa5dc22fSRoland McGrath 
3520462e635eSTavis Ormandy 	ret = insert_vm_struct(mm, vma);
3521462e635eSTavis Ormandy 	if (ret)
3522462e635eSTavis Ormandy 		goto out;
3523fa5dc22fSRoland McGrath 
352484638335SKonstantin Khlebnikov 	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3525fa5dc22fSRoland McGrath 
3526cdd6c482SIngo Molnar 	perf_event_mmap(vma);
3527089dd79dSPeter Zijlstra 
3528b50e195fSLiam R. Howlett 	validate_mm(mm);
35293935ed6aSStefani Seibold 	return vma;
3530462e635eSTavis Ormandy 
3531462e635eSTavis Ormandy out:
35323928d4f5SLinus Torvalds 	vm_area_free(vma);
3533b50e195fSLiam R. Howlett 	validate_mm(mm);
35343935ed6aSStefani Seibold 	return ERR_PTR(ret);
35353935ed6aSStefani Seibold }
35363935ed6aSStefani Seibold 
35372eefd878SDmitry Safonov bool vma_is_special_mapping(const struct vm_area_struct *vma,
35382eefd878SDmitry Safonov 	const struct vm_special_mapping *sm)
35392eefd878SDmitry Safonov {
35402eefd878SDmitry Safonov 	return vma->vm_private_data == sm &&
35412eefd878SDmitry Safonov 		(vma->vm_ops == &special_mapping_vmops ||
35422eefd878SDmitry Safonov 		 vma->vm_ops == &legacy_special_mapping_vmops);
35432eefd878SDmitry Safonov }
35442eefd878SDmitry Safonov 
3545a62c34bdSAndy Lutomirski /*
3546c1e8d7c6SMichel Lespinasse  * Called with mm->mmap_lock held for writing.
3547a62c34bdSAndy Lutomirski  * Insert a new vma covering the given region, with the given flags.
3548a62c34bdSAndy Lutomirski  * Its pages are supplied by the given array of struct page *.
3549a62c34bdSAndy Lutomirski  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3550a62c34bdSAndy Lutomirski  * The region past the last page supplied will always produce SIGBUS.
3551a62c34bdSAndy Lutomirski  * The array pointer and the pages it points to are assumed to stay alive
3552a62c34bdSAndy Lutomirski  * for as long as this mapping might exist.
3553a62c34bdSAndy Lutomirski  */
3554a62c34bdSAndy Lutomirski struct vm_area_struct *_install_special_mapping(
3555a62c34bdSAndy Lutomirski 	struct mm_struct *mm,
3556a62c34bdSAndy Lutomirski 	unsigned long addr, unsigned long len,
3557a62c34bdSAndy Lutomirski 	unsigned long vm_flags, const struct vm_special_mapping *spec)
3558a62c34bdSAndy Lutomirski {
355927f28b97SChen Gang 	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
356027f28b97SChen Gang 					&special_mapping_vmops);
3561a62c34bdSAndy Lutomirski }
3562a62c34bdSAndy Lutomirski 
35633935ed6aSStefani Seibold int install_special_mapping(struct mm_struct *mm,
35643935ed6aSStefani Seibold 			    unsigned long addr, unsigned long len,
35653935ed6aSStefani Seibold 			    unsigned long vm_flags, struct page **pages)
35663935ed6aSStefani Seibold {
3567a62c34bdSAndy Lutomirski 	struct vm_area_struct *vma = __install_special_mapping(
356827f28b97SChen Gang 		mm, addr, len, vm_flags, (void *)pages,
356927f28b97SChen Gang 		&legacy_special_mapping_vmops);
35703935ed6aSStefani Seibold 
357114bd5b45SDuan Jiong 	return PTR_ERR_OR_ZERO(vma);
3572fa5dc22fSRoland McGrath }
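
/*
 * Illustrative sketch (hypothetical names): how arch code typically
 * installs a vDSO-like special mapping.  A real caller would populate
 * example_pages[0] with an allocated page before installing.
 */
static struct page *example_pages[2];	/* one page + NULL terminator */

static const struct vm_special_mapping example_mapping = {
	.name = "[example]",
	.pages = example_pages,
};

static int __maybe_unused example_install(struct mm_struct *mm,
					  unsigned long addr)
{
	struct vm_area_struct *vma;

	/* mmap_lock must be held for writing, per the comment above. */
	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD,
				       &example_mapping);
	return PTR_ERR_OR_ZERO(vma);
}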
35737906d00cSAndrea Arcangeli 
35747906d00cSAndrea Arcangeli static DEFINE_MUTEX(mm_all_locks_mutex);
35757906d00cSAndrea Arcangeli 
3576454ed842SPeter Zijlstra static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
35777906d00cSAndrea Arcangeli {
3578f808c13fSDavidlohr Bueso 	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
35797906d00cSAndrea Arcangeli 		/*
35807906d00cSAndrea Arcangeli 		 * The LSB of head.next can't change from under us
35817906d00cSAndrea Arcangeli 		 * because we hold the mm_all_locks_mutex.
35827906d00cSAndrea Arcangeli 		 */
3583da1c55f1SMichel Lespinasse 		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
35847906d00cSAndrea Arcangeli 		/*
35857906d00cSAndrea Arcangeli 		 * We can safely modify head.next after taking the
35865a505085SIngo Molnar 		 * anon_vma->root->rwsem. If some other vma in this mm shares
35877906d00cSAndrea Arcangeli 		 * the same anon_vma we won't take it again.
35887906d00cSAndrea Arcangeli 		 *
35897906d00cSAndrea Arcangeli 		 * No need of atomic instructions here, head.next
35907906d00cSAndrea Arcangeli 		 * can't change from under us thanks to the
35915a505085SIngo Molnar 		 * anon_vma->root->rwsem.
35927906d00cSAndrea Arcangeli 		 */
35937906d00cSAndrea Arcangeli 		if (__test_and_set_bit(0, (unsigned long *)
3594f808c13fSDavidlohr Bueso 				       &anon_vma->root->rb_root.rb_root.rb_node))
35957906d00cSAndrea Arcangeli 			BUG();
35967906d00cSAndrea Arcangeli 	}
35977906d00cSAndrea Arcangeli }
35987906d00cSAndrea Arcangeli 
3599454ed842SPeter Zijlstra static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
36007906d00cSAndrea Arcangeli {
36017906d00cSAndrea Arcangeli 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
36027906d00cSAndrea Arcangeli 		/*
36037906d00cSAndrea Arcangeli 		 * AS_MM_ALL_LOCKS can't change from under us because
36047906d00cSAndrea Arcangeli 		 * we hold the mm_all_locks_mutex.
36057906d00cSAndrea Arcangeli 		 *
36067906d00cSAndrea Arcangeli 		 * Operations on ->flags have to be atomic because
36077906d00cSAndrea Arcangeli 		 * even if AS_MM_ALL_LOCKS is stable thanks to the
36087906d00cSAndrea Arcangeli 		 * mm_all_locks_mutex, there may be other cpus
36097906d00cSAndrea Arcangeli 		 * changing other bitflags in parallel to us.
36107906d00cSAndrea Arcangeli 		 */
36117906d00cSAndrea Arcangeli 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
36127906d00cSAndrea Arcangeli 			BUG();
3613da1c55f1SMichel Lespinasse 		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
36147906d00cSAndrea Arcangeli 	}
36157906d00cSAndrea Arcangeli }
36167906d00cSAndrea Arcangeli 
36177906d00cSAndrea Arcangeli /*
36187906d00cSAndrea Arcangeli  * This operation locks against the VM for all pte/vma/mm related
36197906d00cSAndrea Arcangeli  * operations that could ever happen on a certain mm. This includes
36207906d00cSAndrea Arcangeli  * vmtruncate, try_to_unmap, and all page faults.
36217906d00cSAndrea Arcangeli  *
3622c1e8d7c6SMichel Lespinasse  * The caller must take the mmap_lock in write mode before calling
36237906d00cSAndrea Arcangeli  * mm_take_all_locks(). The caller isn't allowed to release the
3624c1e8d7c6SMichel Lespinasse  * mmap_lock until mm_drop_all_locks() returns.
36257906d00cSAndrea Arcangeli  *
3626c1e8d7c6SMichel Lespinasse  * mmap_lock in write mode is required in order to block all operations
36277906d00cSAndrea Arcangeli  * that could modify pagetables and free pages without needing to
362827ba0644SKirill A. Shutemov  * alter the vma layout. It's also needed in write mode to prevent new
36297906d00cSAndrea Arcangeli  * anon_vmas from being associated with existing vmas.
36307906d00cSAndrea Arcangeli  *
36317906d00cSAndrea Arcangeli  * A single task can't take more than one mm_take_all_locks() in a row
36327906d00cSAndrea Arcangeli  * or it would deadlock.
36337906d00cSAndrea Arcangeli  *
3634bf181b9fSMichel Lespinasse  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
36357906d00cSAndrea Arcangeli  * mapping->flags avoid taking the same lock twice if more than one
36367906d00cSAndrea Arcangeli  * vma in this mm is backed by the same anon_vma or address_space.
36377906d00cSAndrea Arcangeli  *
363888f306b6SKirill A. Shutemov  * We take locks in the following order, according to the comment at the
363988f306b6SKirill A. Shutemov  * beginning of mm/rmap.c:
364088f306b6SKirill A. Shutemov  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
364188f306b6SKirill A. Shutemov  *     hugetlb mapping);
3642eeff9a5dSSuren Baghdasaryan  *   - all vmas marked locked
364388f306b6SKirill A. Shutemov  *   - all i_mmap_rwsem locks;
364488f306b6SKirill A. Shutemov  *   - all anon_vma->rwsems
364588f306b6SKirill A. Shutemov  *
364688f306b6SKirill A. Shutemov  * We can take all locks within these types randomly because the VM code
364788f306b6SKirill A. Shutemov  * doesn't nest them and we're protected from parallel mm_take_all_locks() by
364888f306b6SKirill A. Shutemov  * mm_all_locks_mutex.
36497906d00cSAndrea Arcangeli  *
36507906d00cSAndrea Arcangeli  * mm_take_all_locks() and mm_drop_all_locks are expensive operations
36517906d00cSAndrea Arcangeli  * that may have to take thousands of locks.
36527906d00cSAndrea Arcangeli  *
36537906d00cSAndrea Arcangeli  * mm_take_all_locks() can fail if it's interrupted by signals.
36547906d00cSAndrea Arcangeli  */
36557906d00cSAndrea Arcangeli int mm_take_all_locks(struct mm_struct *mm)
36567906d00cSAndrea Arcangeli {
36577906d00cSAndrea Arcangeli 	struct vm_area_struct *vma;
36585beb4930SRik van Riel 	struct anon_vma_chain *avc;
3659763ecb03SLiam R. Howlett 	MA_STATE(mas, &mm->mm_mt, 0, 0);
36607906d00cSAndrea Arcangeli 
3661325bca1fSRolf Eike Beer 	mmap_assert_write_locked(mm);
36627906d00cSAndrea Arcangeli 
36637906d00cSAndrea Arcangeli 	mutex_lock(&mm_all_locks_mutex);
36647906d00cSAndrea Arcangeli 
3665763ecb03SLiam R. Howlett 	mas_for_each(&mas, vma, ULONG_MAX) {
36667906d00cSAndrea Arcangeli 		if (signal_pending(current))
36677906d00cSAndrea Arcangeli 			goto out_unlock;
3668eeff9a5dSSuren Baghdasaryan 		vma_start_write(vma);
3669eeff9a5dSSuren Baghdasaryan 	}
3670eeff9a5dSSuren Baghdasaryan 
3671eeff9a5dSSuren Baghdasaryan 	mas_set(&mas, 0);
3672eeff9a5dSSuren Baghdasaryan 	mas_for_each(&mas, vma, ULONG_MAX) {
3673eeff9a5dSSuren Baghdasaryan 		if (signal_pending(current))
3674eeff9a5dSSuren Baghdasaryan 			goto out_unlock;
367588f306b6SKirill A. Shutemov 		if (vma->vm_file && vma->vm_file->f_mapping &&
367688f306b6SKirill A. Shutemov 				is_vm_hugetlb_page(vma))
367788f306b6SKirill A. Shutemov 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
367888f306b6SKirill A. Shutemov 	}
367988f306b6SKirill A. Shutemov 
3680763ecb03SLiam R. Howlett 	mas_set(&mas, 0);
3681763ecb03SLiam R. Howlett 	mas_for_each(&mas, vma, ULONG_MAX) {
368288f306b6SKirill A. Shutemov 		if (signal_pending(current))
368388f306b6SKirill A. Shutemov 			goto out_unlock;
368488f306b6SKirill A. Shutemov 		if (vma->vm_file && vma->vm_file->f_mapping &&
368588f306b6SKirill A. Shutemov 				!is_vm_hugetlb_page(vma))
3686454ed842SPeter Zijlstra 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
36877906d00cSAndrea Arcangeli 	}
36887cd5a02fSPeter Zijlstra 
3689763ecb03SLiam R. Howlett 	mas_set(&mas, 0);
3690763ecb03SLiam R. Howlett 	mas_for_each(&mas, vma, ULONG_MAX) {
36917cd5a02fSPeter Zijlstra 		if (signal_pending(current))
36927cd5a02fSPeter Zijlstra 			goto out_unlock;
36937cd5a02fSPeter Zijlstra 		if (vma->anon_vma)
36945beb4930SRik van Riel 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
36955beb4930SRik van Riel 				vm_lock_anon_vma(mm, avc->anon_vma);
36967cd5a02fSPeter Zijlstra 	}
36977cd5a02fSPeter Zijlstra 
3698584cff54SKautuk Consul 	return 0;
36997906d00cSAndrea Arcangeli 
37007906d00cSAndrea Arcangeli out_unlock:
37017906d00cSAndrea Arcangeli 	mm_drop_all_locks(mm);
3702584cff54SKautuk Consul 	return -EINTR;
37037906d00cSAndrea Arcangeli }
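
/*
 * Illustrative sketch (hypothetical caller): the locking protocol
 * described above, as used by e.g. mmu notifier registration.
 */
static int __maybe_unused example_with_all_locks(struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = mm_take_all_locks(mm);
	if (ret)
		goto out;	/* -EINTR: interrupted by a signal */

	/* ... work while every anon_vma and i_mmap_rwsem is held ... */

	mm_drop_all_locks(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}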
37047906d00cSAndrea Arcangeli 
37057906d00cSAndrea Arcangeli static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
37067906d00cSAndrea Arcangeli {
3707f808c13fSDavidlohr Bueso 	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
37087906d00cSAndrea Arcangeli 		/*
37097906d00cSAndrea Arcangeli 		 * The LSB of head.next can't change to 0 from under
37107906d00cSAndrea Arcangeli 		 * us because we hold the mm_all_locks_mutex.
37117906d00cSAndrea Arcangeli 		 *
37127906d00cSAndrea Arcangeli 		 * We must however clear the bitflag before unlocking
3713bf181b9fSMichel Lespinasse 		 * the anon_vma so users of the anon_vma->rb_root will
37147906d00cSAndrea Arcangeli 		 * never see our bitflag.
37157906d00cSAndrea Arcangeli 		 *
37167906d00cSAndrea Arcangeli 		 * No need of atomic instructions here, head.next
37177906d00cSAndrea Arcangeli 		 * can't change from under us until we release the
37185a505085SIngo Molnar 		 * anon_vma->root->rwsem.
37197906d00cSAndrea Arcangeli 		 */
37207906d00cSAndrea Arcangeli 		if (!__test_and_clear_bit(0, (unsigned long *)
3721f808c13fSDavidlohr Bueso 					  &anon_vma->root->rb_root.rb_root.rb_node))
37227906d00cSAndrea Arcangeli 			BUG();
372308b52706SKonstantin Khlebnikov 		anon_vma_unlock_write(anon_vma);
37247906d00cSAndrea Arcangeli 	}
37257906d00cSAndrea Arcangeli }
37267906d00cSAndrea Arcangeli 
37277906d00cSAndrea Arcangeli static void vm_unlock_mapping(struct address_space *mapping)
37287906d00cSAndrea Arcangeli {
37297906d00cSAndrea Arcangeli 	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
37307906d00cSAndrea Arcangeli 		/*
37317906d00cSAndrea Arcangeli 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
37327906d00cSAndrea Arcangeli 		 * because we hold the mm_all_locks_mutex.
37337906d00cSAndrea Arcangeli 		 */
373483cde9e8SDavidlohr Bueso 		i_mmap_unlock_write(mapping);
37357906d00cSAndrea Arcangeli 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
37367906d00cSAndrea Arcangeli 					&mapping->flags))
37377906d00cSAndrea Arcangeli 			BUG();
37387906d00cSAndrea Arcangeli 	}
37397906d00cSAndrea Arcangeli }
37407906d00cSAndrea Arcangeli 
37417906d00cSAndrea Arcangeli /*
3742c1e8d7c6SMichel Lespinasse  * The mmap_lock cannot be released by the caller until
37437906d00cSAndrea Arcangeli  * mm_drop_all_locks() returns.
37447906d00cSAndrea Arcangeli  */
37457906d00cSAndrea Arcangeli void mm_drop_all_locks(struct mm_struct *mm)
37467906d00cSAndrea Arcangeli {
37477906d00cSAndrea Arcangeli 	struct vm_area_struct *vma;
37485beb4930SRik van Riel 	struct anon_vma_chain *avc;
3749763ecb03SLiam R. Howlett 	MA_STATE(mas, &mm->mm_mt, 0, 0);
37507906d00cSAndrea Arcangeli 
3751325bca1fSRolf Eike Beer 	mmap_assert_write_locked(mm);
37527906d00cSAndrea Arcangeli 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
37537906d00cSAndrea Arcangeli 
3754763ecb03SLiam R. Howlett 	mas_for_each(&mas, vma, ULONG_MAX) {
37557906d00cSAndrea Arcangeli 		if (vma->anon_vma)
37565beb4930SRik van Riel 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
37575beb4930SRik van Riel 				vm_unlock_anon_vma(avc->anon_vma);
37587906d00cSAndrea Arcangeli 		if (vma->vm_file && vma->vm_file->f_mapping)
37597906d00cSAndrea Arcangeli 			vm_unlock_mapping(vma->vm_file->f_mapping);
37607906d00cSAndrea Arcangeli 	}
3761eeff9a5dSSuren Baghdasaryan 	vma_end_write_all(mm);
37627906d00cSAndrea Arcangeli 
37637906d00cSAndrea Arcangeli 	mutex_unlock(&mm_all_locks_mutex);
37647906d00cSAndrea Arcangeli }
37658feae131SDavid Howells 
37668feae131SDavid Howells /*
37673edf41d8Sseokhoon.yoon  * initialise the vm_committed_as percpu counter
37688feae131SDavid Howells  */
37698feae131SDavid Howells void __init mmap_init(void)
37708feae131SDavid Howells {
377100a62ce9SKOSAKI Motohiro 	int ret;
377200a62ce9SKOSAKI Motohiro 
3773908c7f19STejun Heo 	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
377400a62ce9SKOSAKI Motohiro 	VM_BUG_ON(ret);
37758feae131SDavid Howells }
3776c9b1d098SAndrew Shewmaker 
3777c9b1d098SAndrew Shewmaker /*
3778c9b1d098SAndrew Shewmaker  * Initialise sysctl_user_reserve_kbytes.
3779c9b1d098SAndrew Shewmaker  *
3780c9b1d098SAndrew Shewmaker  * This is intended to prevent a user from starting a single memory hogging
3781c9b1d098SAndrew Shewmaker  * This is intended to prevent a user from starting a single memory-hogging
3782c9b1d098SAndrew Shewmaker  * process in OVERCOMMIT_NEVER mode and then being unable to recover
3783c9b1d098SAndrew Shewmaker  * (kill the hog).
3784c9b1d098SAndrew Shewmaker  *
3785c9b1d098SAndrew Shewmaker  * The default value is min(3% of free memory, 128MB);
3786c9b1d098SAndrew Shewmaker  * 128MB is enough to recover with sshd/login, bash, and top/kill.
37871640879aSAndrew Shewmaker static int init_user_reserve(void)
3788c9b1d098SAndrew Shewmaker {
3789c9b1d098SAndrew Shewmaker 	unsigned long free_kbytes;
3790c9b1d098SAndrew Shewmaker 
3791c41f012aSMichal Hocko 	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3792c9b1d098SAndrew Shewmaker 
3793c9b1d098SAndrew Shewmaker 	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3794c9b1d098SAndrew Shewmaker 	return 0;
3795c9b1d098SAndrew Shewmaker }
3796a64fb3cdSPaul Gortmaker subsys_initcall(init_user_reserve);
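
/*
 * Worked example (illustrative): with 2GB free, free_kbytes is 2097152
 * and free_kbytes / 32 is 65536KB (64MB), below the 1UL << 17 (128MB)
 * cap, so the reserve becomes 64MB; with 8GB free, 256MB exceeds the
 * cap and the reserve is clamped to 128MB.
 */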
37974eeab4f5SAndrew Shewmaker 
37984eeab4f5SAndrew Shewmaker /*
37994eeab4f5SAndrew Shewmaker  * Initialise sysctl_admin_reserve_kbytes.
38004eeab4f5SAndrew Shewmaker  *
38014eeab4f5SAndrew Shewmaker  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
38024eeab4f5SAndrew Shewmaker  * to log in and kill a memory hogging process.
38034eeab4f5SAndrew Shewmaker  *
38044eeab4f5SAndrew Shewmaker  * Systems with more than 256MB will reserve 8MB, enough to recover
38054eeab4f5SAndrew Shewmaker  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
38064eeab4f5SAndrew Shewmaker  * only reserve 3% of free pages by default.
38074eeab4f5SAndrew Shewmaker  */
38081640879aSAndrew Shewmaker static int init_admin_reserve(void)
38094eeab4f5SAndrew Shewmaker {
38104eeab4f5SAndrew Shewmaker 	unsigned long free_kbytes;
38114eeab4f5SAndrew Shewmaker 
3812c41f012aSMichal Hocko 	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
38134eeab4f5SAndrew Shewmaker 
38144eeab4f5SAndrew Shewmaker 	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
38154eeab4f5SAndrew Shewmaker 	return 0;
38164eeab4f5SAndrew Shewmaker }
3817a64fb3cdSPaul Gortmaker subsys_initcall(init_admin_reserve);
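
/*
 * Worked example (illustrative): 1UL << 13 is 8192KB (8MB), so a system
 * with 256MB free gets min(262144 / 32, 8192) = 8192KB, while one with
 * 128MB free gets min(131072 / 32, 8192) = 4096KB (3.125% of free).
 */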
38181640879aSAndrew Shewmaker 
38191640879aSAndrew Shewmaker /*
38201640879aSAndrew Shewmaker  * Reinitialise user and admin reserves if memory is added or removed.
38211640879aSAndrew Shewmaker  *
38221640879aSAndrew Shewmaker  * The default user reserve max is 128MB, and the default max for the
38231640879aSAndrew Shewmaker  * admin reserve is 8MB. These are usually, but not always, enough to
38241640879aSAndrew Shewmaker  * enable recovery from a memory hogging process using login/sshd, a shell,
38251640879aSAndrew Shewmaker  * and tools like top. It may make sense to increase or even disable the
38261640879aSAndrew Shewmaker  * reserve depending on the existence of swap or variations in the recovery
38271640879aSAndrew Shewmaker  * tools. So, the admin may have changed them.
38281640879aSAndrew Shewmaker  *
38291640879aSAndrew Shewmaker  * If memory is added and the reserves have been eliminated or increased above
38301640879aSAndrew Shewmaker  * the default max, then we'll trust the admin.
38311640879aSAndrew Shewmaker  *
38321640879aSAndrew Shewmaker  * If memory is removed and there isn't enough free memory, then we
38331640879aSAndrew Shewmaker  * need to reset the reserves.
38341640879aSAndrew Shewmaker  *
38351640879aSAndrew Shewmaker  * Otherwise keep the reserve set by the admin.
38361640879aSAndrew Shewmaker  */
38371640879aSAndrew Shewmaker static int reserve_mem_notifier(struct notifier_block *nb,
38381640879aSAndrew Shewmaker 			     unsigned long action, void *data)
38391640879aSAndrew Shewmaker {
38401640879aSAndrew Shewmaker 	unsigned long tmp, free_kbytes;
38411640879aSAndrew Shewmaker 
38421640879aSAndrew Shewmaker 	switch (action) {
38431640879aSAndrew Shewmaker 	case MEM_ONLINE:
38441640879aSAndrew Shewmaker 		/* Default max is 128MB. Leave alone if modified by operator. */
38451640879aSAndrew Shewmaker 		tmp = sysctl_user_reserve_kbytes;
38461640879aSAndrew Shewmaker 		if (0 < tmp && tmp < (1UL << 17))
38471640879aSAndrew Shewmaker 			init_user_reserve();
38481640879aSAndrew Shewmaker 
38491640879aSAndrew Shewmaker 		/* Default max is 8MB.  Leave alone if modified by operator. */
38501640879aSAndrew Shewmaker 		tmp = sysctl_admin_reserve_kbytes;
38511640879aSAndrew Shewmaker 		if (0 < tmp && tmp < (1UL << 13))
38521640879aSAndrew Shewmaker 			init_admin_reserve();
38531640879aSAndrew Shewmaker 
38541640879aSAndrew Shewmaker 		break;
38551640879aSAndrew Shewmaker 	case MEM_OFFLINE:
3856c41f012aSMichal Hocko 		free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
38571640879aSAndrew Shewmaker 
38581640879aSAndrew Shewmaker 		if (sysctl_user_reserve_kbytes > free_kbytes) {
38591640879aSAndrew Shewmaker 			init_user_reserve();
38601640879aSAndrew Shewmaker 			pr_info("vm.user_reserve_kbytes reset to %lu\n",
38611640879aSAndrew Shewmaker 				sysctl_user_reserve_kbytes);
38621640879aSAndrew Shewmaker 		}
38631640879aSAndrew Shewmaker 
38641640879aSAndrew Shewmaker 		if (sysctl_admin_reserve_kbytes > free_kbytes) {
38651640879aSAndrew Shewmaker 			init_admin_reserve();
38661640879aSAndrew Shewmaker 			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
38671640879aSAndrew Shewmaker 				sysctl_admin_reserve_kbytes);
38681640879aSAndrew Shewmaker 		}
38691640879aSAndrew Shewmaker 		break;
38701640879aSAndrew Shewmaker 	default:
38711640879aSAndrew Shewmaker 		break;
38721640879aSAndrew Shewmaker 	}
38731640879aSAndrew Shewmaker 	return NOTIFY_OK;
38741640879aSAndrew Shewmaker }
38751640879aSAndrew Shewmaker 
38761640879aSAndrew Shewmaker static int __meminit init_reserve_notifier(void)
38771640879aSAndrew Shewmaker {
38781eeaa4fdSLiu Shixin 	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
3879b1de0d13SMitchel Humpherys 		pr_err("Failed registering memory add/remove notifier for admin reserve\n");
38801640879aSAndrew Shewmaker 
38811640879aSAndrew Shewmaker 	return 0;
38821640879aSAndrew Shewmaker }
3883a64fb3cdSPaul Gortmaker subsys_initcall(init_reserve_notifier);
3884