xref: /linux/mm/vma.c (revision 7ec1885a7e283caaf6566aedc1eea5988d545f97)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /*
4  * VMA-specific functions.
5  */
6 
7 #include "vma_internal.h"
8 #include "vma.h"
9 
10 struct mmap_state {
11 	struct mm_struct *mm;
12 	struct vma_iterator *vmi;
13 
14 	unsigned long addr;
15 	unsigned long end;
16 	pgoff_t pgoff;
17 	unsigned long pglen;
18 	union {
19 		vm_flags_t vm_flags;
20 		vma_flags_t vma_flags;
21 	};
22 	struct file *file;
23 	pgprot_t page_prot;
24 
25 	/* User-defined fields, perhaps updated by .mmap_prepare(). */
26 	const struct vm_operations_struct *vm_ops;
27 	void *vm_private_data;
28 
29 	unsigned long charged;
30 
31 	struct vm_area_struct *prev;
32 	struct vm_area_struct *next;
33 
34 	/* Unmapping state. */
35 	struct vma_munmap_struct vms;
36 	struct ma_state mas_detach;
37 	struct maple_tree mt_detach;
38 
39 	/* Determine if we can check KSM flags early in mmap() logic. */
40 	bool check_ksm_early :1;
41 	/* If mapping a new VMA, hold the file rmap lock on the mapping. */
42 	bool hold_file_rmap_lock :1;
43 	/* If .mmap_prepare changed the file, we don't need to pin. */
44 	bool file_doesnt_need_get :1;
45 };
46 
47 #define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_) \
48 	struct mmap_state name = {					\
49 		.mm = mm_,						\
50 		.vmi = vmi_,						\
51 		.addr = addr_,						\
52 		.end = (addr_) + (len_),				\
53 		.pgoff = pgoff_,					\
54 		.pglen = PHYS_PFN(len_),				\
55 		.vm_flags = vm_flags_,					\
56 		.file = file_,						\
57 		.page_prot = vm_get_page_prot(vm_flags_),		\
58 	}
59 
60 #define VMG_MMAP_STATE(name, map_, vma_)				\
61 	struct vma_merge_struct name = {				\
62 		.mm = (map_)->mm,					\
63 		.vmi = (map_)->vmi,					\
64 		.start = (map_)->addr,					\
65 		.end = (map_)->end,					\
66 		.vm_flags = (map_)->vm_flags,				\
67 		.pgoff = (map_)->pgoff,					\
68 		.file = (map_)->file,					\
69 		.prev = (map_)->prev,					\
70 		.middle = vma_,						\
71 		.next = (vma_) ? NULL : (map_)->next,			\
72 		.state = VMA_MERGE_START,				\
73 	}
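
/*
 * Illustrative sketch only (not from a real call site): the mmap() path
 * might compose these two helpers roughly as follows, assuming a
 * write-locked mm and already-validated addr/len/pgoff/vm_flags/file:
 *
 *	VMA_ITERATOR(vmi, mm, addr);
 *	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
 *
 *	map.prev = vma_prev(&vmi);
 *	...
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 *
 * vmg then describes a proposed merge over [map.addr, map.end), with
 * map.prev/map.next feeding the merge candidates.
 */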
74 
75 /* Was this VMA ever forked from a parent, i.e. might it contain CoW mappings? */
76 static bool vma_is_fork_child(struct vm_area_struct *vma)
77 {
78 	/*
79 	 * The list_is_singular() test is to avoid merging VMAs cloned from
80 	 * parents, which avoids scalability problems caused by contention on
81 	 * the anon_vma root lock.
82 	 */
83 	return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain);
84 }
85 
86 static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
87 {
88 	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
89 	vma_flags_t diff;
90 
91 	if (!mpol_equal(vmg->policy, vma_policy(vma)))
92 		return false;
93 
94 	diff = vma_flags_diff_pair(&vma->flags, &vmg->vma_flags);
95 	vma_flags_clear_mask(&diff, VMA_IGNORE_MERGE_FLAGS);
96 
97 	if (!vma_flags_empty(&diff))
98 		return false;
99 	if (vma->vm_file != vmg->file)
100 		return false;
101 	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
102 		return false;
103 	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
104 		return false;
105 	return true;
106 }
107 
108 static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
109 {
110 	struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev;
111 	struct vm_area_struct *src = vmg->middle; /* existing merge case. */
112 	struct anon_vma *tgt_anon = tgt->anon_vma;
113 	struct anon_vma *src_anon = vmg->anon_vma;
114 
115 	/*
116 	 * We _can_ have !src with vmg->anon_vma set, via copy_vma(). In this
117 	 * instance we will remove the existing VMA's anon_vmas, so there are
118 	 * no scalability concerns.
119 	 */
120 	VM_WARN_ON(src && src_anon != src->anon_vma);
121 
122 	/* Case 1 - we will dup_anon_vma() from src into tgt. */
123 	if (!tgt_anon && src_anon) {
124 		struct vm_area_struct *copied_from = vmg->copied_from;
125 
126 		if (vma_is_fork_child(src))
127 			return false;
128 		if (vma_is_fork_child(copied_from))
129 			return false;
130 
131 		return true;
132 	}
133 	/* Case 2 - we will simply use tgt's anon_vma. */
134 	if (tgt_anon && !src_anon)
135 		return !vma_is_fork_child(tgt);
136 	/* Case 3 - the anon_vma's are already shared. */
137 	return src_anon == tgt_anon;
138 }
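
/*
 * To summarise the above (informal): with (tgt anon_vma, src anon_vma) as
 * inputs, the result is:
 *
 *	(NULL, NULL) - mergeable (case 3, trivially "shared").
 *	(NULL, set)  - mergeable iff neither src nor the copied-from VMA is a
 *		       fork child (case 1, requires dup_anon_vma()).
 *	(set, NULL)  - mergeable iff tgt is not a fork child (case 2).
 *	(set, set)   - mergeable iff both are the same anon_vma (case 3).
 */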
139 
140 /*
141  * init_multi_vma_prep() - Initializer for struct vma_prepare
142  * @vp: The vma_prepare struct
143  * @vma: The vma that will be altered once locked
144  * @vmg: The merge state that will be used to determine adjustment and VMA
145  *       removal.
146  */
147 static void init_multi_vma_prep(struct vma_prepare *vp,
148 				struct vm_area_struct *vma,
149 				struct vma_merge_struct *vmg)
150 {
151 	struct vm_area_struct *adjust;
152 	struct vm_area_struct **remove = &vp->remove;
153 
154 	memset(vp, 0, sizeof(struct vma_prepare));
155 	vp->vma = vma;
156 	vp->anon_vma = vma->anon_vma;
157 
158 	if (vmg && vmg->__remove_middle) {
159 		*remove = vmg->middle;
160 		remove = &vp->remove2;
161 	}
162 	if (vmg && vmg->__remove_next)
163 		*remove = vmg->next;
164 
165 	if (vmg && vmg->__adjust_middle_start)
166 		adjust = vmg->middle;
167 	else if (vmg && vmg->__adjust_next_start)
168 		adjust = vmg->next;
169 	else
170 		adjust = NULL;
171 
172 	vp->adj_next = adjust;
173 	if (!vp->anon_vma && adjust)
174 		vp->anon_vma = adjust->anon_vma;
175 
176 	VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma &&
177 		   vp->anon_vma != adjust->anon_vma);
178 
179 	vp->file = vma->vm_file;
180 	if (vp->file)
181 		vp->mapping = vma->vm_file->f_mapping;
182 
183 	if (vmg && vmg->skip_vma_uprobe)
184 		vp->skip_vma_uprobe = true;
185 }
186 
187 /*
188  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
189  * in front of (at a lower virtual address and file offset than) the vma.
190  *
191  * We cannot merge two vmas if they have differently assigned (non-NULL)
192  * anon_vmas, nor if the same anon_vma is assigned but offsets are incompatible.
193  *
194  * We don't check here for the merged mmap wrapping around the end of pagecache
195  * indices (16TB on ia32) because do_mmap() does not permit mmaps which
196  * wrap, nor mmaps which cover the final page at index -1UL.
197  *
198  * We assume the vma may be removed as part of the merge.
199  */
200 static bool can_vma_merge_before(struct vma_merge_struct *vmg)
201 {
202 	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
203 
204 	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
205 	    is_mergeable_anon_vma(vmg, /* merge_next = */ true)) {
206 		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
207 			return true;
208 	}
209 
210 	return false;
211 }
212 
213 /*
214  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
215  * beyond (at a higher virtual address and file offset than) the vma.
216  *
217  * We cannot merge two vmas if they have differently assigned (non-NULL)
218  * anon_vmas, nor if the same anon_vma is assigned but offsets are incompatible.
219  *
220  * We assume that vma is not removed as part of the merge.
221  */
222 static bool can_vma_merge_after(struct vma_merge_struct *vmg)
223 {
224 	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
225 	    is_mergeable_anon_vma(vmg, /* merge_next = */ false)) {
226 		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
227 			return true;
228 	}
229 	return false;
230 }
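
/*
 * Worked example (illustrative, assuming 4KiB pages): if prev maps file
 * pages over [0x1000, 0x3000) with vm_pgoff == 0, then vma_pages(prev) == 2,
 * and a proposed range starting at 0x3000 with pgoff == 2 satisfies
 * prev->vm_pgoff + vma_pages(prev) == pgoff, so can_vma_merge_after()
 * succeeds (given the other mergeability checks pass). With pgoff == 3 the
 * file contents would not be contiguous and no merge is possible.
 */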
231 
232 static void __vma_link_file(struct vm_area_struct *vma,
233 			    struct address_space *mapping)
234 {
235 	if (vma_is_shared_maywrite(vma))
236 		mapping_allow_writable(mapping);
237 
238 	flush_dcache_mmap_lock(mapping);
239 	vma_interval_tree_insert(vma, &mapping->i_mmap);
240 	flush_dcache_mmap_unlock(mapping);
241 }
242 
243 /*
244  * Requires inode->i_mapping->i_mmap_rwsem
245  */
246 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
247 				      struct address_space *mapping)
248 {
249 	if (vma_is_shared_maywrite(vma))
250 		mapping_unmap_writable(mapping);
251 
252 	flush_dcache_mmap_lock(mapping);
253 	vma_interval_tree_remove(vma, &mapping->i_mmap);
254 	flush_dcache_mmap_unlock(mapping);
255 }
256 
257 /*
258  * vma has some anon_vma assigned, and is already inserted on that
259  * anon_vma's interval trees.
260  *
261  * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
262  * vma must be removed from the anon_vma's interval trees using
263  * anon_vma_interval_tree_pre_update_vma().
264  *
265  * After the update, the vma will be reinserted using
266  * anon_vma_interval_tree_post_update_vma().
267  *
268  * The entire update must be protected by exclusive mmap_lock and by
269  * the root anon_vma's mutex.
270  */
271 static void
272 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
273 {
274 	struct anon_vma_chain *avc;
275 
276 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
277 		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
278 }
279 
280 static void
281 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
282 {
283 	struct anon_vma_chain *avc;
284 
285 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
286 		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
287 }
288 
289 /*
290  * vma_prepare() - Helper function for handling locking VMAs prior to altering
291  * @vp: The initialized vma_prepare struct
292  */
293 static void vma_prepare(struct vma_prepare *vp)
294 {
295 	if (vp->file) {
296 		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
297 
298 		if (vp->adj_next)
299 			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
300 				      vp->adj_next->vm_end);
301 
302 		i_mmap_lock_write(vp->mapping);
303 		if (vp->insert && vp->insert->vm_file) {
304 			/*
305 			 * Put into interval tree now, so instantiated pages
306 			 * are visible to arm/parisc __flush_dcache_page
307 			 * throughout; but we cannot insert into address
308 			 * space until vma start or end is updated.
309 			 */
310 			__vma_link_file(vp->insert,
311 					vp->insert->vm_file->f_mapping);
312 		}
313 	}
314 
315 	if (vp->anon_vma) {
316 		anon_vma_lock_write(vp->anon_vma);
317 		anon_vma_interval_tree_pre_update_vma(vp->vma);
318 		if (vp->adj_next)
319 			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
320 	}
321 
322 	if (vp->file) {
323 		flush_dcache_mmap_lock(vp->mapping);
324 		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
325 		if (vp->adj_next)
326 			vma_interval_tree_remove(vp->adj_next,
327 						 &vp->mapping->i_mmap);
328 	}
329 
331 
332 /*
333  * vma_complete() - Helper function for handling the unlocking after altering
334  * VMAs, or for inserting a VMA.
335  *
336  * @vp: The vma_prepare struct
337  * @vmi: The vma iterator
338  * @mm: The mm_struct
339  */
340 static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
341 			 struct mm_struct *mm)
342 {
343 	if (vp->file) {
344 		if (vp->adj_next)
345 			vma_interval_tree_insert(vp->adj_next,
346 						 &vp->mapping->i_mmap);
347 		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
348 		flush_dcache_mmap_unlock(vp->mapping);
349 	}
350 
351 	if (vp->remove && vp->file) {
352 		__remove_shared_vm_struct(vp->remove, vp->mapping);
353 		if (vp->remove2)
354 			__remove_shared_vm_struct(vp->remove2, vp->mapping);
355 	} else if (vp->insert) {
356 		/*
357 		 * split_vma has split insert from vma, and needs
358 		 * us to insert it before dropping the locks
359 		 * (it may either follow vma or precede it).
360 		 */
361 		vma_iter_store_new(vmi, vp->insert);
362 		mm->map_count++;
363 	}
364 
365 	if (vp->anon_vma) {
366 		anon_vma_interval_tree_post_update_vma(vp->vma);
367 		if (vp->adj_next)
368 			anon_vma_interval_tree_post_update_vma(vp->adj_next);
369 		anon_vma_unlock_write(vp->anon_vma);
370 	}
371 
372 	if (vp->file) {
373 		i_mmap_unlock_write(vp->mapping);
374 
375 		if (!vp->skip_vma_uprobe) {
376 			uprobe_mmap(vp->vma);
377 
378 			if (vp->adj_next)
379 				uprobe_mmap(vp->adj_next);
380 		}
381 	}
382 
383 	if (vp->remove) {
384 again:
385 		vma_mark_detached(vp->remove);
386 		if (vp->file) {
387 			uprobe_munmap(vp->remove, vp->remove->vm_start,
388 				      vp->remove->vm_end);
389 			fput(vp->file);
390 		}
391 		if (vp->remove->anon_vma)
392 			unlink_anon_vmas(vp->remove);
393 		mm->map_count--;
394 		mpol_put(vma_policy(vp->remove));
395 		if (!vp->remove2)
396 			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
397 		vm_area_free(vp->remove);
398 
399 		/*
400 		 * In mprotect's case 6 (see comments on vma_merge),
401 		 * we are removing both mid and next vmas
402 		 */
403 		if (vp->remove2) {
404 			vp->remove = vp->remove2;
405 			vp->remove2 = NULL;
406 			goto again;
407 		}
408 	}
409 	if (vp->insert && vp->file)
410 		uprobe_mmap(vp->insert);
411 }
412 
413 /*
414  * init_vma_prep() - Initializer wrapper for vma_prepare struct
415  * @vp: The vma_prepare struct
416  * @vma: The vma that will be altered once locked
417  */
418 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
419 {
420 	init_multi_vma_prep(vp, vma, NULL);
421 }
422 
423 /*
424  * Can the proposed VMA be merged with the left (previous) VMA taking into
425  * account the start position of the proposed range.
426  */
427 static bool can_vma_merge_left(struct vma_merge_struct *vmg)
429 {
430 	return vmg->prev && vmg->prev->vm_end == vmg->start &&
431 		can_vma_merge_after(vmg);
432 }
433 
434 /*
435  * Can the proposed VMA be merged with the right (next) VMA taking into
436  * account the end position of the proposed range.
437  *
438  * In addition, if we can merge with the left VMA, ensure that left and right
439  * anon_vma's are also compatible.
440  */
441 static bool can_vma_merge_right(struct vma_merge_struct *vmg,
442 				bool can_merge_left)
443 {
444 	struct vm_area_struct *next = vmg->next;
445 	struct vm_area_struct *prev;
446 
447 	if (!next || vmg->end != next->vm_start || !can_vma_merge_before(vmg))
448 		return false;
449 
450 	if (!can_merge_left)
451 		return true;
452 
453 	/*
454 	 * If we can merge with prev (left) and next (right), indicating that
455 	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
456 	 * does not mean prev and next are compatible with EACH OTHER.
457 	 *
458 	 * We therefore check this in addition to mergeability to either side.
459 	 */
460 	prev = vmg->prev;
461 	return !prev->anon_vma || !next->anon_vma ||
462 		prev->anon_vma == next->anon_vma;
463 }
464 
465 /*
466  * Close a vm structure and free it.
467  */
468 void remove_vma(struct vm_area_struct *vma)
469 {
470 	might_sleep();
471 	vma_close(vma);
472 	if (vma->vm_file)
473 		fput(vma->vm_file);
474 	mpol_put(vma_policy(vma));
475 	vm_area_free(vma);
476 }
477 
478 /*
479  * Get rid of page table information in the indicated region.
480  *
481  * Called with the mm semaphore held.
482  */
483 void unmap_region(struct unmap_desc *unmap)
484 {
485 	struct mm_struct *mm = unmap->first->vm_mm;
486 	struct mmu_gather tlb;
487 
488 	tlb_gather_mmu(&tlb, mm);
489 	update_hiwater_rss(mm);
490 	unmap_vmas(&tlb, unmap);
491 	mas_set(unmap->mas, unmap->tree_reset);
492 	free_pgtables(&tlb, unmap);
493 	tlb_finish_mmu(&tlb);
494 }
495 
496 /*
497  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
498  * has already been checked or doesn't make sense to fail.
499  * VMA Iterator will point to the original VMA.
500  */
501 static __must_check int
502 __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
503 	    unsigned long addr, int new_below)
504 {
505 	struct vma_prepare vp;
506 	struct vm_area_struct *new;
507 	int err;
508 
509 	WARN_ON(vma->vm_start >= addr);
510 	WARN_ON(vma->vm_end <= addr);
511 
512 	if (vma->vm_ops && vma->vm_ops->may_split) {
513 		err = vma->vm_ops->may_split(vma, addr);
514 		if (err)
515 			return err;
516 	}
517 
518 	new = vm_area_dup(vma);
519 	if (!new)
520 		return -ENOMEM;
521 
522 	if (new_below) {
523 		new->vm_end = addr;
524 	} else {
525 		new->vm_start = addr;
526 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
527 	}
528 
529 	err = -ENOMEM;
530 	vma_iter_config(vmi, new->vm_start, new->vm_end);
531 	if (vma_iter_prealloc(vmi, new))
532 		goto out_free_vma;
533 
534 	err = vma_dup_policy(vma, new);
535 	if (err)
536 		goto out_free_vmi;
537 
538 	err = anon_vma_clone(new, vma, VMA_OP_SPLIT);
539 	if (err)
540 		goto out_free_mpol;
541 
542 	if (new->vm_file)
543 		get_file(new->vm_file);
544 
545 	if (new->vm_ops && new->vm_ops->open)
546 		new->vm_ops->open(new);
547 
548 	vma_start_write(vma);
549 	vma_start_write(new);
550 
551 	init_vma_prep(&vp, vma);
552 	vp.insert = new;
553 	vma_prepare(&vp);
554 
555 	/*
556 	 * Get rid of huge pages and shared page tables straddling the split
557 	 * boundary.
558 	 */
559 	vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL);
560 	if (is_vm_hugetlb_page(vma))
561 		hugetlb_split(vma, addr);
562 
563 	if (new_below) {
564 		vma->vm_start = addr;
565 		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
566 	} else {
567 		vma->vm_end = addr;
568 	}
569 
570 	/* vma_complete stores the new vma */
571 	vma_complete(&vp, vmi, vma->vm_mm);
572 	validate_mm(vma->vm_mm);
573 
574 	/* Success. */
575 	if (new_below)
576 		vma_next(vmi);
577 	else
578 		vma_prev(vmi);
579 
580 	return 0;
581 
582 out_free_mpol:
583 	mpol_put(vma_policy(new));
584 out_free_vmi:
585 	vma_iter_free(vmi);
586 out_free_vma:
587 	vm_area_free(new);
588 	return err;
589 }
590 
591 /*
592  * Split a vma into two pieces at address 'addr'. A new vma is allocated
593  * either for the first part or the tail.
594  */
595 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
596 		     unsigned long addr, int new_below)
597 {
598 	if (vma->vm_mm->map_count >= get_sysctl_max_map_count())
599 		return -ENOMEM;
600 
601 	return __split_vma(vmi, vma, addr, new_below);
602 }
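
/*
 * Example (a sketch, not a real call site): changing the attributes of
 * [0x2000, 0x3000) within a VMA spanning [0x1000, 0x4000) requires two
 * splits before the middle portion can be modified in isolation:
 *
 *	split_vma(vmi, vma, 0x2000, 1);	(new VMA below: [0x1000, 0x2000))
 *	split_vma(vmi, vma, 0x3000, 0);	(new VMA above: [0x3000, 0x4000))
 *
 * In this file, vma_modify() performs exactly this dance when no merge with
 * a neighbour is possible.
 */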
603 
604 /*
605  * dup_anon_vma() - Helper function to duplicate anon_vma on VMA merge in the
606  * instance that the destination VMA has no anon_vma but the source does.
607  *
608  * @dst: The destination VMA
609  * @src: The source VMA
610  * @dup: Pointer to the destination VMA when successful.
611  *
612  * Returns: 0 on success.
613  */
614 static int dup_anon_vma(struct vm_area_struct *dst,
615 			struct vm_area_struct *src, struct vm_area_struct **dup)
616 {
617 	/*
618 	 * There are three cases to consider for correctly propagating
619 	 * anon_vma's on merge.
620 	 *
621 	 * The first is trivial - neither VMA has anon_vma, we need not do
622 	 * anything.
623 	 *
624 	 * The second where both have anon_vma is also a no-op, as they must
625 	 * then be the same, so there is simply nothing to copy.
626 	 *
627 	 * Here we cover the third - if the destination VMA has no anon_vma,
628 	 * that is it is unfaulted, we need to ensure that the newly merged
629 	 * range is referenced by the anon_vma's of the source.
630 	 */
631 	if (src->anon_vma && !dst->anon_vma) {
632 		int ret;
633 
634 		vma_assert_write_locked(dst);
635 		dst->anon_vma = src->anon_vma;
636 		ret = anon_vma_clone(dst, src, VMA_OP_MERGE_UNFAULTED);
637 		if (ret)
638 			return ret;
639 
640 		*dup = dst;
641 	}
642 
643 	return 0;
644 }
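
/*
 * For example (see vma_merge_existing_range() below): when merging an
 * unfaulted prev with a faulted middle, we call
 * dup_anon_vma(prev, middle, &anon_dup) so that prev's expanded range is
 * covered by middle's anon_vma before middle is removed. Should the merge
 * subsequently fail, the clone is undone via unlink_anon_vmas(anon_dup).
 */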
645 
646 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
647 void validate_mm(struct mm_struct *mm)
648 {
649 	int bug = 0;
650 	int i = 0;
651 	struct vm_area_struct *vma;
652 	VMA_ITERATOR(vmi, mm, 0);
653 
654 	mt_validate(&mm->mm_mt);
655 	for_each_vma(vmi, vma) {
656 #ifdef CONFIG_DEBUG_VM_RB
657 		struct anon_vma *anon_vma = vma->anon_vma;
658 		struct anon_vma_chain *avc;
659 #endif
660 		unsigned long vmi_start, vmi_end;
661 		bool warn = 0;
662 
663 		vmi_start = vma_iter_addr(&vmi);
664 		vmi_end = vma_iter_end(&vmi);
665 		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
666 			warn = 1;
667 
668 		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
669 			warn = 1;
670 
671 		if (warn) {
672 			pr_emerg("issue in %s\n", current->comm);
673 			dump_stack();
674 			dump_vma(vma);
675 			pr_emerg("tree range: %px start %lx end %lx\n", vma,
676 				 vmi_start, vmi_end - 1);
677 			vma_iter_dump_tree(&vmi);
678 		}
679 
680 #ifdef CONFIG_DEBUG_VM_RB
681 		if (anon_vma) {
682 			anon_vma_lock_read(anon_vma);
683 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
684 				anon_vma_interval_tree_verify(avc);
685 			anon_vma_unlock_read(anon_vma);
686 		}
687 #endif
688 		/* Check for an infinite loop */
689 		if (++i > mm->map_count + 10) {
690 			i = -1;
691 			break;
692 		}
693 	}
694 	if (i != mm->map_count) {
695 		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
696 		bug = 1;
697 	}
698 	VM_BUG_ON_MM(bug, mm);
699 }
700 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
701 
702 /*
703  * Based on the vmg flag indicating whether we need to adjust the vm_start field
704  * for the middle or next VMA, we calculate what the range of the newly adjusted
705  * VMA ought to be, and set the VMA's range accordingly.
706  */
707 static void vmg_adjust_set_range(struct vma_merge_struct *vmg)
708 {
709 	struct vm_area_struct *adjust;
710 	pgoff_t pgoff;
711 
712 	if (vmg->__adjust_middle_start) {
713 		adjust = vmg->middle;
714 		pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start);
715 	} else if (vmg->__adjust_next_start) {
716 		adjust = vmg->next;
717 		pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end);
718 	} else {
719 		return;
720 	}
721 
722 	vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff);
723 }
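
/*
 * Worked example (illustrative, assuming 4KiB pages): if middle spans
 * [0x2000, 0x6000) with vm_pgoff == 1 and prev is expanded such that
 * vmg->end == 0x4000, then with __adjust_middle_start set middle becomes
 * [0x4000, 0x6000) with vm_pgoff == 1 + PHYS_PFN(0x4000 - 0x2000) == 3, so
 * the first remaining page keeps its original file offset.
 */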
724 
725 /*
726  * Actually perform the VMA merge operation.
727  *
728  * IMPORTANT: If vmg->give_up_on_oom is set, we guarantee not to modify any
729  * VMAs or cause inconsistent state should an OOM condition arise.
730  *
731  * Returns 0 on success, or an error value on failure.
732  */
733 static int commit_merge(struct vma_merge_struct *vmg)
734 {
735 	struct vm_area_struct *vma;
736 	struct vma_prepare vp;
737 
738 	if (vmg->__adjust_next_start) {
739 		/* We manipulate middle and adjust next, which is the target. */
740 		vma = vmg->middle;
741 		vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end);
742 	} else {
743 		vma = vmg->target;
744 		 /* Note: vma iterator must be pointing to 'start'. */
745 		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
746 	}
747 
748 	init_multi_vma_prep(&vp, vma, vmg);
749 
750 	/*
751 	 * If vmg->give_up_on_oom is set, we're safe, because we don't actually
752 	 * manipulate any VMAs until we succeed at preallocation.
753 	 *
754 	 * Past this point, we will not return an error.
755 	 */
756 	if (vma_iter_prealloc(vmg->vmi, vma))
757 		return -ENOMEM;
758 
759 	vma_prepare(&vp);
760 	/*
761 	 * THP pages may need to do additional splits if we increase
762 	 * middle->vm_start.
763 	 */
764 	vma_adjust_trans_huge(vma, vmg->start, vmg->end,
765 			      vmg->__adjust_middle_start ? vmg->middle : NULL);
766 	vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
767 	vmg_adjust_set_range(vmg);
768 	vma_iter_store_overwrite(vmg->vmi, vmg->target);
769 
770 	vma_complete(&vp, vmg->vmi, vma->vm_mm);
771 
772 	return 0;
773 }
774 
775 /* We can only remove VMAs when merging if they do not have a close hook. */
776 static bool can_merge_remove_vma(struct vm_area_struct *vma)
777 {
778 	return !vma->vm_ops || !vma->vm_ops->close;
779 }
780 
781 /*
782  * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
783  * attributes modified.
784  *
785  * @vmg: Describes the modifications being made to a VMA and associated
786  *       metadata.
787  *
788  * When the attributes of a range within a VMA change, then it might be possible
789  * for immediately adjacent VMAs to be merged into that VMA due to having
790  * identical properties.
791  *
792  * This function checks for the existence of any such mergeable VMAs and updates
793  * the maple tree describing the @vmg->middle->vm_mm address space to account
794  * for this, as well as any VMAs shrunk/expanded/deleted as a result of this
795  * merge.
796  *
797  * As part of this operation, if a merge occurs, the @vmg object will have its
798  * middle, start, end and pgoff fields modified to execute the merge. Subsequent
799  * calls to this function should reset these fields.
800  *
801  * Returns: The merged VMA if merge succeeds, or NULL otherwise.
802  *
803  * ASSUMPTIONS:
804  * - The caller must assign the VMA to be modified to @vmg->middle.
805  * - The caller must have set @vmg->prev to the previous VMA, if there is one.
806  * - The caller must not set @vmg->next, as we determine this.
807  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
808  * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end).
809  */
810 static __must_check struct vm_area_struct *vma_merge_existing_range(
811 		struct vma_merge_struct *vmg)
812 {
813 	vma_flags_t sticky_flags = vma_flags_and_mask(&vmg->vma_flags,
814 						      VMA_STICKY_FLAGS);
815 	struct vm_area_struct *middle = vmg->middle;
816 	struct vm_area_struct *prev = vmg->prev;
817 	struct vm_area_struct *next;
818 	struct vm_area_struct *anon_dup = NULL;
819 	unsigned long start = vmg->start;
820 	unsigned long end = vmg->end;
821 	bool left_side = middle && start == middle->vm_start;
822 	bool right_side = middle && end == middle->vm_end;
823 	int err = 0;
824 	bool merge_left, merge_right, merge_both;
825 
826 	mmap_assert_write_locked(vmg->mm);
827 	VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */
828 	VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
829 	VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
830 	VM_WARN_ON_VMG(start >= end, vmg);
831 
832 	/*
833 	 * If middle == prev, then we may be offset into the VMA. Otherwise, the
834 	 * range must start at middle, and in either case must lie within it.
835 	 */
836 	VM_WARN_ON_VMG(middle &&
837 		       ((middle != prev && vmg->start != middle->vm_start) ||
838 			vmg->end > middle->vm_end), vmg);
839 	/* The vmi must be positioned within vmg->middle. */
840 	VM_WARN_ON_VMG(middle &&
841 		       !(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
842 			 vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
843 	/* An existing merge can never be used by the mremap() logic. */
844 	VM_WARN_ON_VMG(vmg->copied_from, vmg);
845 
846 	vmg->state = VMA_MERGE_NOMERGE;
847 
848 	/*
849 	 * If this is a special mapping, or if the range being modified is
850 	 * neither at the leftmost nor the rightmost edge of the VMA, then we
851 	 * have no chance of merging and should abort.
852 	 */
853 	if (vmg->vm_flags & VM_SPECIAL || (!left_side && !right_side))
854 		return NULL;
855 
856 	if (left_side)
857 		merge_left = can_vma_merge_left(vmg);
858 	else
859 		merge_left = false;
860 
861 	if (right_side) {
862 		next = vmg->next = vma_iter_next_range(vmg->vmi);
863 		vma_iter_prev_range(vmg->vmi);
864 
865 		merge_right = can_vma_merge_right(vmg, merge_left);
866 	} else {
867 		merge_right = false;
868 		next = NULL;
869 	}
870 
871 	if (merge_left)		/* If merging prev, position iterator there. */
872 		vma_prev(vmg->vmi);
873 	else if (!merge_right)	/* If we have nothing to merge, abort. */
874 		return NULL;
875 
876 	merge_both = merge_left && merge_right;
877 	/* If we span the entire VMA, a merge implies it will be deleted. */
878 	vmg->__remove_middle = left_side && right_side;
879 
880 	/*
881 	 * If we need to remove middle in its entirety but are unable to do so,
882 	 * we have no sensible recourse but to abort the merge.
883 	 */
884 	if (vmg->__remove_middle && !can_merge_remove_vma(middle))
885 		return NULL;
886 
887 	/*
888 	 * If we merge both VMAs, then next is also deleted. This implies that
889 	 * __remove_middle is also set.
890 	 */
891 	vmg->__remove_next = merge_both;
892 
893 	/*
894 	 * If we cannot delete next, then we can reduce the operation to merging
895 	 * prev and middle (thereby deleting middle).
896 	 */
897 	if (vmg->__remove_next && !can_merge_remove_vma(next)) {
898 		vmg->__remove_next = false;
899 		merge_right = false;
900 		merge_both = false;
901 	}
902 
903 	/* No matter what happens, we will be adjusting middle. */
904 	vma_start_write(middle);
905 
906 	if (merge_right) {
907 		vma_flags_t next_sticky;
908 
909 		vma_start_write(next);
910 		vmg->target = next;
911 		next_sticky = vma_flags_and_mask(&next->flags, VMA_STICKY_FLAGS);
912 		vma_flags_set_mask(&sticky_flags, next_sticky);
913 	}
914 
915 	if (merge_left) {
916 		vma_flags_t prev_sticky;
917 
918 		vma_start_write(prev);
919 		vmg->target = prev;
920 
921 		prev_sticky = vma_flags_and_mask(&prev->flags, VMA_STICKY_FLAGS);
922 		vma_flags_set_mask(&sticky_flags, prev_sticky);
923 	}
924 
925 	if (merge_both) {
926 		/*
927 		 * |<-------------------->|
928 		 * |-------********-------|
929 		 *   prev   middle   next
930 		 *  extend  delete  delete
931 		 */
932 
933 		vmg->start = prev->vm_start;
934 		vmg->end = next->vm_end;
935 		vmg->pgoff = prev->vm_pgoff;
936 
937 		/*
938 		 * We already ensured anon_vma compatibility above, so now it's
939 		 * simply a case of, if prev has no anon_vma object, which of
940 		 * next or middle contains the anon_vma we must duplicate.
941 		 */
942 		err = dup_anon_vma(prev, next->anon_vma ? next : middle,
943 				   &anon_dup);
944 	} else if (merge_left) {
945 		/*
946 		 * |<------------>|      OR
947 		 * |<----------------->|
948 		 * |-------*************
949 		 *   prev     middle
950 		 *  extend shrink/delete
951 		 */
952 
953 		vmg->start = prev->vm_start;
954 		vmg->pgoff = prev->vm_pgoff;
955 
956 		if (!vmg->__remove_middle)
957 			vmg->__adjust_middle_start = true;
958 
959 		err = dup_anon_vma(prev, middle, &anon_dup);
960 	} else { /* merge_right */
961 		/*
962 		 *     |<------------->| OR
963 		 * |<----------------->|
964 		 * *************-------|
965 		 *    middle     next
966 		 * shrink/delete extend
967 		 */
968 
969 		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
970 
971 		VM_WARN_ON_VMG(!merge_right, vmg);
972 		/* If we are offset into a VMA, then prev must be middle. */
973 		VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg);
974 
975 		if (vmg->__remove_middle) {
976 			vmg->end = next->vm_end;
977 			vmg->pgoff = next->vm_pgoff - pglen;
978 		} else {
979 			/* We shrink middle and expand next. */
980 			vmg->__adjust_next_start = true;
981 			vmg->start = middle->vm_start;
982 			vmg->end = start;
983 			vmg->pgoff = middle->vm_pgoff;
984 		}
985 
986 		err = dup_anon_vma(next, middle, &anon_dup);
987 	}
988 
989 	if (err || commit_merge(vmg))
990 		goto abort;
991 
992 	vma_set_flags_mask(vmg->target, sticky_flags);
993 	khugepaged_enter_vma(vmg->target, vmg->vm_flags);
994 	vmg->state = VMA_MERGE_SUCCESS;
995 	return vmg->target;
996 
997 abort:
998 	vma_iter_set(vmg->vmi, start);
999 	vma_iter_load(vmg->vmi);
1000 
1001 	if (anon_dup)
1002 		unlink_anon_vmas(anon_dup);
1003 
1004 	/*
1005 	 * This means we have failed to clone anon_vma's correctly, but no
1006 	 * actual changes to VMAs have occurred, so no harm no foul - if the
1007 	 * user doesn't want this reported and instead just wants to give up on
1008 	 * the merge, allow it.
1009 	 */
1010 	if (!vmg->give_up_on_oom)
1011 		vmg->state = VMA_MERGE_ERROR_NOMEM;
1012 	return NULL;
1013 }
1014 
1015 /*
1016  * vma_merge_new_range - Attempt to merge a new VMA into address space
1017  *
1018  * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
1019  *       (exclusive), which we try to merge with any adjacent VMAs if possible.
1020  *
1021  * We are about to add a VMA to the address space starting at @vmg->start and
1022  * ending at @vmg->end. There are three different possible scenarios:
1023  *
1024  * 1. There is a VMA with identical properties immediately adjacent to the
1025  *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
1026  *    EXPAND that VMA:
1027  *
1028  * Proposed:       |-----|  or  |-----|
1029  * Existing:  |----|                  |----|
1030  *
1031  * 2. There are VMAs with identical properties immediately adjacent to the
1032  *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
1033  *    EXPAND the former and REMOVE the latter:
1034  *
1035  * Proposed:       |-----|
1036  * Existing:  |----|     |----|
1037  *
1038  * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
1039  *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
1040  *
1041  * In instances where we can merge, this function returns the expanded VMA which
1042  * will have its range adjusted accordingly and the underlying maple tree also
1043  * adjusted.
1044  *
1045  * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
1046  *          to the VMA we expanded.
1047  *
1048  * This function adjusts @vmg to provide @vmg->next if not already specified,
1049  * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
1050  *
1051  * ASSUMPTIONS:
1052  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
1053  * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
1054  *   other than VMAs that will be unmapped should the operation succeed.
1055  * - The caller must have specified the previous vma in @vmg->prev.
1056  * - The caller must have specified the next vma in @vmg->next.
1057  * - The caller must have positioned the vmi at or before the gap.
1058  */
1059 struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
1060 {
1061 	struct vm_area_struct *prev = vmg->prev;
1062 	struct vm_area_struct *next = vmg->next;
1063 	unsigned long end = vmg->end;
1064 	bool can_merge_left, can_merge_right;
1065 
1066 	mmap_assert_write_locked(vmg->mm);
1067 	VM_WARN_ON_VMG(vmg->middle, vmg);
1068 	VM_WARN_ON_VMG(vmg->target, vmg);
1069 	/* vmi must point at or before the gap. */
1070 	VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);
1071 
1072 	vmg->state = VMA_MERGE_NOMERGE;
1073 
1074 	/* Special VMAs are unmergeable, also if no prev/next. */
1075 	if ((vmg->vm_flags & VM_SPECIAL) || (!prev && !next))
1076 		return NULL;
1077 
1078 	can_merge_left = can_vma_merge_left(vmg);
1079 	can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left);
1080 
1081 	/* If we can merge with the next VMA, adjust vmg accordingly. */
1082 	if (can_merge_right) {
1083 		vmg->end = next->vm_end;
1084 		vmg->target = next;
1085 	}
1086 
1087 	/* If we can merge with the previous VMA, adjust vmg accordingly. */
1088 	if (can_merge_left) {
1089 		vmg->start = prev->vm_start;
1090 		vmg->target = prev;
1091 		vmg->pgoff = prev->vm_pgoff;
1092 
1093 		/*
1094 		 * If this merge would result in removal of the next VMA but we
1095 		 * are not permitted to do so, reduce the operation to merging
1096 		 * prev and vma.
1097 		 */
1098 		if (can_merge_right && !can_merge_remove_vma(next))
1099 			vmg->end = end;
1100 
1101 		/* In expand-only case we are already positioned at prev. */
1102 		if (!vmg->just_expand) {
1103 			/* Equivalent to going to the previous range. */
1104 			vma_prev(vmg->vmi);
1105 		}
1106 	}
1107 
1108 	/*
1109 	 * Now try to expand adjacent VMA(s). This takes care of removing the
1110 	 * following VMA if we have VMAs on both sides.
1111 	 */
1112 	if (vmg->target && !vma_expand(vmg)) {
1113 		khugepaged_enter_vma(vmg->target, vmg->vm_flags);
1114 		vmg->state = VMA_MERGE_SUCCESS;
1115 		return vmg->target;
1116 	}
1117 
1118 	return NULL;
1119 }
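
/*
 * A minimal caller sketch (illustrative only - real callers such as the
 * mmap() path also handle unmapping, accounting, etc.), assuming prev/next
 * have been determined and the iterator positioned at or before the gap:
 *
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 *	struct vm_area_struct *vma;
 *
 *	vma = vma_merge_new_range(&vmg);
 *	if (vma)
 *		return vma;	(expanded over prev and/or next)
 *	(otherwise, allocate a fresh VMA spanning [map.addr, map.end))
 */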
1120 
1121 /*
1122  * vma_merge_copied_range - Attempt to merge a VMA that is being copied by
1123  * mremap()
1124  *
1125  * @vmg: Describes the VMA we are adding, in the copied-to range @vmg->start to
1126  *       @vmg->end (exclusive), which we try to merge with any adjacent VMAs if
1127  *       possible.
1128  *
1129  * vmg->prev, next, start, end, pgoff should all be relative to the COPIED TO
1130  * range, i.e. the target range for the VMA.
1131  *
1132  * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
1133  *          to the VMA we expanded.
1134  *
1135  * ASSUMPTIONS: Same as vma_merge_new_range(), except vmg->middle must contain
1136  *              the copied-from VMA.
1137  */
1138 static struct vm_area_struct *vma_merge_copied_range(struct vma_merge_struct *vmg)
1139 {
1140 	/* We must have a copied-from VMA. */
1141 	VM_WARN_ON_VMG(!vmg->middle, vmg);
1142 
1143 	vmg->copied_from = vmg->middle;
1144 	vmg->middle = NULL;
1145 	return vma_merge_new_range(vmg);
1146 }
1147 
1148 /*
1149  * vma_expand - Expand an existing VMA
1150  *
1151  * @vmg: Describes a VMA expansion operation.
1152  *
1153  * Expand @vmg->target to span vmg->start to vmg->end.  Can expand off the
1154  * start and end.  Will expand over vmg->next if it's different from
1155  * vmg->target and vmg->end == vmg->next->vm_end.  Checking whether
1156  * vmg->target can expand and merge with vmg->next must be handled by the caller.
1157  *
1158  * Returns: 0 on success.
1159  *
1160  * ASSUMPTIONS:
1161  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
1162  * - The caller must have set @vmg->target and @vmg->next.
1163  */
1164 int vma_expand(struct vma_merge_struct *vmg)
1165 {
1166 	struct vm_area_struct *anon_dup = NULL;
1167 	struct vm_area_struct *target = vmg->target;
1168 	struct vm_area_struct *next = vmg->next;
1169 	bool remove_next = false;
1170 	vma_flags_t sticky_flags =
1171 		vma_flags_and_mask(&vmg->vma_flags, VMA_STICKY_FLAGS);
1172 	vma_flags_t target_sticky;
1173 	int ret = 0;
1174 
1175 	mmap_assert_write_locked(vmg->mm);
1176 	vma_start_write(target);
1177 
1178 	target_sticky = vma_flags_and_mask(&target->flags, VMA_STICKY_FLAGS);
1179 
1180 	if (next && target != next && vmg->end == next->vm_end)
1181 		remove_next = true;
1182 
1183 	/* We must have a target. */
1184 	VM_WARN_ON_VMG(!target, vmg);
1185 	/* This should have already been checked by this point. */
1186 	VM_WARN_ON_VMG(remove_next && !can_merge_remove_vma(next), vmg);
1187 	/* Not merging but overwriting any part of next is not handled. */
1188 	VM_WARN_ON_VMG(next && !remove_next &&
1189 		       next != target && vmg->end > next->vm_start, vmg);
1190 	/* Only handles expanding. */
1191 	VM_WARN_ON_VMG(target->vm_start < vmg->start ||
1192 		       target->vm_end > vmg->end, vmg);
1193 
1194 	vma_flags_set_mask(&sticky_flags, target_sticky);
1195 
1196 	/*
1197 	 * If we are removing the next VMA or copying from a VMA
1198 	 * (e.g. mremap()'ing), we must propagate anon_vma state.
1199 	 *
1200 	 * Note that, by convention, callers ignore OOM for this case, so
1201 	 * we don't need to account for vmg->give_up_on_oom here.
1202 	 */
1203 	if (remove_next)
1204 		ret = dup_anon_vma(target, next, &anon_dup);
1205 	if (!ret && vmg->copied_from)
1206 		ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
1207 	if (ret)
1208 		return ret;
1209 
1210 	if (remove_next) {
1211 		vma_flags_t next_sticky;
1212 
1213 		vma_start_write(next);
1214 		vmg->__remove_next = true;
1215 
1216 		next_sticky = vma_flags_and_mask(&next->flags, VMA_STICKY_FLAGS);
1217 		vma_flags_set_mask(&sticky_flags, next_sticky);
1218 	}
1219 	if (commit_merge(vmg))
1220 		goto nomem;
1221 
1222 	vma_set_flags_mask(target, sticky_flags);
1223 	return 0;
1224 
1225 nomem:
1226 	if (anon_dup)
1227 		unlink_anon_vmas(anon_dup);
1228 	/*
1229 	 * If the user requests that we just give up on OOM, we are safe to do so
1230 	 * here, as commit_merge() provides this contract to us. Nothing has been
1231 	 * changed - no harm no foul, just don't report it.
1232 	 */
1233 	if (!vmg->give_up_on_oom)
1234 		vmg->state = VMA_MERGE_ERROR_NOMEM;
1235 	return -ENOMEM;
1236 }
1237 
1238 /*
1239  * vma_shrink() - Reduce an existing VMA's memory area
1240  * @vmi: The vma iterator
1241  * @vma: The VMA to modify
1242  * @start: The new start
1243  * @end: The new end
1244  *
1245  * Returns: 0 on success, -ENOMEM otherwise
1246  */
1247 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
1248 	       unsigned long start, unsigned long end, pgoff_t pgoff)
1249 {
1250 	struct vma_prepare vp;
1251 
1252 	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
1253 
1254 	if (vma->vm_start < start)
1255 		vma_iter_config(vmi, vma->vm_start, start);
1256 	else
1257 		vma_iter_config(vmi, end, vma->vm_end);
1258 
1259 	if (vma_iter_prealloc(vmi, NULL))
1260 		return -ENOMEM;
1261 
1262 	vma_start_write(vma);
1263 
1264 	init_vma_prep(&vp, vma);
1265 	vma_prepare(&vp);
1266 	vma_adjust_trans_huge(vma, start, end, NULL);
1267 
1268 	vma_iter_clear(vmi);
1269 	vma_set_range(vma, start, end, pgoff);
1270 	vma_complete(&vp, vmi, vma->vm_mm);
1271 	validate_mm(vma->vm_mm);
1272 	return 0;
1273 }
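
/*
 * Example (assuming 4KiB pages): shrinking a VMA from [0x1000, 0x5000) to
 * [0x1000, 0x3000) passes start == 0x1000, end == 0x3000 and an unchanged
 * pgoff, as the retained region still begins at the same file offset.
 * Trimming the front instead would require pgoff to be advanced by
 * PHYS_PFN of the length removed.
 */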
1274 
1275 static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
1276 		    struct ma_state *mas_detach, bool mm_wr_locked)
1277 {
1278 	struct unmap_desc unmap = {
1279 		.mas = mas_detach,
1280 		.first = vms->vma,
1281 		/* start and end may be different if there is no prev or next vma. */
1282 		.pg_start = vms->unmap_start,
1283 		.pg_end = vms->unmap_end,
1284 		.vma_start = vms->start,
1285 		.vma_end = vms->end,
1286 		/*
1287 		 * The tree limits and reset differ from the normal case since it's a
1288 		 * side-tree
1289 		 */
1290 		.tree_reset = 1,
1291 		.tree_end = vms->vma_count,
1292 		/*
1293 		 * We can free page tables without write-locking mmap_lock because VMAs
1294 		 * were isolated before we downgraded mmap_lock.
1295 		 */
1296 		.mm_wr_locked = mm_wr_locked,
1297 	};
1298 
1299 	if (!vms->clear_ptes) /* Nothing to do */
1300 		return;
1301 
1302 	mas_set(mas_detach, 1);
1303 	unmap_region(&unmap);
1304 	vms->clear_ptes = false;
1305 }
1306 
1307 static void vms_clean_up_area(struct vma_munmap_struct *vms,
1308 		struct ma_state *mas_detach)
1309 {
1310 	struct vm_area_struct *vma;
1311 
1312 	if (!vms->nr_pages)
1313 		return;
1314 
1315 	vms_clear_ptes(vms, mas_detach, true);
1316 	mas_set(mas_detach, 0);
1317 	mas_for_each(mas_detach, vma, ULONG_MAX)
1318 		vma_close(vma);
1319 }
1320 
1321 /*
1322  * vms_complete_munmap_vmas() - Finish the munmap() operation
1323  * @vms: The vma munmap struct
1324  * @mas_detach: The maple state of the detached vmas
1325  *
1326  * This updates the mm_struct, unmaps the region, frees the resources
1327  * used for the munmap() and may downgrade the lock - if requested.  This is
1328  * everything that needs to be done once the vma maple tree has been updated.
1329  */
1330 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
1331 		struct ma_state *mas_detach)
1332 {
1333 	struct vm_area_struct *vma;
1334 	struct mm_struct *mm;
1335 
1336 	mm = current->mm;
1337 	mm->map_count -= vms->vma_count;
1338 	mm->locked_vm -= vms->locked_vm;
1339 	if (vms->unlock)
1340 		mmap_write_downgrade(mm);
1341 
1342 	if (!vms->nr_pages)
1343 		return;
1344 
1345 	vms_clear_ptes(vms, mas_detach, !vms->unlock);
1346 	/* Update high watermark before we lower total_vm */
1347 	update_hiwater_vm(mm);
1348 	/* Stat accounting */
1349 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
1350 	/* Paranoid bookkeeping */
1351 	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
1352 	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
1353 	VM_WARN_ON(vms->data_vm > mm->data_vm);
1354 	mm->exec_vm -= vms->exec_vm;
1355 	mm->stack_vm -= vms->stack_vm;
1356 	mm->data_vm -= vms->data_vm;
1357 
1358 	/* Remove and clean up vmas */
1359 	mas_set(mas_detach, 0);
1360 	mas_for_each(mas_detach, vma, ULONG_MAX)
1361 		remove_vma(vma);
1362 
1363 	vm_unacct_memory(vms->nr_accounted);
1364 	validate_mm(mm);
1365 	if (vms->unlock)
1366 		mmap_read_unlock(mm);
1367 
1368 	__mt_destroy(mas_detach->tree);
1369 }
1370 
1371 /*
1372  * reattach_vmas() - Undo any munmap work and free resources
1373  * @mas_detach: The maple state with the detached maple tree
1374  *
1375  * Reattach any detached vmas and free up the maple tree used to track the vmas.
1376  */
1377 static void reattach_vmas(struct ma_state *mas_detach)
1378 {
1379 	struct vm_area_struct *vma;
1380 
1381 	mas_set(mas_detach, 0);
1382 	mas_for_each(mas_detach, vma, ULONG_MAX)
1383 		vma_mark_attached(vma);
1384 
1385 	__mt_destroy(mas_detach->tree);
1386 }
1387 
1388 /*
1389  * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
1390  * for removal at a later date.  Handles splitting the first and last VMAs if
1391  * necessary and marking the vmas as detached.
1392  *
1393  * @vms: The vma munmap struct
1394  * @mas_detach: The maple state tracking the detached tree
1395  *
1396  * Return: 0 on success, error otherwise
1397  */
1398 static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
1399 		struct ma_state *mas_detach)
1400 {
1401 	struct vm_area_struct *next = NULL;
1402 	int error;
1403 
1404 	/*
1405 	 * If we need to split any vma, do it now to save pain later.
1406 	 * Does it split the first one?
1407 	 */
1408 	if (vms->start > vms->vma->vm_start) {
1409 
1410 		/*
1411 		 * Make sure that map_count on return from munmap() will
1412 		 * not exceed its limit; but let map_count go just above
1413 		 * its limit temporarily, to help free resources as expected.
1414 		 */
1415 		if (vms->end < vms->vma->vm_end &&
1416 		    vms->vma->vm_mm->map_count >= get_sysctl_max_map_count()) {
1417 			error = -ENOMEM;
1418 			goto map_count_exceeded;
1419 		}
1420 
1421 		/* Don't bother splitting the VMA if we can't unmap it anyway */
1422 		if (vma_is_sealed(vms->vma)) {
1423 			error = -EPERM;
1424 			goto start_split_failed;
1425 		}
1426 
1427 		error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
1428 		if (error)
1429 			goto start_split_failed;
1430 	}
1431 	vms->prev = vma_prev(vms->vmi);
1432 	if (vms->prev)
1433 		vms->unmap_start = vms->prev->vm_end;
1434 
1435 	/*
1436 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
1437 	 * it is always overwritten.
1438 	 */
1439 	for_each_vma_range(*(vms->vmi), next, vms->end) {
1440 		long nrpages;
1441 
1442 		if (vma_is_sealed(next)) {
1443 			error = -EPERM;
1444 			goto modify_vma_failed;
1445 		}
1446 		/* Does it split the end? */
1447 		if (next->vm_end > vms->end) {
1448 			error = __split_vma(vms->vmi, next, vms->end, 0);
1449 			if (error)
1450 				goto end_split_failed;
1451 		}
1452 		vma_start_write(next);
1453 		mas_set(mas_detach, vms->vma_count++);
1454 		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
1455 		if (error)
1456 			goto munmap_gather_failed;
1457 
1458 		vma_mark_detached(next);
1459 		nrpages = vma_pages(next);
1460 
1461 		vms->nr_pages += nrpages;
1462 		if (next->vm_flags & VM_LOCKED)
1463 			vms->locked_vm += nrpages;
1464 
1465 		if (next->vm_flags & VM_ACCOUNT)
1466 			vms->nr_accounted += nrpages;
1467 
1468 		if (is_exec_mapping(next->vm_flags))
1469 			vms->exec_vm += nrpages;
1470 		else if (is_stack_mapping(next->vm_flags))
1471 			vms->stack_vm += nrpages;
1472 		else if (is_data_mapping(next->vm_flags))
1473 			vms->data_vm += nrpages;
1474 
1475 		if (vms->uf) {
1476 			/*
1477 			 * If userfaultfd_unmap_prep returns an error the vmas
1478 			 * will remain split, but userland will get a
1479 			 * highly unexpected error anyway. This is no
1480 			 * different than the case where the first of the two
1481 			 * split, even though we could. This failure is unlikely
1482 			 * enough that it's not worth optimizing for.
1483 			 * failure that it's not worth optimizing it for.
1484 			 */
1485 			error = userfaultfd_unmap_prep(next, vms->start,
1486 						       vms->end, vms->uf);
1487 			if (error)
1488 				goto userfaultfd_error;
1489 		}
1490 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
1491 		BUG_ON(next->vm_start < vms->start);
1492 		BUG_ON(next->vm_start > vms->end);
1493 #endif
1494 	}
1495 
1496 	vms->next = vma_next(vms->vmi);
1497 	if (vms->next)
1498 		vms->unmap_end = vms->next->vm_start;
1499 
1500 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1501 	/* Make sure no VMAs are about to be lost. */
1502 	{
1503 		MA_STATE(test, mas_detach->tree, 0, 0);
1504 		struct vm_area_struct *vma_mas, *vma_test;
1505 		int test_count = 0;
1506 
1507 		vma_iter_set(vms->vmi, vms->start);
1508 		rcu_read_lock();
1509 		vma_test = mas_find(&test, vms->vma_count - 1);
1510 		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
1511 			BUG_ON(vma_mas != vma_test);
1512 			test_count++;
1513 			vma_test = mas_next(&test, vms->vma_count - 1);
1514 		}
1515 		rcu_read_unlock();
1516 		BUG_ON(vms->vma_count != test_count);
1517 	}
1518 #endif
1519 
1520 	while (vma_iter_addr(vms->vmi) > vms->start)
1521 		vma_iter_prev_range(vms->vmi);
1522 
1523 	vms->clear_ptes = true;
1524 	return 0;
1525 
1526 userfaultfd_error:
1527 munmap_gather_failed:
1528 end_split_failed:
1529 modify_vma_failed:
1530 	reattach_vmas(mas_detach);
1531 start_split_failed:
1532 map_count_exceeded:
1533 	return error;
1534 }
1535 
1536 /*
1537  * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
1538  * @vms: The vma munmap struct
1539  * @vmi: The vma iterator
1540  * @vma: The first vm_area_struct to munmap
1541  * @start: The aligned start address to munmap
1542  * @end: The aligned end address to munmap
1543  * @uf: The userfaultfd list_head
1544  * @unlock: Unlock after the operation.  Only unlocked on success
1545  */
1546 static void init_vma_munmap(struct vma_munmap_struct *vms,
1547 		struct vma_iterator *vmi, struct vm_area_struct *vma,
1548 		unsigned long start, unsigned long end, struct list_head *uf,
1549 		bool unlock)
1550 {
1551 	vms->vmi = vmi;
1552 	vms->vma = vma;
1553 	if (vma) {
1554 		vms->start = start;
1555 		vms->end = end;
1556 	} else {
1557 		vms->start = vms->end = 0;
1558 	}
1559 	vms->unlock = unlock;
1560 	vms->uf = uf;
1561 	vms->vma_count = 0;
1562 	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
1563 	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
1564 	vms->unmap_start = FIRST_USER_ADDRESS;
1565 	vms->unmap_end = USER_PGTABLES_CEILING;
1566 	vms->clear_ptes = false;
1567 }
1568 
1569 /*
1570  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
1571  * @vmi: The vma iterator
1572  * @vma: The starting vm_area_struct
1573  * @mm: The mm_struct
1574  * @start: The aligned start address to munmap.
1575  * @end: The aligned end address to munmap.
1576  * @uf: The userfaultfd list_head
1577  * @unlock: Set to true to drop the mmap_lock.  unlocking only happens on
1578  * success.
1579  *
1580  * Return: 0 on success and drops the lock if so directed, error and leaves the
1581  * lock held otherwise.
1582  */
1583 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1584 		struct mm_struct *mm, unsigned long start, unsigned long end,
1585 		struct list_head *uf, bool unlock)
1586 {
1587 	struct maple_tree mt_detach;
1588 	MA_STATE(mas_detach, &mt_detach, 0, 0);
1589 	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1590 	mt_on_stack(mt_detach);
1591 	struct vma_munmap_struct vms;
1592 	int error;
1593 
1594 	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
1595 	error = vms_gather_munmap_vmas(&vms, &mas_detach);
1596 	if (error)
1597 		goto gather_failed;
1598 
1599 	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
1600 	if (error)
1601 		goto clear_tree_failed;
1602 
1603 	/* Point of no return */
1604 	vms_complete_munmap_vmas(&vms, &mas_detach);
1605 	return 0;
1606 
1607 clear_tree_failed:
1608 	reattach_vmas(&mas_detach);
1609 gather_failed:
1610 	validate_mm(mm);
1611 	return error;
1612 }
1613 
1614 /*
1615  * do_vmi_munmap() - munmap a given range.
1616  * @vmi: The vma iterator
1617  * @mm: The mm_struct
1618  * @start: The start address to munmap
1619  * @len: The length of the range to munmap
1620  * @uf: The userfaultfd list_head
1621  * @unlock: set to true if the user wants to drop the mmap_lock on success
1622  *
1623  * This function takes a @vmi that is either pointing to the previous VMA or
1624  * set to MA_START and sets it up to remove the mapping(s).  The @len will be
1625  * page-aligned.
1626  *
1627  * Return: 0 on success and drops the lock if so directed, error and leaves the
1628  * lock held otherwise.
1629  */
1630 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
1631 		  unsigned long start, size_t len, struct list_head *uf,
1632 		  bool unlock)
1633 {
1634 	unsigned long end;
1635 	struct vm_area_struct *vma;
1636 
1637 	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
1638 		return -EINVAL;
1639 
1640 	end = start + PAGE_ALIGN(len);
1641 	if (end == start)
1642 		return -EINVAL;
1643 
1644 	/* Find the first overlapping VMA */
1645 	vma = vma_find(vmi, end);
1646 	if (!vma) {
1647 		if (unlock)
1648 			mmap_write_unlock(mm);
1649 		return 0;
1650 	}
1651 
1652 	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
1653 }
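
/*
 * Typical usage (a sketch of the pattern; the real top-level munmap() path
 * also handles accounting and error paths), with uf being the caller's
 * userfaultfd list_head:
 *
 *	VMA_ITERATOR(vmi, mm, start);
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, true);
 *
 * With unlock == true the lock is dropped on success, so no unlock is
 * needed on that path; on error the write lock remains held.
 */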
1654 
1655 /*
1656  * We are about to modify one or more of a VMA's flags, policy, userfaultfd
1657  * context and anonymous VMA name within the range [start, end).
1658  *
1659  * As a result, we might be able to merge the newly modified VMA range with an
1660  * adjacent VMA with identical properties.
1661  *
1662  * If no merge is possible and the range does not span the entirety of the VMA,
1663  * we then need to split the VMA to accommodate the change.
1664  *
1665  * The function returns either the merged VMA, the original VMA if a split was
1666  * required instead, or an error if the split failed.
1667  */
1668 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
1669 {
1670 	struct vm_area_struct *vma = vmg->middle;
1671 	unsigned long start = vmg->start;
1672 	unsigned long end = vmg->end;
1673 	struct vm_area_struct *merged;
1674 
1675 	/* First, try to merge. */
1676 	merged = vma_merge_existing_range(vmg);
1677 	if (merged)
1678 		return merged;
1679 	if (vmg_nomem(vmg))
1680 		return ERR_PTR(-ENOMEM);
1681 
1682 	/*
1683 	 * Split can fail for reasons other than OOM, so if the user requests
1684 	 * this it's probably a mistake.
1685 	 */
1686 	VM_WARN_ON(vmg->give_up_on_oom &&
1687 		   (vma->vm_start != start || vma->vm_end != end));
1688 
1689 	/* Split any preceding portion of the VMA. */
1690 	if (vma->vm_start < start) {
1691 		int err = split_vma(vmg->vmi, vma, start, 1);
1692 
1693 		if (err)
1694 			return ERR_PTR(err);
1695 	}
1696 
1697 	/* Split any trailing portion of the VMA. */
1698 	if (vma->vm_end > end) {
1699 		int err = split_vma(vmg->vmi, vma, end, 0);
1700 
1701 		if (err)
1702 			return ERR_PTR(err);
1703 	}
1704 
1705 	return vma;
1706 }
1707 
1708 struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
1709 		struct vm_area_struct *prev, struct vm_area_struct *vma,
1710 		unsigned long start, unsigned long end,
1711 		vm_flags_t *vm_flags_ptr)
1712 {
1713 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1714 	const vm_flags_t vm_flags = *vm_flags_ptr;
1715 	struct vm_area_struct *ret;
1716 
1717 	vmg.vm_flags = vm_flags;
1718 
1719 	ret = vma_modify(&vmg);
1720 	if (IS_ERR(ret))
1721 		return ret;
1722 
1723 	/*
1724 	 * For a merge to succeed, the flags must match those
1725 	 * requested. However, sticky flags may have been retained, so propagate
1726 	 * them to the caller.
1727 	 */
1728 	if (vmg.state == VMA_MERGE_SUCCESS)
1729 		*vm_flags_ptr = ret->vm_flags;
1730 	return ret;
1731 }
1732 
1733 struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
1734 		struct vm_area_struct *prev, struct vm_area_struct *vma,
1735 		unsigned long start, unsigned long end,
1736 		struct anon_vma_name *new_name)
1737 {
1738 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1739 
1740 	vmg.anon_name = new_name;
1741 
1742 	return vma_modify(&vmg);
1743 }
1744 
1745 struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
1746 		struct vm_area_struct *prev, struct vm_area_struct *vma,
1747 		unsigned long start, unsigned long end,
1748 		struct mempolicy *new_pol)
1749 {
1750 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1751 
1752 	vmg.policy = new_pol;
1753 
1754 	return vma_modify(&vmg);
1755 }
1756 
1757 struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
1758 		struct vm_area_struct *prev, struct vm_area_struct *vma,
1759 		unsigned long start, unsigned long end, vm_flags_t vm_flags,
1760 		struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom)
1761 {
1762 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1763 
1764 	vmg.vm_flags = vm_flags;
1765 	vmg.uffd_ctx = new_ctx;
1766 	if (give_up_on_oom)
1767 		vmg.give_up_on_oom = true;
1768 
1769 	return vma_modify(&vmg);
1770 }
1771 
1772 /*
1773  * Expand vma by delta bytes, potentially merging with an immediately adjacent
1774  * VMA with identical properties.
1775  */
1776 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1777 					struct vm_area_struct *vma,
1778 					unsigned long delta)
1779 {
1780 	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
1781 
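	/* Look up the VMA (if any) immediately following the expanded range. */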
1782 	vmg.next = vma_iter_next_rewind(vmi, NULL);
1783 	vmg.middle = NULL; /* We use the VMA to populate VMG fields only. */
1784 
1785 	return vma_merge_new_range(&vmg);
1786 }
1787 
1788 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1789 {
1790 	vb->count = 0;
1791 }
1792 
1793 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1794 {
1795 	struct address_space *mapping;
1796 	int i;
1797 
1798 	mapping = vb->vmas[0]->vm_file->f_mapping;
1799 	i_mmap_lock_write(mapping);
1800 	for (i = 0; i < vb->count; i++) {
1801 		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1802 		__remove_shared_vm_struct(vb->vmas[i], mapping);
1803 	}
1804 	i_mmap_unlock_write(mapping);
1805 
1806 	unlink_file_vma_batch_init(vb);
1807 }
1808 
1809 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1810 			       struct vm_area_struct *vma)
1811 {
1812 	if (vma->vm_file == NULL)
1813 		return;
1814 
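	/*
	 * Flush the accumulated batch when switching to a different file or
	 * when the batch is full - each flush takes the i_mmap lock of a
	 * single mapping exactly once.
	 */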
1815 	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1816 	    vb->count == ARRAY_SIZE(vb->vmas))
1817 		unlink_file_vma_batch_process(vb);
1818 
1819 	vb->vmas[vb->count] = vma;
1820 	vb->count++;
1821 }
1822 
1823 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1824 {
1825 	if (vb->count > 0)
1826 		unlink_file_vma_batch_process(vb);
1827 }
1828 
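/*
 * Link @vma into its file's rmap interval tree, if it is file-backed. If
 * @hold_rmap_lock is true, mapping->i_mmap_rwsem is left held on return and
 * the caller must release it once the VMA is fully set up.
 */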
1829 static void vma_link_file(struct vm_area_struct *vma, bool hold_rmap_lock)
1830 {
1831 	struct file *file = vma->vm_file;
1832 	struct address_space *mapping;
1833 
1834 	if (file) {
1835 		mapping = file->f_mapping;
1836 		i_mmap_lock_write(mapping);
1837 		__vma_link_file(vma, mapping);
1838 		if (!hold_rmap_lock)
1839 			i_mmap_unlock_write(mapping);
1840 	}
1841 }
1842 
1843 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1844 {
1845 	VMA_ITERATOR(vmi, mm, 0);
1846 
1847 	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1848 	if (vma_iter_prealloc(&vmi, vma))
1849 		return -ENOMEM;
1850 
1851 	vma_start_write(vma);
1852 	vma_iter_store_new(&vmi, vma);
1853 	vma_link_file(vma, /* hold_rmap_lock= */false);
1854 	mm->map_count++;
1855 	validate_mm(mm);
1856 	return 0;
1857 }
1858 
1859 /*
1860  * Copy the vma structure to a new location in the same mm,
1861  * prior to moving page table entries, to effect an mremap move.
1862  */
1863 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1864 	unsigned long addr, unsigned long len, pgoff_t pgoff,
1865 	bool *need_rmap_locks)
1866 {
1867 	struct vm_area_struct *vma = *vmap;
1868 	unsigned long vma_start = vma->vm_start;
1869 	struct mm_struct *mm = vma->vm_mm;
1870 	struct vm_area_struct *new_vma;
1871 	bool faulted_in_anon_vma = true;
1872 	VMA_ITERATOR(vmi, mm, addr);
1873 	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
1874 
1875 	/*
1876 	 * If anonymous vma has not yet been faulted, update new pgoff
1877 	 * to match new location, to increase its chance of merging.
1878 	 */
1879 	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1880 		pgoff = addr >> PAGE_SHIFT;
1881 		faulted_in_anon_vma = false;
1882 	}
1883 
1884 	/*
1885 	 * If the VMA we are copying might contain a uprobe PTE, ensure
1886 	 * that we do not establish one upon merge. Otherwise, when mremap()
1887 	 * moves page tables, it will orphan the newly created PTE.
1888 	 */
1889 	if (vma->vm_file)
1890 		vmg.skip_vma_uprobe = true;
1891 
1892 	new_vma = find_vma_prev(mm, addr, &vmg.prev);
1893 	if (new_vma && new_vma->vm_start < addr + len)
1894 		return NULL;	/* should never get here */
1895 
1896 	vmg.pgoff = pgoff;
1897 	vmg.next = vma_iter_next_rewind(&vmi, NULL);
1898 	new_vma = vma_merge_copied_range(&vmg);
1899 
1900 	if (new_vma) {
1901 		/*
1902 		 * Source vma may have been merged into new_vma.
1903 		 */
1904 		if (unlikely(vma_start >= new_vma->vm_start &&
1905 			     vma_start < new_vma->vm_end)) {
1906 			/*
1907 			 * The only way we can get a vma_merge with
1908 			 * self during an mremap is if the vma hasn't
1909 			 * been faulted in yet and we were allowed to
1910 			 * reset the dst vma->vm_pgoff to the
1911 			 * destination address of the mremap to allow
1912 			 * the merge to happen. mremap must change the
1913 			 * vm_pgoff linearity between src and dst vmas
1914 			 * (in turn preventing a vma_merge) to be
1915 			 * safe. It is only safe to keep the vm_pgoff
1916 			 * linear if there are no pages mapped yet.
1917 			 */
1918 			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1919 			*vmap = vma = new_vma;
1920 		}
1921 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1922 	} else {
1923 		new_vma = vm_area_dup(vma);
1924 		if (!new_vma)
1925 			goto out;
1926 		vma_set_range(new_vma, addr, addr + len, pgoff);
1927 		if (vma_dup_policy(vma, new_vma))
1928 			goto out_free_vma;
1929 		if (anon_vma_clone(new_vma, vma, VMA_OP_REMAP))
1930 			goto out_free_mempol;
1931 		if (new_vma->vm_file)
1932 			get_file(new_vma->vm_file);
1933 		if (new_vma->vm_ops && new_vma->vm_ops->open)
1934 			new_vma->vm_ops->open(new_vma);
1935 		if (vma_link(mm, new_vma))
1936 			goto out_vma_link;
1937 		*need_rmap_locks = false;
1938 	}
1939 	return new_vma;
1940 
1941 out_vma_link:
1942 	fixup_hugetlb_reservations(new_vma);
1943 	vma_close(new_vma);
1944 
1945 	if (new_vma->vm_file)
1946 		fput(new_vma->vm_file);
1947 
1948 	unlink_anon_vmas(new_vma);
1949 out_free_mempol:
1950 	mpol_put(vma_policy(new_vma));
1951 out_free_vma:
1952 	vm_area_free(new_vma);
1953 out:
1954 	return NULL;
1955 }
1956 
1957 /*
1958  * Rough compatibility check to quickly see if it's even worth looking
1959  * at sharing an anon_vma.
1960  *
1961  * They need to have the same vm_file, and the flags can only differ
1962  * in things that mprotect may change.
1963  *
1964  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1965  * we can merge the two vma's. For example, we refuse to merge a vma if
1966  * there is a vm_ops->close() function, because that indicates that the
1967  * driver is doing some kind of reference counting. But that doesn't
1968  * really matter for the anon_vma sharing case.
1969  */
1970 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1971 {
1972 	vma_flags_t diff = vma_flags_diff_pair(&a->flags, &b->flags);
1973 
1974 	vma_flags_clear_mask(&diff, VMA_ACCESS_FLAGS);
1975 	vma_flags_clear_mask(&diff, VMA_IGNORE_MERGE_FLAGS);
1976 
1977 	return a->vm_end == b->vm_start &&
1978 		mpol_equal(vma_policy(a), vma_policy(b)) &&
1979 		a->vm_file == b->vm_file &&
1980 		vma_flags_empty(&diff) &&
1981 		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1982 }
1983 
1984 /*
1985  * Do some basic sanity checking to see if we can re-use the anon_vma
1986  * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1987  * the same as 'old', the other will be the new one that is trying
1988  * to share the anon_vma.
1989  *
1990  * NOTE! This runs with mmap_lock held for reading, so it is possible that
1991  * the anon_vma of 'old' is concurrently in the process of being set up
1992  * by another page fault trying to merge _that_. But that's ok: if it
1993  * is being set up, that automatically means that it will be a singleton
1994  * acceptable for merging, so we can do all of this optimistically. But
1995  * we do that READ_ONCE() to make sure that we never re-load the pointer.
1996  *
1997  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1998  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1999  * is to return an anon_vma that is "complex" due to having gone through
2000  * a fork).
2001  *
2002  * We also make sure that the two vma's are compatible (adjacent,
2003  * and with the same memory policies). That's all stable, even with just
2004  * a read lock on the mmap_lock.
2005  */
2006 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
2007 					  struct vm_area_struct *a,
2008 					  struct vm_area_struct *b)
2009 {
2010 	if (anon_vma_compatible(a, b)) {
2011 		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
2012 
2013 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
2014 			return anon_vma;
2015 	}
2016 	return NULL;
2017 }
2018 
2019 /*
2020  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
2021  * neighbouring vmas for a suitable anon_vma, before it goes off
2022  * to allocate a new anon_vma.  It checks because a repetitive
2023  * sequence of mprotects and faults may otherwise lead to distinct
2024  * anon_vmas being allocated, preventing vma merge in subsequent
2025  * mprotect.
2026  */
2027 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
2028 {
2029 	struct anon_vma *anon_vma = NULL;
2030 	struct vm_area_struct *prev, *next;
2031 	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
2032 
2033 	/* Try next first. */
2034 	next = vma_iter_load(&vmi);
2035 	if (next) {
2036 		anon_vma = reusable_anon_vma(next, vma, next);
2037 		if (anon_vma)
2038 			return anon_vma;
2039 	}
2040 
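	/*
	 * The first vma_prev() steps back to vma itself (hence the assert);
	 * the second reaches the actual predecessor.
	 */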
2041 	prev = vma_prev(&vmi);
2042 	VM_BUG_ON_VMA(prev != vma, vma);
2043 	prev = vma_prev(&vmi);
2044 	/* Now try prev. */
2045 	if (prev)
2046 		anon_vma = reusable_anon_vma(prev, prev, vma);
2047 
2048 	/*
2049 	 * We might reach here with anon_vma == NULL if we can't find
2050 	 * any reusable anon_vma.
2051 	 * There's no absolute need to look only at touching neighbours:
2052 	 * we could search further afield for "compatible" anon_vmas.
2053 	 * But it would probably just be a waste of time searching,
2054 	 * or lead to too many vmas hanging off the same anon_vma.
2055 	 * We're trying to allow mprotect remerging later on,
2056 	 * not trying to minimize memory used for anon_vmas.
2057 	 */
2058 	return anon_vma;
2059 }
2060 
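/* Does the backing wish to be notified when a page is first written to? */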
2061 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
2062 {
2063 	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
2064 }
2065 
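/* Is this VMA both shared (MAP_SHARED) and writable? */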
2066 static bool vma_is_shared_writable(struct vm_area_struct *vma)
2067 {
2068 	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
2069 		(VM_WRITE | VM_SHARED);
2070 }
2071 
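/* Can the mapping backing this VMA write dirty folios back to storage? */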
2072 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
2073 {
2074 	/* No managed pages to writeback. */
2075 	/* No managed pages to write back. */
2076 		return false;
2077 
2078 	return vma->vm_file && vma->vm_file->f_mapping &&
2079 		mapping_can_writeback(vma->vm_file->f_mapping);
2080 }
2081 
2082 /*
2083  * Does this VMA require the underlying folios to have their dirty state
2084  * tracked?
2085  */
2086 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
2087 {
2088 	/* Only shared, writable VMAs require dirty tracking. */
2089 	if (!vma_is_shared_writable(vma))
2090 		return false;
2091 
2092 	/* Does the filesystem need to be notified? */
2093 	if (vm_ops_needs_writenotify(vma->vm_ops))
2094 		return true;
2095 
2096 	/*
2097 	 * Even if the filesystem doesn't indicate a need for writenotify, if it
2098 	 * can write back, dirty tracking is still required.
2099 	 */
2100 	return vma_fs_can_writeback(vma);
2101 }
2102 
2103 /*
2104  * Some shared mappings will want the pages marked read-only
2105  * to track write events. If so, we'll downgrade vm_page_prot
2106  * to the private version (using protection_map[] without the
2107  * VM_SHARED bit).
2108  */
2109 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
2110 {
2111 	/* If it was private or non-writable, the write bit is already clear */
2112 	if (!vma_is_shared_writable(vma))
2113 		return false;
2114 
2115 	/* The backer wishes to know when pages are first written to? */
2116 	if (vm_ops_needs_writenotify(vma->vm_ops))
2117 		return true;
2118 
2119 	/* The open routine did something to the protections that pgprot_modify
2120 	 * won't preserve? */
2121 	if (pgprot_val(vm_page_prot) !=
2122 	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
2123 		return false;
2124 
2125 	/*
2126 	 * Do we need to track softdirty? hugetlb does not support softdirty
2127 	 * tracking yet.
2128 	 */
2129 	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
2130 		return true;
2131 
2132 	/* Do we need write faults for uffd-wp tracking? */
2133 	if (userfaultfd_wp(vma))
2134 		return true;
2135 
2136 	/* Can the mapping track the dirty pages? */
2137 	return vma_fs_can_writeback(vma);
2138 }
2139 
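/* Serialises concurrent mm_take_all_locks() callers system-wide. */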
2140 static DEFINE_MUTEX(mm_all_locks_mutex);
2141 
2142 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2143 {
2144 	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2145 		/*
2146 		 * The LSB of the rb_root pointer can't change from under us
2147 		 * because we hold the mm_all_locks_mutex.
2148 		 */
2149 		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
2150 		/*
2151 		 * We can safely set the bit after taking the
2152 		 * anon_vma->root->rwsem. If some other vma in this mm shares
2153 		 * the same anon_vma we won't take it again.
2154 		 *
2155 		 * No need for atomic instructions here; the pointer
2156 		 * can't change from under us thanks to the
2157 		 * anon_vma->root->rwsem.
2158 		 */
2159 		if (__test_and_set_bit(0, (unsigned long *)
2160 				       &anon_vma->root->rb_root.rb_root.rb_node))
2161 			BUG();
2162 	}
2163 }
2164 
2165 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2166 {
2167 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2168 		/*
2169 		 * AS_MM_ALL_LOCKS can't change from under us because
2170 		 * we hold the mm_all_locks_mutex.
2171 		 *
2172 		 * Operations on ->flags have to be atomic because
2173 		 * even if AS_MM_ALL_LOCKS is stable thanks to the
2174 		 * mm_all_locks_mutex, there may be other cpus
2175 		 * changing other bitflags in parallel to us.
2176 		 */
2177 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2178 			BUG();
2179 		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
2180 	}
2181 }
2182 
2183 /*
2184  * This operation locks against the VM for all pte/vma/mm related
2185  * operations that could ever happen on a certain mm. This includes
2186  * vmtruncate, try_to_unmap, and all page faults.
2187  *
2188  * The caller must take the mmap_lock in write mode before calling
2189  * mm_take_all_locks(). The caller isn't allowed to release the
2190  * mmap_lock until mm_drop_all_locks() returns.
2191  *
2192  * mmap_lock in write mode is required in order to block all operations
2193  * that could modify pagetables and free pages without needing to
2194  * alter the vma layout. It's also needed in write mode to prevent new
2195  * anon_vmas from being associated with existing vmas.
2196  *
2197  * A single task can't take more than one mm_take_all_locks() in a row
2198  * or it would deadlock.
2199  *
2200  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
2201  * mapping->flags ensure we never take the same lock twice if more than one
2202  * vma in this mm is backed by the same anon_vma or address_space.
2203  *
2204  * We take locks in the following order, according to the comment at the
2205  * beginning of mm/rmap.c:
2206  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
2207  *     hugetlb mappings);
2208  *   - all vmas marked locked;
2209  *   - all i_mmap_rwsem locks;
2210  *   - all anon_vma->rwsem locks.
2211  *
2212  * We can take all locks within these types in any order, because the VM
2213  * code doesn't nest them and we are protected from parallel
2214  * mm_take_all_locks() by mm_all_locks_mutex.
2215  *
2216  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2217  * that may have to take thousands of locks.
2218  *
2219  * mm_take_all_locks() can fail if it's interrupted by signals.
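 *
 * A minimal usage sketch (hypothetical caller; error handling elided):
 *
 *	mmap_write_lock(mm);
 *	if (!mm_take_all_locks(mm)) {
 *		...all vmas, i_mmap trees and anon_vmas are now locked...
 *		mm_drop_all_locks(mm);
 *	}
 *	mmap_write_unlock(mm);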
2220  */
2221 int mm_take_all_locks(struct mm_struct *mm)
2222 {
2223 	struct vm_area_struct *vma;
2224 	struct anon_vma_chain *avc;
2225 	VMA_ITERATOR(vmi, mm, 0);
2226 
2227 	mmap_assert_write_locked(mm);
2228 
2229 	mutex_lock(&mm_all_locks_mutex);
2230 
2231 	/*
2232 	 * vma_start_write() does not have a complement in mm_drop_all_locks()
2233 	 * because vma_start_write() is always asymmetrical; it marks a VMA as
2234 	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
2235 	 * is reached.
2236 	 */
2237 	for_each_vma(vmi, vma) {
2238 		if (signal_pending(current))
2239 			goto out_unlock;
2240 		vma_start_write(vma);
2241 	}
2242 
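	/* Per the ordering documented above, hugetlb i_mmap locks come first. */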
2243 	vma_iter_init(&vmi, mm, 0);
2244 	for_each_vma(vmi, vma) {
2245 		if (signal_pending(current))
2246 			goto out_unlock;
2247 		if (vma->vm_file && vma->vm_file->f_mapping &&
2248 				is_vm_hugetlb_page(vma))
2249 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
2250 	}
2251 
2252 	vma_iter_init(&vmi, mm, 0);
2253 	for_each_vma(vmi, vma) {
2254 		if (signal_pending(current))
2255 			goto out_unlock;
2256 		if (vma->vm_file && vma->vm_file->f_mapping &&
2257 				!is_vm_hugetlb_page(vma))
2258 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
2259 	}
2260 
2261 	vma_iter_init(&vmi, mm, 0);
2262 	for_each_vma(vmi, vma) {
2263 		if (signal_pending(current))
2264 			goto out_unlock;
2265 		if (vma->anon_vma)
2266 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2267 				vm_lock_anon_vma(mm, avc->anon_vma);
2268 	}
2269 
2270 	return 0;
2271 
2272 out_unlock:
2273 	mm_drop_all_locks(mm);
2274 	return -EINTR;
2275 }
2276 
2277 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2278 {
2279 	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2280 		/*
2281 		 * The LSB of the rb_root pointer can't change to 0 from under
2282 		 * us because we hold the mm_all_locks_mutex.
2283 		 *
2284 		 * We must however clear the bitflag before unlocking
2285 		 * the vma so that users of the anon_vma->rb_root will
2286 		 * never see our bitflag.
2287 		 *
2288 		 * No need for atomic instructions here; the pointer
2289 		 * can't change from under us until we release the
2290 		 * anon_vma->root->rwsem.
2291 		 */
2292 		if (!__test_and_clear_bit(0, (unsigned long *)
2293 					  &anon_vma->root->rb_root.rb_root.rb_node))
2294 			BUG();
2295 		anon_vma_unlock_write(anon_vma);
2296 	}
2297 }
2298 
2299 static void vm_unlock_mapping(struct address_space *mapping)
2300 {
2301 	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2302 		/*
2303 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
2304 		 * because we hold the mm_all_locks_mutex.
2305 		 */
2306 		i_mmap_unlock_write(mapping);
2307 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2308 					&mapping->flags))
2309 			BUG();
2310 	}
2311 }
2312 
2313 /*
2314  * The mmap_lock cannot be released by the caller until
2315  * mm_drop_all_locks() returns.
2316  */
2317 void mm_drop_all_locks(struct mm_struct *mm)
2318 {
2319 	struct vm_area_struct *vma;
2320 	struct anon_vma_chain *avc;
2321 	VMA_ITERATOR(vmi, mm, 0);
2322 
2323 	mmap_assert_write_locked(mm);
2324 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2325 
2326 	for_each_vma(vmi, vma) {
2327 		if (vma->anon_vma)
2328 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2329 				vm_unlock_anon_vma(avc->anon_vma);
2330 		if (vma->vm_file && vma->vm_file->f_mapping)
2331 			vm_unlock_mapping(vma->vm_file->f_mapping);
2332 	}
2333 
2334 	mutex_unlock(&mm_all_locks_mutex);
2335 }
2336 
2337 /*
2338  * We account for memory if it's a private writable mapping that is not
2339  * backed by hugepages and VM_NORESERVE wasn't set.
2340  */
2341 static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
2342 {
2343 	/*
2344 	 * hugetlb has its own accounting separate from the core VM.
2345 	 * VM_HUGETLB may not be set yet, so we cannot check for that flag.
2346 	 */
2347 	if (file && is_file_hugepages(file))
2348 		return false;
2349 
2350 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
2351 }
2352 
2353 /*
2354  * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
2355  * operation.
2356  * @vms: The vma unmap structure
2357  * @mas_detach: The maple state with the detached maple tree
2358  *
2359  * Reattach any detached vmas, free up the maple tree used to track the vmas.
2360  * If that's not possible because the ptes are cleared (and vm_ops->closed() may
2361  * If that's not possible because the ptes are cleared (and vm_ops->close() may
2362  * removed (munmap() completed).
2363  */
2364 static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
2365 		struct ma_state *mas_detach)
2366 {
2367 	struct ma_state *mas = &vms->vmi->mas;
2368 
2369 	if (!vms->nr_pages)
2370 		return;
2371 
2372 	if (vms->clear_ptes)
2373 		return reattach_vmas(mas_detach);
2374 
2375 	/*
2376 	 * Aborting cannot just call the vm_ops open() because they are often
2377 	 * not symmetrical and state data has been lost.  Resort to the old
2378 	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
2379 	 */
2380 	mas_set_range(mas, vms->start, vms->end - 1);
2381 	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
2382 	/* Clean up the insertion of the unfortunate gap */
2383 	vms_complete_munmap_vmas(vms, mas_detach);
2384 }
2385 
2386 static void update_ksm_flags(struct mmap_state *map)
2387 {
2388 	map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags);
2389 }
2390 
2391 static void set_desc_from_map(struct vm_area_desc *desc,
2392 		const struct mmap_state *map)
2393 {
2394 	desc->start = map->addr;
2395 	desc->end = map->end;
2396 
2397 	desc->pgoff = map->pgoff;
2398 	desc->vm_file = map->file;
2399 	desc->vma_flags = map->vma_flags;
2400 	desc->page_prot = map->page_prot;
2401 }
2402 
2403 /*
2404  * __mmap_setup() - Prepare to gather any overlapping VMAs that need to be
2405  * unmapped once the map operation is completed, check limits, account mapping
2406  * and clean up any pre-existing VMAs.
2407  *
2408  * As a result it sets up the @map and @desc objects.
2409  *
2410  * @map: Mapping state.
2411  * @desc: VMA descriptor
2412  * @uf:  Userfaultfd context list.
2413  *
2414  * Returns: 0 on success, error code otherwise.
2415  */
2416 static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc,
2417 			struct list_head *uf)
2418 {
2419 	int error;
2420 	struct vma_iterator *vmi = map->vmi;
2421 	struct vma_munmap_struct *vms = &map->vms;
2422 
2423 	/* Find the first overlapping VMA and initialise unmap state. */
2424 	vms->vma = vma_find(vmi, map->end);
2425 	init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
2426 			/* unlock = */ false);
2427 
2428 	/* OK, we have overlapping VMAs - prepare to unmap them. */
2429 	if (vms->vma) {
2430 		mt_init_flags(&map->mt_detach,
2431 			      vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2432 		mt_on_stack(map->mt_detach);
2433 		mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
2434 		/* Prepare to unmap any existing mapping in the area */
2435 		error = vms_gather_munmap_vmas(vms, &map->mas_detach);
2436 		if (error) {
2437 			/* On error VMAs will already have been reattached. */
2438 			vms->nr_pages = 0;
2439 			return error;
2440 		}
2441 
2442 		map->next = vms->next;
2443 		map->prev = vms->prev;
2444 	} else {
2445 		map->next = vma_iter_next_rewind(vmi, &map->prev);
2446 	}
2447 
2448 	/* Check against address space limit. */
2449 	if (!may_expand_vm(map->mm, map->vm_flags, map->pglen - vms->nr_pages))
2450 		return -ENOMEM;
2451 
2452 	/* Private writable mapping: check memory availability. */
2453 	if (accountable_mapping(map->file, map->vm_flags)) {
2454 		map->charged = map->pglen;
2455 		map->charged -= vms->nr_accounted;
2456 		if (map->charged) {
2457 			error = security_vm_enough_memory_mm(map->mm, map->charged);
2458 			if (error)
2459 				return error;
2460 		}
2461 
2462 		vms->nr_accounted = 0;
2463 		map->vm_flags |= VM_ACCOUNT;
2464 	}
2465 
2466 	/*
2467 	 * Clear PTEs while the vma is still in the tree so that rmap
2468 	 * cannot race with the freeing later in the truncate scenario.
2469 	 * This is also needed for mmap_file(), which is why the vm_ops
2470 	 * close function is called.
2471 	 */
2472 	vms_clean_up_area(vms, &map->mas_detach);
2473 
2474 	set_desc_from_map(desc, map);
2475 	return 0;
2476 }
2477 
2479 static int __mmap_new_file_vma(struct mmap_state *map,
2480 			       struct vm_area_struct *vma)
2481 {
2482 	struct vma_iterator *vmi = map->vmi;
2483 	int error;
2484 
2485 	vma->vm_file = map->file;
2486 	if (!map->file_doesnt_need_get)
2487 		get_file(map->file);
2488 
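	/* No legacy ->mmap hook (e.g. the file only uses ->mmap_prepare). */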
2489 	if (!map->file->f_op->mmap)
2490 		return 0;
2491 
2492 	error = mmap_file(vma->vm_file, vma);
2493 	if (error) {
2494 		UNMAP_STATE(unmap, vmi, vma, vma->vm_start, vma->vm_end,
2495 			    map->prev, map->next);
2496 		fput(vma->vm_file);
2497 		vma->vm_file = NULL;
2498 
2499 		vma_iter_set(vmi, vma->vm_end);
2500 		/* Undo any partial mapping done by a device driver. */
2501 		unmap_region(&unmap);
2502 		return error;
2503 	}
2504 
2505 	/* Drivers cannot alter the address of the VMA. */
2506 	WARN_ON_ONCE(map->addr != vma->vm_start);
2507 	/*
2508 	 * Drivers should not permit writability when previously it was
2509 	 * disallowed.
2510 	 */
2511 	VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags &&
2512 			!(map->vm_flags & VM_MAYWRITE) &&
2513 			(vma->vm_flags & VM_MAYWRITE));
2514 
2515 	map->file = vma->vm_file;
2516 	map->vm_flags = vma->vm_flags;
2517 
2518 	return 0;
2519 }
2520 
2521 /*
2522  * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
2523  * possible.
2524  *
2525  * @map:  Mapping state.
2526  * @vmap: Output pointer for the new VMA.
2527  *
2528  * Returns: Zero on success, or an error.
2529  */
2530 static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
2531 {
2532 	struct vma_iterator *vmi = map->vmi;
2533 	int error = 0;
2534 	struct vm_area_struct *vma;
2535 
2536 	/*
2537 	 * Determine the object being mapped and call the appropriate
2538 	 * specific mapper. The address has already been validated, but
2539 	 * not unmapped; any overlapping mappings have already been detached.
2540 	 */
2541 	vma = vm_area_alloc(map->mm);
2542 	if (!vma)
2543 		return -ENOMEM;
2544 
2545 	vma_iter_config(vmi, map->addr, map->end);
2546 	vma_set_range(vma, map->addr, map->end, map->pgoff);
2547 	vm_flags_init(vma, map->vm_flags);
2548 	vma->vm_page_prot = map->page_prot;
2549 
2550 	if (vma_iter_prealloc(vmi, vma)) {
2551 		error = -ENOMEM;
2552 		goto free_vma;
2553 	}
2554 
2555 	if (map->file)
2556 		error = __mmap_new_file_vma(map, vma);
2557 	else if (map->vm_flags & VM_SHARED)
2558 		error = shmem_zero_setup(vma);
2559 	else
2560 		vma_set_anonymous(vma);
2561 
2562 	if (error)
2563 		goto free_iter_vma;
2564 
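	/*
	 * If KSM eligibility could not be determined early (a driver ->mmap
	 * hook may change VMA flags), apply the KSM flags now that the flags
	 * are final.
	 */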
2565 	if (!map->check_ksm_early) {
2566 		update_ksm_flags(map);
2567 		vm_flags_init(vma, map->vm_flags);
2568 	}
2569 
2570 #ifdef CONFIG_SPARC64
2571 	/* TODO: Fix SPARC ADI! */
2572 	WARN_ON_ONCE(!arch_validate_flags(map->vm_flags));
2573 #endif
2574 
2575 	/* Lock the VMA since it is modified after insertion into VMA tree */
2576 	vma_start_write(vma);
2577 	vma_iter_store_new(vmi, vma);
2578 	map->mm->map_count++;
2579 	vma_link_file(vma, map->hold_file_rmap_lock);
2580 
2581 	/*
2582 	 * vma_merge_new_range() calls khugepaged_enter_vma() too; the call
2583 	 * below covers the non-merge case.
2584 	 */
2585 	if (!vma_is_anonymous(vma))
2586 		khugepaged_enter_vma(vma, map->vm_flags);
2587 	*vmap = vma;
2588 	return 0;
2589 
2590 free_iter_vma:
2591 	vma_iter_free(vmi);
2592 free_vma:
2593 	vm_area_free(vma);
2594 	return error;
2595 }
2596 
2597 /*
2598  * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
2599  *                     statistics, handle locking and finalise the VMA.
2600  *
2601  * @map: Mapping state.
2602  * @vma: Merged or newly allocated VMA for the mmap()'d region.
2603  */
2604 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
2605 {
2606 	struct mm_struct *mm = map->mm;
2607 	vm_flags_t vm_flags = vma->vm_flags;
2608 
2609 	perf_event_mmap(vma);
2610 
2611 	/* Unmap any existing mapping in the area. */
2612 	vms_complete_munmap_vmas(&map->vms, &map->mas_detach);
2613 
2614 	vm_stat_account(mm, vma->vm_flags, map->pglen);
2615 	if (vm_flags & VM_LOCKED) {
2616 		if (!vma_supports_mlock(vma))
2617 			vm_flags_clear(vma, VM_LOCKED_MASK);
2618 		else
2619 			mm->locked_vm += map->pglen;
2620 	}
2621 
2622 	if (vma->vm_file)
2623 		uprobe_mmap(vma);
2624 
2625 	/*
2626 	 * A new (or expanded) vma always gets soft-dirty status.
2627 	 * Otherwise the user-space soft-dirty page tracker could not
2628 	 * distinguish the case where a vma area was unmapped and a new
2629 	 * one then mapped in its place (which must be treated as a
2630 	 * completely new data area).
2631 	 */
2632 	if (pgtable_supports_soft_dirty())
2633 		vm_flags_set(vma, VM_SOFTDIRTY);
2634 
2635 	vma_set_page_prot(vma);
2636 }
2637 
2638 static void call_action_prepare(struct mmap_state *map,
2639 				struct vm_area_desc *desc)
2640 {
2641 	struct mmap_action *action = &desc->action;
2642 
2643 	mmap_action_prepare(action, desc);
2644 
2645 	if (action->hide_from_rmap_until_complete)
2646 		map->hold_file_rmap_lock = true;
2647 }
2648 
2649 /*
2650  * Invoke the f_op->mmap_prepare() callback for a file-backed mapping that
2651  * specifies it.
2652  *
2653  * This is called prior to any merge attempt, and applies updates to the
2654  * whitelisted fields which the hook is permitted to change.
2655  *
2656  * All but user-defined fields will be pre-populated with original values.
2657  *
2658  * Returns 0 on success, or an error code otherwise.
2659  */
2660 static int call_mmap_prepare(struct mmap_state *map,
2661 		struct vm_area_desc *desc)
2662 {
2663 	int err;
2664 
2665 	/* Invoke the hook. */
2666 	err = vfs_mmap_prepare(map->file, desc);
2667 	if (err)
2668 		return err;
2669 
2670 	call_action_prepare(map, desc);
2671 
2672 	/* Update fields permitted to be changed. */
2673 	map->pgoff = desc->pgoff;
2674 	if (desc->vm_file != map->file) {
2675 		map->file_doesnt_need_get = true;
2676 		map->file = desc->vm_file;
2677 	}
2678 	map->vma_flags = desc->vma_flags;
2679 	map->page_prot = desc->page_prot;
2680 	/* User-defined fields. */
2681 	map->vm_ops = desc->vm_ops;
2682 	map->vm_private_data = desc->private_data;
2683 
2684 	return 0;
2685 }
2686 
2687 static void set_vma_user_defined_fields(struct vm_area_struct *vma,
2688 		struct mmap_state *map)
2689 {
2690 	if (map->vm_ops)
2691 		vma->vm_ops = map->vm_ops;
2692 	vma->vm_private_data = map->vm_private_data;
2693 }
2694 
2695 /*
2696  * Are we guaranteed no driver can change state such as to preclude KSM merging?
2697  * If so, let's set the KSM mergeable flag early so we don't break VMA merging.
2698  */
2699 static bool can_set_ksm_flags_early(struct mmap_state *map)
2700 {
2701 	struct file *file = map->file;
2702 
2703 	/* Anonymous mappings have no driver which can change them. */
2704 	if (!file)
2705 		return true;
2706 
2707 	/*
2708 	 * If .mmap_prepare() is specified, then the driver will have already
2709 	 * manipulated state prior to updating KSM flags. So no need to worry
2710 	 * about mmap callbacks modifying VMA flags after the KSM flag has been
2711 	 * updated here, which could otherwise affect KSM eligibility.
2712 	 */
2713 	if (file->f_op->mmap_prepare)
2714 		return true;
2715 
2716 	/* shmem is safe. */
2717 	if (shmem_file(file))
2718 		return true;
2719 
2720 	/* Any other .mmap callback is not safe. */
2721 	return false;
2722 }
2723 
2724 static int call_action_complete(struct mmap_state *map,
2725 				struct vm_area_desc *desc,
2726 				struct vm_area_struct *vma)
2727 {
2728 	struct mmap_action *action = &desc->action;
2729 	int ret;
2730 
2731 	ret = mmap_action_complete(action, vma);
2732 
2733 	/* If we held the file rmap lock, we need to release it. */
2734 	if (map->hold_file_rmap_lock) {
2735 		struct file *file = vma->vm_file;
2736 
2737 		i_mmap_unlock_write(file->f_mapping);
2738 	}
2739 	return ret;
2740 }
2741 
2742 static unsigned long __mmap_region(struct file *file, unsigned long addr,
2743 		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2744 		struct list_head *uf)
2745 {
2746 	struct mm_struct *mm = current->mm;
2747 	struct vm_area_struct *vma = NULL;
2748 	bool have_mmap_prepare = file && file->f_op->mmap_prepare;
2749 	VMA_ITERATOR(vmi, mm, addr);
2750 	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
2751 	struct vm_area_desc desc = {
2752 		.mm = mm,
2753 		.file = file,
2754 		.action = {
2755 			.type = MMAP_NOTHING, /* Default to no further action. */
2756 		},
2757 	};
2758 	bool allocated_new = false;
2759 	int error;
2760 
2761 	map.check_ksm_early = can_set_ksm_flags_early(&map);
2762 
2763 	error = __mmap_setup(&map, &desc, uf);
2764 	if (!error && have_mmap_prepare)
2765 		error = call_mmap_prepare(&map, &desc);
2766 	if (error)
2767 		goto abort_munmap;
2768 
2769 	if (map.check_ksm_early)
2770 		update_ksm_flags(&map);
2771 
2772 	/* Attempt to merge with adjacent VMAs... */
2773 	if (map.prev || map.next) {
2774 		VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);
2775 
2776 		vma = vma_merge_new_range(&vmg);
2777 	}
2778 
2779 	/* ...but if we can't, allocate a new VMA. */
2780 	if (!vma) {
2781 		error = __mmap_new_vma(&map, &vma);
2782 		if (error)
2783 			goto unacct_error;
2784 		allocated_new = true;
2785 	}
2786 
2787 	if (have_mmap_prepare)
2788 		set_vma_user_defined_fields(vma, &map);
2789 
2790 	__mmap_complete(&map, vma);
2791 
2792 	if (have_mmap_prepare && allocated_new) {
2793 		error = call_action_complete(&map, &desc, vma);
2794 
2795 		if (error)
2796 			return error;
2797 	}
2798 
2799 	return addr;
2800 
2801 	/* Accounting was done by __mmap_setup(). */
2802 unacct_error:
2803 	if (map.charged)
2804 		vm_unacct_memory(map.charged);
2805 abort_munmap:
2806 	vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
2807 	return error;
2808 }
2809 
2810 /**
2811  * mmap_region() - Actually perform the userland mapping of a VMA into
2812  * current->mm with known, aligned and overflow-checked @addr and @len, and
2813  * correctly determined VMA flags @vm_flags and page offset @pgoff.
2814  *
2815  * This is an internal memory management function, and should not be used
2816  * directly.
2817  *
2818  * The caller must write-lock current->mm->mmap_lock.
2819  *
2820  * @file: If a file-backed mapping, a pointer to the struct file describing the
2821  * file to be mapped, otherwise NULL.
2822  * @addr: The page-aligned address at which to perform the mapping.
2823  * @len: The page-aligned, non-zero, length of the mapping.
2824  * @vm_flags: The VMA flags which should be applied to the mapping.
2825  * @pgoff: If @file is specified, the page offset into the file, if not then
2826  * the virtual page offset in memory of the anonymous mapping.
2827  * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap
2828  * events.
2829  *
2830  * Returns: Either an error, or the address at which the requested mapping has
2831  * been performed.
2832  */
2833 unsigned long mmap_region(struct file *file, unsigned long addr,
2834 			  unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2835 			  struct list_head *uf)
2836 {
2837 	unsigned long ret;
2838 	bool writable_file_mapping = false;
2839 
2840 	mmap_assert_write_locked(current->mm);
2841 
2842 	/* Check to see if MDWE is applicable. */
2843 	if (map_deny_write_exec(vm_flags, vm_flags))
2844 		return -EACCES;
2845 
2846 	/* Allow architectures to sanity-check the vm_flags. */
2847 	if (!arch_validate_flags(vm_flags))
2848 		return -EINVAL;
2849 
2850 	/* Map writable and ensure this isn't a sealed memfd. */
2851 	if (file && is_shared_maywrite_vm_flags(vm_flags)) {
2852 		int error = mapping_map_writable(file->f_mapping);
2853 
2854 		if (error)
2855 			return error;
2856 		writable_file_mapping = true;
2857 	}
2858 
2859 	ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
2860 
2861 	/* Clear our write mapping regardless of error. */
2862 	if (writable_file_mapping)
2863 		mapping_unmap_writable(file->f_mapping);
2864 
2865 	validate_mm(current->mm);
2866 	return ret;
2867 }
2868 
2869 /*
2870  * do_brk_flags() - Increase the brk vma if the flags match.
2871  * @vmi: The vma iterator
2872  * @addr: The start address
2873  * @len: The length of the increase
2874  * @vma: The vma
2875  * @vm_flags: The VMA Flags
2876  *
2877  * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
2878  * do not match then create a new anonymous VMA.  Eventually we may be able to
2879  * do some brk-specific accounting here.
2880  */
2881 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
2882 		 unsigned long addr, unsigned long len, vm_flags_t vm_flags)
2883 {
2884 	struct mm_struct *mm = current->mm;
2885 
2886 	/*
2887 	 * Check against address space limits using the changed size.
2888 	 * Note: This happens *after* clearing old mappings in some code paths.
2889 	 */
2890 	vm_flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2891 	vm_flags = ksm_vma_flags(mm, NULL, vm_flags);
2892 	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT))
2893 		return -ENOMEM;
2894 
2895 	if (mm->map_count > get_sysctl_max_map_count())
2896 		return -ENOMEM;
2897 
2898 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2899 		return -ENOMEM;
2900 
2901 	/*
2902 	 * Expand the existing vma if possible. Note that singular lists do not
2903 	 * occur after forking, so the expand will only happen on new VMAs.
2904 	 */
2905 	if (vma && vma->vm_end == addr) {
2906 		VMG_STATE(vmg, mm, vmi, addr, addr + len, vm_flags, PHYS_PFN(addr));
2907 
2908 		vmg.prev = vma;
2909 		/* vmi is positioned at prev, which this mode expects. */
2910 		vmg.just_expand = true;
2911 
2912 		if (vma_merge_new_range(&vmg))
2913 			goto out;
2914 		else if (vmg_nomem(&vmg))
2915 			goto unacct_fail;
2916 	}
2917 
2918 	if (vma)
2919 		vma_iter_next_range(vmi);
2920 	/* create a vma struct for an anonymous mapping */
2921 	vma = vm_area_alloc(mm);
2922 	if (!vma)
2923 		goto unacct_fail;
2924 
2925 	vma_set_anonymous(vma);
2926 	vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
2927 	vm_flags_init(vma, vm_flags);
2928 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
2929 	vma_start_write(vma);
2930 	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
2931 		goto mas_store_fail;
2932 
2933 	mm->map_count++;
2934 	validate_mm(mm);
2935 out:
2936 	perf_event_mmap(vma);
2937 	mm->total_vm += len >> PAGE_SHIFT;
2938 	mm->data_vm += len >> PAGE_SHIFT;
2939 	if (vm_flags & VM_LOCKED)
2940 		mm->locked_vm += (len >> PAGE_SHIFT);
2941 	if (pgtable_supports_soft_dirty())
2942 		vm_flags_set(vma, VM_SOFTDIRTY);
2943 	return 0;
2944 
2945 mas_store_fail:
2946 	vm_area_free(vma);
2947 unacct_fail:
2948 	vm_unacct_memory(len >> PAGE_SHIFT);
2949 	return -ENOMEM;
2950 }
2951 
2952 /**
2953  * unmapped_area() - Find an area between the low_limit and the high_limit with
2954  * the correct alignment and offset, all from @info. Note: current->mm is used
2955  * for the search.
2956  *
2957  * @info: The unmapped area information including the range [low_limit -
2958  * high_limit), the alignment offset and mask.
2959  *
2960  * Return: A memory address or -ENOMEM.
2961  */
2962 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
2963 {
2964 	unsigned long length, gap;
2965 	unsigned long low_limit, high_limit;
2966 	struct vm_area_struct *tmp;
2967 	VMA_ITERATOR(vmi, current->mm, 0);
2968 
2969 	/* Adjust search length to account for worst case alignment overhead */
2970 	length = info->length + info->align_mask + info->start_gap;
2971 	if (length < info->length)
2972 		return -ENOMEM;
2973 
2974 	low_limit = info->low_limit;
2975 	if (low_limit < mmap_min_addr)
2976 		low_limit = mmap_min_addr;
2977 	high_limit = info->high_limit;
2978 retry:
2979 	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
2980 		return -ENOMEM;
2981 
2982 	/*
2983 	 * Adjust for the gap first so it doesn't interfere with the later
2984 	 * alignment. The first step is the minimum needed to fulfill the start
2985 	 * gap, the next step is the minimum to align that. Together they are
2986 	 * the minimum needed to fulfill both.
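	 *
	 * E.g. (hypothetical values): lowest fit at 0x1000 with start_gap
	 * 0x200, align_mask 0xfff and align_offset 0 gives gap = 0x1200,
	 * which then rounds up to 0x2000.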
2987 	 */
2988 	gap = vma_iter_addr(&vmi) + info->start_gap;
2989 	gap += (info->align_offset - gap) & info->align_mask;
2990 	tmp = vma_next(&vmi);
2991 	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
2992 		if (vm_start_gap(tmp) < gap + length - 1) {
2993 			low_limit = tmp->vm_end;
2994 			vma_iter_reset(&vmi);
2995 			goto retry;
2996 		}
2997 	} else {
2998 		tmp = vma_prev(&vmi);
2999 		if (tmp && vm_end_gap(tmp) > gap) {
3000 			low_limit = vm_end_gap(tmp);
3001 			vma_iter_reset(&vmi);
3002 			goto retry;
3003 		}
3004 	}
3005 
3006 	return gap;
3007 }
3008 
3009 /**
3010  * unmapped_area_topdown() - Find an area between the low_limit and the
3011  * high_limit with the correct alignment and offset at the highest available
3012  * address, all from @info. Note: current->mm is used for the search.
3013  *
3014  * @info: The unmapped area information including the range [low_limit -
3015  * high_limit), the alignment offset and mask.
3016  *
3017  * Return: A memory address or -ENOMEM.
3018  */
3019 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
3020 {
3021 	unsigned long length, gap, gap_end;
3022 	unsigned long low_limit, high_limit;
3023 	struct vm_area_struct *tmp;
3024 	VMA_ITERATOR(vmi, current->mm, 0);
3025 
3026 	/* Adjust search length to account for worst case alignment overhead */
3027 	length = info->length + info->align_mask + info->start_gap;
3028 	if (length < info->length)
3029 		return -ENOMEM;
3030 
3031 	low_limit = info->low_limit;
3032 	if (low_limit < mmap_min_addr)
3033 		low_limit = mmap_min_addr;
3034 	high_limit = info->high_limit;
3035 retry:
3036 	if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
3037 		return -ENOMEM;
3038 
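	/* Take the highest fitting gap, then round down to the alignment. */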
3039 	gap = vma_iter_end(&vmi) - info->length;
3040 	gap -= (gap - info->align_offset) & info->align_mask;
3041 	gap_end = vma_iter_end(&vmi);
3042 	tmp = vma_next(&vmi);
3043 	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
3044 		if (vm_start_gap(tmp) < gap_end) {
3045 			high_limit = vm_start_gap(tmp);
3046 			vma_iter_reset(&vmi);
3047 			goto retry;
3048 		}
3049 	} else {
3050 		tmp = vma_prev(&vmi);
3051 		if (tmp && vm_end_gap(tmp) > gap) {
3052 			high_limit = tmp->vm_start;
3053 			vma_iter_reset(&vmi);
3054 			goto retry;
3055 		}
3056 	}
3057 
3058 	return gap;
3059 }
3060 
3061 /*
3062  * Verify that the stack growth is acceptable and
3063  * update accounting. This is shared by both the
3064  * grow-up and grow-down cases.
3065  */
3066 static int acct_stack_growth(struct vm_area_struct *vma,
3067 			     unsigned long size, unsigned long grow)
3068 {
3069 	struct mm_struct *mm = vma->vm_mm;
3070 	unsigned long new_start;
3071 
3072 	/* address space limit tests */
3073 	if (!may_expand_vm(mm, vma->vm_flags, grow))
3074 		return -ENOMEM;
3075 
3076 	/* Stack limit test */
3077 	if (size > rlimit(RLIMIT_STACK))
3078 		return -ENOMEM;
3079 
3080 	/* mlock limit tests */
3081 	if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, grow << PAGE_SHIFT))
3082 		return -ENOMEM;
3083 
3084 	/* Check to ensure the stack will not grow into a hugetlb-only region */
3085 	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
3086 			vma->vm_end - size;
3087 	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
3088 		return -EFAULT;
3089 
3090 	/*
3091 	 * Overcommit..  This must be the final test, as it will
3092 	 * update security statistics.
3093 	 */
3094 	if (security_vm_enough_memory_mm(mm, grow))
3095 		return -ENOMEM;
3096 
3097 	return 0;
3098 }
3099 
3100 #if defined(CONFIG_STACK_GROWSUP)
3101 /*
3102  * PA-RISC uses this for its stack.
3103  * vma is the last one with address > vma->vm_end.  Have to extend vma.
3104  */
3105 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
3106 {
3107 	struct mm_struct *mm = vma->vm_mm;
3108 	struct vm_area_struct *next;
3109 	unsigned long gap_addr;
3110 	int error = 0;
3111 	VMA_ITERATOR(vmi, mm, vma->vm_start);
3112 
3113 	if (!(vma->vm_flags & VM_GROWSUP))
3114 		return -EFAULT;
3115 
3116 	mmap_assert_write_locked(mm);
3117 
3118 	/* Guard against exceeding limits of the address space. */
3119 	address &= PAGE_MASK;
3120 	if (address >= (TASK_SIZE & PAGE_MASK))
3121 		return -ENOMEM;
3122 	address += PAGE_SIZE;
3123 
3124 	/* Enforce stack_guard_gap */
3125 	gap_addr = address + stack_guard_gap;
3126 
3127 	/* Guard against overflow */
3128 	if (gap_addr < address || gap_addr > TASK_SIZE)
3129 		gap_addr = TASK_SIZE;
3130 
3131 	next = find_vma_intersection(mm, vma->vm_end, gap_addr);
3132 	if (next && vma_is_accessible(next)) {
3133 		if (!(next->vm_flags & VM_GROWSUP))
3134 			return -ENOMEM;
3135 		/* Check that both stack segments have the same anon_vma? */
3136 	}
3137 
3138 	if (next)
3139 		vma_iter_prev_range_limit(&vmi, address);
3140 
3141 	vma_iter_config(&vmi, vma->vm_start, address);
3142 	if (vma_iter_prealloc(&vmi, vma))
3143 		return -ENOMEM;
3144 
3145 	/* We must make sure the anon_vma is allocated. */
3146 	if (unlikely(anon_vma_prepare(vma))) {
3147 		vma_iter_free(&vmi);
3148 		return -ENOMEM;
3149 	}
3150 
3151 	/* Lock the VMA before expanding to prevent concurrent page faults */
3152 	vma_start_write(vma);
3153 	/* We update the anon VMA tree. */
3154 	anon_vma_lock_write(vma->anon_vma);
3155 
3156 	/* Somebody else might have raced and expanded it already */
3157 	if (address > vma->vm_end) {
3158 		unsigned long size, grow;
3159 
3160 		size = address - vma->vm_start;
3161 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
3162 
3163 		error = -ENOMEM;
3164 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
3165 			error = acct_stack_growth(vma, size, grow);
3166 			if (!error) {
3167 				if (vma->vm_flags & VM_LOCKED)
3168 					mm->locked_vm += grow;
3169 				vm_stat_account(mm, vma->vm_flags, grow);
3170 				anon_vma_interval_tree_pre_update_vma(vma);
3171 				vma->vm_end = address;
3172 				/* Overwrite old entry in mtree. */
3173 				vma_iter_store_overwrite(&vmi, vma);
3174 				anon_vma_interval_tree_post_update_vma(vma);
3175 
3176 				perf_event_mmap(vma);
3177 			}
3178 		}
3179 	}
3180 	anon_vma_unlock_write(vma->anon_vma);
3181 	vma_iter_free(&vmi);
3182 	validate_mm(mm);
3183 	return error;
3184 }
3185 #endif /* CONFIG_STACK_GROWSUP */
3186 
3187 /*
3188  * vma is the first one with address < vma->vm_start.  Have to extend vma.
3189  * mmap_lock held for writing.
3190  */
3191 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
3192 {
3193 	struct mm_struct *mm = vma->vm_mm;
3194 	struct vm_area_struct *prev;
3195 	int error = 0;
3196 	VMA_ITERATOR(vmi, mm, vma->vm_start);
3197 
3198 	if (!(vma->vm_flags & VM_GROWSDOWN))
3199 		return -EFAULT;
3200 
3201 	mmap_assert_write_locked(mm);
3202 
3203 	address &= PAGE_MASK;
3204 	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
3205 		return -EPERM;
3206 
3207 	/* Enforce stack_guard_gap */
3208 	prev = vma_prev(&vmi);
3209 	/* Check that both stack segments have the same anon_vma? */
3210 	if (prev) {
3211 		if (!(prev->vm_flags & VM_GROWSDOWN) &&
3212 		    vma_is_accessible(prev) &&
3213 		    (address - prev->vm_end < stack_guard_gap))
3214 			return -ENOMEM;
3215 	}
3216 
3217 	if (prev)
3218 		vma_iter_next_range_limit(&vmi, vma->vm_start);
3219 
3220 	vma_iter_config(&vmi, address, vma->vm_end);
3221 	if (vma_iter_prealloc(&vmi, vma))
3222 		return -ENOMEM;
3223 
3224 	/* We must make sure the anon_vma is allocated. */
3225 	if (unlikely(anon_vma_prepare(vma))) {
3226 		vma_iter_free(&vmi);
3227 		return -ENOMEM;
3228 	}
3229 
3230 	/* Lock the VMA before expanding to prevent concurrent page faults */
3231 	vma_start_write(vma);
3232 	/* We update the anon VMA tree. */
3233 	anon_vma_lock_write(vma->anon_vma);
3234 
3235 	/* Somebody else might have raced and expanded it already */
3236 	if (address < vma->vm_start) {
3237 		unsigned long size, grow;
3238 
3239 		size = vma->vm_end - address;
3240 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
3241 
3242 		error = -ENOMEM;
3243 		if (grow <= vma->vm_pgoff) {
3244 			error = acct_stack_growth(vma, size, grow);
3245 			if (!error) {
3246 				if (vma->vm_flags & VM_LOCKED)
3247 					mm->locked_vm += grow;
3248 				vm_stat_account(mm, vma->vm_flags, grow);
3249 				anon_vma_interval_tree_pre_update_vma(vma);
3250 				vma->vm_start = address;
3251 				vma->vm_pgoff -= grow;
3252 				/* Overwrite old entry in mtree. */
3253 				vma_iter_store_overwrite(&vmi, vma);
3254 				anon_vma_interval_tree_post_update_vma(vma);
3255 
3256 				perf_event_mmap(vma);
3257 			}
3258 		}
3259 	}
3260 	anon_vma_unlock_write(vma->anon_vma);
3261 	vma_iter_free(&vmi);
3262 	validate_mm(mm);
3263 	return error;
3264 }
3265 
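/*
 * Unmap [start, start + len). If @unlock is true and the unmap succeeds,
 * do_vmi_munmap() drops the mmap lock on our behalf; otherwise it is
 * released here.
 */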
3266 int __vm_munmap(unsigned long start, size_t len, bool unlock)
3267 {
3268 	int ret;
3269 	struct mm_struct *mm = current->mm;
3270 	LIST_HEAD(uf);
3271 	VMA_ITERATOR(vmi, mm, start);
3272 
3273 	if (mmap_write_lock_killable(mm))
3274 		return -EINTR;
3275 
3276 	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
3277 	if (ret || !unlock)
3278 		mmap_write_unlock(mm);
3279 
3280 	userfaultfd_unmap_complete(mm, &uf);
3281 	return ret;
3282 }
3283 
3284 /* Insert vm structure into process list sorted by address
3285  * and into the inode's i_mmap tree.  If vm_file is non-NULL
3286  * then i_mmap_rwsem is taken here.
3287  */
3288 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3289 {
3290 	unsigned long charged = vma_pages(vma);
3291 
3293 	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3294 		return -ENOMEM;
3295 
3296 	if ((vma->vm_flags & VM_ACCOUNT) &&
3297 	     security_vm_enough_memory_mm(mm, charged))
3298 		return -ENOMEM;
3299 
3300 	/*
3301 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
3302 	 * until its first write fault, when the page's anon_vma and index
3303 	 * are set.  But set vm_pgoff now to the value it will almost
3304 	 * certainly end up with (unless mremap moves it elsewhere before
3305 	 * that first write fault), so /proc/pid/maps tells a consistent story.
3306 	 *
3307 	 * By setting it to reflect the virtual start address of the
3308 	 * vma, merges and splits can happen in a seamless way, just
3309 	 * using the existing file pgoff checks and manipulations.
3310 	 * Similarly in do_mmap and in do_brk_flags.
3311 	 */
3312 	if (vma_is_anonymous(vma)) {
3313 		BUG_ON(vma->anon_vma);
3314 		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3315 	}
3316 
3317 	if (vma_link(mm, vma)) {
3318 		if (vma->vm_flags & VM_ACCOUNT)
3319 			vm_unacct_memory(charged);
3320 		return -ENOMEM;
3321 	}
3322 
3323 	return 0;
3324 }
3325 
3326 /**
3327  * vma_mmu_pagesize - Default MMU page size granularity for this VMA.
3328  * @vma: The user mapping.
3329  *
3330  * In the common case, the default page size used by the MMU matches the
3331  * default page size used by the kernel (see vma_kernel_pagesize()). On
3332  * architectures where it differs, an architecture-specific 'strong' version
3333  * of this symbol is required.
3334  *
3335  * The default MMU page size is not affected by Transparent Huge Pages
3336  * being in effect, or any usage of larger MMU page sizes (either through
3337  * architectural huge-page mappings or other explicit/implicit coalescing of
3338  * virtual ranges performed by the MMU).
3339  *
3340  * Return: The default MMU page size granularity for this VMA.
3341  */
3342 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
3343 {
3344 	return vma_kernel_pagesize(vma);
3345 }
3346