xref: /linux/mm/vma.c (revision 769669bd9ca4cbae2562d57fe753efdcf17a196d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /*
4  * VMA-specific functions.
5  */
6 
7 #include "vma_internal.h"
8 #include "vma.h"
9 
10 struct mmap_state {
11 	struct mm_struct *mm;
12 	struct vma_iterator *vmi;
13 
14 	unsigned long addr;
15 	unsigned long end;
16 	pgoff_t pgoff;
17 	unsigned long pglen;
18 	union {
19 		vm_flags_t vm_flags;
20 		vma_flags_t vma_flags;
21 	};
22 	struct file *file;
23 	pgprot_t page_prot;
24 
25 	/* User-defined fields, perhaps updated by .mmap_prepare(). */
26 	const struct vm_operations_struct *vm_ops;
27 	void *vm_private_data;
28 
29 	unsigned long charged;
30 
31 	struct vm_area_struct *prev;
32 	struct vm_area_struct *next;
33 
34 	/* Unmapping state. */
35 	struct vma_munmap_struct vms;
36 	struct ma_state mas_detach;
37 	struct maple_tree mt_detach;
38 
39 	/* Determine if we can check KSM flags early in mmap() logic. */
40 	bool check_ksm_early :1;
41 	/* If we are mapping a new VMA, hold the file rmap lock on the mapping. */
42 	bool hold_file_rmap_lock :1;
43 	/* If .mmap_prepare changed the file, we don't need to pin. */
44 	bool file_doesnt_need_get :1;
45 };
46 
47 #define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_) \
48 	struct mmap_state name = {					\
49 		.mm = mm_,						\
50 		.vmi = vmi_,						\
51 		.addr = addr_,						\
52 		.end = (addr_) + (len_),				\
53 		.pgoff = pgoff_,					\
54 		.pglen = PHYS_PFN(len_),				\
55 		.vm_flags = vm_flags_,					\
56 		.file = file_,						\
57 		.page_prot = vm_get_page_prot(vm_flags_),		\
58 	}
59 
60 #define VMG_MMAP_STATE(name, map_, vma_)				\
61 	struct vma_merge_struct name = {				\
62 		.mm = (map_)->mm,					\
63 		.vmi = (map_)->vmi,					\
64 		.start = (map_)->addr,					\
65 		.end = (map_)->end,					\
66 		.vm_flags = (map_)->vm_flags,				\
67 		.pgoff = (map_)->pgoff,					\
68 		.file = (map_)->file,					\
69 		.prev = (map_)->prev,					\
70 		.middle = vma_,						\
71 		.next = (vma_) ? NULL : (map_)->next,			\
72 		.state = VMA_MERGE_START,				\
73 	}
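
/*
 * Illustrative sketch (not code from this file) of how the two helpers above
 * compose on the mmap() path: MMAP_STATE() captures the request up front, and
 * VMG_MMAP_STATE() later derives merge state from it once map.prev and
 * map.next have been looked up. The variable names below are hypothetical:
 *
 *	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
 *	...
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 *	vma = vma_merge_new_range(&vmg);
 */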
74 
75 /* Was this VMA ever forked from a parent, i.e. maybe contains CoW mappings? */
76 static bool vma_is_fork_child(struct vm_area_struct *vma)
77 {
78 	/*
79 	 * The list_is_singular() test avoids merging a VMA cloned from a
80 	 * parent, which would otherwise hurt scalability due to contention on
81 	 * the anon_vma root lock.
82 	 */
83 	return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain);
84 }
85 
86 static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
87 {
88 	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
89 	vma_flags_t diff;
90 
91 	if (!mpol_equal(vmg->policy, vma_policy(vma)))
92 		return false;
93 
94 	diff = vma_flags_diff_pair(&vma->flags, &vmg->vma_flags);
95 	vma_flags_clear_mask(&diff, VMA_IGNORE_MERGE_FLAGS);
96 
97 	if (!vma_flags_empty(&diff))
98 		return false;
99 	if (vma->vm_file != vmg->file)
100 		return false;
101 	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
102 		return false;
103 	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
104 		return false;
105 	return true;
106 }
107 
108 static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
109 {
110 	struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev;
111 	struct vm_area_struct *src = vmg->middle; /* existing merge case. */
112 	struct anon_vma *tgt_anon = tgt->anon_vma;
113 	struct anon_vma *src_anon = vmg->anon_vma;
114 
115 	/*
116 	 * We _can_ have !src with vmg->anon_vma set, via copy_vma(). In this
117 	 * instance we will remove the existing VMA's anon_vmas, so there are no
118 	 * scalability concerns.
119 	 */
120 	VM_WARN_ON(src && src_anon != src->anon_vma);
121 
122 	/* Case 1 - we will dup_anon_vma() from src into tgt. */
123 	if (!tgt_anon && src_anon) {
124 		struct vm_area_struct *copied_from = vmg->copied_from;
125 
126 		if (vma_is_fork_child(src))
127 			return false;
128 		if (vma_is_fork_child(copied_from))
129 			return false;
130 
131 		return true;
132 	}
133 	/* Case 2 - we will simply use tgt's anon_vma. */
134 	if (tgt_anon && !src_anon)
135 		return !vma_is_fork_child(tgt);
136 	/* Case 3 - the anon_vma's are already shared. */
137 	return src_anon == tgt_anon;
138 }
139 
140 /*
141  * init_multi_vma_prep() - Initializer for struct vma_prepare
142  * @vp: The vma_prepare struct
143  * @vma: The vma that will be altered once locked
144  * @vmg: The merge state that will be used to determine adjustment and VMA
145  *       removal.
146  */
147 static void init_multi_vma_prep(struct vma_prepare *vp,
148 				struct vm_area_struct *vma,
149 				struct vma_merge_struct *vmg)
150 {
151 	struct vm_area_struct *adjust;
152 	struct vm_area_struct **remove = &vp->remove;
153 
154 	memset(vp, 0, sizeof(struct vma_prepare));
155 	vp->vma = vma;
156 	vp->anon_vma = vma->anon_vma;
157 
158 	if (vmg && vmg->__remove_middle) {
159 		*remove = vmg->middle;
160 		remove = &vp->remove2;
161 	}
162 	if (vmg && vmg->__remove_next)
163 		*remove = vmg->next;
164 
165 	if (vmg && vmg->__adjust_middle_start)
166 		adjust = vmg->middle;
167 	else if (vmg && vmg->__adjust_next_start)
168 		adjust = vmg->next;
169 	else
170 		adjust = NULL;
171 
172 	vp->adj_next = adjust;
173 	if (!vp->anon_vma && adjust)
174 		vp->anon_vma = adjust->anon_vma;
175 
176 	VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma &&
177 		   vp->anon_vma != adjust->anon_vma);
178 
179 	vp->file = vma->vm_file;
180 	if (vp->file)
181 		vp->mapping = vma->vm_file->f_mapping;
182 
183 	if (vmg && vmg->skip_vma_uprobe)
184 		vp->skip_vma_uprobe = true;
185 }
186 
187 /*
188  * Return true if we can merge this (vma_flags,anon_vma,file,vm_pgoff)
189  * in front of (at a lower virtual address and file offset than) the vma.
190  *
191  * We cannot merge two vmas if they have differently assigned (non-NULL)
192  * anon_vmas, nor if the same anon_vma is assigned but offsets are incompatible.
193  *
194  * We don't check here for the merged mmap wrapping around the end of pagecache
195  * indices (16TB on ia32) because do_mmap() does not permit mmaps which
196  * wrap, nor mmaps which cover the final page at index -1UL.
197  *
198  * We assume the vma may be removed as part of the merge.
199  */
200 static bool can_vma_merge_before(struct vma_merge_struct *vmg)
201 {
202 	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
203 
204 	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
205 	    is_mergeable_anon_vma(vmg, /* merge_next = */ true)) {
206 		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
207 			return true;
208 	}
209 
210 	return false;
211 }
212 
213 /*
214  * Return true if we can merge this (vma_flags,anon_vma,file,vm_pgoff)
215  * beyond (at a higher virtual address and file offset than) the vma.
216  *
217  * We cannot merge two vmas if they have differently assigned (non-NULL)
218  * anon_vmas, nor if the same anon_vma is assigned but offsets are incompatible.
219  *
220  * We assume that vma is not removed as part of the merge.
221  */
222 static bool can_vma_merge_after(struct vma_merge_struct *vmg)
223 {
224 	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
225 	    is_mergeable_anon_vma(vmg, /* merge_next = */ false)) {
226 		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
227 			return true;
228 	}
229 	return false;
230 }
231 
232 static void __vma_link_file(struct vm_area_struct *vma,
233 			    struct address_space *mapping)
234 {
235 	if (vma_is_shared_maywrite(vma))
236 		mapping_allow_writable(mapping);
237 
238 	flush_dcache_mmap_lock(mapping);
239 	vma_interval_tree_insert(vma, &mapping->i_mmap);
240 	flush_dcache_mmap_unlock(mapping);
241 }
242 
243 /*
244  * Requires inode->i_mapping->i_mmap_rwsem
245  */
246 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
247 				      struct address_space *mapping)
248 {
249 	if (vma_is_shared_maywrite(vma))
250 		mapping_unmap_writable(mapping);
251 
252 	flush_dcache_mmap_lock(mapping);
253 	vma_interval_tree_remove(vma, &mapping->i_mmap);
254 	flush_dcache_mmap_unlock(mapping);
255 }
256 
257 /*
258  * vma has some anon_vma assigned, and is already inserted on that
259  * anon_vma's interval trees.
260  *
261  * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
262  * vma must be removed from the anon_vma's interval trees using
263  * anon_vma_interval_tree_pre_update_vma().
264  *
265  * After the update, the vma will be reinserted using
266  * anon_vma_interval_tree_post_update_vma().
267  *
268  * The entire update must be protected by exclusive mmap_lock and by
269  * the root anon_vma's mutex.
270  */
271 static void
272 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
273 {
274 	struct anon_vma_chain *avc;
275 
276 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
277 		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
278 }
279 
280 static void
281 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
282 {
283 	struct anon_vma_chain *avc;
284 
285 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
286 		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
287 }
288 
289 /*
290  * vma_prepare() - Helper function for locking VMAs prior to altering them
291  * @vp: The initialized vma_prepare struct
292  */
293 static void vma_prepare(struct vma_prepare *vp)
294 {
295 	if (vp->file) {
296 		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
297 
298 		if (vp->adj_next)
299 			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
300 				      vp->adj_next->vm_end);
301 
302 		i_mmap_lock_write(vp->mapping);
303 		if (vp->insert && vp->insert->vm_file) {
304 			/*
305 			 * Put into interval tree now, so instantiated pages
306 			 * are visible to arm/parisc __flush_dcache_page
307 			 * throughout; but we cannot insert into address
308 			 * space until vma start or end is updated.
309 			 */
310 			__vma_link_file(vp->insert,
311 					vp->insert->vm_file->f_mapping);
312 		}
313 	}
314 
315 	if (vp->anon_vma) {
316 		anon_vma_lock_write(vp->anon_vma);
317 		anon_vma_interval_tree_pre_update_vma(vp->vma);
318 		if (vp->adj_next)
319 			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
320 	}
321 
322 	if (vp->file) {
323 		flush_dcache_mmap_lock(vp->mapping);
324 		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
325 		if (vp->adj_next)
326 			vma_interval_tree_remove(vp->adj_next,
327 						 &vp->mapping->i_mmap);
328 	}
330 }
331 
332 /*
333  * vma_complete() - Helper function for handling the unlocking after altering
334  * VMAs, or for inserting a VMA.
335  *
336  * @vp: The vma_prepare struct
337  * @vmi: The vma iterator
338  * @mm: The mm_struct
339  */
340 static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
341 			 struct mm_struct *mm)
342 {
343 	if (vp->file) {
344 		if (vp->adj_next)
345 			vma_interval_tree_insert(vp->adj_next,
346 						 &vp->mapping->i_mmap);
347 		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
348 		flush_dcache_mmap_unlock(vp->mapping);
349 	}
350 
351 	if (vp->remove && vp->file) {
352 		__remove_shared_vm_struct(vp->remove, vp->mapping);
353 		if (vp->remove2)
354 			__remove_shared_vm_struct(vp->remove2, vp->mapping);
355 	} else if (vp->insert) {
356 		/*
357 		 * split_vma has split insert from vma, and needs
358 		 * us to insert it before dropping the locks
359 		 * (it may either follow vma or precede it).
360 		 */
361 		vma_iter_store_new(vmi, vp->insert);
362 		mm->map_count++;
363 	}
364 
365 	if (vp->anon_vma) {
366 		anon_vma_interval_tree_post_update_vma(vp->vma);
367 		if (vp->adj_next)
368 			anon_vma_interval_tree_post_update_vma(vp->adj_next);
369 		anon_vma_unlock_write(vp->anon_vma);
370 	}
371 
372 	if (vp->file) {
373 		i_mmap_unlock_write(vp->mapping);
374 
375 		if (!vp->skip_vma_uprobe) {
376 			uprobe_mmap(vp->vma);
377 
378 			if (vp->adj_next)
379 				uprobe_mmap(vp->adj_next);
380 		}
381 	}
382 
383 	if (vp->remove) {
384 again:
385 		vma_mark_detached(vp->remove);
386 		if (vp->file) {
387 			uprobe_munmap(vp->remove, vp->remove->vm_start,
388 				      vp->remove->vm_end);
389 			fput(vp->file);
390 		}
391 		if (vp->remove->anon_vma)
392 			unlink_anon_vmas(vp->remove);
393 		mm->map_count--;
394 		mpol_put(vma_policy(vp->remove));
395 		if (!vp->remove2)
396 			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
397 		vm_area_free(vp->remove);
398 
399 		/*
400 		 * In mprotect's case 6 (see comments on vma_merge),
401 		 * we are removing both mid and next vmas
402 		 */
403 		if (vp->remove2) {
404 			vp->remove = vp->remove2;
405 			vp->remove2 = NULL;
406 			goto again;
407 		}
408 	}
409 	if (vp->insert && vp->file)
410 		uprobe_mmap(vp->insert);
411 }
412 
413 /*
414  * init_vma_prep() - Initializer wrapper for vma_prepare struct
415  * @vp: The vma_prepare struct
416  * @vma: The vma that will be altered once locked
417  */
418 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
419 {
420 	init_multi_vma_prep(vp, vma, NULL);
421 }
422 
423 /*
424  * Can the proposed VMA be merged with the left (previous) VMA taking into
425  * account the start position of the proposed range.
426  */
427 static bool can_vma_merge_left(struct vma_merge_struct *vmg)
429 {
430 	return vmg->prev && vmg->prev->vm_end == vmg->start &&
431 		can_vma_merge_after(vmg);
432 }
433 
434 /*
435  * Can the proposed VMA be merged with the right (next) VMA taking into
436  * account the end position of the proposed range.
437  *
438  * In addition, if we can merge with the left VMA, ensure that left and right
439  * anon_vma's are also compatible.
440  */
441 static bool can_vma_merge_right(struct vma_merge_struct *vmg,
442 				bool can_merge_left)
443 {
444 	struct vm_area_struct *next = vmg->next;
445 	struct vm_area_struct *prev;
446 
447 	if (!next || vmg->end != next->vm_start || !can_vma_merge_before(vmg))
448 		return false;
449 
450 	if (!can_merge_left)
451 		return true;
452 
453 	/*
454 	 * If we can merge with prev (left) and next (right), indicating that
455 	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
456 	 * does not mean prev and next are compatible with EACH OTHER.
457 	 *
458 	 * We therefore check this in addition to mergeability to either side.
459 	 */
460 	prev = vmg->prev;
461 	return !prev->anon_vma || !next->anon_vma ||
462 		prev->anon_vma == next->anon_vma;
463 }
464 
465 /*
466  * Close a vm structure and free it.
467  */
468 void remove_vma(struct vm_area_struct *vma)
469 {
470 	might_sleep();
471 	vma_close(vma);
472 	if (vma->vm_file)
473 		fput(vma->vm_file);
474 	mpol_put(vma_policy(vma));
475 	vm_area_free(vma);
476 }
477 
478 /*
479  * Get rid of page table information in the indicated region.
480  *
481  * Called with the mm semaphore held.
482  */
483 void unmap_region(struct unmap_desc *unmap)
484 {
485 	struct mm_struct *mm = unmap->first->vm_mm;
486 	struct mmu_gather tlb;
487 
488 	tlb_gather_mmu(&tlb, mm);
489 	update_hiwater_rss(mm);
490 	unmap_vmas(&tlb, unmap);
491 	mas_set(unmap->mas, unmap->tree_reset);
492 	free_pgtables(&tlb, unmap);
493 	tlb_finish_mmu(&tlb);
494 }
495 
496 /*
497  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
498  * has already been checked or doesn't make sense to fail.
499  * VMA Iterator will point to the original VMA.
500  */
501 static __must_check int
502 __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
503 	    unsigned long addr, int new_below)
504 {
505 	struct vma_prepare vp;
506 	struct vm_area_struct *new;
507 	int err;
508 
509 	WARN_ON(vma->vm_start >= addr);
510 	WARN_ON(vma->vm_end <= addr);
511 
512 	if (vma->vm_ops && vma->vm_ops->may_split) {
513 		err = vma->vm_ops->may_split(vma, addr);
514 		if (err)
515 			return err;
516 	}
517 
518 	new = vm_area_dup(vma);
519 	if (!new)
520 		return -ENOMEM;
521 
522 	if (new_below) {
523 		new->vm_end = addr;
524 	} else {
525 		new->vm_start = addr;
526 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
527 	}
528 
529 	err = -ENOMEM;
530 	vma_iter_config(vmi, new->vm_start, new->vm_end);
531 	if (vma_iter_prealloc(vmi, new))
532 		goto out_free_vma;
533 
534 	err = vma_dup_policy(vma, new);
535 	if (err)
536 		goto out_free_vmi;
537 
538 	err = anon_vma_clone(new, vma, VMA_OP_SPLIT);
539 	if (err)
540 		goto out_free_mpol;
541 
542 	if (new->vm_file)
543 		get_file(new->vm_file);
544 
545 	if (new->vm_ops && new->vm_ops->open)
546 		new->vm_ops->open(new);
547 
548 	vma_start_write(vma);
549 	vma_start_write(new);
550 
551 	init_vma_prep(&vp, vma);
552 	vp.insert = new;
553 	vma_prepare(&vp);
554 
555 	/*
556 	 * Get rid of huge pages and shared page tables straddling the split
557 	 * boundary.
558 	 */
559 	vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL);
560 	if (is_vm_hugetlb_page(vma))
561 		hugetlb_split(vma, addr);
562 
563 	if (new_below) {
564 		vma->vm_start = addr;
565 		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
566 	} else {
567 		vma->vm_end = addr;
568 	}
569 
570 	/* vma_complete stores the new vma */
571 	vma_complete(&vp, vmi, vma->vm_mm);
572 	validate_mm(vma->vm_mm);
573 
574 	/* Success. */
575 	if (new_below)
576 		vma_next(vmi);
577 	else
578 		vma_prev(vmi);
579 
580 	return 0;
581 
582 out_free_mpol:
583 	mpol_put(vma_policy(new));
584 out_free_vmi:
585 	vma_iter_free(vmi);
586 out_free_vma:
587 	vm_area_free(new);
588 	return err;
589 }
590 
591 /*
592  * Split a vma into two pieces at address 'addr', a new vma is allocated
593  * either for the first part or the tail.
594  */
595 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
596 		     unsigned long addr, int new_below)
597 {
598 	if (vma->vm_mm->map_count >= get_sysctl_max_map_count())
599 		return -ENOMEM;
600 
601 	return __split_vma(vmi, vma, addr, new_below);
602 }
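
/*
 * Example of the new_below convention (illustrative addresses): splitting a
 * VMA spanning [0x1000, 0x5000) at addr == 0x3000 with new_below == 1 shrinks
 * the original VMA to [0x3000, 0x5000) and creates a new VMA covering
 * [0x1000, 0x3000); with new_below == 0 the new VMA instead covers
 * [0x3000, 0x5000) and the original is trimmed to [0x1000, 0x3000).
 */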
603 
604 /*
605  * dup_anon_vma() - Helper function to duplicate an anon_vma on VMA merge when
606  * the destination VMA has no anon_vma but the source does.
607  *
608  * @dst: The destination VMA
609  * @src: The source VMA
610  * @dup: Set to @dst if an anon_vma was duplicated, left untouched otherwise.
611  *
612  * Returns: 0 on success.
613  */
614 static int dup_anon_vma(struct vm_area_struct *dst,
615 			struct vm_area_struct *src, struct vm_area_struct **dup)
616 {
617 	/*
618 	 * There are three cases to consider for correctly propagating
619 	 * anon_vma's on merge.
620 	 *
621 	 * The first is trivial - neither VMA has anon_vma, we need not do
622 	 * anything.
623 	 *
624 	 * The second where both have anon_vma is also a no-op, as they must
625 	 * then be the same, so there is simply nothing to copy.
626 	 *
627 	 * Here we cover the third - if the destination VMA has no anon_vma,
628 	 * that is, it is unfaulted, we need to ensure that the newly merged
629 	 * range is referenced by the anon_vmas of the source.
630 	 */
631 	if (src->anon_vma && !dst->anon_vma) {
632 		int ret;
633 
634 		vma_assert_write_locked(dst);
635 		dst->anon_vma = src->anon_vma;
636 		ret = anon_vma_clone(dst, src, VMA_OP_MERGE_UNFAULTED);
637 		if (ret)
638 			return ret;
639 
640 		*dup = dst;
641 	}
642 
643 	return 0;
644 }
645 
646 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
647 void validate_mm(struct mm_struct *mm)
648 {
649 	int bug = 0;
650 	int i = 0;
651 	struct vm_area_struct *vma;
652 	VMA_ITERATOR(vmi, mm, 0);
653 
654 	mt_validate(&mm->mm_mt);
655 	for_each_vma(vmi, vma) {
656 #ifdef CONFIG_DEBUG_VM_RB
657 		struct anon_vma *anon_vma = vma->anon_vma;
658 		struct anon_vma_chain *avc;
659 #endif
660 		unsigned long vmi_start, vmi_end;
661 		bool warn = 0;
662 
663 		vmi_start = vma_iter_addr(&vmi);
664 		vmi_end = vma_iter_end(&vmi);
665 		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
666 			warn = 1;
667 
668 		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
669 			warn = 1;
670 
671 		if (warn) {
672 			pr_emerg("issue in %s\n", current->comm);
673 			dump_stack();
674 			dump_vma(vma);
675 			pr_emerg("tree range: %px start %lx end %lx\n", vma,
676 				 vmi_start, vmi_end - 1);
677 			vma_iter_dump_tree(&vmi);
678 		}
679 
680 #ifdef CONFIG_DEBUG_VM_RB
681 		if (anon_vma) {
682 			anon_vma_lock_read(anon_vma);
683 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
684 				anon_vma_interval_tree_verify(avc);
685 			anon_vma_unlock_read(anon_vma);
686 		}
687 #endif
688 		/* Check for an infinite loop */
689 		if (++i > mm->map_count + 10) {
690 			i = -1;
691 			break;
692 		}
693 	}
694 	if (i != mm->map_count) {
695 		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
696 		bug = 1;
697 	}
698 	VM_BUG_ON_MM(bug, mm);
699 }
700 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
701 
702 /*
703  * Based on the vmg flag indicating whether we need to adjust the vm_start field
704  * for the middle or next VMA, we calculate what the range of the newly adjusted
705  * VMA ought to be, and set the VMA's range accordingly.
706  */
707 static void vmg_adjust_set_range(struct vma_merge_struct *vmg)
708 {
709 	struct vm_area_struct *adjust;
710 	pgoff_t pgoff;
711 
712 	if (vmg->__adjust_middle_start) {
713 		adjust = vmg->middle;
714 		pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start);
715 	} else if (vmg->__adjust_next_start) {
716 		adjust = vmg->next;
717 		pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end);
718 	} else {
719 		return;
720 	}
721 
722 	vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff);
723 }
724 
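
/*
 * Worked example of the adjustment above (illustrative values, assuming 4KiB
 * pages): if middle spans [0x2000, 0x6000) with vm_pgoff == 10 and the merge
 * sets vmg->end == 0x4000, the adjusted middle must now start at 0x4000 and
 * its vm_pgoff becomes 10 + PHYS_PFN(0x4000 - 0x2000) == 12, so the file
 * offset still corresponds to the new start address.
 */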
725 /*
726  * Actually perform the VMA merge operation.
727  *
728  * IMPORTANT: We guarantee that, should vmg->give_up_on_oom be set, we will not
729  * modify any VMAs nor cause inconsistent state should an OOM condition arise.
730  *
731  * Returns 0 on success, or an error value on failure.
732  */
733 static int commit_merge(struct vma_merge_struct *vmg)
734 {
735 	struct vm_area_struct *vma;
736 	struct vma_prepare vp;
737 
738 	if (vmg->__adjust_next_start) {
739 		/* We manipulate middle and adjust next, which is the target. */
740 		vma = vmg->middle;
741 		vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end);
742 	} else {
743 		vma = vmg->target;
744 		 /* Note: vma iterator must be pointing to 'start'. */
745 		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
746 	}
747 
748 	init_multi_vma_prep(&vp, vma, vmg);
749 
750 	/*
751 	 * If vmg->give_up_on_oom is set, we're safe, because we don't actually
752 	 * manipulate any VMAs until we succeed at preallocation.
753 	 *
754 	 * Past this point, we will not return an error.
755 	 */
756 	if (vma_iter_prealloc(vmg->vmi, vma))
757 		return -ENOMEM;
758 
759 	vma_prepare(&vp);
760 	/*
761 	 * THP pages may need to do additional splits if we increase
762 	 * middle->vm_start.
763 	 */
764 	vma_adjust_trans_huge(vma, vmg->start, vmg->end,
765 			      vmg->__adjust_middle_start ? vmg->middle : NULL);
766 	vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
767 	vmg_adjust_set_range(vmg);
768 	vma_iter_store_overwrite(vmg->vmi, vmg->target);
769 
770 	vma_complete(&vp, vmg->vmi, vma->vm_mm);
771 
772 	return 0;
773 }
774 
775 /* We can only remove VMAs when merging if they do not have a close hook. */
776 static bool can_merge_remove_vma(struct vm_area_struct *vma)
777 {
778 	return !vma->vm_ops || !vma->vm_ops->close;
779 }
780 
781 /*
782  * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
783  * attributes modified.
784  *
785  * @vmg: Describes the modifications being made to a VMA and associated
786  *       metadata.
787  *
788  * When the attributes of a range within a VMA change, it might be possible
789  * for immediately adjacent VMAs to be merged into that VMA due to having
790  * identical properties.
791  *
792  * This function checks for the existence of any such mergeable VMAs and updates
793  * the maple tree describing the @vmg->middle->vm_mm address space to account
794  * for this, as well as any VMAs shrunk/expanded/deleted as a result of this
795  * merge.
796  *
797  * As part of this operation, if a merge occurs, the @vmg object will have its
798  * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
799  * calls to this function should reset these fields.
800  *
801  * Returns: The merged VMA if merge succeeds, or NULL otherwise.
802  *
803  * ASSUMPTIONS:
804  * - The caller must assign the VMA to be modified to @vmg->middle.
805  * - The caller must have set @vmg->prev to the previous VMA, if there is one.
806  * - The caller must not set @vmg->next, as we determine this.
807  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
808  * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end).
809  */
810 static __must_check struct vm_area_struct *vma_merge_existing_range(
811 		struct vma_merge_struct *vmg)
812 {
813 	vma_flags_t sticky_flags = vma_flags_and_mask(&vmg->vma_flags,
814 						      VMA_STICKY_FLAGS);
815 	struct vm_area_struct *middle = vmg->middle;
816 	struct vm_area_struct *prev = vmg->prev;
817 	struct vm_area_struct *next;
818 	struct vm_area_struct *anon_dup = NULL;
819 	unsigned long start = vmg->start;
820 	unsigned long end = vmg->end;
821 	bool left_side = middle && start == middle->vm_start;
822 	bool right_side = middle && end == middle->vm_end;
823 	int err = 0;
824 	bool merge_left, merge_right, merge_both;
825 
826 	mmap_assert_write_locked(vmg->mm);
827 	VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */
828 	VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
829 	VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
830 	VM_WARN_ON_VMG(start >= end, vmg);
831 
832 	/*
833 	 * If middle == prev, then we are offset into a VMA. Otherwise the range
834 	 * must start at middle->vm_start, and it must not extend past middle->vm_end.
835 	 */
836 	VM_WARN_ON_VMG(middle &&
837 		       ((middle != prev && vmg->start != middle->vm_start) ||
838 			vmg->end > middle->vm_end), vmg);
839 	/* The vmi must be positioned within vmg->middle. */
840 	VM_WARN_ON_VMG(middle &&
841 		       !(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
842 			 vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
843 	/* An existing merge can never be used by the mremap() logic. */
844 	VM_WARN_ON_VMG(vmg->copied_from, vmg);
845 
846 	vmg->state = VMA_MERGE_NOMERGE;
847 
848 	/*
849 	 * If this is a special mapping, or if the range being modified is at
850 	 * neither the leftmost nor the rightmost edge of the VMA, then we have
851 	 * no chance of merging and should abort.
852 	 */
853 	if (vma_flags_test_any_mask(&vmg->vma_flags, VMA_SPECIAL_FLAGS) ||
854 	    (!left_side && !right_side))
855 		return NULL;
856 
857 	if (left_side)
858 		merge_left = can_vma_merge_left(vmg);
859 	else
860 		merge_left = false;
861 
862 	if (right_side) {
863 		next = vmg->next = vma_iter_next_range(vmg->vmi);
864 		vma_iter_prev_range(vmg->vmi);
865 
866 		merge_right = can_vma_merge_right(vmg, merge_left);
867 	} else {
868 		merge_right = false;
869 		next = NULL;
870 	}
871 
872 	if (merge_left)		/* If merging prev, position iterator there. */
873 		vma_prev(vmg->vmi);
874 	else if (!merge_right)	/* If we have nothing to merge, abort. */
875 		return NULL;
876 
877 	merge_both = merge_left && merge_right;
878 	/* If we span the entire VMA, a merge implies it will be deleted. */
879 	vmg->__remove_middle = left_side && right_side;
880 
881 	/*
882 	 * If we need to remove middle in its entirety but are unable to do so,
883 	 * we have no sensible recourse but to abort the merge.
884 	 */
885 	if (vmg->__remove_middle && !can_merge_remove_vma(middle))
886 		return NULL;
887 
888 	/*
889 	 * If we merge both VMAs, then next is also deleted. This implies
890 	 * that vmg->__remove_middle is set as well.
891 	 */
892 	vmg->__remove_next = merge_both;
893 
894 	/*
895 	 * If we cannot delete next, then we can reduce the operation to merging
896 	 * prev and middle (thereby deleting middle).
897 	 */
898 	if (vmg->__remove_next && !can_merge_remove_vma(next)) {
899 		vmg->__remove_next = false;
900 		merge_right = false;
901 		merge_both = false;
902 	}
903 
904 	/* No matter what happens, we will be adjusting middle. */
905 	vma_start_write(middle);
906 
907 	if (merge_right) {
908 		vma_flags_t next_sticky;
909 
910 		vma_start_write(next);
911 		vmg->target = next;
912 		next_sticky = vma_flags_and_mask(&next->flags, VMA_STICKY_FLAGS);
913 		vma_flags_set_mask(&sticky_flags, next_sticky);
914 	}
915 
916 	if (merge_left) {
917 		vma_flags_t prev_sticky;
918 
919 		vma_start_write(prev);
920 		vmg->target = prev;
921 
922 		prev_sticky = vma_flags_and_mask(&prev->flags, VMA_STICKY_FLAGS);
923 		vma_flags_set_mask(&sticky_flags, prev_sticky);
924 	}
925 
926 	if (merge_both) {
927 		/*
928 		 * |<-------------------->|
929 		 * |-------********-------|
930 		 *   prev   middle   next
931 		 *  extend  delete  delete
932 		 */
933 
934 		vmg->start = prev->vm_start;
935 		vmg->end = next->vm_end;
936 		vmg->pgoff = prev->vm_pgoff;
937 
938 		/*
939 		 * We already ensured anon_vma compatibility above, so now it is
940 		 * simply a question of which of next or middle holds the anon_vma
941 		 * we must duplicate into prev, should prev lack one.
942 		 */
943 		err = dup_anon_vma(prev, next->anon_vma ? next : middle,
944 				   &anon_dup);
945 	} else if (merge_left) {
946 		/*
947 		 * |<------------>|      OR
948 		 * |<----------------->|
949 		 * |-------*************
950 		 *   prev     middle
951 		 *  extend shrink/delete
952 		 */
953 
954 		vmg->start = prev->vm_start;
955 		vmg->pgoff = prev->vm_pgoff;
956 
957 		if (!vmg->__remove_middle)
958 			vmg->__adjust_middle_start = true;
959 
960 		err = dup_anon_vma(prev, middle, &anon_dup);
961 	} else { /* merge_right */
962 		/*
963 		 *     |<------------->| OR
964 		 * |<----------------->|
965 		 * *************-------|
966 		 *    middle     next
967 		 * shrink/delete extend
968 		 */
969 
970 		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
971 
972 		VM_WARN_ON_VMG(!merge_right, vmg);
973 		/* If we are offset into a VMA, then prev must be middle. */
974 		VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg);
975 
976 		if (vmg->__remove_middle) {
977 			vmg->end = next->vm_end;
978 			vmg->pgoff = next->vm_pgoff - pglen;
979 		} else {
980 			/* We shrink middle and expand next. */
981 			vmg->__adjust_next_start = true;
982 			vmg->start = middle->vm_start;
983 			vmg->end = start;
984 			vmg->pgoff = middle->vm_pgoff;
985 		}
986 
987 		err = dup_anon_vma(next, middle, &anon_dup);
988 	}
989 
990 	if (err || commit_merge(vmg))
991 		goto abort;
992 
993 	vma_set_flags_mask(vmg->target, sticky_flags);
994 	khugepaged_enter_vma(vmg->target, vmg->vm_flags);
995 	vmg->state = VMA_MERGE_SUCCESS;
996 	return vmg->target;
997 
998 abort:
999 	vma_iter_set(vmg->vmi, start);
1000 	vma_iter_load(vmg->vmi);
1001 
1002 	if (anon_dup)
1003 		unlink_anon_vmas(anon_dup);
1004 
1005 	/*
1006 	 * This means we have failed to clone anon_vmas correctly, but no
1007 	 * actual changes to VMAs have occurred, so no harm no foul - if the
1008 	 * user doesn't want this reported and instead just wants to give up on
1009 	 * the merge, allow it.
1010 	 */
1011 	if (!vmg->give_up_on_oom)
1012 		vmg->state = VMA_MERGE_ERROR_NOMEM;
1013 	return NULL;
1014 }
1015 
1016 /*
1017  * vma_merge_new_range - Attempt to merge a new VMA into address space
1018  *
1019  * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
1020  *       (exclusive), which we try to merge with any adjacent VMAs if possible.
1021  *
1022  * We are about to add a VMA to the address space starting at @vmg->start and
1023  * ending at @vmg->end. There are three different possible scenarios:
1024  *
1025  * 1. There is a VMA with identical properties immediately adjacent to the
1026  *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
1027  *    EXPAND that VMA:
1028  *
1029  * Proposed:       |-----|  or  |-----|
1030  * Existing:  |----|                  |----|
1031  *
1032  * 2. There are VMAs with identical properties immediately adjacent to the
1033  *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
1034  *    EXPAND the former and REMOVE the latter:
1035  *
1036  * Proposed:       |-----|
1037  * Existing:  |----|     |----|
1038  *
1039  * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
1040  *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
1041  *
1042  * In instances where we can merge, this function returns the expanded VMA which
1043  * will have its range adjusted accordingly and the underlying maple tree also
1044  * adjusted.
1045  *
1046  * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
1047  *          to the VMA we expanded.
1048  *
1049  * This function adjusts @vmg to provide @vmg->next if not already specified,
1050  * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
1051  *
1052  * ASSUMPTIONS:
1053  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
1054  * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
1055  *   other than VMAs that will be unmapped should the operation succeed.
1056  * - The caller must have specified the previous vma in @vmg->prev.
1057  * - The caller must have specified the next vma in @vmg->next.
1058  * - The caller must have positioned the vmi at or before the gap.
1059  */
1060 struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
1061 {
1062 	struct vm_area_struct *prev = vmg->prev;
1063 	struct vm_area_struct *next = vmg->next;
1064 	unsigned long end = vmg->end;
1065 	bool can_merge_left, can_merge_right;
1066 
1067 	mmap_assert_write_locked(vmg->mm);
1068 	VM_WARN_ON_VMG(vmg->middle, vmg);
1069 	VM_WARN_ON_VMG(vmg->target, vmg);
1070 	/* vmi must point at or before the gap. */
1071 	VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);
1072 
1073 	vmg->state = VMA_MERGE_NOMERGE;
1074 
1075 	/* Special VMAs are unmergeable, also if no prev/next. */
1076 	if (vma_flags_test_any_mask(&vmg->vma_flags, VMA_SPECIAL_FLAGS) ||
1077 	    (!prev && !next))
1078 		return NULL;
1079 
1080 	can_merge_left = can_vma_merge_left(vmg);
1081 	can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left);
1082 
1083 	/* If we can merge with the next VMA, adjust vmg accordingly. */
1084 	if (can_merge_right) {
1085 		vmg->end = next->vm_end;
1086 		vmg->target = next;
1087 	}
1088 
1089 	/* If we can merge with the previous VMA, adjust vmg accordingly. */
1090 	if (can_merge_left) {
1091 		vmg->start = prev->vm_start;
1092 		vmg->target = prev;
1093 		vmg->pgoff = prev->vm_pgoff;
1094 
1095 		/*
1096 		 * If this merge would result in removal of the next VMA but we
1097 		 * are not permitted to do so, reduce the operation to merging
1098 		 * prev and vma.
1099 		 */
1100 		if (can_merge_right && !can_merge_remove_vma(next))
1101 			vmg->end = end;
1102 
1103 		/* In expand-only case we are already positioned at prev. */
1104 		if (!vmg->just_expand) {
1105 			/* Equivalent to going to the previous range. */
1106 			vma_prev(vmg->vmi);
1107 		}
1108 	}
1109 
1110 	/*
1111 	 * Now try to expand adjacent VMA(s). This takes care of removing the
1112 	 * following VMA if we have VMAs on both sides.
1113 	 */
1114 	if (vmg->target && !vma_expand(vmg)) {
1115 		khugepaged_enter_vma(vmg->target, vmg->vm_flags);
1116 		vmg->state = VMA_MERGE_SUCCESS;
1117 		return vmg->target;
1118 	}
1119 
1120 	return NULL;
1121 }
1122 
1123 /*
1124  * vma_merge_copied_range - Attempt to merge a VMA that is being copied by
1125  * mremap()
1126  *
1127  * @vmg: Describes the VMA we are adding, in the copied-to range @vmg->start to
1128  *       @vmg->end (exclusive), which we try to merge with any adjacent VMAs if
1129  *       possible.
1130  *
1131  * vmg->prev, next, start, end, pgoff should all be relative to the COPIED TO
1132  * range, i.e. the target range for the VMA.
1133  *
1134  * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
1135  *          to the VMA we expanded.
1136  *
1137  * ASSUMPTIONS: Same as vma_merge_new_range(), except vmg->middle must contain
1138  *              the copied-from VMA.
1139  */
1140 static struct vm_area_struct *vma_merge_copied_range(struct vma_merge_struct *vmg)
1141 {
1142 	/* We must have a copied-from VMA. */
1143 	VM_WARN_ON_VMG(!vmg->middle, vmg);
1144 
1145 	vmg->copied_from = vmg->middle;
1146 	vmg->middle = NULL;
1147 	return vma_merge_new_range(vmg);
1148 }
1149 
1150 /*
1151  * vma_expand - Expand an existing VMA
1152  *
1153  * @vmg: Describes a VMA expansion operation.
1154  *
1155  * Expand vmg->target to vmg->start and vmg->end.  Can expand off the start and end.
1156  * Will expand over vmg->next if it's different from vmg->target and vmg->end ==
1157  * vmg->next->vm_end.  Checking if the vmg->target can expand and merge with
1158  * vmg->next needs to be handled by the caller.
1159  *
1160  * Returns: 0 on success.
1161  *
1162  * ASSUMPTIONS:
1163  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
1164  * - The caller must have set @vmg->target and @vmg->next.
1165  */
1166 int vma_expand(struct vma_merge_struct *vmg)
1167 {
1168 	struct vm_area_struct *anon_dup = NULL;
1169 	struct vm_area_struct *target = vmg->target;
1170 	struct vm_area_struct *next = vmg->next;
1171 	bool remove_next = false;
1172 	vma_flags_t sticky_flags =
1173 		vma_flags_and_mask(&vmg->vma_flags, VMA_STICKY_FLAGS);
1174 	vma_flags_t target_sticky;
1175 	int ret = 0;
1176 
1177 	mmap_assert_write_locked(vmg->mm);
1178 	vma_start_write(target);
1179 
1180 	target_sticky = vma_flags_and_mask(&target->flags, VMA_STICKY_FLAGS);
1181 
1182 	if (next && target != next && vmg->end == next->vm_end)
1183 		remove_next = true;
1184 
1185 	/* We must have a target. */
1186 	VM_WARN_ON_VMG(!target, vmg);
1187 	/* This should have already been checked by this point. */
1188 	VM_WARN_ON_VMG(remove_next && !can_merge_remove_vma(next), vmg);
1189 	/* Not merging but overwriting any part of next is not handled. */
1190 	VM_WARN_ON_VMG(next && !remove_next &&
1191 		       next != target && vmg->end > next->vm_start, vmg);
1192 	/* Only handles expanding. */
1193 	VM_WARN_ON_VMG(target->vm_start < vmg->start ||
1194 		       target->vm_end > vmg->end, vmg);
1195 
1196 	vma_flags_set_mask(&sticky_flags, target_sticky);
1197 
1198 	/*
1199 	 * If we are removing the next VMA or copying from a VMA
1200 	 * (e.g. mremap()'ing), we must propagate anon_vma state.
1201 	 *
1202 	 * Note that, by convention, callers ignore OOM for this case, so
1203 	 * we don't need to account for vmg->give_up_on_oom here.
1204 	 */
1205 	if (remove_next)
1206 		ret = dup_anon_vma(target, next, &anon_dup);
1207 	if (!ret && vmg->copied_from)
1208 		ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
1209 	if (ret)
1210 		return ret;
1211 
1212 	if (remove_next) {
1213 		vma_flags_t next_sticky;
1214 
1215 		vma_start_write(next);
1216 		vmg->__remove_next = true;
1217 
1218 		next_sticky = vma_flags_and_mask(&next->flags, VMA_STICKY_FLAGS);
1219 		vma_flags_set_mask(&sticky_flags, next_sticky);
1220 	}
1221 	if (commit_merge(vmg))
1222 		goto nomem;
1223 
1224 	vma_set_flags_mask(target, sticky_flags);
1225 	return 0;
1226 
1227 nomem:
1228 	if (anon_dup)
1229 		unlink_anon_vmas(anon_dup);
1230 	/*
1231 	 * If the user requests that we just give up on OOM, we are safe to do so
1232 	 * here, as commit_merge() provides this contract to us. Nothing has been
1233 	 * changed - no harm no foul, just don't report it.
1234 	 */
1235 	if (!vmg->give_up_on_oom)
1236 		vmg->state = VMA_MERGE_ERROR_NOMEM;
1237 	return -ENOMEM;
1238 }
1239 
1240 /*
1241  * vma_shrink() - Reduce an existing VMA's memory area
1242  * @vmi: The vma iterator
1243  * @vma: The VMA to modify
1244  * @start: The new start
1245  * @end: The new end
1246  *
1247  * Returns: 0 on success, -ENOMEM otherwise
1248  */
1249 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
1250 	       unsigned long start, unsigned long end, pgoff_t pgoff)
1251 {
1252 	struct vma_prepare vp;
1253 
1254 	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
1255 
1256 	if (vma->vm_start < start)
1257 		vma_iter_config(vmi, vma->vm_start, start);
1258 	else
1259 		vma_iter_config(vmi, end, vma->vm_end);
1260 
1261 	if (vma_iter_prealloc(vmi, NULL))
1262 		return -ENOMEM;
1263 
1264 	vma_start_write(vma);
1265 
1266 	init_vma_prep(&vp, vma);
1267 	vma_prepare(&vp);
1268 	vma_adjust_trans_huge(vma, start, end, NULL);
1269 
1270 	vma_iter_clear(vmi);
1271 	vma_set_range(vma, start, end, pgoff);
1272 	vma_complete(&vp, vmi, vma->vm_mm);
1273 	validate_mm(vma->vm_mm);
1274 	return 0;
1275 }
1276 
1277 static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
1278 		    struct ma_state *mas_detach, bool mm_wr_locked)
1279 {
1280 	struct unmap_desc unmap = {
1281 		.mas = mas_detach,
1282 		.first = vms->vma,
1283 		/* start and end may be different if there is no prev or next vma. */
1284 		.pg_start = vms->unmap_start,
1285 		.pg_end = vms->unmap_end,
1286 		.vma_start = vms->start,
1287 		.vma_end = vms->end,
1288 		/*
1289 		 * The tree limits and reset differ from the normal case since it's a
1290 		 * side-tree
1291 		 */
1292 		.tree_reset = 1,
1293 		.tree_end = vms->vma_count,
1294 		/*
1295 		 * We can free page tables without write-locking mmap_lock because VMAs
1296 		 * were isolated before we downgraded mmap_lock.
1297 		 */
1298 		.mm_wr_locked = mm_wr_locked,
1299 	};
1300 
1301 	if (!vms->clear_ptes) /* Nothing to do */
1302 		return;
1303 
1304 	mas_set(mas_detach, 1);
1305 	unmap_region(&unmap);
1306 	vms->clear_ptes = false;
1307 }
1308 
1309 static void vms_clean_up_area(struct vma_munmap_struct *vms,
1310 		struct ma_state *mas_detach)
1311 {
1312 	struct vm_area_struct *vma;
1313 
1314 	if (!vms->nr_pages)
1315 		return;
1316 
1317 	vms_clear_ptes(vms, mas_detach, true);
1318 	mas_set(mas_detach, 0);
1319 	mas_for_each(mas_detach, vma, ULONG_MAX)
1320 		vma_close(vma);
1321 }
1322 
1323 /*
1324  * vms_complete_munmap_vmas() - Finish the munmap() operation
1325  * @vms: The vma munmap struct
1326  * @mas_detach: The maple state of the detached vmas
1327  *
1328  * This updates the mm_struct, unmaps the region, frees the resources
1329  * used for the munmap() and may downgrade the lock - if requested. This covers
1330  * everything that needs to be done once the vma maple tree has been updated.
1331  */
1332 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
1333 		struct ma_state *mas_detach)
1334 {
1335 	struct vm_area_struct *vma;
1336 	struct mm_struct *mm;
1337 
1338 	mm = current->mm;
1339 	mm->map_count -= vms->vma_count;
1340 	mm->locked_vm -= vms->locked_vm;
1341 	if (vms->unlock)
1342 		mmap_write_downgrade(mm);
1343 
1344 	if (!vms->nr_pages)
1345 		return;
1346 
1347 	vms_clear_ptes(vms, mas_detach, !vms->unlock);
1348 	/* Update high watermark before we lower total_vm */
1349 	update_hiwater_vm(mm);
1350 	/* Stat accounting */
1351 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
1352 	/* Paranoid bookkeeping */
1353 	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
1354 	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
1355 	VM_WARN_ON(vms->data_vm > mm->data_vm);
1356 	mm->exec_vm -= vms->exec_vm;
1357 	mm->stack_vm -= vms->stack_vm;
1358 	mm->data_vm -= vms->data_vm;
1359 
1360 	/* Remove and clean up vmas */
1361 	mas_set(mas_detach, 0);
1362 	mas_for_each(mas_detach, vma, ULONG_MAX)
1363 		remove_vma(vma);
1364 
1365 	vm_unacct_memory(vms->nr_accounted);
1366 	validate_mm(mm);
1367 	if (vms->unlock)
1368 		mmap_read_unlock(mm);
1369 
1370 	__mt_destroy(mas_detach->tree);
1371 }
1372 
1373 /*
1374  * reattach_vmas() - Undo any munmap work and free resources
1375  * @mas_detach: The maple state with the detached maple tree
1376  *
1377  * Reattach any detached vmas and free up the maple tree used to track the vmas.
1378  */
1379 static void reattach_vmas(struct ma_state *mas_detach)
1380 {
1381 	struct vm_area_struct *vma;
1382 
1383 	mas_set(mas_detach, 0);
1384 	mas_for_each(mas_detach, vma, ULONG_MAX)
1385 		vma_mark_attached(vma);
1386 
1387 	__mt_destroy(mas_detach->tree);
1388 }
1389 
1390 /*
1391  * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
1392  * for later removal.  Handles splitting the first and last VMAs if necessary,
1393  * and marks the vmas as isolated.
1394  *
1395  * @vms: The vma munmap struct
1396  * @mas_detach: The maple state tracking the detached tree
1397  *
1398  * Return: 0 on success, error otherwise
1399  */
1400 static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
1401 		struct ma_state *mas_detach)
1402 {
1403 	struct vm_area_struct *next = NULL;
1404 	int error;
1405 
1406 	/*
1407 	 * If we need to split any vma, do it now to save pain later.
1408 	 * Does it split the first one?
1409 	 */
1410 	if (vms->start > vms->vma->vm_start) {
1411 
1412 		/*
1413 		 * Make sure that map_count on return from munmap() will
1414 		 * not exceed its limit; but let map_count go just above
1415 		 * its limit temporarily, to help free resources as expected.
1416 		 */
1417 		if (vms->end < vms->vma->vm_end &&
1418 		    vms->vma->vm_mm->map_count >= get_sysctl_max_map_count()) {
1419 			error = -ENOMEM;
1420 			goto map_count_exceeded;
1421 		}
1422 
1423 		/* Don't bother splitting the VMA if we can't unmap it anyway */
1424 		if (vma_is_sealed(vms->vma)) {
1425 			error = -EPERM;
1426 			goto start_split_failed;
1427 		}
1428 
1429 		error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
1430 		if (error)
1431 			goto start_split_failed;
1432 	}
1433 	vms->prev = vma_prev(vms->vmi);
1434 	if (vms->prev)
1435 		vms->unmap_start = vms->prev->vm_end;
1436 
1437 	/*
1438 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
1439 	 * it is always overwritten.
1440 	 */
1441 	for_each_vma_range(*(vms->vmi), next, vms->end) {
1442 		long nrpages;
1443 
1444 		if (vma_is_sealed(next)) {
1445 			error = -EPERM;
1446 			goto modify_vma_failed;
1447 		}
1448 		/* Does it split the end? */
1449 		if (next->vm_end > vms->end) {
1450 			error = __split_vma(vms->vmi, next, vms->end, 0);
1451 			if (error)
1452 				goto end_split_failed;
1453 		}
1454 		vma_start_write(next);
1455 		mas_set(mas_detach, vms->vma_count++);
1456 		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
1457 		if (error)
1458 			goto munmap_gather_failed;
1459 
1460 		vma_mark_detached(next);
1461 		nrpages = vma_pages(next);
1462 
1463 		vms->nr_pages += nrpages;
1464 		if (vma_test(next, VMA_LOCKED_BIT))
1465 			vms->locked_vm += nrpages;
1466 
1467 		if (vma_test(next, VMA_ACCOUNT_BIT))
1468 			vms->nr_accounted += nrpages;
1469 
1470 		if (is_exec_mapping(next->vm_flags))
1471 			vms->exec_vm += nrpages;
1472 		else if (is_stack_mapping(next->vm_flags))
1473 			vms->stack_vm += nrpages;
1474 		else if (is_data_mapping_vma_flags(&next->flags))
1475 			vms->data_vm += nrpages;
1476 
1477 		if (vms->uf) {
1478 			/*
1479 			 * If userfaultfd_unmap_prep returns an error the vmas
1480 			 * will remain split, but userland will get a
1481 			 * highly unexpected error anyway. This is no
1482 			 * different from the case where the first of the two
1483 			 * __split_vma calls fails but we don't undo the first
1484 			 * split, even though we could. This failure is unlikely
1485 			 * enough that it's not worth optimizing for.
1486 			 */
1487 			error = userfaultfd_unmap_prep(next, vms->start,
1488 						       vms->end, vms->uf);
1489 			if (error)
1490 				goto userfaultfd_error;
1491 		}
1492 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
1493 		BUG_ON(next->vm_start < vms->start);
1494 		BUG_ON(next->vm_start > vms->end);
1495 #endif
1496 	}
1497 
1498 	vms->next = vma_next(vms->vmi);
1499 	if (vms->next)
1500 		vms->unmap_end = vms->next->vm_start;
1501 
1502 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1503 	/* Make sure no VMAs are about to be lost. */
1504 	{
1505 		MA_STATE(test, mas_detach->tree, 0, 0);
1506 		struct vm_area_struct *vma_mas, *vma_test;
1507 		int test_count = 0;
1508 
1509 		vma_iter_set(vms->vmi, vms->start);
1510 		rcu_read_lock();
1511 		vma_test = mas_find(&test, vms->vma_count - 1);
1512 		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
1513 			BUG_ON(vma_mas != vma_test);
1514 			test_count++;
1515 			vma_test = mas_next(&test, vms->vma_count - 1);
1516 		}
1517 		rcu_read_unlock();
1518 		BUG_ON(vms->vma_count != test_count);
1519 	}
1520 #endif
1521 
1522 	while (vma_iter_addr(vms->vmi) > vms->start)
1523 		vma_iter_prev_range(vms->vmi);
1524 
1525 	vms->clear_ptes = true;
1526 	return 0;
1527 
1528 userfaultfd_error:
1529 munmap_gather_failed:
1530 end_split_failed:
1531 modify_vma_failed:
1532 	reattach_vmas(mas_detach);
1533 start_split_failed:
1534 map_count_exceeded:
1535 	return error;
1536 }
1537 
1538 /*
1539  * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
1540  * @vms: The vma munmap struct
1541  * @vmi: The vma iterator
1542  * @vma: The first vm_area_struct to munmap
1543  * @start: The aligned start address to munmap
1544  * @end: The aligned end address to munmap
1545  * @uf: The userfaultfd list_head
1546  * @unlock: Unlock after the operation.  Only unlocked on success
1547  */
1548 static void init_vma_munmap(struct vma_munmap_struct *vms,
1549 		struct vma_iterator *vmi, struct vm_area_struct *vma,
1550 		unsigned long start, unsigned long end, struct list_head *uf,
1551 		bool unlock)
1552 {
1553 	vms->vmi = vmi;
1554 	vms->vma = vma;
1555 	if (vma) {
1556 		vms->start = start;
1557 		vms->end = end;
1558 	} else {
1559 		vms->start = vms->end = 0;
1560 	}
1561 	vms->unlock = unlock;
1562 	vms->uf = uf;
1563 	vms->vma_count = 0;
1564 	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
1565 	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
1566 	vms->unmap_start = FIRST_USER_ADDRESS;
1567 	vms->unmap_end = USER_PGTABLES_CEILING;
1568 	vms->clear_ptes = false;
1569 }
1570 
1571 /*
1572  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
1573  * @vmi: The vma iterator
1574  * @vma: The starting vm_area_struct
1575  * @mm: The mm_struct
1576  * @start: The aligned start address to munmap.
1577  * @end: The aligned end address to munmap.
1578  * @uf: The userfaultfd list_head
1579  * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
1580  * success.
1581  *
1582  * Return: 0 on success and drops the lock if so directed, error and leaves the
1583  * lock held otherwise.
1584  */
1585 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1586 		struct mm_struct *mm, unsigned long start, unsigned long end,
1587 		struct list_head *uf, bool unlock)
1588 {
1589 	struct maple_tree mt_detach;
1590 	MA_STATE(mas_detach, &mt_detach, 0, 0);
1591 	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1592 	mt_on_stack(mt_detach);
1593 	struct vma_munmap_struct vms;
1594 	int error;
1595 
1596 	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
1597 	error = vms_gather_munmap_vmas(&vms, &mas_detach);
1598 	if (error)
1599 		goto gather_failed;
1600 
1601 	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
1602 	if (error)
1603 		goto clear_tree_failed;
1604 
1605 	/* Point of no return */
1606 	vms_complete_munmap_vmas(&vms, &mas_detach);
1607 	return 0;
1608 
1609 clear_tree_failed:
1610 	reattach_vmas(&mas_detach);
1611 gather_failed:
1612 	validate_mm(mm);
1613 	return error;
1614 }
1615 
1616 /*
1617  * do_vmi_munmap() - munmap a given range.
1618  * @vmi: The vma iterator
1619  * @mm: The mm_struct
1620  * @start: The start address to munmap
1621  * @len: The length of the range to munmap
1622  * @uf: The userfaultfd list_head
1623  * @unlock: set to true if the user wants to drop the mmap_lock on success
1624  *
1625  * This function takes a @mas that is either pointing to the previous VMA or set
1626  * This function takes a @vmi that is either pointing to the previous VMA or set
1627  * aligned.
1628  *
1629  * Return: 0 on success and drops the lock if so directed, error and leaves the
1630  * lock held otherwise.
1631  */
1632 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
1633 		  unsigned long start, size_t len, struct list_head *uf,
1634 		  bool unlock)
1635 {
1636 	unsigned long end;
1637 	struct vm_area_struct *vma;
1638 
1639 	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
1640 		return -EINVAL;
1641 
1642 	end = start + PAGE_ALIGN(len);
1643 	if (end == start)
1644 		return -EINVAL;
1645 
1646 	/* Find the first overlapping VMA */
1647 	vma = vma_find(vmi, end);
1648 	if (!vma) {
1649 		if (unlock)
1650 			mmap_write_unlock(mm);
1651 		return 0;
1652 	}
1653 
1654 	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
1655 }
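
/*
 * Illustrative example of the length handling above (assuming 4KiB pages): a
 * call with start == 0x10000 and len == 0x1800 computes
 * end == start + PAGE_ALIGN(0x1800) == 0x12000, so both pages touched by the
 * request are unmapped in full.
 */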
1656 
1657 /*
1658  * We are about to modify one or more of a VMA's flags, policy, userfaultfd
1659  * context and anonymous VMA name within the range [start, end).
1660  *
1661  * As a result, we might be able to merge the newly modified VMA range with an
1662  * adjacent VMA with identical properties.
1663  *
1664  * If no merge is possible and the range does not span the entirety of the VMA,
1665  * we then need to split the VMA to accommodate the change.
1666  *
1667  * The function returns either the merged VMA, the original VMA if a split was
1668  * required instead, or an error if the split failed.
1669  */
1670 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
1671 {
1672 	struct vm_area_struct *vma = vmg->middle;
1673 	unsigned long start = vmg->start;
1674 	unsigned long end = vmg->end;
1675 	struct vm_area_struct *merged;
1676 
1677 	/* First, try to merge. */
1678 	merged = vma_merge_existing_range(vmg);
1679 	if (merged)
1680 		return merged;
1681 	if (vmg_nomem(vmg))
1682 		return ERR_PTR(-ENOMEM);
1683 
1684 	/*
1685 	 * Split can fail for reasons other than OOM, so if the user requests
1686 	 * this it's probably a mistake.
1687 	 */
1688 	VM_WARN_ON(vmg->give_up_on_oom &&
1689 		   (vma->vm_start != start || vma->vm_end != end));
1690 
1691 	/* Split any preceding portion of the VMA. */
1692 	if (vma->vm_start < start) {
1693 		int err = split_vma(vmg->vmi, vma, start, 1);
1694 
1695 		if (err)
1696 			return ERR_PTR(err);
1697 	}
1698 
1699 	/* Split any trailing portion of the VMA. */
1700 	if (vma->vm_end > end) {
1701 		int err = split_vma(vmg->vmi, vma, end, 0);
1702 
1703 		if (err)
1704 			return ERR_PTR(err);
1705 	}
1706 
1707 	return vma;
1708 }
1709 
1710 struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
1711 		struct vm_area_struct *prev, struct vm_area_struct *vma,
1712 		unsigned long start, unsigned long end,
1713 		vm_flags_t *vm_flags_ptr)
1714 {
1715 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1716 	const vm_flags_t vm_flags = *vm_flags_ptr;
1717 	struct vm_area_struct *ret;
1718 
1719 	vmg.vm_flags = vm_flags;
1720 
1721 	ret = vma_modify(&vmg);
1722 	if (IS_ERR(ret))
1723 		return ret;
1724 
1725 	/*
1726 	 * For a merge to succeed, the flags must match those
1727 	 * requested. However, sticky flags may have been retained, so propagate
1728 	 * them to the caller.
1729 	 */
1730 	if (vmg.state == VMA_MERGE_SUCCESS)
1731 		*vm_flags_ptr = ret->vm_flags;
1732 	return ret;
1733 }
1734 
1735 struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
1736 		struct vm_area_struct *prev, struct vm_area_struct *vma,
1737 		unsigned long start, unsigned long end,
1738 		struct anon_vma_name *new_name)
1739 {
1740 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1741 
1742 	vmg.anon_name = new_name;
1743 
1744 	return vma_modify(&vmg);
1745 }
1746 
1747 struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
1748 		struct vm_area_struct *prev, struct vm_area_struct *vma,
1749 		unsigned long start, unsigned long end,
1750 		struct mempolicy *new_pol)
1751 {
1752 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1753 
1754 	vmg.policy = new_pol;
1755 
1756 	return vma_modify(&vmg);
1757 }
1758 
1759 struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
1760 		struct vm_area_struct *prev, struct vm_area_struct *vma,
1761 		unsigned long start, unsigned long end, vm_flags_t vm_flags,
1762 		struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom)
1763 {
1764 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1765 
1766 	vmg.vm_flags = vm_flags;
1767 	vmg.uffd_ctx = new_ctx;
1768 	if (give_up_on_oom)
1769 		vmg.give_up_on_oom = true;
1770 
1771 	return vma_modify(&vmg);
1772 }
1773 
1774 /*
1775  * Expand vma by delta bytes, potentially merging with an immediately adjacent
1776  * VMA with identical properties.
1777  */
1778 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1779 					struct vm_area_struct *vma,
1780 					unsigned long delta)
1781 {
1782 	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
1783 
1784 	vmg.next = vma_iter_next_rewind(vmi, NULL);
1785 	vmg.middle = NULL; /* We use the VMA to populate VMG fields only. */
1786 
1787 	return vma_merge_new_range(&vmg);
1788 }
1789 
1790 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1791 {
1792 	vb->count = 0;
1793 }
1794 
1795 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1796 {
1797 	struct address_space *mapping;
1798 	int i;
1799 
1800 	mapping = vb->vmas[0]->vm_file->f_mapping;
1801 	i_mmap_lock_write(mapping);
1802 	for (i = 0; i < vb->count; i++) {
1803 		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1804 		__remove_shared_vm_struct(vb->vmas[i], mapping);
1805 	}
1806 	i_mmap_unlock_write(mapping);
1807 
1808 	unlink_file_vma_batch_init(vb);
1809 }
1810 
1811 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1812 			       struct vm_area_struct *vma)
1813 {
1814 	if (vma->vm_file == NULL)
1815 		return;
1816 
1817 	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1818 	    vb->count == ARRAY_SIZE(vb->vmas))
1819 		unlink_file_vma_batch_process(vb);
1820 
1821 	vb->vmas[vb->count] = vma;
1822 	vb->count++;
1823 }
1824 
1825 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1826 {
1827 	if (vb->count > 0)
1828 		unlink_file_vma_batch_process(vb);
1829 }
1830 
1831 static void vma_link_file(struct vm_area_struct *vma, bool hold_rmap_lock)
1832 {
1833 	struct file *file = vma->vm_file;
1834 	struct address_space *mapping;
1835 
1836 	if (file) {
1837 		mapping = file->f_mapping;
1838 		i_mmap_lock_write(mapping);
1839 		__vma_link_file(vma, mapping);
1840 		if (!hold_rmap_lock)
1841 			i_mmap_unlock_write(mapping);
1842 	}
1843 }
1844 
1845 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1846 {
1847 	VMA_ITERATOR(vmi, mm, 0);
1848 
1849 	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1850 	if (vma_iter_prealloc(&vmi, vma))
1851 		return -ENOMEM;
1852 
1853 	vma_start_write(vma);
1854 	vma_iter_store_new(&vmi, vma);
1855 	vma_link_file(vma, /* hold_rmap_lock= */false);
1856 	mm->map_count++;
1857 	validate_mm(mm);
1858 	return 0;
1859 }
1860 
1861 /*
1862  * Copy the vma structure to a new location in the same mm,
1863  * prior to moving page table entries, to effect an mremap move.
1864  */
1865 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1866 	unsigned long addr, unsigned long len, pgoff_t pgoff,
1867 	bool *need_rmap_locks)
1868 {
1869 	struct vm_area_struct *vma = *vmap;
1870 	unsigned long vma_start = vma->vm_start;
1871 	struct mm_struct *mm = vma->vm_mm;
1872 	struct vm_area_struct *new_vma;
1873 	bool faulted_in_anon_vma = true;
1874 	VMA_ITERATOR(vmi, mm, addr);
1875 	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
1876 
1877 	/*
1878 	 * If the anonymous vma has not yet been faulted in, update the new
1879 	 * pgoff to match the new location, to increase its chance of merging.
1880 	 */
1881 	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1882 		pgoff = addr >> PAGE_SHIFT;
1883 		faulted_in_anon_vma = false;
1884 	}
1885 
1886 	/*
1887 	 * If the VMA we are copying might contain a uprobe PTE, ensure
1888 	 * that we do not establish one upon merge. Otherwise, when mremap()
1889 	 * moves page tables, it will orphan the newly created PTE.
1890 	 */
1891 	if (vma->vm_file)
1892 		vmg.skip_vma_uprobe = true;
1893 
1894 	new_vma = find_vma_prev(mm, addr, &vmg.prev);
1895 	if (new_vma && new_vma->vm_start < addr + len)
1896 		return NULL;	/* should never get here */
1897 
1898 	vmg.pgoff = pgoff;
1899 	vmg.next = vma_iter_next_rewind(&vmi, NULL);
1900 	new_vma = vma_merge_copied_range(&vmg);
1901 
1902 	if (new_vma) {
1903 		/*
1904 		 * Source vma may have been merged into new_vma
1905 		 */
1906 		if (unlikely(vma_start >= new_vma->vm_start &&
1907 			     vma_start < new_vma->vm_end)) {
1908 			/*
1909 			 * The only way we can get a vma_merge with
1910 			 * self during an mremap is if the vma hasn't
1911 			 * been faulted in yet and we were allowed to
1912 			 * reset the dst vma->vm_pgoff to the
1913 			 * destination address of the mremap to allow
1914 			 * the merge to happen. mremap must change the
1915 			 * vm_pgoff linearity between src and dst vmas
1916 			 * (in turn preventing a vma_merge) to be
1917 			 * safe. It is only safe to keep the vm_pgoff
1918 			 * linear if there are no pages mapped yet.
1919 			 */
1920 			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1921 			*vmap = vma = new_vma;
1922 		}
1923 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1924 	} else {
1925 		new_vma = vm_area_dup(vma);
1926 		if (!new_vma)
1927 			goto out;
1928 		vma_set_range(new_vma, addr, addr + len, pgoff);
1929 		if (vma_dup_policy(vma, new_vma))
1930 			goto out_free_vma;
1931 		if (anon_vma_clone(new_vma, vma, VMA_OP_REMAP))
1932 			goto out_free_mempol;
1933 		if (new_vma->vm_file)
1934 			get_file(new_vma->vm_file);
1935 		if (new_vma->vm_ops && new_vma->vm_ops->open)
1936 			new_vma->vm_ops->open(new_vma);
1937 		if (vma_link(mm, new_vma))
1938 			goto out_vma_link;
1939 		*need_rmap_locks = false;
1940 	}
1941 	return new_vma;
1942 
1943 out_vma_link:
1944 	fixup_hugetlb_reservations(new_vma);
1945 	vma_close(new_vma);
1946 
1947 	if (new_vma->vm_file)
1948 		fput(new_vma->vm_file);
1949 
1950 	unlink_anon_vmas(new_vma);
1951 out_free_mempol:
1952 	mpol_put(vma_policy(new_vma));
1953 out_free_vma:
1954 	vm_area_free(new_vma);
1955 out:
1956 	return NULL;
1957 }
1958 
1959 /*
1960  * Rough compatibility check to quickly see if it's even worth looking
1961  * at sharing an anon_vma.
1962  *
1963  * They need to have the same vm_file, and the flags can only differ
1964  * in things that mprotect may change.
1965  *
1966  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1967  * we can merge the two vma's. For example, we refuse to merge a vma if
1968  * there is a vm_ops->close() function, because that indicates that the
1969  * driver is doing some kind of reference counting. But that doesn't
1970  * really matter for the anon_vma sharing case.
1971  */
1972 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1973 {
1974 	vma_flags_t diff = vma_flags_diff_pair(&a->flags, &b->flags);
1975 
1976 	vma_flags_clear_mask(&diff, VMA_ACCESS_FLAGS);
1977 	vma_flags_clear_mask(&diff, VMA_IGNORE_MERGE_FLAGS);
1978 
1979 	return a->vm_end == b->vm_start &&
1980 		mpol_equal(vma_policy(a), vma_policy(b)) &&
1981 		a->vm_file == b->vm_file &&
1982 		vma_flags_empty(&diff) &&
1983 		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1984 }
1985 
1986 /*
1987  * Do some basic sanity checking to see if we can re-use the anon_vma
1988  * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1989  * the same as 'old', the other will be the new one that is trying
1990  * to share the anon_vma.
1991  *
1992  * NOTE! This runs with mmap_lock held for reading, so it is possible that
1993  * the anon_vma of 'old' is concurrently in the process of being set up
1994  * by another page fault trying to merge _that_. But that's ok: if it
1995  * is being set up, that automatically means that it will be a singleton
1996  * acceptable for merging, so we can do all of this optimistically. But
1997  * we do that READ_ONCE() to make sure that we never re-load the pointer.
1998  *
1999  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
2000  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
2001  * is to return an anon_vma that is "complex" due to having gone through
2002  * a fork).
2003  *
2004  * We also make sure that the two vma's are compatible (adjacent,
2005  * and with the same memory policies). That's all stable, even with just
2006  * a read lock on the mmap_lock.
2007  */
2008 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
2009 					  struct vm_area_struct *a,
2010 					  struct vm_area_struct *b)
2011 {
2012 	if (anon_vma_compatible(a, b)) {
2013 		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
2014 
2015 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
2016 			return anon_vma;
2017 	}
2018 	return NULL;
2019 }
2020 
2021 /*
2022  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
2023  * neighbouring vmas for a suitable anon_vma, before it goes off
2024  * to allocate a new anon_vma.  It checks because a repetitive
2025  * sequence of mprotects and faults may otherwise lead to distinct
2026  * anon_vmas being allocated, preventing vma merge in subsequent
2027  * mprotect.
2028  */
2029 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
2030 {
2031 	struct anon_vma *anon_vma = NULL;
2032 	struct vm_area_struct *prev, *next;
2033 	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
2034 
2035 	/* Try next first. */
2036 	next = vma_iter_load(&vmi);
2037 	if (next) {
2038 		anon_vma = reusable_anon_vma(next, vma, next);
2039 		if (anon_vma)
2040 			return anon_vma;
2041 	}
2042 
2043 	prev = vma_prev(&vmi);
2044 	VM_BUG_ON_VMA(prev != vma, vma);
2045 	prev = vma_prev(&vmi);
2046 	/* Try prev next. */
2047 	if (prev)
2048 		anon_vma = reusable_anon_vma(prev, prev, vma);
2049 
2050 	/*
2051 	 * We might reach here with anon_vma == NULL if we can't find
2052 	 * any reusable anon_vma.
2053 	 * There's no absolute need to look only at touching neighbours:
2054 	 * we could search further afield for "compatible" anon_vmas.
2055 	 * But it would probably just be a waste of time searching,
2056 	 * or lead to too many vmas hanging off the same anon_vma.
2057 	 * We're trying to allow mprotect remerging later on,
2058 	 * not trying to minimize memory used for anon_vmas.
2059 	 */
2060 	return anon_vma;
2061 }
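
/*
 * Illustrative sketch (an assumption, not confirmed by this file): the
 * anon_vma_prepare() path in mm/rmap.c is the expected consumer and uses the
 * helper roughly as follows before falling back to a fresh allocation:
 *
 *	anon_vma = find_mergeable_anon_vma(vma);
 *	if (!anon_vma) {
 *		anon_vma = anon_vma_alloc();
 *		if (unlikely(!anon_vma))
 *			return -ENOMEM;
 *	}
 *	... link vma to anon_vma via an anon_vma_chain ...
 */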
2062 
2063 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
2064 {
2065 	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
2066 }
2067 
2068 static bool vma_is_shared_writable(struct vm_area_struct *vma)
2069 {
2070 	return vma_test_all(vma, VMA_WRITE_BIT, VMA_SHARED_BIT);
2071 }
2072 
2073 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
2074 {
2075 	/* No managed pages to writeback. */
2076 	if (vma_test(vma, VMA_PFNMAP_BIT))
2077 		return false;
2078 
2079 	return vma->vm_file && vma->vm_file->f_mapping &&
2080 		mapping_can_writeback(vma->vm_file->f_mapping);
2081 }
2082 
2083 /*
2084  * Does this VMA require the underlying folios to have their dirty state
2085  * tracked?
2086  */
2087 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
2088 {
2089 	/* Only shared, writable VMAs require dirty tracking. */
2090 	if (!vma_is_shared_writable(vma))
2091 		return false;
2092 
2093 	/* Does the filesystem need to be notified? */
2094 	if (vm_ops_needs_writenotify(vma->vm_ops))
2095 		return true;
2096 
2097 	/*
2098 	 * Even if the filesystem doesn't indicate a need for writenotify,
2099 	 * dirty tracking is still required if it can write back.
2100 	 */
2101 	return vma_fs_can_writeback(vma);
2102 }
2103 
2104 /*
2105  * Some shared mappings will want the pages marked read-only
2106  * to track write events. If so, we'll downgrade vm_page_prot
2107  * to the private version (using protection_map[] without the
2108  * VM_SHARED bit).
2109  */
2110 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
2111 {
2112 	/* If it was private or non-writable, the write bit is already clear */
2113 	if (!vma_is_shared_writable(vma))
2114 		return false;
2115 
2116 	/* The backer wishes to know when pages are first written to? */
2117 	if (vm_ops_needs_writenotify(vma->vm_ops))
2118 		return true;
2119 
2120 	/* The open routine did something to the protections that pgprot_modify
2121 	 * won't preserve? */
2122 	if (pgprot_val(vm_page_prot) !=
2123 	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
2124 		return false;
2125 
2126 	/*
2127 	 * Do we need to track softdirty? hugetlb does not support softdirty
2128 	 * tracking yet.
2129 	 */
2130 	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
2131 		return true;
2132 
2133 	/* Do we need write faults for uffd-wp tracking? */
2134 	if (userfaultfd_wp(vma))
2135 		return true;
2136 
2137 	/* Can the mapping track the dirty pages? */
2138 	return vma_fs_can_writeback(vma);
2139 }
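
/*
 * Illustrative sketch (assumption): a typical consumer, vma_set_page_prot(),
 * uses this test roughly as follows (shown with the classic vm_flags
 * interface) to downgrade the page protection of shared, writable mappings
 * that need write notifications:
 *
 *	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
 *	if (vma_wants_writenotify(vma, vm_page_prot)) {
 *		vm_flags &= ~VM_SHARED;
 *		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
 *	}
 *	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
 */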
2140 
2141 static DEFINE_MUTEX(mm_all_locks_mutex);
2142 
2143 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2144 {
2145 	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2146 		/*
2147 		 * The LSB of rb_root.rb_node can't change from under us
2148 		 * because we hold the mm_all_locks_mutex.
2149 		 */
2150 		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
2151 		/*
2152 		 * We can safely modify the rb_root.rb_node LSB after taking the
2153 		 * anon_vma->root->rwsem. If some other vma in this mm shares
2154 		 * the same anon_vma we won't take it again.
2155 		 *
2156 		 * No need for atomic instructions here, the rb_root.rb_node LSB
2157 		 * can't change from under us thanks to the
2158 		 * anon_vma->root->rwsem.
2159 		 */
2160 		if (__test_and_set_bit(0, (unsigned long *)
2161 				       &anon_vma->root->rb_root.rb_root.rb_node))
2162 			BUG();
2163 	}
2164 }
2165 
2166 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2167 {
2168 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2169 		/*
2170 		 * AS_MM_ALL_LOCKS can't change from under us because
2171 		 * we hold the mm_all_locks_mutex.
2172 		 *
2173 		 * Operations on ->flags have to be atomic because
2174 		 * even if AS_MM_ALL_LOCKS is stable thanks to the
2175 		 * mm_all_locks_mutex, there may be other cpus
2176 		 * changing other bitflags in parallel to us.
2177 		 */
2178 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2179 			BUG();
2180 		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
2181 	}
2182 }
2183 
2184 /*
2185  * This operation locks against the VM for all pte/vma/mm related
2186  * operations that could ever happen on a certain mm. This includes
2187  * vmtruncate, try_to_unmap, and all page faults.
2188  *
2189  * The caller must take the mmap_lock in write mode before calling
2190  * mm_take_all_locks(). The caller isn't allowed to release the
2191  * mmap_lock until mm_drop_all_locks() returns.
2192  *
2193  * mmap_lock in write mode is required in order to block all operations
2194  * that could modify pagetables and free pages without need of
2195  * altering the vma layout. It's also needed in write mode to avoid new
2196  * anon_vmas to be associated with existing vmas.
2197  *
2198  * A single task can't take more than one mm_take_all_locks() in a row
2199  * or it would deadlock.
2200  *
2201  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
2202  * mapping->flags avoid taking the same lock twice if more than one
2203  * vma in this mm is backed by the same anon_vma or address_space.
2204  *
2205  * We take locks in the following order, according to the comment at the
2206  * beginning of mm/rmap.c:
2207  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
2208  *     hugetlb mappings);
2209  *   - all vmas marked locked;
2210  *   - all i_mmap_rwsem locks;
2211  *   - all anon_vma->rwsem locks.
2212  *
2213  * We can take all locks within these types randomly because the VM code
2214  * doesn't nest them and we are protected from parallel mm_take_all_locks() by
2215  * mm_all_locks_mutex.
2216  *
2217  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2218  * that may have to take thousands of locks.
2219  *
2220  * mm_take_all_locks() can fail if it's interrupted by signals.
2221  */
2222 int mm_take_all_locks(struct mm_struct *mm)
2223 {
2224 	struct vm_area_struct *vma;
2225 	struct anon_vma_chain *avc;
2226 	VMA_ITERATOR(vmi, mm, 0);
2227 
2228 	mmap_assert_write_locked(mm);
2229 
2230 	mutex_lock(&mm_all_locks_mutex);
2231 
2232 	/*
2233 	 * vma_start_write() does not have a complement in mm_drop_all_locks()
2234 	 * because vma_start_write() is always asymmetrical; it marks a VMA as
2235 	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
2236 	 * is reached.
2237 	 */
2238 	for_each_vma(vmi, vma) {
2239 		if (signal_pending(current))
2240 			goto out_unlock;
2241 		vma_start_write(vma);
2242 	}
2243 
2244 	vma_iter_init(&vmi, mm, 0);
2245 	for_each_vma(vmi, vma) {
2246 		if (signal_pending(current))
2247 			goto out_unlock;
2248 		if (vma->vm_file && vma->vm_file->f_mapping &&
2249 				is_vm_hugetlb_page(vma))
2250 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
2251 	}
2252 
2253 	vma_iter_init(&vmi, mm, 0);
2254 	for_each_vma(vmi, vma) {
2255 		if (signal_pending(current))
2256 			goto out_unlock;
2257 		if (vma->vm_file && vma->vm_file->f_mapping &&
2258 				!is_vm_hugetlb_page(vma))
2259 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
2260 	}
2261 
2262 	vma_iter_init(&vmi, mm, 0);
2263 	for_each_vma(vmi, vma) {
2264 		if (signal_pending(current))
2265 			goto out_unlock;
2266 		if (vma->anon_vma)
2267 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2268 				vm_lock_anon_vma(mm, avc->anon_vma);
2269 	}
2270 
2271 	return 0;
2272 
2273 out_unlock:
2274 	mm_drop_all_locks(mm);
2275 	return -EINTR;
2276 }
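
/*
 * Illustrative sketch (assumption): a caller such as mmu_notifier
 * registration pairs the two helpers roughly like this, holding the mmap
 * write lock across the whole critical section:
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);
 *	if (ret) {
 *		mmap_write_unlock(mm);
 *		return ret;	(-EINTR if interrupted by a signal)
 *	}
 *	... operate with every i_mmap_rwsem and anon_vma->rwsem held ...
 *	mm_drop_all_locks(mm);
 *	mmap_write_unlock(mm);
 */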
2277 
2278 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2279 {
2280 	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2281 		/*
2282 		 * The LSB of rb_root.rb_node can't change to 0 from under
2283 		 * us because we hold the mm_all_locks_mutex.
2284 		 *
2285 		 * We must however clear the bitflag before unlocking
2286 		 * the vma so the users using the anon_vma->rb_root will
2287 		 * never see our bitflag.
2288 		 *
2289 		 * No need for atomic instructions here, the rb_root.rb_node LSB
2290 		 * can't change from under us until we release the
2291 		 * anon_vma->root->rwsem.
2292 		 */
2293 		if (!__test_and_clear_bit(0, (unsigned long *)
2294 					  &anon_vma->root->rb_root.rb_root.rb_node))
2295 			BUG();
2296 		anon_vma_unlock_write(anon_vma);
2297 	}
2298 }
2299 
2300 static void vm_unlock_mapping(struct address_space *mapping)
2301 {
2302 	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2303 		/*
2304 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
2305 		 * because we hold the mm_all_locks_mutex.
2306 		 */
2307 		i_mmap_unlock_write(mapping);
2308 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2309 					&mapping->flags))
2310 			BUG();
2311 	}
2312 }
2313 
2314 /*
2315  * The mmap_lock cannot be released by the caller until
2316  * mm_drop_all_locks() returns.
2317  */
2318 void mm_drop_all_locks(struct mm_struct *mm)
2319 {
2320 	struct vm_area_struct *vma;
2321 	struct anon_vma_chain *avc;
2322 	VMA_ITERATOR(vmi, mm, 0);
2323 
2324 	mmap_assert_write_locked(mm);
2325 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2326 
2327 	for_each_vma(vmi, vma) {
2328 		if (vma->anon_vma)
2329 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2330 				vm_unlock_anon_vma(avc->anon_vma);
2331 		if (vma->vm_file && vma->vm_file->f_mapping)
2332 			vm_unlock_mapping(vma->vm_file->f_mapping);
2333 	}
2334 
2335 	mutex_unlock(&mm_all_locks_mutex);
2336 }
2337 
2338 /*
2339  * We account for memory if it's a private writable mapping that is
2340  * not hugetlb-backed and VM_NORESERVE wasn't set.
2341  */
2342 static bool accountable_mapping(struct mmap_state *map)
2343 {
2344 	const struct file *file = map->file;
2345 	vma_flags_t mask;
2346 
2347 	/*
2348 	 * hugetlb has its own accounting separate from the core VM
2349 	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
2350 	 */
2351 	if (file && is_file_hugepages(file))
2352 		return false;
2353 
2354 	mask = vma_flags_and(&map->vma_flags, VMA_NORESERVE_BIT, VMA_SHARED_BIT,
2355 			     VMA_WRITE_BIT);
2356 	return vma_flags_same(&mask, VMA_WRITE_BIT);
2357 }
2358 
2359 /*
2360  * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
2361  * operation.
2362  * @vms: The vma unmap structure
2363  * @mas_detach: The maple state with the detached maple tree
2364  *
2365  * Reattach any detached vmas, free up the maple tree used to track the vmas.
2366  * If that's not possible because the ptes are cleared (and vm_ops->close() may
2367  * have been called), then a NULL is written over the vmas and the vmas are
2368  * removed (munmap() completed).
2369  */
2370 static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
2371 		struct ma_state *mas_detach)
2372 {
2373 	struct ma_state *mas = &vms->vmi->mas;
2374 
2375 	if (!vms->nr_pages)
2376 		return;
2377 
2378 	if (vms->clear_ptes)
2379 		return reattach_vmas(mas_detach);
2380 
2381 	/*
2382 	 * Aborting cannot just call the vm_ops open() because they are often
2383 	 * not symmetrical and state data has been lost.  Resort to the old
2384 	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
2385 	 */
2386 	mas_set_range(mas, vms->start, vms->end - 1);
2387 	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
2388 	/* Clean up the insertion of the unfortunate gap */
2389 	vms_complete_munmap_vmas(vms, mas_detach);
2390 }
2391 
2392 static void update_ksm_flags(struct mmap_state *map)
2393 {
2394 	map->vma_flags = ksm_vma_flags(map->mm, map->file, map->vma_flags);
2395 }
2396 
2397 static void set_desc_from_map(struct vm_area_desc *desc,
2398 		const struct mmap_state *map)
2399 {
2400 	desc->start = map->addr;
2401 	desc->end = map->end;
2402 
2403 	desc->pgoff = map->pgoff;
2404 	desc->vm_file = map->file;
2405 	desc->vma_flags = map->vma_flags;
2406 	desc->page_prot = map->page_prot;
2407 }
2408 
2409 /*
2410  * __mmap_setup() - Prepare to gather any overlapping VMAs that need to be
2411  * unmapped once the map operation is completed, check limits, account mapping
2412  * and clean up any pre-existing VMAs.
2413  *
2414  * As a result it sets up the @map and @desc objects.
2415  *
2416  * @map: Mapping state.
2417  * @desc: VMA descriptor.
2418  * @uf: Userfaultfd context list.
2419  *
2420  * Returns: 0 on success, error code otherwise.
2421  */
2422 static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc,
2423 			struct list_head *uf)
2424 {
2425 	int error;
2426 	struct vma_iterator *vmi = map->vmi;
2427 	struct vma_munmap_struct *vms = &map->vms;
2428 
2429 	/* Find the first overlapping VMA and initialise unmap state. */
2430 	vms->vma = vma_find(vmi, map->end);
2431 	init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
2432 			/* unlock = */ false);
2433 
2434 	/* OK, we have overlapping VMAs - prepare to unmap them. */
2435 	if (vms->vma) {
2436 		mt_init_flags(&map->mt_detach,
2437 			      vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2438 		mt_on_stack(map->mt_detach);
2439 		mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
2440 		/* Prepare to unmap any existing mapping in the area */
2441 		error = vms_gather_munmap_vmas(vms, &map->mas_detach);
2442 		if (error) {
2443 			/* On error VMAs will already have been reattached. */
2444 			vms->nr_pages = 0;
2445 			return error;
2446 		}
2447 
2448 		map->next = vms->next;
2449 		map->prev = vms->prev;
2450 	} else {
2451 		map->next = vma_iter_next_rewind(vmi, &map->prev);
2452 	}
2453 
2454 	/* Check against address space limit. */
2455 	if (!may_expand_vm(map->mm, &map->vma_flags, map->pglen - vms->nr_pages))
2456 		return -ENOMEM;
2457 
2458 	/* Private writable mapping: check memory availability. */
2459 	if (accountable_mapping(map)) {
2460 		map->charged = map->pglen;
2461 		map->charged -= vms->nr_accounted;
2462 		if (map->charged) {
2463 			error = security_vm_enough_memory_mm(map->mm, map->charged);
2464 			if (error)
2465 				return error;
2466 		}
2467 
2468 		vms->nr_accounted = 0;
2469 		vma_flags_set(&map->vma_flags, VMA_ACCOUNT_BIT);
2470 	}
2471 
2472 	/*
2473 	 * Clear PTEs while the vma is still in the tree so that rmap
2474 	 * cannot race with the freeing later in the truncate scenario.
2475 	 * This is also needed for mmap_file(), which is why vm_ops
2476 	 * close function is called.
2477 	 */
2478 	vms_clean_up_area(vms, &map->mas_detach);
2479 
2480 	set_desc_from_map(desc, map);
2481 	return 0;
2482 }
2483 
2484 
2485 static int __mmap_new_file_vma(struct mmap_state *map,
2486 			       struct vm_area_struct *vma)
2487 {
2488 	struct vma_iterator *vmi = map->vmi;
2489 	int error;
2490 
2491 	vma->vm_file = map->file;
2492 	if (!map->file_doesnt_need_get)
2493 		get_file(map->file);
2494 
2495 	if (!map->file->f_op->mmap)
2496 		return 0;
2497 
2498 	error = mmap_file(vma->vm_file, vma);
2499 	if (error) {
2500 		UNMAP_STATE(unmap, vmi, vma, vma->vm_start, vma->vm_end,
2501 			    map->prev, map->next);
2502 		fput(vma->vm_file);
2503 		vma->vm_file = NULL;
2504 
2505 		vma_iter_set(vmi, vma->vm_end);
2506 		/* Undo any partial mapping done by a device driver. */
2507 		unmap_region(&unmap);
2508 		return error;
2509 	}
2510 
2511 	/* Drivers cannot alter the address of the VMA. */
2512 	WARN_ON_ONCE(map->addr != vma->vm_start);
2513 	/*
2514 	 * Drivers should not permit writability when previously it was
2515 	 * disallowed.
2516 	 */
2517 	VM_WARN_ON_ONCE(!vma_flags_same_pair(&map->vma_flags, &vma->flags) &&
2518 			!vma_flags_test(&map->vma_flags, VMA_MAYWRITE_BIT) &&
2519 			vma_test(vma, VMA_MAYWRITE_BIT));
2520 
2521 	map->file = vma->vm_file;
2522 	map->vma_flags = vma->flags;
2523 
2524 	return 0;
2525 }
2526 
2527 /*
2528  * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
2529  * possible.
2530  *
2531  * @map:  Mapping state.
2532  * @vmap: Output pointer for the new VMA.
2533  *
2534  * Returns: Zero on success, or an error.
2535  */
2536 static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
2537 {
2538 	struct vma_iterator *vmi = map->vmi;
2539 	int error = 0;
2540 	struct vm_area_struct *vma;
2541 
2542 	/*
2543 	 * Determine the object being mapped and call the appropriate
2544 	 * specific mapper. The address has already been validated but not
2545 	 * unmapped, though the maps have been removed from the list.
2546 	 */
2547 	vma = vm_area_alloc(map->mm);
2548 	if (!vma)
2549 		return -ENOMEM;
2550 
2551 	vma_iter_config(vmi, map->addr, map->end);
2552 	vma_set_range(vma, map->addr, map->end, map->pgoff);
2553 	vma->flags = map->vma_flags;
2554 	vma->vm_page_prot = map->page_prot;
2555 
2556 	if (vma_iter_prealloc(vmi, vma)) {
2557 		error = -ENOMEM;
2558 		goto free_vma;
2559 	}
2560 
2561 	if (map->file)
2562 		error = __mmap_new_file_vma(map, vma);
2563 	else if (vma_flags_test(&map->vma_flags, VMA_SHARED_BIT))
2564 		error = shmem_zero_setup(vma);
2565 	else
2566 		vma_set_anonymous(vma);
2567 
2568 	if (error)
2569 		goto free_iter_vma;
2570 
2571 	if (!map->check_ksm_early) {
2572 		update_ksm_flags(map);
2573 		vma->flags = map->vma_flags;
2574 	}
2575 
2576 #ifdef CONFIG_SPARC64
2577 	/* TODO: Fix SPARC ADI! */
2578 	WARN_ON_ONCE(!arch_validate_flags(map->vm_flags));
2579 #endif
2580 
2581 	/* Lock the VMA since it is modified after insertion into VMA tree */
2582 	vma_start_write(vma);
2583 	vma_iter_store_new(vmi, vma);
2584 	map->mm->map_count++;
2585 	vma_link_file(vma, map->hold_file_rmap_lock);
2586 
2587 	/*
2588 	 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
2589 	 * call covers the non-merge case.
2590 	 */
2591 	if (!vma_is_anonymous(vma))
2592 		khugepaged_enter_vma(vma, map->vm_flags);
2593 	*vmap = vma;
2594 	return 0;
2595 
2596 free_iter_vma:
2597 	vma_iter_free(vmi);
2598 free_vma:
2599 	vm_area_free(vma);
2600 	return error;
2601 }
2602 
2603 /*
2604  * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
2605  *                     statistics, handle locking and finalise the VMA.
2606  *
2607  * @map: Mapping state.
2608  * @vma: Merged or newly allocated VMA for the mmap()'d region.
2609  */
2610 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
2611 {
2612 	struct mm_struct *mm = map->mm;
2613 
2614 	perf_event_mmap(vma);
2615 
2616 	/* Unmap any existing mapping in the area. */
2617 	vms_complete_munmap_vmas(&map->vms, &map->mas_detach);
2618 
2619 	vm_stat_account(mm, vma->vm_flags, map->pglen);
2620 	if (vma_test(vma, VMA_LOCKED_BIT)) {
2621 		if (!vma_supports_mlock(vma))
2622 			vma_clear_flags_mask(vma, VMA_LOCKED_MASK);
2623 		else
2624 			mm->locked_vm += map->pglen;
2625 	}
2626 
2627 	if (vma->vm_file)
2628 		uprobe_mmap(vma);
2629 
2630 	/*
2631 	 * A new (or expanded) vma always gets soft-dirty status.
2632 	 * Otherwise the user-space soft-dirty page tracker won't
2633 	 * be able to distinguish the case where a vma area is unmapped
2634 	 * and then a new one is mapped in place (which must be treated
2635 	 * as a completely new data area).
2636 	 */
2637 	if (pgtable_supports_soft_dirty())
2638 		vma_set_flags(vma, VMA_SOFTDIRTY_BIT);
2639 
2640 	vma_set_page_prot(vma);
2641 }
2642 
2643 static void call_action_prepare(struct mmap_state *map,
2644 				struct vm_area_desc *desc)
2645 {
2646 	struct mmap_action *action = &desc->action;
2647 
2648 	mmap_action_prepare(action, desc);
2649 
2650 	if (action->hide_from_rmap_until_complete)
2651 		map->hold_file_rmap_lock = true;
2652 }
2653 
2654 /*
2655  * Invoke the f_op->mmap_prepare() callback for a file-backed mapping that
2656  * specifies it.
2657  *
2658  * This is called prior to any merge attempt, and updates whitelisted fields
2659  * that are permitted to be updated by the caller.
2660  *
2661  * All but user-defined fields will be pre-populated with original values.
2662  *
2663  * Returns 0 on success, or an error code otherwise.
2664  */
2665 static int call_mmap_prepare(struct mmap_state *map,
2666 		struct vm_area_desc *desc)
2667 {
2668 	int err;
2669 
2670 	/* Invoke the hook. */
2671 	err = vfs_mmap_prepare(map->file, desc);
2672 	if (err)
2673 		return err;
2674 
2675 	call_action_prepare(map, desc);
2676 
2677 	/* Update fields permitted to be changed. */
2678 	map->pgoff = desc->pgoff;
2679 	if (desc->vm_file != map->file) {
2680 		map->file_doesnt_need_get = true;
2681 		map->file = desc->vm_file;
2682 	}
2683 	map->vma_flags = desc->vma_flags;
2684 	map->page_prot = desc->page_prot;
2685 	/* User-defined fields. */
2686 	map->vm_ops = desc->vm_ops;
2687 	map->vm_private_data = desc->private_data;
2688 
2689 	return 0;
2690 }
2691 
2692 static void set_vma_user_defined_fields(struct vm_area_struct *vma,
2693 		struct mmap_state *map)
2694 {
2695 	if (map->vm_ops)
2696 		vma->vm_ops = map->vm_ops;
2697 	vma->vm_private_data = map->vm_private_data;
2698 }
2699 
2700 /*
2701  * Are we guaranteed no driver can change state such as to preclude KSM merging?
2702  * If so, let's set the KSM mergeable flag early so we don't break VMA merging.
2703  */
2704 static bool can_set_ksm_flags_early(struct mmap_state *map)
2705 {
2706 	struct file *file = map->file;
2707 
2708 	/* Anonymous mappings have no driver which can change them. */
2709 	if (!file)
2710 		return true;
2711 
2712 	/*
2713 	 * If .mmap_prepare() is specified, then the driver will have already
2714 	 * manipulated state prior to updating KSM flags. So no need to worry
2715 	 * about mmap callbacks modifying VMA flags after the KSM flag has been
2716 	 * updated here, which could otherwise affect KSM eligibility.
2717 	 */
2718 	if (file->f_op->mmap_prepare)
2719 		return true;
2720 
2721 	/* shmem is safe. */
2722 	if (shmem_file(file))
2723 		return true;
2724 
2725 	/* Any other .mmap callback is not safe. */
2726 	return false;
2727 }
2728 
2729 static int call_action_complete(struct mmap_state *map,
2730 				struct vm_area_desc *desc,
2731 				struct vm_area_struct *vma)
2732 {
2733 	struct mmap_action *action = &desc->action;
2734 	int ret;
2735 
2736 	ret = mmap_action_complete(action, vma);
2737 
2738 	/* If we held the file rmap lock, we need to release it. */
2739 	if (map->hold_file_rmap_lock) {
2740 		struct file *file = vma->vm_file;
2741 
2742 		i_mmap_unlock_write(file->f_mapping);
2743 	}
2744 	return ret;
2745 }
2746 
2747 static unsigned long __mmap_region(struct file *file, unsigned long addr,
2748 		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2749 		struct list_head *uf)
2750 {
2751 	struct mm_struct *mm = current->mm;
2752 	struct vm_area_struct *vma = NULL;
2753 	bool have_mmap_prepare = file && file->f_op->mmap_prepare;
2754 	VMA_ITERATOR(vmi, mm, addr);
2755 	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
2756 	struct vm_area_desc desc = {
2757 		.mm = mm,
2758 		.file = file,
2759 		.action = {
2760 			.type = MMAP_NOTHING, /* Default to no further action. */
2761 		},
2762 	};
2763 	bool allocated_new = false;
2764 	int error;
2765 
2766 	map.check_ksm_early = can_set_ksm_flags_early(&map);
2767 
2768 	error = __mmap_setup(&map, &desc, uf);
2769 	if (!error && have_mmap_prepare)
2770 		error = call_mmap_prepare(&map, &desc);
2771 	if (error)
2772 		goto abort_munmap;
2773 
2774 	if (map.check_ksm_early)
2775 		update_ksm_flags(&map);
2776 
2777 	/* Attempt to merge with adjacent VMAs... */
2778 	if (map.prev || map.next) {
2779 		VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);
2780 
2781 		vma = vma_merge_new_range(&vmg);
2782 	}
2783 
2784 	/* ...but if we can't, allocate a new VMA. */
2785 	if (!vma) {
2786 		error = __mmap_new_vma(&map, &vma);
2787 		if (error)
2788 			goto unacct_error;
2789 		allocated_new = true;
2790 	}
2791 
2792 	if (have_mmap_prepare)
2793 		set_vma_user_defined_fields(vma, &map);
2794 
2795 	__mmap_complete(&map, vma);
2796 
2797 	if (have_mmap_prepare && allocated_new) {
2798 		error = call_action_complete(&map, &desc, vma);
2799 
2800 		if (error)
2801 			return error;
2802 	}
2803 
2804 	return addr;
2805 
2806 	/* Accounting was done by __mmap_setup(). */
2807 unacct_error:
2808 	if (map.charged)
2809 		vm_unacct_memory(map.charged);
2810 abort_munmap:
2811 	vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
2812 	return error;
2813 }
2814 
2815 /**
2816  * mmap_region() - Actually perform the userland mapping of a VMA into
2817  * current->mm with known, aligned and overflow-checked @addr and @len, and
2818  * correctly determined VMA flags @vm_flags and page offset @pgoff.
2819  *
2820  * This is an internal memory management function, and should not be used
2821  * directly.
2822  *
2823  * The caller must write-lock current->mm->mmap_lock.
2824  *
2825  * @file: If a file-backed mapping, a pointer to the struct file describing the
2826  * file to be mapped, otherwise NULL.
2827  * @addr: The page-aligned address at which to perform the mapping.
2828  * @len: The page-aligned, non-zero, length of the mapping.
2829  * @vm_flags: The VMA flags which should be applied to the mapping.
2830  * @pgoff: If @file is specified, the page offset into the file, if not then
2831  * the virtual page offset in memory of the anonymous mapping.
2832  * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap
2833  * events.
2834  *
2835  * Returns: Either an error, or the address at which the requested mapping has
2836  * been performed.
2837  */
2838 unsigned long mmap_region(struct file *file, unsigned long addr,
2839 			  unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2840 			  struct list_head *uf)
2841 {
2842 	unsigned long ret;
2843 	bool writable_file_mapping = false;
2844 
2845 	mmap_assert_write_locked(current->mm);
2846 
2847 	/* Check to see if MDWE is applicable. */
2848 	if (map_deny_write_exec(vm_flags, vm_flags))
2849 		return -EACCES;
2850 
2851 	/* Allow architectures to sanity-check the vm_flags. */
2852 	if (!arch_validate_flags(vm_flags))
2853 		return -EINVAL;
2854 
2855 	/* Map writable and ensure this isn't a sealed memfd. */
2856 	if (file && is_shared_maywrite_vm_flags(vm_flags)) {
2857 		int error = mapping_map_writable(file->f_mapping);
2858 
2859 		if (error)
2860 			return error;
2861 		writable_file_mapping = true;
2862 	}
2863 
2864 	ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
2865 
2866 	/* Clear our write mapping regardless of error. */
2867 	if (writable_file_mapping)
2868 		mapping_unmap_writable(file->f_mapping);
2869 
2870 	validate_mm(current->mm);
2871 	return ret;
2872 }
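
/*
 * Illustrative sketch (assumption): do_mmap() is the expected caller and
 * invokes this roughly as:
 *
 *	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
 *	if (!IS_ERR_VALUE(addr) && (vm_flags & VM_LOCKED))
 *		*populate = len;
 *	return addr;
 */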
2873 
2874 /**
2875  * do_brk_flags() - Increase the brk vma if the flags match.
2876  * @vmi: The vma iterator
2877  * @addr: The start address
2878  * @len: The length of the increase
2879  * @vma: The vma
2880  * @vma_flags: The VMA flags
2881  *
2882  * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
2883  * do not match then create a new anonymous VMA.  Eventually we may be able to
2884  * do some brk-specific accounting here.
2885  *
2886  * Returns: %0 on success, or otherwise an error.
2887  */
2888 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
2889 		 unsigned long addr, unsigned long len, vma_flags_t vma_flags)
2890 {
2891 	struct mm_struct *mm = current->mm;
2892 
2893 	/*
2894 	 * Check the changed size against address space limits.
2895 	 * Note: This happens *after* clearing old mappings in some code paths.
2896 	 */
2897 	vma_flags_set_mask(&vma_flags, VMA_DATA_DEFAULT_FLAGS);
2898 	vma_flags_set(&vma_flags, VMA_ACCOUNT_BIT);
2899 	vma_flags_set_mask(&vma_flags, mm->def_vma_flags);
2900 
2901 	vma_flags = ksm_vma_flags(mm, NULL, vma_flags);
2902 	if (!may_expand_vm(mm, &vma_flags, len >> PAGE_SHIFT))
2903 		return -ENOMEM;
2904 
2905 	if (mm->map_count > get_sysctl_max_map_count())
2906 		return -ENOMEM;
2907 
2908 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2909 		return -ENOMEM;
2910 
2911 	/*
2912 	 * Expand the existing vma if possible; note that singular lists do not
2913 	 * occur after forking, so the expand will only happen on new VMAs.
2914 	 */
2915 	if (vma && vma->vm_end == addr) {
2916 		VMG_STATE(vmg, mm, vmi, addr, addr + len, vma_flags, PHYS_PFN(addr));
2917 
2918 		vmg.prev = vma;
2919 		/* vmi is positioned at prev, which this mode expects. */
2920 		vmg.just_expand = true;
2921 
2922 		if (vma_merge_new_range(&vmg))
2923 			goto out;
2924 		else if (vmg_nomem(&vmg))
2925 			goto unacct_fail;
2926 	}
2927 
2928 	if (vma)
2929 		vma_iter_next_range(vmi);
2930 	/* create a vma struct for an anonymous mapping */
2931 	vma = vm_area_alloc(mm);
2932 	if (!vma)
2933 		goto unacct_fail;
2934 
2935 	vma_set_anonymous(vma);
2936 	vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
2937 	vma->flags = vma_flags;
2938 	vma->vm_page_prot = vm_get_page_prot(vma_flags_to_legacy(vma_flags));
2939 	vma_start_write(vma);
2940 	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
2941 		goto mas_store_fail;
2942 
2943 	mm->map_count++;
2944 	validate_mm(mm);
2945 out:
2946 	perf_event_mmap(vma);
2947 	mm->total_vm += len >> PAGE_SHIFT;
2948 	mm->data_vm += len >> PAGE_SHIFT;
2949 	if (vma_flags_test(&vma_flags, VMA_LOCKED_BIT))
2950 		mm->locked_vm += (len >> PAGE_SHIFT);
2951 	if (pgtable_supports_soft_dirty())
2952 		vma_set_flags(vma, VMA_SOFTDIRTY_BIT);
2953 	return 0;
2954 
2955 mas_store_fail:
2956 	vm_area_free(vma);
2957 unacct_fail:
2958 	vm_unacct_memory(len >> PAGE_SHIFT);
2959 	return -ENOMEM;
2960 }
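
/*
 * Illustrative sketch (assumption): the brk() syscall path, with the
 * iterator positioned at the VMA preceding the new range, calls roughly:
 *
 *	error = do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, vma_flags);
 *	if (error < 0)
 *		goto out;
 *
 * where brkvma and vma_flags are stand-ins for the caller's own state.
 */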
2961 
2962 /**
2963  * unmapped_area() - Find an area between the low_limit and the high_limit with
2964  * the correct alignment and offset, all from @info. Note: current->mm is used
2965  * for the search.
2966  *
2967  * @info: The unmapped area information including the range [low_limit -
2968  * high_limit), the alignment offset and mask.
2969  *
2970  * Return: A memory address or -ENOMEM.
2971  */
2972 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
2973 {
2974 	unsigned long length, gap;
2975 	unsigned long low_limit, high_limit;
2976 	struct vm_area_struct *tmp;
2977 	VMA_ITERATOR(vmi, current->mm, 0);
2978 
2979 	/* Adjust search length to account for worst case alignment overhead */
2980 	length = info->length + info->align_mask + info->start_gap;
2981 	if (length < info->length)
2982 		return -ENOMEM;
2983 
2984 	low_limit = info->low_limit;
2985 	if (low_limit < mmap_min_addr)
2986 		low_limit = mmap_min_addr;
2987 	high_limit = info->high_limit;
2988 retry:
2989 	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
2990 		return -ENOMEM;
2991 
2992 	/*
2993 	 * Adjust for the gap first so it doesn't interfere with the later
2994 	 * alignment. The first step is the minimum needed to fulfill the start
2995 	 * gap, the next step is the minimum to align that. It is the minimum
2996 	 * needed to fulfill both.
2997 	 */
2998 	gap = vma_iter_addr(&vmi) + info->start_gap;
2999 	gap += (info->align_offset - gap) & info->align_mask;
3000 	tmp = vma_next(&vmi);
3001 	/* Avoid prev check if possible */
3002 	if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) {
3003 		if (vm_start_gap(tmp) < gap + length - 1) {
3004 			low_limit = tmp->vm_end;
3005 			vma_iter_reset(&vmi);
3006 			goto retry;
3007 		}
3008 	} else {
3009 		tmp = vma_prev(&vmi);
3010 		if (tmp && vm_end_gap(tmp) > gap) {
3011 			low_limit = vm_end_gap(tmp);
3012 			vma_iter_reset(&vmi);
3013 			goto retry;
3014 		}
3015 	}
3016 
3017 	return gap;
3018 }
3019 
3020 /**
3021  * unmapped_area_topdown() - Find an area between the low_limit and the
3022  * high_limit with the correct alignment and offset at the highest available
3023  * address, all from @info. Note: current->mm is used for the search.
3024  *
3025  * @info: The unmapped area information including the range [low_limit -
3026  * high_limit), the alignment offset and mask.
3027  *
3028  * Return: A memory address or -ENOMEM.
3029  */
3030 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
3031 {
3032 	unsigned long length, gap, gap_end;
3033 	unsigned long low_limit, high_limit;
3034 	struct vm_area_struct *tmp;
3035 	VMA_ITERATOR(vmi, current->mm, 0);
3036 
3037 	/* Adjust search length to account for worst case alignment overhead */
3038 	length = info->length + info->align_mask + info->start_gap;
3039 	if (length < info->length)
3040 		return -ENOMEM;
3041 
3042 	low_limit = info->low_limit;
3043 	if (low_limit < mmap_min_addr)
3044 		low_limit = mmap_min_addr;
3045 	high_limit = info->high_limit;
3046 retry:
3047 	if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
3048 		return -ENOMEM;
3049 
3050 	gap = vma_iter_end(&vmi) - info->length;
3051 	gap -= (gap - info->align_offset) & info->align_mask;
3052 	gap_end = vma_iter_end(&vmi);
3053 	tmp = vma_next(&vmi);
3054 	/* Avoid prev check if possible */
3055 	if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) {
3056 		if (vm_start_gap(tmp) < gap_end) {
3057 			high_limit = vm_start_gap(tmp);
3058 			vma_iter_reset(&vmi);
3059 			goto retry;
3060 		}
3061 	} else {
3062 		tmp = vma_prev(&vmi);
3063 		if (tmp && vm_end_gap(tmp) > gap) {
3064 			high_limit = tmp->vm_start;
3065 			vma_iter_reset(&vmi);
3066 			goto retry;
3067 		}
3068 	}
3069 
3070 	return gap;
3071 }
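
/*
 * Illustrative sketch (assumption): vm_unmapped_area() dispatches to one of
 * the two searches above, and a generic arch_get_unmapped_area() fills the
 * request roughly like:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;	(or the arch's mmap end)
 *	return vm_unmapped_area(&info);
 */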
3072 
3073 /*
3074  * Verify that the stack growth is acceptable and
3075  * update accounting. This is shared with both the
3076  * grow-up and grow-down cases.
3077  */
3078 static int acct_stack_growth(struct vm_area_struct *vma,
3079 			     unsigned long size, unsigned long grow)
3080 {
3081 	struct mm_struct *mm = vma->vm_mm;
3082 	unsigned long new_start;
3083 
3084 	/* address space limit tests */
3085 	if (!may_expand_vm(mm, &vma->flags, grow))
3086 		return -ENOMEM;
3087 
3088 	/* Stack limit test */
3089 	if (size > rlimit(RLIMIT_STACK))
3090 		return -ENOMEM;
3091 
3092 	/* mlock limit tests */
3093 	if (!mlock_future_ok(mm, vma_test(vma, VMA_LOCKED_BIT),
3094 			     grow << PAGE_SHIFT))
3095 		return -ENOMEM;
3096 
3097 	/* Check to ensure the stack will not grow into a hugetlb-only region */
3098 	new_start = vma->vm_end - size;
3099 #ifdef CONFIG_STACK_GROWSUP
3100 	if (vma_test(vma, VMA_GROWSUP_BIT))
3101 		new_start = vma->vm_start;
3102 #endif
3103 	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
3104 		return -EFAULT;
3105 
3106 	/*
3107 	 * Overcommit..  This must be the final test, as it will
3108 	 * update security statistics.
3109 	 */
3110 	if (security_vm_enough_memory_mm(mm, grow))
3111 		return -ENOMEM;
3112 
3113 	return 0;
3114 }
3115 
3116 #ifdef CONFIG_STACK_GROWSUP
3117 /*
3118  * PA-RISC uses this for its stack.
3119  * vma is the last one with address > vma->vm_end.  Have to extend vma.
3120  */
3121 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
3122 {
3123 	struct mm_struct *mm = vma->vm_mm;
3124 	struct vm_area_struct *next;
3125 	unsigned long gap_addr;
3126 	int error = 0;
3127 	VMA_ITERATOR(vmi, mm, vma->vm_start);
3128 
3129 	if (!vma_test(vma, VMA_GROWSUP_BIT))
3130 		return -EFAULT;
3131 
3132 	mmap_assert_write_locked(mm);
3133 
3134 	/* Guard against exceeding limits of the address space. */
3135 	address &= PAGE_MASK;
3136 	if (address >= (TASK_SIZE & PAGE_MASK))
3137 		return -ENOMEM;
3138 	address += PAGE_SIZE;
3139 
3140 	/* Enforce stack_guard_gap */
3141 	gap_addr = address + stack_guard_gap;
3142 
3143 	/* Guard against overflow */
3144 	if (gap_addr < address || gap_addr > TASK_SIZE)
3145 		gap_addr = TASK_SIZE;
3146 
3147 	next = find_vma_intersection(mm, vma->vm_end, gap_addr);
3148 	if (next && vma_is_accessible(next)) {
3149 		if (!vma_test(next, VMA_GROWSUP_BIT))
3150 			return -ENOMEM;
3151 		/* Check that both stack segments have the same anon_vma? */
3152 	}
3153 
3154 	if (next)
3155 		vma_iter_prev_range_limit(&vmi, address);
3156 
3157 	vma_iter_config(&vmi, vma->vm_start, address);
3158 	if (vma_iter_prealloc(&vmi, vma))
3159 		return -ENOMEM;
3160 
3161 	/* We must make sure the anon_vma is allocated. */
3162 	if (unlikely(anon_vma_prepare(vma))) {
3163 		vma_iter_free(&vmi);
3164 		return -ENOMEM;
3165 	}
3166 
3167 	/* Lock the VMA before expanding to prevent concurrent page faults */
3168 	vma_start_write(vma);
3169 	/* We update the anon VMA tree. */
3170 	anon_vma_lock_write(vma->anon_vma);
3171 
3172 	/* Somebody else might have raced and expanded it already */
3173 	if (address > vma->vm_end) {
3174 		unsigned long size, grow;
3175 
3176 		size = address - vma->vm_start;
3177 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
3178 
3179 		error = -ENOMEM;
3180 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
3181 			error = acct_stack_growth(vma, size, grow);
3182 			if (!error) {
3183 				if (vma_test(vma, VMA_LOCKED_BIT))
3184 					mm->locked_vm += grow;
3185 				vm_stat_account(mm, vma->vm_flags, grow);
3186 				anon_vma_interval_tree_pre_update_vma(vma);
3187 				vma->vm_end = address;
3188 				/* Overwrite old entry in mtree. */
3189 				vma_iter_store_overwrite(&vmi, vma);
3190 				anon_vma_interval_tree_post_update_vma(vma);
3191 
3192 				perf_event_mmap(vma);
3193 			}
3194 		}
3195 	}
3196 	anon_vma_unlock_write(vma->anon_vma);
3197 	vma_iter_free(&vmi);
3198 	validate_mm(mm);
3199 	return error;
3200 }
3201 #endif /* CONFIG_STACK_GROWSUP */
3202 
3203 /*
3204  * vma is the first one with address < vma->vm_start.  Have to extend vma.
3205  * mmap_lock held for writing.
3206  */
3207 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
3208 {
3209 	struct mm_struct *mm = vma->vm_mm;
3210 	struct vm_area_struct *prev;
3211 	int error = 0;
3212 	VMA_ITERATOR(vmi, mm, vma->vm_start);
3213 
3214 	if (!vma_test(vma, VMA_GROWSDOWN_BIT))
3215 		return -EFAULT;
3216 
3217 	mmap_assert_write_locked(mm);
3218 
3219 	address &= PAGE_MASK;
3220 	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
3221 		return -EPERM;
3222 
3223 	/* Enforce stack_guard_gap */
3224 	prev = vma_prev(&vmi);
3225 	/* Check that both stack segments have the same anon_vma? */
3226 	if (prev) {
3227 		if (!vma_test(prev, VMA_GROWSDOWN_BIT) &&
3228 		    vma_is_accessible(prev) &&
3229 		    (address - prev->vm_end < stack_guard_gap))
3230 			return -ENOMEM;
3231 	}
3232 
3233 	if (prev)
3234 		vma_iter_next_range_limit(&vmi, vma->vm_start);
3235 
3236 	vma_iter_config(&vmi, address, vma->vm_end);
3237 	if (vma_iter_prealloc(&vmi, vma))
3238 		return -ENOMEM;
3239 
3240 	/* We must make sure the anon_vma is allocated. */
3241 	if (unlikely(anon_vma_prepare(vma))) {
3242 		vma_iter_free(&vmi);
3243 		return -ENOMEM;
3244 	}
3245 
3246 	/* Lock the VMA before expanding to prevent concurrent page faults */
3247 	vma_start_write(vma);
3248 	/* We update the anon VMA tree. */
3249 	anon_vma_lock_write(vma->anon_vma);
3250 
3251 	/* Somebody else might have raced and expanded it already */
3252 	if (address < vma->vm_start) {
3253 		unsigned long size, grow;
3254 
3255 		size = vma->vm_end - address;
3256 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
3257 
3258 		error = -ENOMEM;
3259 		if (grow <= vma->vm_pgoff) {
3260 			error = acct_stack_growth(vma, size, grow);
3261 			if (!error) {
3262 				if (vma_test(vma, VMA_LOCKED_BIT))
3263 					mm->locked_vm += grow;
3264 				vm_stat_account(mm, vma->vm_flags, grow);
3265 				anon_vma_interval_tree_pre_update_vma(vma);
3266 				vma->vm_start = address;
3267 				vma->vm_pgoff -= grow;
3268 				/* Overwrite old entry in mtree. */
3269 				vma_iter_store_overwrite(&vmi, vma);
3270 				anon_vma_interval_tree_post_update_vma(vma);
3271 
3272 				perf_event_mmap(vma);
3273 			}
3274 		}
3275 	}
3276 	anon_vma_unlock_write(vma->anon_vma);
3277 	vma_iter_free(&vmi);
3278 	validate_mm(mm);
3279 	return error;
3280 }
3281 
3282 int __vm_munmap(unsigned long start, size_t len, bool unlock)
3283 {
3284 	int ret;
3285 	struct mm_struct *mm = current->mm;
3286 	LIST_HEAD(uf);
3287 	VMA_ITERATOR(vmi, mm, start);
3288 
3289 	if (mmap_write_lock_killable(mm))
3290 		return -EINTR;
3291 
3292 	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
3293 	if (ret || !unlock)
3294 		mmap_write_unlock(mm);
3295 
3296 	userfaultfd_unmap_complete(mm, &uf);
3297 	return ret;
3298 }
3299 
3300 /* Insert vm structure into process list sorted by address
3301  * and into the inode's i_mmap tree.  If vm_file is non-NULL
3302  * then i_mmap_rwsem is taken here.
3303  */
3304 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3305 {
3306 	unsigned long charged = vma_pages(vma);
3307 
3308 	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3309 		return -ENOMEM;
3310 
3311 	if (vma_test(vma, VMA_ACCOUNT_BIT) &&
3312 	     security_vm_enough_memory_mm(mm, charged))
3313 		return -ENOMEM;
3314 
3315 	/*
3316 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
3317 	 * until its first write fault, when page's anon_vma and index
3318 	 * are set.  But now set the vm_pgoff it will almost certainly
3319 	 * end up with (unless mremap moves it elsewhere before that
3320 	 * first write fault), so /proc/pid/maps tells a consistent story.
3321 	 *
3322 	 * By setting it to reflect the virtual start address of the
3323 	 * vma, merges and splits can happen in a seamless way, just
3324 	 * using the existing file pgoff checks and manipulations.
3325 	 * Similarly in do_mmap and in do_brk_flags.
3326 	 */
3327 	if (vma_is_anonymous(vma)) {
3328 		BUG_ON(vma->anon_vma);
3329 		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3330 	}
3331 
3332 	if (vma_link(mm, vma)) {
3333 		if (vma_test(vma, VMA_ACCOUNT_BIT))
3334 			vm_unacct_memory(charged);
3335 		return -ENOMEM;
3336 	}
3337 
3338 	return 0;
3339 }
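
/*
 * Illustrative sketch (assumption): a typical user allocates and fills a VMA
 * before handing it to insert_vm_struct(), roughly:
 *
 *	vma = vm_area_alloc(mm);
 *	if (!vma)
 *		return -ENOMEM;
 *	vma_set_range(vma, addr, addr + len, pgoff);
 *	... set flags, vm_ops and vm_private_data as needed ...
 *	if (insert_vm_struct(mm, vma)) {
 *		vm_area_free(vma);
 *		return -ENOMEM;
 *	}
 */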
3340 
3341 /**
3342  * vma_mmu_pagesize - Default MMU page size granularity for this VMA.
3343  * @vma: The user mapping.
3344  *
3345  * In the common case, the default page size used by the MMU matches the
3346  * default page size used by the kernel (see vma_kernel_pagesize()). On
3347  * architectures where it differs, an architecture-specific 'strong' version
3348  * of this symbol is required.
3349  *
3350  * The default MMU page size is not affected by Transparent Huge Pages
3351  * being in effect, or any usage of larger MMU page sizes (either through
3352  * architectural huge-page mappings or other explicit/implicit coalescing of
3353  * virtual ranges performed by the MMU).
3354  *
3355  * Return: The default MMU page size granularity for this VMA.
3356  */
3357 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
3358 {
3359 	return vma_kernel_pagesize(vma);
3360 }
3361