xref: /linux/mm/vma.c (revision 01c373e9a5ce2273812eaf83036c5357829fb3f7)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /*
4  * VMA-specific functions.
5  */
6 
7 #include "vma_internal.h"
8 #include "vma.h"
9 
10 static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
11 {
12 	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
13 
14 	if (!mpol_equal(vmg->policy, vma_policy(vma)))
15 		return false;
16 	/*
17 	 * VM_SOFTDIRTY should not prevent VMA merging if the flags match
18 	 * in everything but the dirty bit -- the caller should mark the
19 	 * merged VMA as dirty. If the dirty bit were not excluded from the
20 	 * comparison, we would increase pressure on the memory system by
21 	 * forcing the kernel to generate new VMAs where old ones could
22 	 * have been extended instead.
23 	 */
24 	if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
25 		return false;
26 	if (vma->vm_file != vmg->file)
27 		return false;
28 	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
29 		return false;
30 	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
31 		return false;
32 	return true;
33 }
34 
35 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
36 		 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
37 {
38 	/*
39 	 * The list_is_singular() test is to avoid merging VMAs cloned from
40 	 * parents. This improves scalability by reducing anon_vma lock contention.
41 	 */
42 	if ((!anon_vma1 || !anon_vma2) && (!vma ||
43 		list_is_singular(&vma->anon_vma_chain)))
44 		return true;
45 	return anon_vma1 == anon_vma2;
46 }
47 
48 /* Are the anon_vma's belonging to each VMA compatible with one another? */
49 static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
50 					    struct vm_area_struct *vma2)
51 {
52 	return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
53 }
54 
55 /*
56  * init_multi_vma_prep() - Initializer for struct vma_prepare
57  * @vp: The vma_prepare struct
58  * @vma: The vma that will be altered once locked
59  * @next: The next vma if it is to be adjusted
60  * @remove: The first vma to be removed
61  * @remove2: The second vma to be removed
62  */
63 static void init_multi_vma_prep(struct vma_prepare *vp,
64 				struct vm_area_struct *vma,
65 				struct vm_area_struct *next,
66 				struct vm_area_struct *remove,
67 				struct vm_area_struct *remove2)
68 {
69 	memset(vp, 0, sizeof(struct vma_prepare));
70 	vp->vma = vma;
71 	vp->anon_vma = vma->anon_vma;
72 	vp->remove = remove;
73 	vp->remove2 = remove2;
74 	vp->adj_next = next;
75 	if (!vp->anon_vma && next)
76 		vp->anon_vma = next->anon_vma;
77 
78 	vp->file = vma->vm_file;
79 	if (vp->file)
80 		vp->mapping = vma->vm_file->f_mapping;
81 
82 }
83 
84 /*
85  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
86  * in front of (at a lower virtual address and file offset than) the vma.
87  *
88  * We cannot merge two vmas if they have differently assigned (non-NULL)
89  * anon_vmas, nor if the same anon_vma is assigned but the offsets are incompatible.
90  *
91  * We don't check here for the merged mmap wrapping around the end of pagecache
92  * indices (16TB on ia32) because do_mmap() does not permit mmap's which
93  * indices (16TB on ia32) because do_mmap() does not permit mmaps which
94  *
95  * We assume the vma may be removed as part of the merge.
96  */
97 static bool can_vma_merge_before(struct vma_merge_struct *vmg)
98 {
99 	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
100 
101 	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
102 	    is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
103 		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
104 			return true;
105 	}
106 
107 	return false;
108 }
109 
110 /*
111  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
112  * beyond (at a higher virtual address and file offset than) the vma.
113  *
114  * We cannot merge two vmas if they have differently assigned (non-NULL)
115  * anon_vmas, nor if the same anon_vma is assigned but the offsets are incompatible.
116  *
117  * We assume that vma is not removed as part of the merge.
118  */
119 static bool can_vma_merge_after(struct vma_merge_struct *vmg)
120 {
121 	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
122 	    is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
123 		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
124 			return true;
125 	}
126 	return false;
127 }
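
/*
 * A worked example of the two adjacency checks above, assuming 4KiB pages:
 * an existing VMA mapping file pages [0, 4) has vm_pgoff == 0 and spans
 * 16KiB of virtual memory. A proposed 8KiB range (pglen == 2) placed
 * immediately after it can merge only if vmg->pgoff == 4, i.e.
 * prev->vm_pgoff + vma_pages(prev) == vmg->pgoff. Likewise, a proposed
 * range placed immediately before a VMA whose vm_pgoff is 6 can merge only
 * if vmg->pgoff + pglen == 6, so the file offsets stay contiguous across
 * the merged mapping.
 */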
128 
129 static void __vma_link_file(struct vm_area_struct *vma,
130 			    struct address_space *mapping)
131 {
132 	if (vma_is_shared_maywrite(vma))
133 		mapping_allow_writable(mapping);
134 
135 	flush_dcache_mmap_lock(mapping);
136 	vma_interval_tree_insert(vma, &mapping->i_mmap);
137 	flush_dcache_mmap_unlock(mapping);
138 }
139 
140 /*
141  * Requires inode->i_mapping->i_mmap_rwsem
142  */
143 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
144 				      struct address_space *mapping)
145 {
146 	if (vma_is_shared_maywrite(vma))
147 		mapping_unmap_writable(mapping);
148 
149 	flush_dcache_mmap_lock(mapping);
150 	vma_interval_tree_remove(vma, &mapping->i_mmap);
151 	flush_dcache_mmap_unlock(mapping);
152 }
153 
154 /*
155  * vma_prepare() - Helper function for locking VMAs prior to altering them
156  * @vp: The initialized vma_prepare struct
157  */
158 static void vma_prepare(struct vma_prepare *vp)
159 {
160 	if (vp->file) {
161 		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
162 
163 		if (vp->adj_next)
164 			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
165 				      vp->adj_next->vm_end);
166 
167 		i_mmap_lock_write(vp->mapping);
168 		if (vp->insert && vp->insert->vm_file) {
169 			/*
170 			 * Put into interval tree now, so instantiated pages
171 			 * are visible to arm/parisc __flush_dcache_page
172 			 * throughout; but we cannot insert into address
173 			 * space until vma start or end is updated.
174 			 */
175 			__vma_link_file(vp->insert,
176 					vp->insert->vm_file->f_mapping);
177 		}
178 	}
179 
180 	if (vp->anon_vma) {
181 		anon_vma_lock_write(vp->anon_vma);
182 		anon_vma_interval_tree_pre_update_vma(vp->vma);
183 		if (vp->adj_next)
184 			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
185 	}
186 
187 	if (vp->file) {
188 		flush_dcache_mmap_lock(vp->mapping);
189 		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
190 		if (vp->adj_next)
191 			vma_interval_tree_remove(vp->adj_next,
192 						 &vp->mapping->i_mmap);
193 	}
194 
195 }
196 
197 /*
198  * vma_complete() - Helper function for handling the unlocking after altering VMAs,
199  * or for inserting a VMA.
200  *
201  * @vp: The vma_prepare struct
202  * @vmi: The vma iterator
203  * @mm: The mm_struct
204  */
205 static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
206 			 struct mm_struct *mm)
207 {
208 	if (vp->file) {
209 		if (vp->adj_next)
210 			vma_interval_tree_insert(vp->adj_next,
211 						 &vp->mapping->i_mmap);
212 		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
213 		flush_dcache_mmap_unlock(vp->mapping);
214 	}
215 
216 	if (vp->remove && vp->file) {
217 		__remove_shared_vm_struct(vp->remove, vp->mapping);
218 		if (vp->remove2)
219 			__remove_shared_vm_struct(vp->remove2, vp->mapping);
220 	} else if (vp->insert) {
221 		/*
222 		 * split_vma has split insert from vma, and needs
223 		 * us to insert it before dropping the locks
224 		 * (it may either follow vma or precede it).
225 		 */
226 		vma_iter_store(vmi, vp->insert);
227 		mm->map_count++;
228 	}
229 
230 	if (vp->anon_vma) {
231 		anon_vma_interval_tree_post_update_vma(vp->vma);
232 		if (vp->adj_next)
233 			anon_vma_interval_tree_post_update_vma(vp->adj_next);
234 		anon_vma_unlock_write(vp->anon_vma);
235 	}
236 
237 	if (vp->file) {
238 		i_mmap_unlock_write(vp->mapping);
239 		uprobe_mmap(vp->vma);
240 
241 		if (vp->adj_next)
242 			uprobe_mmap(vp->adj_next);
243 	}
244 
245 	if (vp->remove) {
246 again:
247 		vma_mark_detached(vp->remove, true);
248 		if (vp->file) {
249 			uprobe_munmap(vp->remove, vp->remove->vm_start,
250 				      vp->remove->vm_end);
251 			fput(vp->file);
252 		}
253 		if (vp->remove->anon_vma)
254 			anon_vma_merge(vp->vma, vp->remove);
255 		mm->map_count--;
256 		mpol_put(vma_policy(vp->remove));
257 		if (!vp->remove2)
258 			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
259 		vm_area_free(vp->remove);
260 
261 		/*
262 		 * In mprotect's case 6 (see comments on vma_merge),
263 		 * we are removing both mid and next vmas
264 		 */
265 		if (vp->remove2) {
266 			vp->remove = vp->remove2;
267 			vp->remove2 = NULL;
268 			goto again;
269 		}
270 	}
271 	if (vp->insert && vp->file)
272 		uprobe_mmap(vp->insert);
273 }
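
/*
 * A sketch of the usual calling sequence for the two helpers above, as
 * followed by the split and merge paths later in this file (existing usage,
 * not a separate interface; maple tree preallocation and THP adjustment are
 * omitted for brevity):
 *
 *	vma_start_write(vma);
 *	init_vma_prep(&vp, vma);
 *	vma_prepare(&vp);
 *	<update vm_start / vm_end / vm_pgoff and the maple tree>
 *	vma_complete(&vp, vmi, mm);
 *
 * vma_prepare() takes the i_mmap and anon_vma locks and removes the VMA from
 * the relevant interval trees; vma_complete() reinserts it, drops the locks
 * and frees any VMAs scheduled for removal.
 */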
274 
275 /*
276  * init_vma_prep() - Initializer wrapper for vma_prepare struct
277  * @vp: The vma_prepare struct
278  * @vma: The vma that will be altered once locked
279  */
280 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
281 {
282 	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
283 }
284 
285 /*
286  * Can the proposed VMA be merged with the left (previous) VMA, taking into
287  * account the start position of the proposed range?
288  */
289 static bool can_vma_merge_left(struct vma_merge_struct *vmg)
290 
291 {
292 	return vmg->prev && vmg->prev->vm_end == vmg->start &&
293 		can_vma_merge_after(vmg);
294 }
295 
296 /*
297  * Can the proposed VMA be merged with the right (next) VMA, taking into
298  * account the end position of the proposed range?
299  *
300  * In addition, if we can merge with the left VMA, ensure that left and right
301  * anon_vmas are also compatible.
302  */
303 static bool can_vma_merge_right(struct vma_merge_struct *vmg,
304 				bool can_merge_left)
305 {
306 	if (!vmg->next || vmg->end != vmg->next->vm_start ||
307 	    !can_vma_merge_before(vmg))
308 		return false;
309 
310 	if (!can_merge_left)
311 		return true;
312 
313 	/*
314 	 * If we can merge with prev (left) and next (right), indicating that
315 	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
316 	 * does not mean prev and next are compatible with EACH OTHER.
317 	 *
318 	 * We therefore check this in addition to mergeability to either side.
319 	 */
320 	return are_anon_vmas_compatible(vmg->prev, vmg->next);
321 }
322 
323 /*
324  * Close a vm structure and free it.
325  */
326 void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
327 {
328 	might_sleep();
329 	if (!closed && vma->vm_ops && vma->vm_ops->close)
330 		vma->vm_ops->close(vma);
331 	if (vma->vm_file)
332 		fput(vma->vm_file);
333 	mpol_put(vma_policy(vma));
334 	if (unreachable)
335 		__vm_area_free(vma);
336 	else
337 		vm_area_free(vma);
338 }
339 
340 /*
341  * Get rid of page table information in the indicated region.
342  *
343  * Called with the mm semaphore held.
344  */
345 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
346 		struct vm_area_struct *prev, struct vm_area_struct *next)
347 {
348 	struct mm_struct *mm = vma->vm_mm;
349 	struct mmu_gather tlb;
350 
351 	lru_add_drain();
352 	tlb_gather_mmu(&tlb, mm);
353 	update_hiwater_rss(mm);
354 	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
355 		   /* mm_wr_locked = */ true);
356 	mas_set(mas, vma->vm_end);
357 	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
358 		      next ? next->vm_start : USER_PGTABLES_CEILING,
359 		      /* mm_wr_locked = */ true);
360 	tlb_finish_mmu(&tlb);
361 }
362 
363 /*
364  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where the
365  * limit has already been checked or where failing makes no sense.
366  * VMA Iterator will point to the original VMA.
367  */
368 static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
369 		       unsigned long addr, int new_below)
370 {
371 	struct vma_prepare vp;
372 	struct vm_area_struct *new;
373 	int err;
374 
375 	WARN_ON(vma->vm_start >= addr);
376 	WARN_ON(vma->vm_end <= addr);
377 
378 	if (vma->vm_ops && vma->vm_ops->may_split) {
379 		err = vma->vm_ops->may_split(vma, addr);
380 		if (err)
381 			return err;
382 	}
383 
384 	new = vm_area_dup(vma);
385 	if (!new)
386 		return -ENOMEM;
387 
388 	if (new_below) {
389 		new->vm_end = addr;
390 	} else {
391 		new->vm_start = addr;
392 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
393 	}
394 
395 	err = -ENOMEM;
396 	vma_iter_config(vmi, new->vm_start, new->vm_end);
397 	if (vma_iter_prealloc(vmi, new))
398 		goto out_free_vma;
399 
400 	err = vma_dup_policy(vma, new);
401 	if (err)
402 		goto out_free_vmi;
403 
404 	err = anon_vma_clone(new, vma);
405 	if (err)
406 		goto out_free_mpol;
407 
408 	if (new->vm_file)
409 		get_file(new->vm_file);
410 
411 	if (new->vm_ops && new->vm_ops->open)
412 		new->vm_ops->open(new);
413 
414 	vma_start_write(vma);
415 	vma_start_write(new);
416 
417 	init_vma_prep(&vp, vma);
418 	vp.insert = new;
419 	vma_prepare(&vp);
420 	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
421 
422 	if (new_below) {
423 		vma->vm_start = addr;
424 		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
425 	} else {
426 		vma->vm_end = addr;
427 	}
428 
429 	/* vma_complete stores the new vma */
430 	vma_complete(&vp, vmi, vma->vm_mm);
431 	validate_mm(vma->vm_mm);
432 
433 	/* Success. */
434 	if (new_below)
435 		vma_next(vmi);
436 	else
437 		vma_prev(vmi);
438 
439 	return 0;
440 
441 out_free_mpol:
442 	mpol_put(vma_policy(new));
443 out_free_vmi:
444 	vma_iter_free(vmi);
445 out_free_vma:
446 	vm_area_free(new);
447 	return err;
448 }
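
/*
 * Example of the new_below semantics above, assuming a VMA spanning
 * [0x1000, 0x5000) split at addr == 0x3000:
 *
 *	new_below == 1: the new VMA covers [0x1000, 0x3000) and the original
 *	VMA is trimmed to [0x3000, 0x5000); the iterator is advanced so it
 *	still points at the original (upper) part.
 *
 *	new_below == 0: the new VMA covers [0x3000, 0x5000) with its vm_pgoff
 *	advanced by two pages, and the original VMA is trimmed to
 *	[0x1000, 0x3000).
 */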
449 
450 /*
451  * Split a vma into two pieces at address 'addr'; a new vma is allocated
452  * for either the first part or the tail.
453  */
454 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
455 		     unsigned long addr, int new_below)
456 {
457 	if (vma->vm_mm->map_count >= sysctl_max_map_count)
458 		return -ENOMEM;
459 
460 	return __split_vma(vmi, vma, addr, new_below);
461 }
462 
463 /*
464  * vma has some anon_vma assigned, and is already inserted on that
465  * anon_vma's interval trees.
466  *
467  * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
468  * vma must be removed from the anon_vma's interval trees using
469  * anon_vma_interval_tree_pre_update_vma().
470  *
471  * After the update, the vma will be reinserted using
472  * anon_vma_interval_tree_post_update_vma().
473  *
474  * The entire update must be protected by exclusive mmap_lock and by
475  * the root anon_vma's mutex.
476  */
477 void
478 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
479 {
480 	struct anon_vma_chain *avc;
481 
482 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
483 		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
484 }
485 
486 void
487 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
488 {
489 	struct anon_vma_chain *avc;
490 
491 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
492 		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
493 }
494 
495 /*
496  * dup_anon_vma() - Helper function to duplicate anon_vma
497  * @dst: The destination VMA
498  * @src: The source VMA
499  * @dup: Output pointer, set to the destination VMA if an anon_vma was duplicated.
500  *
501  * Returns: 0 on success.
502  */
503 static int dup_anon_vma(struct vm_area_struct *dst,
504 			struct vm_area_struct *src, struct vm_area_struct **dup)
505 {
506 	/*
507 	 * Easily overlooked: when mprotect shifts the boundary, make sure the
508 	 * expanding vma has anon_vma set if the shrinking vma had, to cover any
509 	 * expanding vma has anon_vma set if the shrinking vma had one, to cover any
510 	 */
511 	if (src->anon_vma && !dst->anon_vma) {
512 		int ret;
513 
514 		vma_assert_write_locked(dst);
515 		dst->anon_vma = src->anon_vma;
516 		ret = anon_vma_clone(dst, src);
517 		if (ret)
518 			return ret;
519 
520 		*dup = dst;
521 	}
522 
523 	return 0;
524 }
525 
526 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
527 void validate_mm(struct mm_struct *mm)
528 {
529 	int bug = 0;
530 	int i = 0;
531 	struct vm_area_struct *vma;
532 	VMA_ITERATOR(vmi, mm, 0);
533 
534 	mt_validate(&mm->mm_mt);
535 	for_each_vma(vmi, vma) {
536 #ifdef CONFIG_DEBUG_VM_RB
537 		struct anon_vma *anon_vma = vma->anon_vma;
538 		struct anon_vma_chain *avc;
539 #endif
540 		unsigned long vmi_start, vmi_end;
541 		bool warn = 0;
542 
543 		vmi_start = vma_iter_addr(&vmi);
544 		vmi_end = vma_iter_end(&vmi);
545 		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
546 			warn = 1;
547 
548 		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
549 			warn = 1;
550 
551 		if (warn) {
552 			pr_emerg("issue in %s\n", current->comm);
553 			dump_stack();
554 			dump_vma(vma);
555 			pr_emerg("tree range: %px start %lx end %lx\n", vma,
556 				 vmi_start, vmi_end - 1);
557 			vma_iter_dump_tree(&vmi);
558 		}
559 
560 #ifdef CONFIG_DEBUG_VM_RB
561 		if (anon_vma) {
562 			anon_vma_lock_read(anon_vma);
563 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
564 				anon_vma_interval_tree_verify(avc);
565 			anon_vma_unlock_read(anon_vma);
566 		}
567 #endif
568 		i++;
569 	}
570 	if (i != mm->map_count) {
571 		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
572 		bug = 1;
573 	}
574 	VM_BUG_ON_MM(bug, mm);
575 }
576 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
577 
578 /* Actually perform the VMA merge operation. */
579 static int commit_merge(struct vma_merge_struct *vmg,
580 			struct vm_area_struct *adjust,
581 			struct vm_area_struct *remove,
582 			struct vm_area_struct *remove2,
583 			long adj_start,
584 			bool expanded)
585 {
586 	struct vma_prepare vp;
587 
588 	init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);
589 
590 	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
591 		   vp.anon_vma != adjust->anon_vma);
592 
593 	if (expanded) {
594 		/* Note: vma iterator must be pointing to 'start'. */
595 		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
596 	} else {
597 		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
598 				adjust->vm_end);
599 	}
600 
601 	if (vma_iter_prealloc(vmg->vmi, vmg->vma))
602 		return -ENOMEM;
603 
604 	vma_prepare(&vp);
605 	vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
606 	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
607 
608 	if (expanded)
609 		vma_iter_store(vmg->vmi, vmg->vma);
610 
611 	if (adj_start) {
612 		adjust->vm_start += adj_start;
613 		adjust->vm_pgoff += PHYS_PFN(adj_start);
614 		if (adj_start < 0) {
615 			WARN_ON(expanded);
616 			vma_iter_store(vmg->vmi, adjust);
617 		}
618 	}
619 
620 	vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);
621 
622 	return 0;
623 }
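
/*
 * A note on the adj_start convention used by commit_merge(), as inferred
 * from its callers below:
 *
 *	adj_start > 0: the start of 'adjust' moves forward, i.e. the
 *	preceding VMA has grown into it (partial merge to the left).
 *
 *	adj_start < 0: the start of 'adjust' moves backward, i.e. 'adjust'
 *	(the next VMA) absorbs the shrunk tail of vmg->vma and is re-stored
 *	in the maple tree (partial merge to the right).
 *
 *	adj_start == 0: no neighbouring VMA needs its start adjusted.
 */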
624 
625 /* We can only remove VMAs when merging if they do not have a close hook. */
626 static bool can_merge_remove_vma(struct vm_area_struct *vma)
627 {
628 	return !vma->vm_ops || !vma->vm_ops->close;
629 }
630 
631 /*
632  * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
633  * attributes modified.
634  *
635  * @vmg: Describes the modifications being made to a VMA and associated
636  *       metadata.
637  *
638  * When the attributes of a range within a VMA change, it might be possible
639  * for immediately adjacent VMAs to be merged into that VMA due to having
640  * identical properties.
641  *
642  * This function checks for the existence of any such mergeable VMAs and updates
643  * the maple tree describing the @vmg->vma->vm_mm address space to account for
644  * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
645  *
646  * As part of this operation, if a merge occurs, the @vmg object will have its
647  * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
648  * calls to this function should reset these fields.
649  *
650  * Returns: The merged VMA if merge succeeds, or NULL otherwise.
651  *
652  * ASSUMPTIONS:
653  * - The caller must assign the VMA to be modified to @vmg->vma.
654  * - The caller must have set @vmg->prev to the previous VMA, if there is one.
655  * - The caller must not set @vmg->next, as we determine this.
656  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
657  * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
658  */
659 static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg)
660 {
661 	struct vm_area_struct *vma = vmg->vma;
662 	struct vm_area_struct *prev = vmg->prev;
663 	struct vm_area_struct *next, *res;
664 	struct vm_area_struct *anon_dup = NULL;
665 	struct vm_area_struct *adjust = NULL;
666 	unsigned long start = vmg->start;
667 	unsigned long end = vmg->end;
668 	bool left_side = vma && start == vma->vm_start;
669 	bool right_side = vma && end == vma->vm_end;
670 	int err = 0;
671 	long adj_start = 0;
672 	bool merge_will_delete_vma, merge_will_delete_next;
673 	bool merge_left, merge_right, merge_both;
674 	bool expanded;
675 
676 	mmap_assert_write_locked(vmg->mm);
677 	VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */
678 	VM_WARN_ON(vmg->next); /* We set this. */
679 	VM_WARN_ON(prev && start <= prev->vm_start);
680 	VM_WARN_ON(start >= end);
681 	/*
682 	 * If vma == prev, then we are offset into a VMA. Otherwise, if we are
683 	 * not, we must span a portion of the VMA.
684 	 */
685 	VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||
686 			   vmg->end > vma->vm_end));
687 	/* The vmi must be positioned within vmg->vma. */
688 	VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
689 			    vma_iter_addr(vmg->vmi) < vma->vm_end));
690 
691 	vmg->state = VMA_MERGE_NOMERGE;
692 
693 	/*
694 	 * If this is a special mapping, or if the range being modified is at
695 	 * neither the leftmost nor the rightmost edge of the VMA, then we have
696 	 * no chance of merging and should abort.
697 	 */
698 	if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
699 		return NULL;
700 
701 	if (left_side)
702 		merge_left = can_vma_merge_left(vmg);
703 	else
704 		merge_left = false;
705 
706 	if (right_side) {
707 		next = vmg->next = vma_iter_next_range(vmg->vmi);
708 		vma_iter_prev_range(vmg->vmi);
709 
710 		merge_right = can_vma_merge_right(vmg, merge_left);
711 	} else {
712 		merge_right = false;
713 		next = NULL;
714 	}
715 
716 	if (merge_left)		/* If merging prev, position iterator there. */
717 		vma_prev(vmg->vmi);
718 	else if (!merge_right)	/* If we have nothing to merge, abort. */
719 		return NULL;
720 
721 	merge_both = merge_left && merge_right;
722 	/* If we span the entire VMA, a merge implies it will be deleted. */
723 	merge_will_delete_vma = left_side && right_side;
724 
725 	/*
726 	 * If we need to remove vma in its entirety but are unable to do so,
727 	 * we have no sensible recourse but to abort the merge.
728 	 */
729 	if (merge_will_delete_vma && !can_merge_remove_vma(vma))
730 		return NULL;
731 
732 	/*
733 	 * If we merge both VMAs, then next is also deleted. This implies
734 	 * merge_will_delete_vma also.
735 	 */
736 	merge_will_delete_next = merge_both;
737 
738 	/*
739 	 * If we cannot delete next, then we can reduce the operation to merging
740 	 * prev and vma (thereby deleting vma).
741 	 */
742 	if (merge_will_delete_next && !can_merge_remove_vma(next)) {
743 		merge_will_delete_next = false;
744 		merge_right = false;
745 		merge_both = false;
746 	}
747 
748 	/* No matter what happens, we will be adjusting vma. */
749 	vma_start_write(vma);
750 
751 	if (merge_left)
752 		vma_start_write(prev);
753 
754 	if (merge_right)
755 		vma_start_write(next);
756 
757 	if (merge_both) {
758 		/*
759 		 *         |<----->|
760 		 * |-------*********-------|
761 		 *   prev     vma     next
762 		 *  extend   delete  delete
763 		 */
764 
765 		vmg->vma = prev;
766 		vmg->start = prev->vm_start;
767 		vmg->end = next->vm_end;
768 		vmg->pgoff = prev->vm_pgoff;
769 
770 		/*
771 		 * We already ensured anon_vma compatibility above, so now it's
772 		 * simply a case of, if prev has no anon_vma object, which of
773 		 * next or vma contains the anon_vma we must duplicate.
774 		 */
775 		err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
776 	} else if (merge_left) {
777 		/*
778 		 *         |<----->| OR
779 		 *         |<--------->|
780 		 * |-------*************
781 		 *   prev       vma
782 		 *  extend shrink/delete
783 		 */
784 
785 		vmg->vma = prev;
786 		vmg->start = prev->vm_start;
787 		vmg->pgoff = prev->vm_pgoff;
788 
789 		if (!merge_will_delete_vma) {
790 			adjust = vma;
791 			adj_start = vmg->end - vma->vm_start;
792 		}
793 
794 		err = dup_anon_vma(prev, vma, &anon_dup);
795 	} else { /* merge_right */
796 		/*
797 		 *     |<----->| OR
798 		 * |<--------->|
799 		 * *************-------|
800 		 *      vma       next
801 		 * shrink/delete extend
802 		 */
803 
804 		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
805 
806 		VM_WARN_ON(!merge_right);
807 		/* If we are offset into a VMA, then prev must be vma. */
808 		VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev);
809 
810 		if (merge_will_delete_vma) {
811 			vmg->vma = next;
812 			vmg->end = next->vm_end;
813 			vmg->pgoff = next->vm_pgoff - pglen;
814 		} else {
815 			/*
816 			 * We shrink vma and expand next.
817 			 *
818 			 * IMPORTANT: This is the ONLY case where the final
819 			 * merged VMA is NOT vmg->vma, but rather vmg->next.
820 			 */
821 
822 			vmg->start = vma->vm_start;
823 			vmg->end = start;
824 			vmg->pgoff = vma->vm_pgoff;
825 
826 			adjust = next;
827 			adj_start = -(vma->vm_end - start);
828 		}
829 
830 		err = dup_anon_vma(next, vma, &anon_dup);
831 	}
832 
833 	if (err)
834 		goto abort;
835 
836 	/*
837 	 * In nearly all cases, we expand vmg->vma. There is one exception -
838 	 * merge_right where we partially span the VMA. In this case we shrink
839 	 * the end of vmg->vma and adjust the start of vmg->next accordingly.
840 	 */
841 	expanded = !merge_right || merge_will_delete_vma;
842 
843 	if (commit_merge(vmg, adjust,
844 			 merge_will_delete_vma ? vma : NULL,
845 			 merge_will_delete_next ? next : NULL,
846 			 adj_start, expanded)) {
847 		if (anon_dup)
848 			unlink_anon_vmas(anon_dup);
849 
850 		vmg->state = VMA_MERGE_ERROR_NOMEM;
851 		return NULL;
852 	}
853 
854 	res = merge_left ? prev : next;
855 	khugepaged_enter_vma(res, vmg->flags);
856 
857 	vmg->state = VMA_MERGE_SUCCESS;
858 	return res;
859 
860 abort:
861 	vma_iter_set(vmg->vmi, start);
862 	vma_iter_load(vmg->vmi);
863 	vmg->state = VMA_MERGE_ERROR_NOMEM;
864 	return NULL;
865 }
866 
867 /*
868  * vma_merge_new_range - Attempt to merge a new VMA into address space
869  *
870  * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
871  *       (exclusive), which we try to merge with any adjacent VMAs if possible.
872  *
873  * We are about to add a VMA to the address space starting at @vmg->start and
874  * ending at @vmg->end. There are three different possible scenarios:
875  *
876  * 1. There is a VMA with identical properties immediately adjacent to the
877  *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
878  *    EXPAND that VMA:
879  *
880  * Proposed:       |-----|  or  |-----|
881  * Existing:  |----|                  |----|
882  *
883  * 2. There are VMAs with identical properties immediately adjacent to the
884  *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
885  *    EXPAND the former and REMOVE the latter:
886  *
887  * Proposed:       |-----|
888  * Existing:  |----|     |----|
889  *
890  * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
891  *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
892  *
893  * In instances where we can merge, this function returns the expanded VMA which
894  * will have its range adjusted accordingly and the underlying maple tree also
895  * adjusted.
896  *
897  * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
898  *          to the VMA we expanded.
899  *
900  * This function adjusts @vmg to provide @vmg->next if not already specified,
901  * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
902  *
903  * ASSUMPTIONS:
904  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
905  * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
906  *   other than VMAs that will be unmapped should the operation succeed.
907  * - The caller must have specified the previous vma in @vmg->prev.
908  * - The caller must have specified the next vma in @vmg->next.
909  * - The caller must have positioned the vmi at or before the gap.
910  */
911 struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
912 {
913 	struct vm_area_struct *prev = vmg->prev;
914 	struct vm_area_struct *next = vmg->next;
915 	unsigned long start = vmg->start;
916 	unsigned long end = vmg->end;
917 	pgoff_t pgoff = vmg->pgoff;
918 	pgoff_t pglen = PHYS_PFN(end - start);
919 	bool can_merge_left, can_merge_right;
920 
921 	mmap_assert_write_locked(vmg->mm);
922 	VM_WARN_ON(vmg->vma);
923 	/* vmi must point at or before the gap. */
924 	VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);
925 
926 	vmg->state = VMA_MERGE_NOMERGE;
927 
928 	/* Special VMAs are unmergeable, as is a range with neither a prev nor a next. */
929 	if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
930 		return NULL;
931 
932 	can_merge_left = can_vma_merge_left(vmg);
933 	can_merge_right = can_vma_merge_right(vmg, can_merge_left);
934 
935 	/* If we can merge with the next VMA, adjust vmg accordingly. */
936 	if (can_merge_right) {
937 		vmg->end = next->vm_end;
938 		vmg->vma = next;
939 		vmg->pgoff = next->vm_pgoff - pglen;
940 	}
941 
942 	/* If we can merge with the previous VMA, adjust vmg accordingly. */
943 	if (can_merge_left) {
944 		vmg->start = prev->vm_start;
945 		vmg->vma = prev;
946 		vmg->pgoff = prev->vm_pgoff;
947 
948 		/*
949 		 * If this merge would result in removal of the next VMA but we
950 		 * are not permitted to do so, reduce the operation to merging
951 		 * prev and vma.
952 		 */
953 		if (can_merge_right && !can_merge_remove_vma(next))
954 			vmg->end = end;
955 
956 		vma_prev(vmg->vmi); /* Equivalent to going to the previous range */
957 	}
958 
959 	/*
960 	 * Now try to expand adjacent VMA(s). This takes care of removing the
961 	 * following VMA if we have VMAs on both sides.
962 	 */
963 	if (vmg->vma && !vma_expand(vmg)) {
964 		khugepaged_enter_vma(vmg->vma, vmg->flags);
965 		vmg->state = VMA_MERGE_SUCCESS;
966 		return vmg->vma;
967 	}
968 
969 	/* If expansion failed, reset state. Allows us to retry merge later. */
970 	vmg->vma = NULL;
971 	vmg->start = start;
972 	vmg->end = end;
973 	vmg->pgoff = pgoff;
974 	if (vmg->vma == prev)
975 		vma_iter_set(vmg->vmi, start);
976 
977 	return NULL;
978 }
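
/*
 * For reference, the callers in this file (vma_merge_extend() and copy_vma())
 * prepare the merge descriptor roughly as follows before calling
 * vma_merge_new_range() (a sketch of existing usage):
 *
 *	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
 *
 *	vmg.vma = NULL;		(a new range, no VMA describes it yet)
 *	vmg.pgoff = pgoff;
 *	vmg.next = vma_iter_next_rewind(vmi, NULL);
 *	new_vma = vma_merge_new_range(&vmg);
 *
 * with vmg.prev found via find_vma_prev() or taken from the iterator, and the
 * iterator positioned at or before the gap being filled.
 */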
979 
980 /*
981  * vma_expand - Expand an existing VMA
982  *
983  * @vmg: Describes a VMA expansion operation.
984  *
985  * Expand @vma to vmg->start and vmg->end.  Can expand off the start and end.
986  * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
987  * vmg->next->vm_end.  Checking if the vmg->vma can expand and merge with
988  * vmg->next needs to be handled by the caller.
989  *
990  * Returns: 0 on success.
991  *
992  * ASSUMPTIONS:
993  * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
994  * - The caller must have set @vmg->vma and @vmg->next.
995  */
996 int vma_expand(struct vma_merge_struct *vmg)
997 {
998 	struct vm_area_struct *anon_dup = NULL;
999 	bool remove_next = false;
1000 	struct vm_area_struct *vma = vmg->vma;
1001 	struct vm_area_struct *next = vmg->next;
1002 
1003 	mmap_assert_write_locked(vmg->mm);
1004 
1005 	vma_start_write(vma);
1006 	if (next && (vma != next) && (vmg->end == next->vm_end)) {
1007 		int ret;
1008 
1009 		remove_next = true;
1010 		/* This should already have been checked by this point. */
1011 		VM_WARN_ON(!can_merge_remove_vma(next));
1012 		vma_start_write(next);
1013 		ret = dup_anon_vma(vma, next, &anon_dup);
1014 		if (ret)
1015 			return ret;
1016 	}
1017 
1018 	/* Not merging but overwriting any part of next is not handled. */
1019 	VM_WARN_ON(next && !remove_next &&
1020 		  next != vma && vmg->end > next->vm_start);
1021 	/* Only handles expanding */
1022 	VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);
1023 
1024 	if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
1025 		goto nomem;
1026 
1027 	return 0;
1028 
1029 nomem:
1030 	vmg->state = VMA_MERGE_ERROR_NOMEM;
1031 	if (anon_dup)
1032 		unlink_anon_vmas(anon_dup);
1033 	return -ENOMEM;
1034 }
1035 
1036 /*
1037  * vma_shrink() - Reduce an existing VMA's memory area
1038  * @vmi: The vma iterator
1039  * @vma: The VMA to modify
1040  * @start: The new start
1041  * @end: The new end
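 * @pgoff: The new page offset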
1042  *
1043  * Returns: 0 on success, -ENOMEM otherwise
1044  */
1045 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
1046 	       unsigned long start, unsigned long end, pgoff_t pgoff)
1047 {
1048 	struct vma_prepare vp;
1049 
1050 	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
1051 
1052 	if (vma->vm_start < start)
1053 		vma_iter_config(vmi, vma->vm_start, start);
1054 	else
1055 		vma_iter_config(vmi, end, vma->vm_end);
1056 
1057 	if (vma_iter_prealloc(vmi, NULL))
1058 		return -ENOMEM;
1059 
1060 	vma_start_write(vma);
1061 
1062 	init_vma_prep(&vp, vma);
1063 	vma_prepare(&vp);
1064 	vma_adjust_trans_huge(vma, start, end, 0);
1065 
1066 	vma_iter_clear(vmi);
1067 	vma_set_range(vma, start, end, pgoff);
1068 	vma_complete(&vp, vmi, vma->vm_mm);
1069 	validate_mm(vma->vm_mm);
1070 	return 0;
1071 }
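
/*
 * For example, assuming 4KiB pages, shrinking a VMA spanning
 * [0x1000, 0x5000) to a new start of 0x2000 clears the maple tree range
 * [0x1000, 0x2000) and leaves the VMA at [0x2000, 0x5000); the caller
 * supplies the matching new pgoff. Only one end may move: the WARN_ON()
 * above fires if both start and end differ from the VMA's current bounds.
 */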
1072 
1073 static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
1074 		    struct ma_state *mas_detach, bool mm_wr_locked)
1075 {
1076 	struct mmu_gather tlb;
1077 
1078 	if (!vms->clear_ptes) /* Nothing to do */
1079 		return;
1080 
1081 	/*
1082 	 * We can free page tables without write-locking mmap_lock because VMAs
1083 	 * were isolated before we downgraded mmap_lock.
1084 	 */
1085 	mas_set(mas_detach, 1);
1086 	lru_add_drain();
1087 	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
1088 	update_hiwater_rss(vms->vma->vm_mm);
1089 	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
1090 		   vms->vma_count, mm_wr_locked);
1091 
1092 	mas_set(mas_detach, 1);
1093 	/* start and end may be different if there is no prev or next vma. */
1094 	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
1095 		      vms->unmap_end, mm_wr_locked);
1096 	tlb_finish_mmu(&tlb);
1097 	vms->clear_ptes = false;
1098 }
1099 
1100 void vms_clean_up_area(struct vma_munmap_struct *vms,
1101 		struct ma_state *mas_detach)
1102 {
1103 	struct vm_area_struct *vma;
1104 
1105 	if (!vms->nr_pages)
1106 		return;
1107 
1108 	vms_clear_ptes(vms, mas_detach, true);
1109 	mas_set(mas_detach, 0);
1110 	mas_for_each(mas_detach, vma, ULONG_MAX)
1111 		if (vma->vm_ops && vma->vm_ops->close)
1112 			vma->vm_ops->close(vma);
1113 	vms->closed_vm_ops = true;
1114 }
1115 
1116 /*
1117  * vms_complete_munmap_vmas() - Finish the munmap() operation
1118  * @vms: The vma munmap struct
1119  * @mas_detach: The maple state of the detached vmas
1120  *
1121  * This updates the mm_struct, unmaps the region, frees the resources
1122  * used for the munmap() and may downgrade the lock, if requested. It covers
1123  * everything that needs to be done once the vma maple tree has been updated.
1124  */
1125 void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
1126 		struct ma_state *mas_detach)
1127 {
1128 	struct vm_area_struct *vma;
1129 	struct mm_struct *mm;
1130 
1131 	mm = current->mm;
1132 	mm->map_count -= vms->vma_count;
1133 	mm->locked_vm -= vms->locked_vm;
1134 	if (vms->unlock)
1135 		mmap_write_downgrade(mm);
1136 
1137 	if (!vms->nr_pages)
1138 		return;
1139 
1140 	vms_clear_ptes(vms, mas_detach, !vms->unlock);
1141 	/* Update high watermark before we lower total_vm */
1142 	update_hiwater_vm(mm);
1143 	/* Stat accounting */
1144 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
1145 	/* Paranoid bookkeeping */
1146 	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
1147 	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
1148 	VM_WARN_ON(vms->data_vm > mm->data_vm);
1149 	mm->exec_vm -= vms->exec_vm;
1150 	mm->stack_vm -= vms->stack_vm;
1151 	mm->data_vm -= vms->data_vm;
1152 
1153 	/* Remove and clean up vmas */
1154 	mas_set(mas_detach, 0);
1155 	mas_for_each(mas_detach, vma, ULONG_MAX)
1156 		remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);
1157 
1158 	vm_unacct_memory(vms->nr_accounted);
1159 	validate_mm(mm);
1160 	if (vms->unlock)
1161 		mmap_read_unlock(mm);
1162 
1163 	__mt_destroy(mas_detach->tree);
1164 }
1165 
1166 /*
1167  * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
1168  * for later removal.  Handles splitting the first and last VMAs if necessary
1169  * and marking the vmas as isolated.
1170  *
1171  * @vms: The vma munmap struct
1172  * @mas_detach: The maple state tracking the detached tree
1173  *
1174  * Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
1175  */
1176 int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
1177 		struct ma_state *mas_detach)
1178 {
1179 	struct vm_area_struct *next = NULL;
1180 	int error = -ENOMEM;
1181 
1182 	/*
1183 	 * If we need to split any vma, do it now to save pain later.
1184 	 * Does it split the first one?
1185 	 */
1186 	if (vms->start > vms->vma->vm_start) {
1187 
1188 		/*
1189 		 * Make sure that map_count on return from munmap() will
1190 		 * not exceed its limit; but let map_count go just above
1191 		 * its limit temporarily, to help free resources as expected.
1192 		 */
1193 		if (vms->end < vms->vma->vm_end &&
1194 		    vms->vma->vm_mm->map_count >= sysctl_max_map_count)
1195 			goto map_count_exceeded;
1196 
1197 		/* Don't bother splitting the VMA if we can't unmap it anyway */
1198 		if (!can_modify_vma(vms->vma)) {
1199 			error = -EPERM;
1200 			goto start_split_failed;
1201 		}
1202 
1203 		if (__split_vma(vms->vmi, vms->vma, vms->start, 1))
1204 			goto start_split_failed;
1205 	}
1206 	vms->prev = vma_prev(vms->vmi);
1207 	if (vms->prev)
1208 		vms->unmap_start = vms->prev->vm_end;
1209 
1210 	/*
1211 	 * Detach a range of VMAs from the mm. Using next as a temp variable,
1212 	 * since it is always overwritten.
1213 	 */
1214 	for_each_vma_range(*(vms->vmi), next, vms->end) {
1215 		long nrpages;
1216 
1217 		if (!can_modify_vma(next)) {
1218 			error = -EPERM;
1219 			goto modify_vma_failed;
1220 		}
1221 		/* Does it split the end? */
1222 		if (next->vm_end > vms->end) {
1223 			if (__split_vma(vms->vmi, next, vms->end, 0))
1224 				goto end_split_failed;
1225 		}
1226 		vma_start_write(next);
1227 		mas_set(mas_detach, vms->vma_count++);
1228 		if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
1229 			goto munmap_gather_failed;
1230 
1231 		vma_mark_detached(next, true);
1232 		nrpages = vma_pages(next);
1233 
1234 		vms->nr_pages += nrpages;
1235 		if (next->vm_flags & VM_LOCKED)
1236 			vms->locked_vm += nrpages;
1237 
1238 		if (next->vm_flags & VM_ACCOUNT)
1239 			vms->nr_accounted += nrpages;
1240 
1241 		if (is_exec_mapping(next->vm_flags))
1242 			vms->exec_vm += nrpages;
1243 		else if (is_stack_mapping(next->vm_flags))
1244 			vms->stack_vm += nrpages;
1245 		else if (is_data_mapping(next->vm_flags))
1246 			vms->data_vm += nrpages;
1247 
1248 		if (unlikely(vms->uf)) {
1249 			/*
1250 			 * If userfaultfd_unmap_prep returns an error the vmas
1251 			 * will remain split, but userland will get a
1252 			 * highly unexpected error anyway. This is no
1253 			 * different than the case where the first of the two
1254 			 * __split_vma fails, but we don't undo the first
1255 			 * split, even though we could. This failure is unlikely
1256 			 * enough that it's not worth optimizing for.
1257 			 */
1258 			if (userfaultfd_unmap_prep(next, vms->start, vms->end,
1259 						   vms->uf))
1260 				goto userfaultfd_error;
1261 		}
1262 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
1263 		BUG_ON(next->vm_start < vms->start);
1264 		BUG_ON(next->vm_start > vms->end);
1265 #endif
1266 	}
1267 
1268 	vms->next = vma_next(vms->vmi);
1269 	if (vms->next)
1270 		vms->unmap_end = vms->next->vm_start;
1271 
1272 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1273 	/* Make sure no VMAs are about to be lost. */
1274 	{
1275 		MA_STATE(test, mas_detach->tree, 0, 0);
1276 		struct vm_area_struct *vma_mas, *vma_test;
1277 		int test_count = 0;
1278 
1279 		vma_iter_set(vms->vmi, vms->start);
1280 		rcu_read_lock();
1281 		vma_test = mas_find(&test, vms->vma_count - 1);
1282 		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
1283 			BUG_ON(vma_mas != vma_test);
1284 			test_count++;
1285 			vma_test = mas_next(&test, vms->vma_count - 1);
1286 		}
1287 		rcu_read_unlock();
1288 		BUG_ON(vms->vma_count != test_count);
1289 	}
1290 #endif
1291 
1292 	while (vma_iter_addr(vms->vmi) > vms->start)
1293 		vma_iter_prev_range(vms->vmi);
1294 
1295 	vms->clear_ptes = true;
1296 	return 0;
1297 
1298 userfaultfd_error:
1299 munmap_gather_failed:
1300 end_split_failed:
1301 modify_vma_failed:
1302 	reattach_vmas(mas_detach);
1303 start_split_failed:
1304 map_count_exceeded:
1305 	return error;
1306 }
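
/*
 * Note on pairing: a successful vms_gather_munmap_vmas() must be followed
 * either by vms_complete_munmap_vmas() once the range has been cleared from
 * the mm's maple tree, or by reattach_vmas() if a later step fails, as
 * do_vmi_align_munmap() below demonstrates.
 */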
1307 
1308 /*
1309  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
1310  * @vmi: The vma iterator
1311  * @vma: The starting vm_area_struct
1312  * @mm: The mm_struct
1313  * @start: The aligned start address to munmap.
1314  * @end: The aligned end address to munmap.
1315  * @uf: The userfaultfd list_head
1316  * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
1317  * success.
1318  *
1319  * Return: 0 on success and drops the lock if so directed, error and leaves the
1320  * lock held otherwise.
1321  */
1322 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1323 		struct mm_struct *mm, unsigned long start, unsigned long end,
1324 		struct list_head *uf, bool unlock)
1325 {
1326 	struct maple_tree mt_detach;
1327 	MA_STATE(mas_detach, &mt_detach, 0, 0);
1328 	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1329 	mt_on_stack(mt_detach);
1330 	struct vma_munmap_struct vms;
1331 	int error;
1332 
1333 	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
1334 	error = vms_gather_munmap_vmas(&vms, &mas_detach);
1335 	if (error)
1336 		goto gather_failed;
1337 
1338 	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
1339 	if (error)
1340 		goto clear_tree_failed;
1341 
1342 	/* Point of no return */
1343 	vms_complete_munmap_vmas(&vms, &mas_detach);
1344 	return 0;
1345 
1346 clear_tree_failed:
1347 	reattach_vmas(&mas_detach);
1348 gather_failed:
1349 	validate_mm(mm);
1350 	return error;
1351 }
1352 
1353 /*
1354  * do_vmi_munmap() - munmap a given range.
1355  * @vmi: The vma iterator
1356  * @mm: The mm_struct
1357  * @start: The start address to munmap
1358  * @len: The length of the range to munmap
1359  * @uf: The userfaultfd list_head
1360  * @unlock: set to true if the user wants to drop the mmap_lock on success
1361  *
1362  * This function takes a @vmi that is either pointing to the previous VMA or set
1363  * to MA_START and sets it up to remove the mapping(s).  The @len will be
1364  * aligned.
1365  *
1366  * Return: 0 on success and drops the lock if so directed, error and leaves the
1367  * lock held otherwise.
1368  */
1369 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
1370 		  unsigned long start, size_t len, struct list_head *uf,
1371 		  bool unlock)
1372 {
1373 	unsigned long end;
1374 	struct vm_area_struct *vma;
1375 
1376 	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
1377 		return -EINVAL;
1378 
1379 	end = start + PAGE_ALIGN(len);
1380 	if (end == start)
1381 		return -EINVAL;
1382 
1383 	/* Find the first overlapping VMA */
1384 	vma = vma_find(vmi, end);
1385 	if (!vma) {
1386 		if (unlock)
1387 			mmap_write_unlock(mm);
1388 		return 0;
1389 	}
1390 
1391 	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
1392 }
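
/*
 * Example of the argument handling above, assuming 4KiB pages: a call with
 * start == 0x10000 and len == 0x1800 unmaps [0x10000, 0x12000), because len
 * is rounded up to a whole number of pages, whereas start == 0x10800 is
 * rejected with -EINVAL since the start address itself must be page aligned.
 */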
1393 
1394 /*
1395  * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
1396  * context and anonymous VMA name within the range [start, end).
1397  *
1398  * As a result, we might be able to merge the newly modified VMA range with an
1399  * adjacent VMA with identical properties.
1400  *
1401  * If no merge is possible and the range does not span the entirety of the VMA,
1402  * we then need to split the VMA to accommodate the change.
1403  *
1404  * The function returns either the merged VMA, the original VMA if a split was
1405  * required instead, or an error if the split failed.
1406  */
1407 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
1408 {
1409 	struct vm_area_struct *vma = vmg->vma;
1410 	struct vm_area_struct *merged;
1411 
1412 	/* First, try to merge. */
1413 	merged = vma_merge_existing_range(vmg);
1414 	if (merged)
1415 		return merged;
1416 
1417 	/* Split any preceding portion of the VMA. */
1418 	if (vma->vm_start < vmg->start) {
1419 		int err = split_vma(vmg->vmi, vma, vmg->start, 1);
1420 
1421 		if (err)
1422 			return ERR_PTR(err);
1423 	}
1424 
1425 	/* Split any trailing portion of the VMA. */
1426 	if (vma->vm_end > vmg->end) {
1427 		int err = split_vma(vmg->vmi, vma, vmg->end, 0);
1428 
1429 		if (err)
1430 			return ERR_PTR(err);
1431 	}
1432 
1433 	return vma;
1434 }
1435 
1436 struct vm_area_struct *vma_modify_flags(
1437 	struct vma_iterator *vmi, struct vm_area_struct *prev,
1438 	struct vm_area_struct *vma, unsigned long start, unsigned long end,
1439 	unsigned long new_flags)
1440 {
1441 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1442 
1443 	vmg.flags = new_flags;
1444 
1445 	return vma_modify(&vmg);
1446 }
1447 
1448 struct vm_area_struct
1449 *vma_modify_flags_name(struct vma_iterator *vmi,
1450 		       struct vm_area_struct *prev,
1451 		       struct vm_area_struct *vma,
1452 		       unsigned long start,
1453 		       unsigned long end,
1454 		       unsigned long new_flags,
1455 		       struct anon_vma_name *new_name)
1456 {
1457 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1458 
1459 	vmg.flags = new_flags;
1460 	vmg.anon_name = new_name;
1461 
1462 	return vma_modify(&vmg);
1463 }
1464 
1465 struct vm_area_struct
1466 *vma_modify_policy(struct vma_iterator *vmi,
1467 		   struct vm_area_struct *prev,
1468 		   struct vm_area_struct *vma,
1469 		   unsigned long start, unsigned long end,
1470 		   struct mempolicy *new_pol)
1471 {
1472 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1473 
1474 	vmg.policy = new_pol;
1475 
1476 	return vma_modify(&vmg);
1477 }
1478 
1479 struct vm_area_struct
1480 *vma_modify_flags_uffd(struct vma_iterator *vmi,
1481 		       struct vm_area_struct *prev,
1482 		       struct vm_area_struct *vma,
1483 		       unsigned long start, unsigned long end,
1484 		       unsigned long new_flags,
1485 		       struct vm_userfaultfd_ctx new_ctx)
1486 {
1487 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1488 
1489 	vmg.flags = new_flags;
1490 	vmg.uffd_ctx = new_ctx;
1491 
1492 	return vma_modify(&vmg);
1493 }
1494 
1495 /*
1496  * Expand vma by delta bytes, potentially merging with an immediately adjacent
1497  * VMA with identical properties.
1498  */
1499 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1500 					struct vm_area_struct *vma,
1501 					unsigned long delta)
1502 {
1503 	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
1504 
1505 	vmg.next = vma_iter_next_rewind(vmi, NULL);
1506 	vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */
1507 
1508 	return vma_merge_new_range(&vmg);
1509 }
1510 
1511 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1512 {
1513 	vb->count = 0;
1514 }
1515 
1516 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1517 {
1518 	struct address_space *mapping;
1519 	int i;
1520 
1521 	mapping = vb->vmas[0]->vm_file->f_mapping;
1522 	i_mmap_lock_write(mapping);
1523 	for (i = 0; i < vb->count; i++) {
1524 		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1525 		__remove_shared_vm_struct(vb->vmas[i], mapping);
1526 	}
1527 	i_mmap_unlock_write(mapping);
1528 
1529 	unlink_file_vma_batch_init(vb);
1530 }
1531 
1532 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1533 			       struct vm_area_struct *vma)
1534 {
1535 	if (vma->vm_file == NULL)
1536 		return;
1537 
1538 	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1539 	    vb->count == ARRAY_SIZE(vb->vmas))
1540 		unlink_file_vma_batch_process(vb);
1541 
1542 	vb->vmas[vb->count] = vma;
1543 	vb->count++;
1544 }
1545 
1546 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1547 {
1548 	if (vb->count > 0)
1549 		unlink_file_vma_batch_process(vb);
1550 }
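
/*
 * The three batch helpers above are meant to be used together; a caller
 * tearing down many file-backed VMAs (callers live outside this file) would
 * follow roughly this sketch:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for each vma being torn down:
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 *
 * Batching amortises the i_mmap_lock_write() round trips: the lock is taken
 * once per batch of VMAs sharing the same file rather than once per VMA.
 */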
1551 
1552 /*
1553  * Unlink a file-based vm structure from its interval tree, to hide
1554  * vma from rmap and vmtruncate before freeing its page tables.
1555  */
1556 void unlink_file_vma(struct vm_area_struct *vma)
1557 {
1558 	struct file *file = vma->vm_file;
1559 
1560 	if (file) {
1561 		struct address_space *mapping = file->f_mapping;
1562 
1563 		i_mmap_lock_write(mapping);
1564 		__remove_shared_vm_struct(vma, mapping);
1565 		i_mmap_unlock_write(mapping);
1566 	}
1567 }
1568 
1569 void vma_link_file(struct vm_area_struct *vma)
1570 {
1571 	struct file *file = vma->vm_file;
1572 	struct address_space *mapping;
1573 
1574 	if (file) {
1575 		mapping = file->f_mapping;
1576 		i_mmap_lock_write(mapping);
1577 		__vma_link_file(vma, mapping);
1578 		i_mmap_unlock_write(mapping);
1579 	}
1580 }
1581 
1582 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1583 {
1584 	VMA_ITERATOR(vmi, mm, 0);
1585 
1586 	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1587 	if (vma_iter_prealloc(&vmi, vma))
1588 		return -ENOMEM;
1589 
1590 	vma_start_write(vma);
1591 	vma_iter_store(&vmi, vma);
1592 	vma_link_file(vma);
1593 	mm->map_count++;
1594 	validate_mm(mm);
1595 	return 0;
1596 }
1597 
1598 /*
1599  * Copy the vma structure to a new location in the same mm,
1600  * prior to moving page table entries, to effect an mremap move.
1601  */
1602 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1603 	unsigned long addr, unsigned long len, pgoff_t pgoff,
1604 	bool *need_rmap_locks)
1605 {
1606 	struct vm_area_struct *vma = *vmap;
1607 	unsigned long vma_start = vma->vm_start;
1608 	struct mm_struct *mm = vma->vm_mm;
1609 	struct vm_area_struct *new_vma;
1610 	bool faulted_in_anon_vma = true;
1611 	VMA_ITERATOR(vmi, mm, addr);
1612 	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
1613 
1614 	/*
1615 	 * If anonymous vma has not yet been faulted, update new pgoff
1616 	 * to match new location, to increase its chance of merging.
1617 	 */
1618 	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1619 		pgoff = addr >> PAGE_SHIFT;
1620 		faulted_in_anon_vma = false;
1621 	}
1622 
1623 	new_vma = find_vma_prev(mm, addr, &vmg.prev);
1624 	if (new_vma && new_vma->vm_start < addr + len)
1625 		return NULL;	/* should never get here */
1626 
1627 	vmg.vma = NULL; /* New VMA range. */
1628 	vmg.pgoff = pgoff;
1629 	vmg.next = vma_iter_next_rewind(&vmi, NULL);
1630 	new_vma = vma_merge_new_range(&vmg);
1631 
1632 	if (new_vma) {
1633 		/*
1634 		 * Source vma may have been merged into new_vma
1635 		 */
1636 		if (unlikely(vma_start >= new_vma->vm_start &&
1637 			     vma_start < new_vma->vm_end)) {
1638 			/*
1639 			 * The only way we can get a vma_merge with
1640 			 * self during an mremap is if the vma hasn't
1641 			 * been faulted in yet and we were allowed to
1642 			 * reset the dst vma->vm_pgoff to the
1643 			 * destination address of the mremap to allow
1644 			 * the merge to happen. mremap must change the
1645 			 * vm_pgoff linearity between src and dst vmas
1646 			 * (in turn preventing a vma_merge) to be
1647 			 * safe. It is only safe to keep the vm_pgoff
1648 			 * linear if there are no pages mapped yet.
1649 			 */
1650 			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1651 			*vmap = vma = new_vma;
1652 		}
1653 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1654 	} else {
1655 		new_vma = vm_area_dup(vma);
1656 		if (!new_vma)
1657 			goto out;
1658 		vma_set_range(new_vma, addr, addr + len, pgoff);
1659 		if (vma_dup_policy(vma, new_vma))
1660 			goto out_free_vma;
1661 		if (anon_vma_clone(new_vma, vma))
1662 			goto out_free_mempol;
1663 		if (new_vma->vm_file)
1664 			get_file(new_vma->vm_file);
1665 		if (new_vma->vm_ops && new_vma->vm_ops->open)
1666 			new_vma->vm_ops->open(new_vma);
1667 		if (vma_link(mm, new_vma))
1668 			goto out_vma_link;
1669 		*need_rmap_locks = false;
1670 	}
1671 	return new_vma;
1672 
1673 out_vma_link:
1674 	if (new_vma->vm_ops && new_vma->vm_ops->close)
1675 		new_vma->vm_ops->close(new_vma);
1676 
1677 	if (new_vma->vm_file)
1678 		fput(new_vma->vm_file);
1679 
1680 	unlink_anon_vmas(new_vma);
1681 out_free_mempol:
1682 	mpol_put(vma_policy(new_vma));
1683 out_free_vma:
1684 	vm_area_free(new_vma);
1685 out:
1686 	return NULL;
1687 }
1688 
1689 /*
1690  * Rough compatibility check to quickly see if it's even worth looking
1691  * at sharing an anon_vma.
1692  *
1693  * They need to have the same vm_file, and the flags can only differ
1694  * in things that mprotect may change.
1695  *
1696  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1697  * we can merge the two vmas. For example, we refuse to merge a vma if
1698  * there is a vm_ops->close() function, because that indicates that the
1699  * driver is doing some kind of reference counting. But that doesn't
1700  * really matter for the anon_vma sharing case.
1701  */
1702 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1703 {
1704 	return a->vm_end == b->vm_start &&
1705 		mpol_equal(vma_policy(a), vma_policy(b)) &&
1706 		a->vm_file == b->vm_file &&
1707 		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1708 		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1709 }
1710 
1711 /*
1712  * Do some basic sanity checking to see if we can re-use the anon_vma
1713  * from 'old'. The 'a'/'b' vmas are in VM order - one of them will be
1714  * the same as 'old', the other will be the new one that is trying
1715  * to share the anon_vma.
1716  *
1717  * NOTE! This runs with mmap_lock held for reading, so it is possible that
1718  * the anon_vma of 'old' is concurrently in the process of being set up
1719  * by another page fault trying to merge _that_. But that's ok: if it
1720  * is being set up, that automatically means that it will be a singleton
1721  * acceptable for merging, so we can do all of this optimistically. But
1722  * we do that READ_ONCE() to make sure that we never re-load the pointer.
1723  *
1724  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1725  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1726  * is to return an anon_vma that is "complex" due to having gone through
1727  * a fork).
1728  *
1729  * We also make sure that the two vma's are compatible (adjacent,
1730  * and with the same memory policies). That's all stable, even with just
1731  * a read lock on the mmap_lock.
1732  */
1733 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
1734 					  struct vm_area_struct *a,
1735 					  struct vm_area_struct *b)
1736 {
1737 	if (anon_vma_compatible(a, b)) {
1738 		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1739 
1740 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
1741 			return anon_vma;
1742 	}
1743 	return NULL;
1744 }
1745 
1746 /*
1747  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1748  * neighbouring vmas for a suitable anon_vma, before it goes off
1749  * to allocate a new anon_vma.  It does this because a repetitive
1750  * sequence of mprotects and faults may otherwise lead to distinct
1751  * anon_vmas being allocated, preventing vma merge in subsequent
1752  * mprotect.
1753  */
1754 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1755 {
1756 	struct anon_vma *anon_vma = NULL;
1757 	struct vm_area_struct *prev, *next;
1758 	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
1759 
1760 	/* Try next first. */
1761 	next = vma_iter_load(&vmi);
1762 	if (next) {
1763 		anon_vma = reusable_anon_vma(next, vma, next);
1764 		if (anon_vma)
1765 			return anon_vma;
1766 	}
1767 
1768 	prev = vma_prev(&vmi);
1769 	VM_BUG_ON_VMA(prev != vma, vma);
1770 	prev = vma_prev(&vmi);
1771 	/* Now try prev. */
1772 	if (prev)
1773 		anon_vma = reusable_anon_vma(prev, prev, vma);
1774 
1775 	/*
1776 	 * We might reach here with anon_vma == NULL if we can't find
1777 	 * any reusable anon_vma.
1778 	 * There's no absolute need to look only at touching neighbours:
1779 	 * we could search further afield for "compatible" anon_vmas.
1780 	 * But it would probably just be a waste of time searching,
1781 	 * or lead to too many vmas hanging off the same anon_vma.
1782 	 * We're trying to allow mprotect remerging later on,
1783 	 * not trying to minimize memory used for anon_vmas.
1784 	 */
1785 	return anon_vma;
1786 }
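
/*
 * For context, a simplified sketch of how this is typically consumed by
 * __anon_vma_prepare() in mm/rmap.c; error handling, locking and the
 * anon_vma_chain linkage are omitted, and the real flow may differ:
 *
 *	anon_vma = find_mergeable_anon_vma(vma);
 *	if (!anon_vma)
 *		anon_vma = anon_vma_alloc();	(no reusable neighbour)
 *	... attach vma to anon_vma via an anon_vma_chain ...
 */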
1787 
1788 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1789 {
1790 	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1791 }
1792 
1793 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1794 {
1795 	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1796 		(VM_WRITE | VM_SHARED);
1797 }
1798 
1799 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1800 {
1801 	/* No managed pages to writeback. */
1802 	if (vma->vm_flags & VM_PFNMAP)
1803 		return false;
1804 
1805 	return vma->vm_file && vma->vm_file->f_mapping &&
1806 		mapping_can_writeback(vma->vm_file->f_mapping);
1807 }
1808 
1809 /*
1810  * Does this VMA require the underlying folios to have their dirty state
1811  * tracked?
1812  */
1813 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1814 {
1815 	/* Only shared, writable VMAs require dirty tracking. */
1816 	if (!vma_is_shared_writable(vma))
1817 		return false;
1818 
1819 	/* Does the filesystem need to be notified? */
1820 	if (vm_ops_needs_writenotify(vma->vm_ops))
1821 		return true;
1822 
1823 	/*
1824 	 * Even if the filesystem doesn't indicate a need for writenotify, if it
1825 	 * can writeback, dirty tracking is still required.
1826 	 */
1827 	return vma_fs_can_writeback(vma);
1828 }
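
/*
 * Illustrative only: a long-term GUP pin of a writable file mapping must be
 * refused when dirty state needs tracking, roughly along these lines (a
 * sketch of the kind of check done in mm/gup.c, not the exact code):
 *
 *	if ((flags & FOLL_LONGTERM) && vma_needs_dirty_tracking(vma))
 *		return false;	(writes via the pin could go untracked)
 */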
1829 
1830 /*
1831  * Some shared mappings will want the pages marked read-only
1832  * to track write events. If so, we'll downgrade vm_page_prot
1833  * to the private version (using protection_map[] without the
1834  * VM_SHARED bit).
1835  */
1836 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1837 {
1838 	/* If it was private or non-writable, the write bit is already clear */
1839 	if (!vma_is_shared_writable(vma))
1840 		return false;
1841 
1842 	/* The backer wishes to know when pages are first written to? */
1843 	if (vm_ops_needs_writenotify(vma->vm_ops))
1844 		return true;
1845 
1846 	/* The open routine did something to the protections that pgprot_modify
1847 	 * won't preserve? */
1848 	if (pgprot_val(vm_page_prot) !=
1849 	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1850 		return false;
1851 
1852 	/*
1853 	 * Do we need to track softdirty? hugetlb does not support softdirty
1854 	 * tracking yet.
1855 	 */
1856 	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1857 		return true;
1858 
1859 	/* Do we need write faults for uffd-wp tracking? */
1860 	if (userfaultfd_wp(vma))
1861 		return true;
1862 
1863 	/* Can the mapping track the dirty pages? */
1864 	return vma_fs_can_writeback(vma);
1865 }
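
/*
 * A minimal sketch of how this predicate is consumed when computing
 * vma->vm_page_prot (modelled on vma_set_page_prot() in mm/mmap.c,
 * simplified and possibly differing in detail):
 *
 *	unsigned long vm_flags = vma->vm_flags;
 *	pgprot_t prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
 *
 *	if (vma_wants_writenotify(vma, prot)) {
 *		vm_flags &= ~VM_SHARED;
 *		prot = vm_pgprot_modify(prot, vm_flags);	(now read-only)
 *	}
 *	WRITE_ONCE(vma->vm_page_prot, prot);
 */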
1866 
1867 static DEFINE_MUTEX(mm_all_locks_mutex);
1868 
1869 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
1870 {
1871 	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
1872 		/*
1873 		 * The LSB of head.next can't change from under us
1874 		 * because we hold the mm_all_locks_mutex.
1875 		 */
1876 		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
1877 		/*
1878 		 * We can safely modify head.next after taking the
1879 		 * anon_vma->root->rwsem. If some other vma in this mm shares
1880 		 * the same anon_vma we won't take it again.
1881 		 *
1882 		 * No need of atomic instructions here, head.next
1883 		 * can't change from under us thanks to the
1884 		 * anon_vma->root->rwsem.
1885 		 */
1886 		if (__test_and_set_bit(0, (unsigned long *)
1887 				       &anon_vma->root->rb_root.rb_root.rb_node))
1888 			BUG();
1889 	}
1890 }
1891 
1892 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
1893 {
1894 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
1895 		/*
1896 		 * AS_MM_ALL_LOCKS can't change from under us because
1897 		 * we hold the mm_all_locks_mutex.
1898 		 *
1899 		 * Operations on ->flags have to be atomic because
1900 		 * even if AS_MM_ALL_LOCKS is stable thanks to the
1901 		 * mm_all_locks_mutex, there may be other cpus
1902 		 * changing other bitflags in parallel to us.
1903 		 */
1904 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
1905 			BUG();
1906 		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
1907 	}
1908 }
1909 
1910 /*
1911  * This operation locks against the VM for all pte/vma/mm related
1912  * operations that could ever happen on a certain mm. This includes
1913  * vmtruncate, try_to_unmap, and all page faults.
1914  *
1915  * The caller must take the mmap_lock in write mode before calling
1916  * mm_take_all_locks(). The caller isn't allowed to release the
1917  * mmap_lock until mm_drop_all_locks() returns.
1918  *
1919  * mmap_lock in write mode is required in order to block all operations
1920  * that could modify pagetables and free pages without needing to
1921  * alter the vma layout. It's also needed in write mode to prevent new
1922  * anon_vmas from being associated with existing vmas.
1923  *
1924  * A single task can't take more than one mm_take_all_locks() in a row
1925  * or it would deadlock.
1926  *
1927  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
1928  * mapping->flags avoid taking the same lock twice if more than one
1929  * vma in this mm is backed by the same anon_vma or address_space.
1930  *
1931  * We take locks in the following order, according to the comment at the
1932  * beginning of mm/rmap.c:
1933  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
1934  *     hugetlb mappings);
1935  *   - all vmas marked locked;
1936  *   - all i_mmap_rwsem locks;
1937  *   - all anon_vma->rwsem locks.
1938  *
1939  * We can take all locks within these types in any order because the VM code
1940  * doesn't nest them and we are protected from parallel mm_take_all_locks() by
1941  * mm_all_locks_mutex.
1942  *
1943  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
1944  * that may have to take thousands of locks.
1945  *
1946  * mm_take_all_locks() can fail if it's interrupted by signals.
1947  */
1948 int mm_take_all_locks(struct mm_struct *mm)
1949 {
1950 	struct vm_area_struct *vma;
1951 	struct anon_vma_chain *avc;
1952 	VMA_ITERATOR(vmi, mm, 0);
1953 
1954 	mmap_assert_write_locked(mm);
1955 
1956 	mutex_lock(&mm_all_locks_mutex);
1957 
1958 	/*
1959 	 * vma_start_write() does not have a complement in mm_drop_all_locks()
1960 	 * because vma_start_write() is always asymmetrical; it marks a VMA as
1961 	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
1962 	 * is reached.
1963 	 */
1964 	for_each_vma(vmi, vma) {
1965 		if (signal_pending(current))
1966 			goto out_unlock;
1967 		vma_start_write(vma);
1968 	}
1969 
1970 	vma_iter_init(&vmi, mm, 0);
1971 	for_each_vma(vmi, vma) {
1972 		if (signal_pending(current))
1973 			goto out_unlock;
1974 		if (vma->vm_file && vma->vm_file->f_mapping &&
1975 				is_vm_hugetlb_page(vma))
1976 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
1977 	}
1978 
1979 	vma_iter_init(&vmi, mm, 0);
1980 	for_each_vma(vmi, vma) {
1981 		if (signal_pending(current))
1982 			goto out_unlock;
1983 		if (vma->vm_file && vma->vm_file->f_mapping &&
1984 				!is_vm_hugetlb_page(vma))
1985 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
1986 	}
1987 
1988 	vma_iter_init(&vmi, mm, 0);
1989 	for_each_vma(vmi, vma) {
1990 		if (signal_pending(current))
1991 			goto out_unlock;
1992 		if (vma->anon_vma)
1993 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
1994 				vm_lock_anon_vma(mm, avc->anon_vma);
1995 	}
1996 
1997 	return 0;
1998 
1999 out_unlock:
2000 	mm_drop_all_locks(mm);
2001 	return -EINTR;
2002 }
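
/*
 * A hedged usage sketch, modelled on callers such as mmu notifier
 * registration (mm/mmu_notifier.c); simplified, not the exact upstream
 * sequence:
 *
 *	mmap_write_lock(mm);
 *	if (mm_take_all_locks(mm)) {
 *		mmap_write_unlock(mm);
 *		return -EINTR;		(a pending signal aborted the walk)
 *	}
 *	... do work that must exclude faults, rmap walks and truncation ...
 *	mm_drop_all_locks(mm);
 *	mmap_write_unlock(mm);
 */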
2003 
2004 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2005 {
2006 	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2007 		/*
2008 		 * The LSB of head.next can't change to 0 from under
2009 		 * us because we hold the mm_all_locks_mutex.
2010 		 *
2011 		 * We must however clear the bitflag before unlocking
2012 		 * the vma so the users using the anon_vma->rb_root will
2013 		 * never see our bitflag.
2014 		 *
2015 		 * No need of atomic instructions here, head.next
2016 		 * can't change from under us until we release the
2017 		 * anon_vma->root->rwsem.
2018 		 */
2019 		if (!__test_and_clear_bit(0, (unsigned long *)
2020 					  &anon_vma->root->rb_root.rb_root.rb_node))
2021 			BUG();
2022 		anon_vma_unlock_write(anon_vma);
2023 	}
2024 }
2025 
2026 static void vm_unlock_mapping(struct address_space *mapping)
2027 {
2028 	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2029 		/*
2030 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
2031 		 * because we hold the mm_all_locks_mutex.
2032 		 */
2033 		i_mmap_unlock_write(mapping);
2034 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2035 					&mapping->flags))
2036 			BUG();
2037 	}
2038 }
2039 
2040 /*
2041  * The mmap_lock cannot be released by the caller until
2042  * mm_drop_all_locks() returns.
2043  */
2044 void mm_drop_all_locks(struct mm_struct *mm)
2045 {
2046 	struct vm_area_struct *vma;
2047 	struct anon_vma_chain *avc;
2048 	VMA_ITERATOR(vmi, mm, 0);
2049 
2050 	mmap_assert_write_locked(mm);
2051 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2052 
2053 	for_each_vma(vmi, vma) {
2054 		if (vma->anon_vma)
2055 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2056 				vm_unlock_anon_vma(avc->anon_vma);
2057 		if (vma->vm_file && vma->vm_file->f_mapping)
2058 			vm_unlock_mapping(vma->vm_file->f_mapping);
2059 	}
2060 
2061 	mutex_unlock(&mm_all_locks_mutex);
2062 }
2063