xref: /linux/mm/vma.c (revision 9c3ebeda8fb5a8e9e82ab9364ec3d4b80cd0ec3d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /*
4  * VMA-specific functions.
5  */
6 
7 #include "vma_internal.h"
8 #include "vma.h"
9 
10 /*
11  * If the vma has a ->close operation then the driver probably needs to release
12  * per-vma resources, so we don't attempt to merge those if the caller indicates
13  * the current vma may be removed as part of the merge.
14  */
15 static inline bool is_mergeable_vma(struct vm_area_struct *vma,
16 		struct file *file, unsigned long vm_flags,
17 		struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
18 		struct anon_vma_name *anon_name, bool may_remove_vma)
19 {
20 	/*
21 	 * VM_SOFTDIRTY should not prevent VMA merging: if the flags
22 	 * match apart from the dirty bit, the caller should mark the
23 	 * merged VMA as dirty. If the dirty bit were not excluded from
24 	 * the comparison, we would increase pressure on the memory
25 	 * system by forcing the kernel to generate new VMAs where old
26 	 * ones could have been extended instead.
27 	 */
28 	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
29 		return false;
30 	if (vma->vm_file != file)
31 		return false;
32 	if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
33 		return false;
34 	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
35 		return false;
36 	if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
37 		return false;
38 	return true;
39 }
40 
41 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
42 		 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
43 {
44 	/*
45 	 * The list_is_singular() test is to avoid merging VMAs cloned from
46 	 * parents. This improves scalability by reducing anon_vma lock contention.
47 	 */
48 	if ((!anon_vma1 || !anon_vma2) && (!vma ||
49 		list_is_singular(&vma->anon_vma_chain)))
50 		return true;
51 	return anon_vma1 == anon_vma2;
52 }
53 
54 /*
55  * init_multi_vma_prep() - Initializer for struct vma_prepare
56  * @vp: The vma_prepare struct
57  * @vma: The vma that will be altered once locked
58  * @next: The next vma if it is to be adjusted
59  * @remove: The first vma to be removed
60  * @remove2: The second vma to be removed
61  */
62 static void init_multi_vma_prep(struct vma_prepare *vp,
63 				struct vm_area_struct *vma,
64 				struct vm_area_struct *next,
65 				struct vm_area_struct *remove,
66 				struct vm_area_struct *remove2)
67 {
68 	memset(vp, 0, sizeof(struct vma_prepare));
69 	vp->vma = vma;
70 	vp->anon_vma = vma->anon_vma;
71 	vp->remove = remove;
72 	vp->remove2 = remove2;
73 	vp->adj_next = next;
74 	if (!vp->anon_vma && next)
75 		vp->anon_vma = next->anon_vma;
76 
77 	vp->file = vma->vm_file;
78 	if (vp->file)
79 		vp->mapping = vma->vm_file->f_mapping;
80 
81 }
82 
83 /*
84  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
85  * in front of (at a lower virtual address and file offset than) the vma.
86  *
87  * We cannot merge two vmas if they have differently assigned (non-NULL)
88  * anon_vmas, nor if the same anon_vma is assigned but the offsets are incompatible.
89  *
90  * We don't check here for the merged mmap wrapping around the end of pagecache
91  * indices (16TB on ia32) because do_mmap() does not permit mmaps which
92  * wrap, nor mmaps which cover the final page at index -1UL.
93  *
94  * We assume the vma may be removed as part of the merge.
95  */
96 bool
97 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
98 		struct anon_vma *anon_vma, struct file *file,
99 		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
100 		struct anon_vma_name *anon_name)
101 {
102 	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
103 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
104 		if (vma->vm_pgoff == vm_pgoff)
105 			return true;
106 	}
107 	return false;
108 }
109 
110 /*
111  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
112  * beyond (at a higher virtual address and file offset than) the vma.
113  *
114  * We cannot merge two vmas if they have differently assigned (non-NULL)
115  * anon_vmas, nor if the same anon_vma is assigned but the offsets are incompatible.
116  *
117  * We assume that vma is not removed as part of the merge.
118  */
119 bool
120 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
121 		struct anon_vma *anon_vma, struct file *file,
122 		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
123 		struct anon_vma_name *anon_name)
124 {
125 	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
126 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
127 		pgoff_t vm_pglen;
128 
129 		vm_pglen = vma_pages(vma);
130 		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
131 			return true;
132 	}
133 	return false;
134 }
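
/*
 * Informal worked example (hypothetical values, not part of the code above):
 * consider a file-backed VMA spanning [0x1000, 0x5000) with vm_pgoff 0x10.
 * vma_pages() is 4, so a request with the same flags/file may merge:
 *
 *	- before it only if its vm_pgoff at address 0x1000 would be 0x10, and
 *	- after it only if its vm_pgoff at address 0x5000 is 0x10 + 4 = 0x14,
 *
 * i.e. the file offsets must stay contiguous with the virtual addresses.
 */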
135 
136 /*
137  * Close a vm structure and free it.
138  */
139 void remove_vma(struct vm_area_struct *vma, bool unreachable)
140 {
141 	might_sleep();
142 	if (vma->vm_ops && vma->vm_ops->close)
143 		vma->vm_ops->close(vma);
144 	if (vma->vm_file)
145 		fput(vma->vm_file);
146 	mpol_put(vma_policy(vma));
147 	if (unreachable)
148 		__vm_area_free(vma);
149 	else
150 		vm_area_free(vma);
151 }
152 
153 /*
154  * Get rid of page table information in the indicated region.
155  *
156  * Called with the mm semaphore held.
157  */
158 void unmap_region(struct mm_struct *mm, struct ma_state *mas,
159 		struct vm_area_struct *vma, struct vm_area_struct *prev,
160 		struct vm_area_struct *next, unsigned long start,
161 		unsigned long end, unsigned long tree_end, bool mm_wr_locked)
162 {
163 	struct mmu_gather tlb;
164 	unsigned long mt_start = mas->index;
165 
166 	lru_add_drain();
167 	tlb_gather_mmu(&tlb, mm);
168 	update_hiwater_rss(mm);
169 	unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
170 	mas_set(mas, mt_start);
171 	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
172 				 next ? next->vm_start : USER_PGTABLES_CEILING,
173 				 mm_wr_locked);
174 	tlb_finish_mmu(&tlb);
175 }
176 
177 /*
178  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
179  * has already been checked or doesn't make sense to fail.
180  * VMA Iterator will point to the original VMA.
181  */
182 static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
183 		       unsigned long addr, int new_below)
184 {
185 	struct vma_prepare vp;
186 	struct vm_area_struct *new;
187 	int err;
188 
189 	WARN_ON(vma->vm_start >= addr);
190 	WARN_ON(vma->vm_end <= addr);
191 
192 	if (vma->vm_ops && vma->vm_ops->may_split) {
193 		err = vma->vm_ops->may_split(vma, addr);
194 		if (err)
195 			return err;
196 	}
197 
198 	new = vm_area_dup(vma);
199 	if (!new)
200 		return -ENOMEM;
201 
202 	if (new_below) {
203 		new->vm_end = addr;
204 	} else {
205 		new->vm_start = addr;
206 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
207 	}
208 
209 	err = -ENOMEM;
210 	vma_iter_config(vmi, new->vm_start, new->vm_end);
211 	if (vma_iter_prealloc(vmi, new))
212 		goto out_free_vma;
213 
214 	err = vma_dup_policy(vma, new);
215 	if (err)
216 		goto out_free_vmi;
217 
218 	err = anon_vma_clone(new, vma);
219 	if (err)
220 		goto out_free_mpol;
221 
222 	if (new->vm_file)
223 		get_file(new->vm_file);
224 
225 	if (new->vm_ops && new->vm_ops->open)
226 		new->vm_ops->open(new);
227 
228 	vma_start_write(vma);
229 	vma_start_write(new);
230 
231 	init_vma_prep(&vp, vma);
232 	vp.insert = new;
233 	vma_prepare(&vp);
234 	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
235 
236 	if (new_below) {
237 		vma->vm_start = addr;
238 		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
239 	} else {
240 		vma->vm_end = addr;
241 	}
242 
243 	/* vma_complete stores the new vma */
244 	vma_complete(&vp, vmi, vma->vm_mm);
245 	validate_mm(vma->vm_mm);
246 
247 	/* Success. */
248 	if (new_below)
249 		vma_next(vmi);
250 	else
251 		vma_prev(vmi);
252 
253 	return 0;
254 
255 out_free_mpol:
256 	mpol_put(vma_policy(new));
257 out_free_vmi:
258 	vma_iter_free(vmi);
259 out_free_vma:
260 	vm_area_free(new);
261 	return err;
262 }
263 
264 /*
265  * Split a vma into two pieces at address 'addr', a new vma is allocated
266  * either for the first part or the tail.
267  */
268 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
269 		     unsigned long addr, int new_below)
270 {
271 	if (vma->vm_mm->map_count >= sysctl_max_map_count)
272 		return -ENOMEM;
273 
274 	return __split_vma(vmi, vma, addr, new_below);
275 }
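
/*
 * Informal usage sketch (a caller-side view, not kernel code): splitting a
 * VMA [A, C) at address B keeps the original vm_area_struct for one half and
 * allocates a new one for the other, depending on new_below:
 *
 *	split_vma(vmi, vma, B, 1);	// new VMA = [A, B), vma becomes [B, C)
 *	split_vma(vmi, vma, B, 0);	// vma becomes [A, B), new VMA = [B, C)
 *
 * Either way the VMA iterator is left pointing at the original vma, as noted
 * in the __split_vma() comment above.
 */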
276 
277 /*
278  * init_vma_prep() - Initializer wrapper for vma_prepare struct
279  * @vp: The vma_prepare struct
280  * @vma: The vma that will be altered once locked
281  */
282 void init_vma_prep(struct vma_prepare *vp,
283 		   struct vm_area_struct *vma)
284 {
285 	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
286 }
287 
288 /*
289  * Requires inode->i_mapping->i_mmap_rwsem
290  */
291 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
292 				      struct address_space *mapping)
293 {
294 	if (vma_is_shared_maywrite(vma))
295 		mapping_unmap_writable(mapping);
296 
297 	flush_dcache_mmap_lock(mapping);
298 	vma_interval_tree_remove(vma, &mapping->i_mmap);
299 	flush_dcache_mmap_unlock(mapping);
300 }
301 
302 /*
303  * vma has some anon_vma assigned, and is already inserted on that
304  * anon_vma's interval trees.
305  *
306  * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
307  * vma must be removed from the anon_vma's interval trees using
308  * anon_vma_interval_tree_pre_update_vma().
309  *
310  * After the update, the vma will be reinserted using
311  * anon_vma_interval_tree_post_update_vma().
312  *
313  * The entire update must be protected by exclusive mmap_lock and by
314  * the root anon_vma's mutex.
315  */
316 void
317 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
318 {
319 	struct anon_vma_chain *avc;
320 
321 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
322 		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
323 }
324 
325 void
326 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
327 {
328 	struct anon_vma_chain *avc;
329 
330 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
331 		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
332 }
333 
334 static void __vma_link_file(struct vm_area_struct *vma,
335 			    struct address_space *mapping)
336 {
337 	if (vma_is_shared_maywrite(vma))
338 		mapping_allow_writable(mapping);
339 
340 	flush_dcache_mmap_lock(mapping);
341 	vma_interval_tree_insert(vma, &mapping->i_mmap);
342 	flush_dcache_mmap_unlock(mapping);
343 }
344 
345 /*
346  * vma_prepare() - Helper function for locking VMAs prior to altering them
347  * @vp: The initialized vma_prepare struct
348  */
349 void vma_prepare(struct vma_prepare *vp)
350 {
351 	if (vp->file) {
352 		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
353 
354 		if (vp->adj_next)
355 			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
356 				      vp->adj_next->vm_end);
357 
358 		i_mmap_lock_write(vp->mapping);
359 		if (vp->insert && vp->insert->vm_file) {
360 			/*
361 			 * Put into interval tree now, so instantiated pages
362 			 * are visible to arm/parisc __flush_dcache_page
363 			 * throughout; but we cannot insert into address
364 			 * space until vma start or end is updated.
365 			 */
366 			__vma_link_file(vp->insert,
367 					vp->insert->vm_file->f_mapping);
368 		}
369 	}
370 
371 	if (vp->anon_vma) {
372 		anon_vma_lock_write(vp->anon_vma);
373 		anon_vma_interval_tree_pre_update_vma(vp->vma);
374 		if (vp->adj_next)
375 			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
376 	}
377 
378 	if (vp->file) {
379 		flush_dcache_mmap_lock(vp->mapping);
380 		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
381 		if (vp->adj_next)
382 			vma_interval_tree_remove(vp->adj_next,
383 						 &vp->mapping->i_mmap);
384 	}
385 
386 }
387 
388 /*
389  * dup_anon_vma() - Helper function to duplicate anon_vma
390  * @dst: The destination VMA
391  * @src: The source VMA
392  * @dup: Pointer used to return the destination VMA when an anon_vma is duplicated.
393  *
394  * Returns: 0 on success.
395  */
396 static int dup_anon_vma(struct vm_area_struct *dst,
397 			struct vm_area_struct *src, struct vm_area_struct **dup)
398 {
399 	/*
400 	 * Easily overlooked: when mprotect shifts the boundary, make sure the
401 	 * expanding vma has anon_vma set if the shrinking vma had, to cover any
402 	 * anon pages imported.
403 	 */
404 	if (src->anon_vma && !dst->anon_vma) {
405 		int ret;
406 
407 		vma_assert_write_locked(dst);
408 		dst->anon_vma = src->anon_vma;
409 		ret = anon_vma_clone(dst, src);
410 		if (ret)
411 			return ret;
412 
413 		*dup = dst;
414 	}
415 
416 	return 0;
417 }
418 
419 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
420 void validate_mm(struct mm_struct *mm)
421 {
422 	int bug = 0;
423 	int i = 0;
424 	struct vm_area_struct *vma;
425 	VMA_ITERATOR(vmi, mm, 0);
426 
427 	mt_validate(&mm->mm_mt);
428 	for_each_vma(vmi, vma) {
429 #ifdef CONFIG_DEBUG_VM_RB
430 		struct anon_vma *anon_vma = vma->anon_vma;
431 		struct anon_vma_chain *avc;
432 #endif
433 		unsigned long vmi_start, vmi_end;
434 		bool warn = 0;
435 
436 		vmi_start = vma_iter_addr(&vmi);
437 		vmi_end = vma_iter_end(&vmi);
438 		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
439 			warn = 1;
440 
441 		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
442 			warn = 1;
443 
444 		if (warn) {
445 			pr_emerg("issue in %s\n", current->comm);
446 			dump_stack();
447 			dump_vma(vma);
448 			pr_emerg("tree range: %px start %lx end %lx\n", vma,
449 				 vmi_start, vmi_end - 1);
450 			vma_iter_dump_tree(&vmi);
451 		}
452 
453 #ifdef CONFIG_DEBUG_VM_RB
454 		if (anon_vma) {
455 			anon_vma_lock_read(anon_vma);
456 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
457 				anon_vma_interval_tree_verify(avc);
458 			anon_vma_unlock_read(anon_vma);
459 		}
460 #endif
461 		i++;
462 	}
463 	if (i != mm->map_count) {
464 		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
465 		bug = 1;
466 	}
467 	VM_BUG_ON_MM(bug, mm);
468 }
469 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
470 
471 /*
472  * vma_expand - Expand an existing VMA
473  *
474  * @vmi: The vma iterator
475  * @vma: The vma to expand
476  * @start: The start of the vma
477  * @end: The exclusive end of the vma
478  * @pgoff: The page offset of vma
479  * @next: The current or next vma; removed if @vma expands over it.
480  *
481  * Expand @vma to @start and @end.  Can expand off the start and end.  Will
482  * expand over @next if it's different from @vma and @end == @next->vm_end.
483  * Checking if the @vma can expand and merge with @next needs to be handled by
484  * the caller.
485  *
486  * Returns: 0 on success
487  */
488 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
489 	       unsigned long start, unsigned long end, pgoff_t pgoff,
490 	       struct vm_area_struct *next)
491 {
492 	struct vm_area_struct *anon_dup = NULL;
493 	bool remove_next = false;
494 	struct vma_prepare vp;
495 
496 	vma_start_write(vma);
497 	if (next && (vma != next) && (end == next->vm_end)) {
498 		int ret;
499 
500 		remove_next = true;
501 		vma_start_write(next);
502 		ret = dup_anon_vma(vma, next, &anon_dup);
503 		if (ret)
504 			return ret;
505 	}
506 
507 	init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
508 	/* Not merging but overwriting any part of next is not handled. */
509 	VM_WARN_ON(next && !vp.remove &&
510 		  next != vma && end > next->vm_start);
511 	/* Only handles expanding */
512 	VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
513 
514 	/* Note: vma iterator must be pointing to 'start' */
515 	vma_iter_config(vmi, start, end);
516 	if (vma_iter_prealloc(vmi, vma))
517 		goto nomem;
518 
519 	vma_prepare(&vp);
520 	vma_adjust_trans_huge(vma, start, end, 0);
521 	vma_set_range(vma, start, end, pgoff);
522 	vma_iter_store(vmi, vma);
523 
524 	vma_complete(&vp, vmi, vma->vm_mm);
525 	validate_mm(vma->vm_mm);
526 	return 0;
527 
528 nomem:
529 	if (anon_dup)
530 		unlink_anon_vmas(anon_dup);
531 	return -ENOMEM;
532 }
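
/*
 * Informal sketch (hypothetical ranges; the caller must already have verified
 * mergeability): growing a VMA [0x2000, 0x3000) to cover [0x2000, 0x4000),
 * where a mergeable VMA 'next' already spans [0x3000, 0x4000), removes next:
 *
 *	vma_iter_set(&vmi, 0x2000);
 *	error = vma_expand(&vmi, vma, 0x2000, 0x4000, vma->vm_pgoff, next);
 *	// on success, vma spans [0x2000, 0x4000) and next has been removed
 *	// via the vma_prepare()/vma_complete() machinery.
 */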
533 
534 /*
535  * vma_shrink() - Reduce an existing VMA's memory area
536  * @vmi: The vma iterator
537  * @vma: The VMA to modify
538  * @start: The new start
539  * @end: The new end
540  *
541  * Returns: 0 on success, -ENOMEM otherwise
542  */
543 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
544 	       unsigned long start, unsigned long end, pgoff_t pgoff)
545 {
546 	struct vma_prepare vp;
547 
548 	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
549 
550 	if (vma->vm_start < start)
551 		vma_iter_config(vmi, vma->vm_start, start);
552 	else
553 		vma_iter_config(vmi, end, vma->vm_end);
554 
555 	if (vma_iter_prealloc(vmi, NULL))
556 		return -ENOMEM;
557 
558 	vma_start_write(vma);
559 
560 	init_vma_prep(&vp, vma);
561 	vma_prepare(&vp);
562 	vma_adjust_trans_huge(vma, start, end, 0);
563 
564 	vma_iter_clear(vmi);
565 	vma_set_range(vma, start, end, pgoff);
566 	vma_complete(&vp, vmi, vma->vm_mm);
567 	validate_mm(vma->vm_mm);
568 	return 0;
569 }
570 
571 /*
572  * vma_complete() - Helper function for handling the unlocking after altering VMAs,
573  * or for inserting a VMA.
574  *
575  * @vp: The vma_prepare struct
576  * @vmi: The vma iterator
577  * @mm: The mm_struct
578  */
579 void vma_complete(struct vma_prepare *vp,
580 		  struct vma_iterator *vmi, struct mm_struct *mm)
581 {
582 	if (vp->file) {
583 		if (vp->adj_next)
584 			vma_interval_tree_insert(vp->adj_next,
585 						 &vp->mapping->i_mmap);
586 		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
587 		flush_dcache_mmap_unlock(vp->mapping);
588 	}
589 
590 	if (vp->remove && vp->file) {
591 		__remove_shared_vm_struct(vp->remove, vp->mapping);
592 		if (vp->remove2)
593 			__remove_shared_vm_struct(vp->remove2, vp->mapping);
594 	} else if (vp->insert) {
595 		/*
596 		 * split_vma has split insert from vma, and needs
597 		 * us to insert it before dropping the locks
598 		 * (it may either follow vma or precede it).
599 		 */
600 		vma_iter_store(vmi, vp->insert);
601 		mm->map_count++;
602 	}
603 
604 	if (vp->anon_vma) {
605 		anon_vma_interval_tree_post_update_vma(vp->vma);
606 		if (vp->adj_next)
607 			anon_vma_interval_tree_post_update_vma(vp->adj_next);
608 		anon_vma_unlock_write(vp->anon_vma);
609 	}
610 
611 	if (vp->file) {
612 		i_mmap_unlock_write(vp->mapping);
613 		uprobe_mmap(vp->vma);
614 
615 		if (vp->adj_next)
616 			uprobe_mmap(vp->adj_next);
617 	}
618 
619 	if (vp->remove) {
620 again:
621 		vma_mark_detached(vp->remove, true);
622 		if (vp->file) {
623 			uprobe_munmap(vp->remove, vp->remove->vm_start,
624 				      vp->remove->vm_end);
625 			fput(vp->file);
626 		}
627 		if (vp->remove->anon_vma)
628 			anon_vma_merge(vp->vma, vp->remove);
629 		mm->map_count--;
630 		mpol_put(vma_policy(vp->remove));
631 		if (!vp->remove2)
632 			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
633 		vm_area_free(vp->remove);
634 
635 		/*
636 		 * In mprotect's case 6 (see comments on vma_merge),
637 		 * we are removing both the curr and next vmas
638 		 */
639 		if (vp->remove2) {
640 			vp->remove = vp->remove2;
641 			vp->remove2 = NULL;
642 			goto again;
643 		}
644 	}
645 	if (vp->insert && vp->file)
646 		uprobe_mmap(vp->insert);
647 }
648 
649 static void vms_complete_pte_clear(struct vma_munmap_struct *vms,
650 		struct ma_state *mas_detach, bool mm_wr_locked)
651 {
652 	struct mmu_gather tlb;
653 
654 	/*
655 	 * We can free page tables without write-locking mmap_lock because VMAs
656 	 * were isolated before we downgraded mmap_lock.
657 	 */
658 	mas_set(mas_detach, 1);
659 	lru_add_drain();
660 	tlb_gather_mmu(&tlb, vms->mm);
661 	update_hiwater_rss(vms->mm);
662 	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, vms->vma_count, mm_wr_locked);
663 	mas_set(mas_detach, 1);
664 	/* start and end may be different if there is no prev or next vma. */
665 	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start, vms->unmap_end, mm_wr_locked);
666 	tlb_finish_mmu(&tlb);
667 }
668 
669 /*
670  * vms_complete_munmap_vmas() - Finish the munmap() operation
671  * @vms: The vma munmap struct
672  * @mas_detach: The maple state of the detached vmas
673  *
674  * This updates the mm_struct, unmaps the region, frees the resources
675  * used for the munmap() and may downgrade the lock, if requested.  It handles
676  * everything that needs to be done once the vma maple tree has been updated.
677  */
678 void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
679 		struct ma_state *mas_detach)
680 {
681 	struct vm_area_struct *vma;
682 	struct mm_struct *mm;
683 
684 	mm = vms->mm;
685 	mm->map_count -= vms->vma_count;
686 	mm->locked_vm -= vms->locked_vm;
687 	if (vms->unlock)
688 		mmap_write_downgrade(mm);
689 
690 	vms_complete_pte_clear(vms, mas_detach, !vms->unlock);
691 	/* Update high watermark before we lower total_vm */
692 	update_hiwater_vm(mm);
693 	/* Stat accounting */
694 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
695 	/* Paranoid bookkeeping */
696 	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
697 	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
698 	VM_WARN_ON(vms->data_vm > mm->data_vm);
699 	mm->exec_vm -= vms->exec_vm;
700 	mm->stack_vm -= vms->stack_vm;
701 	mm->data_vm -= vms->data_vm;
702 
703 	/* Remove and clean up vmas */
704 	mas_set(mas_detach, 0);
705 	mas_for_each(mas_detach, vma, ULONG_MAX)
706 		remove_vma(vma, false);
707 
708 	vm_unacct_memory(vms->nr_accounted);
709 	validate_mm(mm);
710 	if (vms->unlock)
711 		mmap_read_unlock(mm);
712 
713 	__mt_destroy(mas_detach->tree);
714 }
715 
716 /*
717  * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
718  * for removal at a later date.  Handles splitting first and last if necessary
719  * and marking the vmas as isolated.
720  *
721  * @vms: The vma munmap struct
722  * @mas_detach: The maple state tracking the detached tree
723  *
724  * Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
725  */
726 int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
727 		struct ma_state *mas_detach)
728 {
729 	struct vm_area_struct *next = NULL;
730 	int error = -ENOMEM;
731 
732 	/*
733 	 * If we need to split any vma, do it now to save pain later.
734 	 *
735 	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
736 	 * unmapped vm_area_struct will remain in use: so lower split_vma
737 	 * places tmp vma above, and higher split_vma places tmp vma below.
738 	 */
739 
740 	/* Does it split the first one? */
741 	if (vms->start > vms->vma->vm_start) {
742 
743 		/*
744 		 * Make sure that map_count on return from munmap() will
745 		 * not exceed its limit; but let map_count go just above
746 		 * its limit temporarily, to help free resources as expected.
747 		 */
748 		if (vms->end < vms->vma->vm_end &&
749 		    vms->mm->map_count >= sysctl_max_map_count)
750 			goto map_count_exceeded;
751 
752 		/* Don't bother splitting the VMA if we can't unmap it anyway */
753 		if (!can_modify_vma(vms->vma)) {
754 			error = -EPERM;
755 			goto start_split_failed;
756 		}
757 
758 		if (__split_vma(vms->vmi, vms->vma, vms->start, 1))
759 			goto start_split_failed;
760 	}
761 	vms->prev = vma_prev(vms->vmi);
762 	if (vms->prev)
763 		vms->unmap_start = vms->prev->vm_end;
764 
765 	/*
766 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
767 	 * it is always overwritten.
768 	 */
769 	for_each_vma_range(*(vms->vmi), next, vms->end) {
770 		long nrpages;
771 
772 		if (!can_modify_vma(next)) {
773 			error = -EPERM;
774 			goto modify_vma_failed;
775 		}
776 		/* Does it split the end? */
777 		if (next->vm_end > vms->end) {
778 			if (__split_vma(vms->vmi, next, vms->end, 0))
779 				goto end_split_failed;
780 		}
781 		vma_start_write(next);
782 		mas_set(mas_detach, vms->vma_count++);
783 		if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
784 			goto munmap_gather_failed;
785 
786 		vma_mark_detached(next, true);
787 		nrpages = vma_pages(next);
788 
789 		vms->nr_pages += nrpages;
790 		if (next->vm_flags & VM_LOCKED)
791 			vms->locked_vm += nrpages;
792 
793 		if (next->vm_flags & VM_ACCOUNT)
794 			vms->nr_accounted += nrpages;
795 
796 		if (is_exec_mapping(next->vm_flags))
797 			vms->exec_vm += nrpages;
798 		else if (is_stack_mapping(next->vm_flags))
799 			vms->stack_vm += nrpages;
800 		else if (is_data_mapping(next->vm_flags))
801 			vms->data_vm += nrpages;
802 
803 		if (unlikely(vms->uf)) {
804 			/*
805 			 * If userfaultfd_unmap_prep returns an error the vmas
806 			 * will remain split, but userland will get a
807 			 * highly unexpected error anyway. This is no
808 			 * different than the case where the first of the two
809 			 * __split_vma fails, but we don't undo the first
810 			 * split, even though we could. This failure is unlikely
811 			 * enough that it's not worth optimizing for.
812 			 */
813 			if (userfaultfd_unmap_prep(next, vms->start, vms->end,
814 						   vms->uf))
815 				goto userfaultfd_error;
816 		}
817 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
818 		BUG_ON(next->vm_start < vms->start);
819 		BUG_ON(next->vm_start > vms->end);
820 #endif
821 	}
822 
823 	vms->next = vma_next(vms->vmi);
824 	if (vms->next)
825 		vms->unmap_end = vms->next->vm_start;
826 
827 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
828 	/* Make sure no VMAs are about to be lost. */
829 	{
830 		MA_STATE(test, mas_detach->tree, 0, 0);
831 		struct vm_area_struct *vma_mas, *vma_test;
832 		int test_count = 0;
833 
834 		vma_iter_set(vms->vmi, vms->start);
835 		rcu_read_lock();
836 		vma_test = mas_find(&test, vms->vma_count - 1);
837 		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
838 			BUG_ON(vma_mas != vma_test);
839 			test_count++;
840 			vma_test = mas_next(&test, vms->vma_count - 1);
841 		}
842 		rcu_read_unlock();
843 		BUG_ON(vms->vma_count != test_count);
844 	}
845 #endif
846 
847 	while (vma_iter_addr(vms->vmi) > vms->start)
848 		vma_iter_prev_range(vms->vmi);
849 
850 	return 0;
851 
852 userfaultfd_error:
853 munmap_gather_failed:
854 end_split_failed:
855 modify_vma_failed:
856 	abort_munmap_vmas(mas_detach);
857 start_split_failed:
858 map_count_exceeded:
859 	return error;
860 }
861 
862 /*
863  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
864  * @vmi: The vma iterator
865  * @vma: The starting vm_area_struct
866  * @mm: The mm_struct
867  * @start: The aligned start address to munmap.
868  * @end: The aligned end address to munmap.
869  * @uf: The userfaultfd list_head
870  * @unlock: Set to true to drop the mmap_lock.  unlocking only happens on
871  * success.
872  *
873  * Return: 0 on success and drops the lock if so directed, error and leaves the
874  * lock held otherwise.
875  */
876 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
877 		struct mm_struct *mm, unsigned long start, unsigned long end,
878 		struct list_head *uf, bool unlock)
879 {
880 	struct maple_tree mt_detach;
881 	MA_STATE(mas_detach, &mt_detach, 0, 0);
882 	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
883 	mt_on_stack(mt_detach);
884 	struct vma_munmap_struct vms;
885 	int error;
886 
887 	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
888 	error = vms_gather_munmap_vmas(&vms, &mas_detach);
889 	if (error)
890 		goto gather_failed;
891 
892 	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
893 	if (error)
894 		goto clear_tree_failed;
895 
896 	/* Point of no return */
897 	vms_complete_munmap_vmas(&vms, &mas_detach);
898 	return 0;
899 
900 clear_tree_failed:
901 	abort_munmap_vmas(&mas_detach);
902 gather_failed:
903 	validate_mm(mm);
904 	return error;
905 }
906 
907 /*
908  * do_vmi_munmap() - munmap a given range.
909  * @vmi: The vma iterator
910  * @mm: The mm_struct
911  * @start: The start address to munmap
912  * @len: The length of the range to munmap
913  * @uf: The userfaultfd list_head
914  * @unlock: set to true if the user wants to drop the mmap_lock on success
915  *
916  * This function takes a @vmi that is either pointing to the previous VMA or set
917  * to MA_START and sets it up to remove the mapping(s).  The @len will be
918  * page aligned.
919  *
920  * Return: 0 on success and drops the lock if so directed, error and leaves the
921  * lock held otherwise.
922  */
923 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
924 		  unsigned long start, size_t len, struct list_head *uf,
925 		  bool unlock)
926 {
927 	unsigned long end;
928 	struct vm_area_struct *vma;
929 
930 	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
931 		return -EINVAL;
932 
933 	end = start + PAGE_ALIGN(len);
934 	if (end == start)
935 		return -EINVAL;
936 
937 	/* Find the first overlapping VMA */
938 	vma = vma_find(vmi, end);
939 	if (!vma) {
940 		if (unlock)
941 			mmap_write_unlock(mm);
942 		return 0;
943 	}
944 
945 	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
946 }
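
/*
 * Rough caller-side sketch (an assumed caller, not a definitive call path):
 * the mmap_lock must be held for writing on entry; on success it is dropped
 * only when @unlock is true, and it is always kept held on failure:
 *
 *	LIST_HEAD(uf);
 *	VMA_ITERATOR(vmi, mm, start);
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, true);
 *	if (ret)
 *		mmap_write_unlock(mm);
 */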
947 
948 /*
949  * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
950  * figure out whether that can be merged with its predecessor or its
951  * successor.  Or both (it neatly fills a hole).
952  *
953  * In most cases - when called for mmap, brk or mremap - [addr,end) is
954  * certain not to be mapped by the time vma_merge is called; but when
955  * called for mprotect, it is certain to be already mapped (either at
956  * an offset within prev, or at the start of next), and the flags of
957  * this area are about to be changed to vm_flags - and the no-change
958  * case has already been eliminated.
959  *
960  * The following mprotect cases have to be considered, where **** is
961  * the area passed down from mprotect_fixup, never extending beyond one
962  * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
963  * at the same address as **** and is of the same or larger span, and
964  * NNNN the next vma after ****:
965  *
966  *     ****             ****                   ****
967  *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
968  *    cannot merge    might become       might become
969  *                    PPNNNNNNNNNN       PPPPPPPPPPCC
970  *    mmap, brk or    case 4 below       case 5 below
971  *    mremap move:
972  *                        ****               ****
973  *                    PPPP    NNNN       PPPPCCCCNNNN
974  *                    might become       might become
975  *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
976  *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
977  *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
978  *
979  * It is important for case 8 that the vma CCCC overlapping the
980  * region **** is never going to be extended over NNNN. Instead NNNN must
981  * be extended in region **** and CCCC must be removed. This way in
982  * all cases where vma_merge succeeds, the moment vma_merge drops the
983  * rmap_locks, the properties of the merged vma will be already
984  * correct for the whole merged range. Some of those properties like
985  * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
986  * be correct for the whole merged range immediately after the
987  * rmap_locks are released. Otherwise if NNNN would be removed and
988  * CCCC would be extended over the NNNN range, remove_migration_ptes
989  * or other rmap walkers (if working on addresses beyond the "end"
990  * parameter) may establish ptes with the wrong permissions of CCCC
991  * instead of the right permissions of NNNN.
992  *
993  * In the code below:
994  * PPPP is represented by *prev
995  * CCCC is represented by *curr or not represented at all (NULL)
996  * NNNN is represented by *next or not represented at all (NULL)
997  * **** is not represented - it will be merged and the vma containing the
998  *      area is returned, or the function will return NULL
999  */
1000 static struct vm_area_struct
1001 *vma_merge(struct vma_iterator *vmi, struct vm_area_struct *prev,
1002 	   struct vm_area_struct *src, unsigned long addr, unsigned long end,
1003 	   unsigned long vm_flags, pgoff_t pgoff, struct mempolicy *policy,
1004 	   struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
1005 	   struct anon_vma_name *anon_name)
1006 {
1007 	struct mm_struct *mm = src->vm_mm;
1008 	struct anon_vma *anon_vma = src->anon_vma;
1009 	struct file *file = src->vm_file;
1010 	struct vm_area_struct *curr, *next, *res;
1011 	struct vm_area_struct *vma, *adjust, *remove, *remove2;
1012 	struct vm_area_struct *anon_dup = NULL;
1013 	struct vma_prepare vp;
1014 	pgoff_t vma_pgoff;
1015 	int err = 0;
1016 	bool merge_prev = false;
1017 	bool merge_next = false;
1018 	bool vma_expanded = false;
1019 	unsigned long vma_start = addr;
1020 	unsigned long vma_end = end;
1021 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
1022 	long adj_start = 0;
1023 
1024 	/*
1025 	 * We later require that vma->vm_flags == vm_flags,
1026 	 * so this tests vma->vm_flags & VM_SPECIAL, too.
1027 	 */
1028 	if (vm_flags & VM_SPECIAL)
1029 		return NULL;
1030 
1031 	/* Does the input range span an existing VMA? (cases 5 - 8) */
1032 	curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
1033 
1034 	if (!curr ||			/* cases 1 - 4 */
1035 	    end == curr->vm_end)	/* cases 6 - 8, adjacent VMA */
1036 		next = vma_lookup(mm, end);
1037 	else
1038 		next = NULL;		/* case 5 */
1039 
1040 	if (prev) {
1041 		vma_start = prev->vm_start;
1042 		vma_pgoff = prev->vm_pgoff;
1043 
1044 		/* Can we merge the predecessor? */
1045 		if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
1046 		    && can_vma_merge_after(prev, vm_flags, anon_vma, file,
1047 					   pgoff, vm_userfaultfd_ctx, anon_name)) {
1048 			merge_prev = true;
1049 			vma_prev(vmi);
1050 		}
1051 	}
1052 
1053 	/* Can we merge the successor? */
1054 	if (next && mpol_equal(policy, vma_policy(next)) &&
1055 	    can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
1056 				 vm_userfaultfd_ctx, anon_name)) {
1057 		merge_next = true;
1058 	}
1059 
1060 	/* Verify some invariants that must be enforced by the caller. */
1061 	VM_WARN_ON(prev && addr <= prev->vm_start);
1062 	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
1063 	VM_WARN_ON(addr >= end);
1064 
1065 	if (!merge_prev && !merge_next)
1066 		return NULL; /* Not mergeable. */
1067 
1068 	if (merge_prev)
1069 		vma_start_write(prev);
1070 
1071 	res = vma = prev;
1072 	remove = remove2 = adjust = NULL;
1073 
1074 	/* Can we merge both the predecessor and the successor? */
1075 	if (merge_prev && merge_next &&
1076 	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
1077 		vma_start_write(next);
1078 		remove = next;				/* case 1 */
1079 		vma_end = next->vm_end;
1080 		err = dup_anon_vma(prev, next, &anon_dup);
1081 		if (curr) {				/* case 6 */
1082 			vma_start_write(curr);
1083 			remove = curr;
1084 			remove2 = next;
1085 			/*
1086 			 * Note that the dup_anon_vma below cannot overwrite err
1087 			 * since the first caller would do nothing unless next
1088 			 * since the first call would do nothing unless next
1089 			 */
1090 			if (!next->anon_vma)
1091 				err = dup_anon_vma(prev, curr, &anon_dup);
1092 		}
1093 	} else if (merge_prev) {			/* case 2 */
1094 		if (curr) {
1095 			vma_start_write(curr);
1096 			if (end == curr->vm_end) {	/* case 7 */
1097 				/*
1098 				 * can_vma_merge_after() assumed we would not be
1099 				 * removing prev vma, so it skipped the check
1100 				 * for vm_ops->close, but we are removing curr
1101 				 */
1102 				if (curr->vm_ops && curr->vm_ops->close)
1103 					err = -EINVAL;
1104 				remove = curr;
1105 			} else {			/* case 5 */
1106 				adjust = curr;
1107 				adj_start = (end - curr->vm_start);
1108 			}
1109 			if (!err)
1110 				err = dup_anon_vma(prev, curr, &anon_dup);
1111 		}
1112 	} else { /* merge_next */
1113 		vma_start_write(next);
1114 		res = next;
1115 		if (prev && addr < prev->vm_end) {	/* case 4 */
1116 			vma_start_write(prev);
1117 			vma_end = addr;
1118 			adjust = next;
1119 			adj_start = -(prev->vm_end - addr);
1120 			err = dup_anon_vma(next, prev, &anon_dup);
1121 		} else {
1122 			/*
1123 			 * Note that cases 3 and 8 are the ONLY ones where prev
1124 			 * is permitted to be (but is not necessarily) NULL.
1125 			 */
1126 			vma = next;			/* case 3 */
1127 			vma_start = addr;
1128 			vma_end = next->vm_end;
1129 			vma_pgoff = next->vm_pgoff - pglen;
1130 			if (curr) {			/* case 8 */
1131 				vma_pgoff = curr->vm_pgoff;
1132 				vma_start_write(curr);
1133 				remove = curr;
1134 				err = dup_anon_vma(next, curr, &anon_dup);
1135 			}
1136 		}
1137 	}
1138 
1139 	/* Error in anon_vma clone. */
1140 	if (err)
1141 		goto anon_vma_fail;
1142 
1143 	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
1144 		vma_expanded = true;
1145 
1146 	if (vma_expanded) {
1147 		vma_iter_config(vmi, vma_start, vma_end);
1148 	} else {
1149 		vma_iter_config(vmi, adjust->vm_start + adj_start,
1150 				adjust->vm_end);
1151 	}
1152 
1153 	if (vma_iter_prealloc(vmi, vma))
1154 		goto prealloc_fail;
1155 
1156 	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
1157 	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
1158 		   vp.anon_vma != adjust->anon_vma);
1159 
1160 	vma_prepare(&vp);
1161 	vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
1162 	vma_set_range(vma, vma_start, vma_end, vma_pgoff);
1163 
1164 	if (vma_expanded)
1165 		vma_iter_store(vmi, vma);
1166 
1167 	if (adj_start) {
1168 		adjust->vm_start += adj_start;
1169 		adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
1170 		if (adj_start < 0) {
1171 			WARN_ON(vma_expanded);
1172 			vma_iter_store(vmi, next);
1173 		}
1174 	}
1175 
1176 	vma_complete(&vp, vmi, mm);
1177 	validate_mm(mm);
1178 	khugepaged_enter_vma(res, vm_flags);
1179 	return res;
1180 
1181 prealloc_fail:
1182 	if (anon_dup)
1183 		unlink_anon_vmas(anon_dup);
1184 
1185 anon_vma_fail:
1186 	vma_iter_set(vmi, addr);
1187 	vma_iter_load(vmi);
1188 	return NULL;
1189 }
1190 
1191 /*
1192  * We are about to modify one or more of a VMA's flags, policy, userfaultfd
1193  * context and anonymous VMA name within the range [start, end).
1194  *
1195  * As a result, we might be able to merge the newly modified VMA range with an
1196  * adjacent VMA with identical properties.
1197  *
1198  * If no merge is possible and the range does not span the entirety of the VMA,
1199  * we then need to split the VMA to accommodate the change.
1200  *
1201  * The function returns either the merged VMA, the original VMA if a split was
1202  * required instead, or an error if the split failed.
1203  */
1204 struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
1205 				  struct vm_area_struct *prev,
1206 				  struct vm_area_struct *vma,
1207 				  unsigned long start, unsigned long end,
1208 				  unsigned long vm_flags,
1209 				  struct mempolicy *policy,
1210 				  struct vm_userfaultfd_ctx uffd_ctx,
1211 				  struct anon_vma_name *anon_name)
1212 {
1213 	pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
1214 	struct vm_area_struct *merged;
1215 
1216 	merged = vma_merge(vmi, prev, vma, start, end, vm_flags,
1217 			   pgoff, policy, uffd_ctx, anon_name);
1218 	if (merged)
1219 		return merged;
1220 
1221 	if (vma->vm_start < start) {
1222 		int err = split_vma(vmi, vma, start, 1);
1223 
1224 		if (err)
1225 			return ERR_PTR(err);
1226 	}
1227 
1228 	if (vma->vm_end > end) {
1229 		int err = split_vma(vmi, vma, end, 0);
1230 
1231 		if (err)
1232 			return ERR_PTR(err);
1233 	}
1234 
1235 	return vma;
1236 }
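
/*
 * Informal example (hypothetical addresses): mprotect()ing [0x3000, 0x5000)
 * within a VMA spanning [0x2000, 0x6000), when no neighbour merge is possible,
 * falls back to splitting:
 *
 *	vma = vma_modify(&vmi, prev, vma, 0x3000, 0x5000, new_flags,
 *			 policy, uffd_ctx, anon_name);
 *	// vma_merge() returned NULL, so the VMA was split at 0x3000 and again
 *	// at 0x5000; the middle piece [0x3000, 0x5000) is returned (or an
 *	// ERR_PTR() if a split failed).
 */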
1237 
1238 /*
1239  * Attempt to merge a newly mapped VMA with those adjacent to it. The caller
1240  * must ensure that [start, end) does not overlap any existing VMA.
1241  */
1242 struct vm_area_struct
1243 *vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
1244 		   struct vm_area_struct *vma, unsigned long start,
1245 		   unsigned long end, pgoff_t pgoff)
1246 {
1247 	return vma_merge(vmi, prev, vma, start, end, vma->vm_flags, pgoff,
1248 			 vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma));
1249 }
1250 
1251 /*
1252  * Expand vma by delta bytes, potentially merging with an immediately adjacent
1253  * VMA with identical properties.
1254  */
1255 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1256 					struct vm_area_struct *vma,
1257 					unsigned long delta)
1258 {
1259 	pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma);
1260 
1261 	/* vma is specified as prev, so case 1 or 2 will apply. */
1262 	return vma_merge(vmi, vma, vma, vma->vm_end, vma->vm_end + delta,
1263 			 vma->vm_flags, pgoff, vma_policy(vma),
1264 			 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
1265 }
1266 
1267 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1268 {
1269 	vb->count = 0;
1270 }
1271 
1272 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1273 {
1274 	struct address_space *mapping;
1275 	int i;
1276 
1277 	mapping = vb->vmas[0]->vm_file->f_mapping;
1278 	i_mmap_lock_write(mapping);
1279 	for (i = 0; i < vb->count; i++) {
1280 		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1281 		__remove_shared_vm_struct(vb->vmas[i], mapping);
1282 	}
1283 	i_mmap_unlock_write(mapping);
1284 
1285 	unlink_file_vma_batch_init(vb);
1286 }
1287 
1288 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1289 			       struct vm_area_struct *vma)
1290 {
1291 	if (vma->vm_file == NULL)
1292 		return;
1293 
1294 	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1295 	    vb->count == ARRAY_SIZE(vb->vmas))
1296 		unlink_file_vma_batch_process(vb);
1297 
1298 	vb->vmas[vb->count] = vma;
1299 	vb->count++;
1300 }
1301 
1302 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1303 {
1304 	if (vb->count > 0)
1305 		unlink_file_vma_batch_process(vb);
1306 }
1307 
1308 /*
1309  * Unlink a file-based vm structure from its interval tree, to hide
1310  * vma from rmap and vmtruncate before freeing its page tables.
1311  */
1312 void unlink_file_vma(struct vm_area_struct *vma)
1313 {
1314 	struct file *file = vma->vm_file;
1315 
1316 	if (file) {
1317 		struct address_space *mapping = file->f_mapping;
1318 
1319 		i_mmap_lock_write(mapping);
1320 		__remove_shared_vm_struct(vma, mapping);
1321 		i_mmap_unlock_write(mapping);
1322 	}
1323 }
1324 
1325 void vma_link_file(struct vm_area_struct *vma)
1326 {
1327 	struct file *file = vma->vm_file;
1328 	struct address_space *mapping;
1329 
1330 	if (file) {
1331 		mapping = file->f_mapping;
1332 		i_mmap_lock_write(mapping);
1333 		__vma_link_file(vma, mapping);
1334 		i_mmap_unlock_write(mapping);
1335 	}
1336 }
1337 
1338 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1339 {
1340 	VMA_ITERATOR(vmi, mm, 0);
1341 
1342 	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1343 	if (vma_iter_prealloc(&vmi, vma))
1344 		return -ENOMEM;
1345 
1346 	vma_start_write(vma);
1347 	vma_iter_store(&vmi, vma);
1348 	vma_link_file(vma);
1349 	mm->map_count++;
1350 	validate_mm(mm);
1351 	return 0;
1352 }
1353 
1354 /*
1355  * Copy the vma structure to a new location in the same mm,
1356  * prior to moving page table entries, to effect an mremap move.
1357  */
1358 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1359 	unsigned long addr, unsigned long len, pgoff_t pgoff,
1360 	bool *need_rmap_locks)
1361 {
1362 	struct vm_area_struct *vma = *vmap;
1363 	unsigned long vma_start = vma->vm_start;
1364 	struct mm_struct *mm = vma->vm_mm;
1365 	struct vm_area_struct *new_vma, *prev;
1366 	bool faulted_in_anon_vma = true;
1367 	VMA_ITERATOR(vmi, mm, addr);
1368 
1369 	/*
1370 	 * If anonymous vma has not yet been faulted, update new pgoff
1371 	 * to match new location, to increase its chance of merging.
1372 	 */
1373 	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1374 		pgoff = addr >> PAGE_SHIFT;
1375 		faulted_in_anon_vma = false;
1376 	}
1377 
1378 	new_vma = find_vma_prev(mm, addr, &prev);
1379 	if (new_vma && new_vma->vm_start < addr + len)
1380 		return NULL;	/* should never get here */
1381 
1382 	new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff);
1383 	if (new_vma) {
1384 		/*
1385 		 * Source vma may have been merged into new_vma
1386 		 */
1387 		if (unlikely(vma_start >= new_vma->vm_start &&
1388 			     vma_start < new_vma->vm_end)) {
1389 			/*
1390 			 * The only way we can get a vma_merge with
1391 			 * self during an mremap is if the vma hasn't
1392 			 * been faulted in yet and we were allowed to
1393 			 * reset the dst vma->vm_pgoff to the
1394 			 * destination address of the mremap to allow
1395 			 * the merge to happen. mremap must change the
1396 			 * vm_pgoff linearity between src and dst vmas
1397 			 * (in turn preventing a vma_merge) to be
1398 			 * safe. It is only safe to keep the vm_pgoff
1399 			 * linear if there are no pages mapped yet.
1400 			 */
1401 			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1402 			*vmap = vma = new_vma;
1403 		}
1404 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1405 	} else {
1406 		new_vma = vm_area_dup(vma);
1407 		if (!new_vma)
1408 			goto out;
1409 		vma_set_range(new_vma, addr, addr + len, pgoff);
1410 		if (vma_dup_policy(vma, new_vma))
1411 			goto out_free_vma;
1412 		if (anon_vma_clone(new_vma, vma))
1413 			goto out_free_mempol;
1414 		if (new_vma->vm_file)
1415 			get_file(new_vma->vm_file);
1416 		if (new_vma->vm_ops && new_vma->vm_ops->open)
1417 			new_vma->vm_ops->open(new_vma);
1418 		if (vma_link(mm, new_vma))
1419 			goto out_vma_link;
1420 		*need_rmap_locks = false;
1421 	}
1422 	return new_vma;
1423 
1424 out_vma_link:
1425 	if (new_vma->vm_ops && new_vma->vm_ops->close)
1426 		new_vma->vm_ops->close(new_vma);
1427 
1428 	if (new_vma->vm_file)
1429 		fput(new_vma->vm_file);
1430 
1431 	unlink_anon_vmas(new_vma);
1432 out_free_mempol:
1433 	mpol_put(vma_policy(new_vma));
1434 out_free_vma:
1435 	vm_area_free(new_vma);
1436 out:
1437 	return NULL;
1438 }
1439 
1440 /*
1441  * Rough compatibility check to quickly see if it's even worth looking
1442  * at sharing an anon_vma.
1443  *
1444  * They need to have the same vm_file, and the flags can only differ
1445  * in things that mprotect may change.
1446  *
1447  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1448  * we can merge the two vmas. For example, we refuse to merge a vma if
1449  * there is a vm_ops->close() function, because that indicates that the
1450  * driver is doing some kind of reference counting. But that doesn't
1451  * really matter for the anon_vma sharing case.
1452  */
1453 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1454 {
1455 	return a->vm_end == b->vm_start &&
1456 		mpol_equal(vma_policy(a), vma_policy(b)) &&
1457 		a->vm_file == b->vm_file &&
1458 		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1459 		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1460 }
1461 
1462 /*
1463  * Do some basic sanity checking to see if we can re-use the anon_vma
1464  * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1465  * the same as 'old', the other will be the new one that is trying
1466  * to share the anon_vma.
1467  *
1468  * NOTE! This runs with mmap_lock held for reading, so it is possible that
1469  * the anon_vma of 'old' is concurrently in the process of being set up
1470  * by another page fault trying to merge _that_. But that's ok: if it
1471  * is being set up, that automatically means that it will be a singleton
1472  * acceptable for merging, so we can do all of this optimistically. But
1473  * we do that READ_ONCE() to make sure that we never re-load the pointer.
1474  *
1475  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1476  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1477  * is to return an anon_vma that is "complex" due to having gone through
1478  * a fork).
1479  *
1480  * We also make sure that the two vma's are compatible (adjacent,
1481  * and with the same memory policies). That's all stable, even with just
1482  * a read lock on the mmap_lock.
1483  */
1484 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
1485 					  struct vm_area_struct *a,
1486 					  struct vm_area_struct *b)
1487 {
1488 	if (anon_vma_compatible(a, b)) {
1489 		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1490 
1491 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
1492 			return anon_vma;
1493 	}
1494 	return NULL;
1495 }
1496 
1497 /*
1498  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1499  * neighbouring vmas for a suitable anon_vma, before it goes off
1500  * to allocate a new anon_vma.  It checks because a repetitive
1501  * sequence of mprotects and faults may otherwise lead to distinct
1502  * anon_vmas being allocated, preventing vma merge in subsequent
1503  * mprotect.
1504  */
1505 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1506 {
1507 	struct anon_vma *anon_vma = NULL;
1508 	struct vm_area_struct *prev, *next;
1509 	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
1510 
1511 	/* Try next first. */
1512 	next = vma_iter_load(&vmi);
1513 	if (next) {
1514 		anon_vma = reusable_anon_vma(next, vma, next);
1515 		if (anon_vma)
1516 			return anon_vma;
1517 	}
1518 
1519 	prev = vma_prev(&vmi);
1520 	VM_BUG_ON_VMA(prev != vma, vma);
1521 	prev = vma_prev(&vmi);
1522 	/* Now try prev. */
1523 	if (prev)
1524 		anon_vma = reusable_anon_vma(prev, prev, vma);
1525 
1526 	/*
1527 	 * We might reach here with anon_vma == NULL if we can't find
1528 	 * any reusable anon_vma.
1529 	 * There's no absolute need to look only at touching neighbours:
1530 	 * we could search further afield for "compatible" anon_vmas.
1531 	 * But it would probably just be a waste of time searching,
1532 	 * or lead to too many vmas hanging off the same anon_vma.
1533 	 * We're trying to allow mprotect remerging later on,
1534 	 * not trying to minimize memory used for anon_vmas.
1535 	 */
1536 	return anon_vma;
1537 }
1538 
1539 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1540 {
1541 	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1542 }
1543 
1544 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1545 {
1546 	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1547 		(VM_WRITE | VM_SHARED);
1548 }
1549 
1550 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1551 {
1552 	/* No managed pages to writeback. */
1553 	if (vma->vm_flags & VM_PFNMAP)
1554 		return false;
1555 
1556 	return vma->vm_file && vma->vm_file->f_mapping &&
1557 		mapping_can_writeback(vma->vm_file->f_mapping);
1558 }
1559 
1560 /*
1561  * Does this VMA require the underlying folios to have their dirty state
1562  * tracked?
1563  */
1564 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1565 {
1566 	/* Only shared, writable VMAs require dirty tracking. */
1567 	if (!vma_is_shared_writable(vma))
1568 		return false;
1569 
1570 	/* Does the filesystem need to be notified? */
1571 	if (vm_ops_needs_writenotify(vma->vm_ops))
1572 		return true;
1573 
1574 	/*
1575 	 * Even if the filesystem doesn't indicate a need for writenotify, if it
1576 	 * can writeback, dirty tracking is still required.
1577 	 */
1578 	return vma_fs_can_writeback(vma);
1579 }
1580 
1581 /*
1582  * Some shared mappings will want the pages marked read-only
1583  * to track write events. If so, we'll downgrade vm_page_prot
1584  * to the private version (using protection_map[] without the
1585  * VM_SHARED bit).
1586  */
1587 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1588 {
1589 	/* If it was private or non-writable, the write bit is already clear */
1590 	if (!vma_is_shared_writable(vma))
1591 		return false;
1592 
1593 	/* The backer wishes to know when pages are first written to? */
1594 	if (vm_ops_needs_writenotify(vma->vm_ops))
1595 		return true;
1596 
1597 	/* The open routine did something to the protections that pgprot_modify
1598 	 * won't preserve? */
1599 	if (pgprot_val(vm_page_prot) !=
1600 	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1601 		return false;
1602 
1603 	/*
1604 	 * Do we need to track softdirty? hugetlb does not support softdirty
1605 	 * tracking yet.
1606 	 */
1607 	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1608 		return true;
1609 
1610 	/* Do we need write faults for uffd-wp tracking? */
1611 	if (userfaultfd_wp(vma))
1612 		return true;
1613 
1614 	/* Can the mapping track the dirty pages? */
1615 	return vma_fs_can_writeback(vma);
1616 }
1617 
1618 unsigned long count_vma_pages_range(struct mm_struct *mm,
1619 				    unsigned long addr, unsigned long end)
1620 {
1621 	VMA_ITERATOR(vmi, mm, addr);
1622 	struct vm_area_struct *vma;
1623 	unsigned long nr_pages = 0;
1624 
1625 	for_each_vma_range(vmi, vma, end) {
1626 		unsigned long vm_start = max(addr, vma->vm_start);
1627 		unsigned long vm_end = min(end, vma->vm_end);
1628 
1629 		nr_pages += PHYS_PFN(vm_end - vm_start);
1630 	}
1631 
1632 	return nr_pages;
1633 }
1634 
1635 static DEFINE_MUTEX(mm_all_locks_mutex);
1636 
1637 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
1638 {
1639 	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
1640 		/*
1641 		 * The LSB of head.next can't change from under us
1642 		 * because we hold the mm_all_locks_mutex.
1643 		 */
1644 		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
1645 		/*
1646 		 * We can safely modify head.next after taking the
1647 		 * anon_vma->root->rwsem. If some other vma in this mm shares
1648 		 * the same anon_vma we won't take it again.
1649 		 *
1650 		 * No need of atomic instructions here, head.next
1651 		 * can't change from under us thanks to the
1652 		 * anon_vma->root->rwsem.
1653 		 */
1654 		if (__test_and_set_bit(0, (unsigned long *)
1655 				       &anon_vma->root->rb_root.rb_root.rb_node))
1656 			BUG();
1657 	}
1658 }
1659 
1660 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
1661 {
1662 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
1663 		/*
1664 		 * AS_MM_ALL_LOCKS can't change from under us because
1665 		 * we hold the mm_all_locks_mutex.
1666 		 *
1667 		 * Operations on ->flags have to be atomic because
1668 		 * even if AS_MM_ALL_LOCKS is stable thanks to the
1669 		 * mm_all_locks_mutex, there may be other cpus
1670 		 * changing other bitflags in parallel to us.
1671 		 */
1672 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
1673 			BUG();
1674 		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
1675 	}
1676 }
1677 
1678 /*
1679  * This operation locks against the VM for all pte/vma/mm related
1680  * operations that could ever happen on a certain mm. This includes
1681  * vmtruncate, try_to_unmap, and all page faults.
1682  *
1683  * The caller must take the mmap_lock in write mode before calling
1684  * mm_take_all_locks(). The caller isn't allowed to release the
1685  * mmap_lock until mm_drop_all_locks() returns.
1686  *
1687  * mmap_lock in write mode is required in order to block all operations
1688  * that could modify pagetables and free pages without need of
1689  * altering the vma layout. It's also needed in write mode to prevent new
1690  * anon_vmas from being associated with existing vmas.
1691  *
1692  * A single task can't take more than one mm_take_all_locks() in a row
1693  * or it would deadlock.
1694  *
1695  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
1696  * mapping->flags avoid taking the same lock twice, if more than one
1697  * vma in this mm is backed by the same anon_vma or address_space.
1698  *
1699  * We take locks in the following order, according to the comment at the
1700  * beginning of mm/rmap.c:
1701  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
1702  *     hugetlb mapping);
1703  *   - all vmas marked locked;
1704  *   - all i_mmap_rwsem locks;
1705  *   - all anon_vma->rwsem locks.
1706  *
1707  * We can take all locks within these types randomly because the VM code
1708  * doesn't nest them and we are protected from parallel mm_take_all_locks() by
1709  * mm_all_locks_mutex.
1710  *
1711  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
1712  * that may have to take thousands of locks.
1713  *
1714  * mm_take_all_locks() can fail if it's interrupted by signals.
1715  */
1716 int mm_take_all_locks(struct mm_struct *mm)
1717 {
1718 	struct vm_area_struct *vma;
1719 	struct anon_vma_chain *avc;
1720 	VMA_ITERATOR(vmi, mm, 0);
1721 
1722 	mmap_assert_write_locked(mm);
1723 
1724 	mutex_lock(&mm_all_locks_mutex);
1725 
1726 	/*
1727 	 * vma_start_write() does not have a complement in mm_drop_all_locks()
1728 	 * because vma_start_write() is always asymmetrical; it marks a VMA as
1729 	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
1730 	 * is reached.
1731 	 */
1732 	for_each_vma(vmi, vma) {
1733 		if (signal_pending(current))
1734 			goto out_unlock;
1735 		vma_start_write(vma);
1736 	}
1737 
1738 	vma_iter_init(&vmi, mm, 0);
1739 	for_each_vma(vmi, vma) {
1740 		if (signal_pending(current))
1741 			goto out_unlock;
1742 		if (vma->vm_file && vma->vm_file->f_mapping &&
1743 				is_vm_hugetlb_page(vma))
1744 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
1745 	}
1746 
1747 	vma_iter_init(&vmi, mm, 0);
1748 	for_each_vma(vmi, vma) {
1749 		if (signal_pending(current))
1750 			goto out_unlock;
1751 		if (vma->vm_file && vma->vm_file->f_mapping &&
1752 				!is_vm_hugetlb_page(vma))
1753 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
1754 	}
1755 
1756 	vma_iter_init(&vmi, mm, 0);
1757 	for_each_vma(vmi, vma) {
1758 		if (signal_pending(current))
1759 			goto out_unlock;
1760 		if (vma->anon_vma)
1761 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
1762 				vm_lock_anon_vma(mm, avc->anon_vma);
1763 	}
1764 
1765 	return 0;
1766 
1767 out_unlock:
1768 	mm_drop_all_locks(mm);
1769 	return -EINTR;
1770 }
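
/*
 * Informal usage sketch (an assumed caller that must exclude all VM activity
 * on the mm, e.g. while publishing state that page faults and rmap walks must
 * not race with; not a definitive example):
 *
 *	mmap_write_lock(mm);
 *	if (mm_take_all_locks(mm)) {
 *		mmap_write_unlock(mm);
 *		return -EINTR;		// a pending signal aborted the locking
 *	}
 *	... critical section ...
 *	mm_drop_all_locks(mm);
 *	mmap_write_unlock(mm);
 */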
1771 
1772 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
1773 {
1774 	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
1775 		/*
1776 		 * The LSB of head.next can't change to 0 from under
1777 		 * us because we hold the mm_all_locks_mutex.
1778 		 *
1779 		 * We must however clear the bitflag before unlocking
1780 		 * the vma so the users using the anon_vma->rb_root will
1781 		 * never see our bitflag.
1782 		 *
1783 		 * No need of atomic instructions here, head.next
1784 		 * can't change from under us until we release the
1785 		 * anon_vma->root->rwsem.
1786 		 */
1787 		if (!__test_and_clear_bit(0, (unsigned long *)
1788 					  &anon_vma->root->rb_root.rb_root.rb_node))
1789 			BUG();
1790 		anon_vma_unlock_write(anon_vma);
1791 	}
1792 }
1793 
1794 static void vm_unlock_mapping(struct address_space *mapping)
1795 {
1796 	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
1797 		/*
1798 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
1799 		 * because we hold the mm_all_locks_mutex.
1800 		 */
1801 		i_mmap_unlock_write(mapping);
1802 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
1803 					&mapping->flags))
1804 			BUG();
1805 	}
1806 }
1807 
1808 /*
1809  * The mmap_lock cannot be released by the caller until
1810  * mm_drop_all_locks() returns.
1811  */
1812 void mm_drop_all_locks(struct mm_struct *mm)
1813 {
1814 	struct vm_area_struct *vma;
1815 	struct anon_vma_chain *avc;
1816 	VMA_ITERATOR(vmi, mm, 0);
1817 
1818 	mmap_assert_write_locked(mm);
1819 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
1820 
1821 	for_each_vma(vmi, vma) {
1822 		if (vma->anon_vma)
1823 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
1824 				vm_unlock_anon_vma(avc->anon_vma);
1825 		if (vma->vm_file && vma->vm_file->f_mapping)
1826 			vm_unlock_mapping(vma->vm_file->f_mapping);
1827 	}
1828 
1829 	mutex_unlock(&mm_all_locks_mutex);
1830 }
1831