1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /*
4 * VMA-specific functions.
5 */
6
7 #include "vma_internal.h"
8 #include "vma.h"
9
10 static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
11 {
12 struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
13
14 if (!mpol_equal(vmg->policy, vma_policy(vma)))
15 return false;
16 /*
17 * VM_SOFTDIRTY should not prevent VMA merging: if the flags match
18 * apart from the dirty bit, the caller should mark the merged VMA
19 * as dirty. If the dirty bit were not excluded from the comparison,
20 * we would increase pressure on the memory system, forcing the
21 * kernel to generate new VMAs where an old one could simply be
22 * extended instead.
23 */
24 if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
25 return false;
26 if (vma->vm_file != vmg->file)
27 return false;
28 if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
29 return false;
30 if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
31 return false;
32 return true;
33 }
34
35 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
36 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
37 {
38 /*
39 * The list_is_singular() test is to avoid merging a VMA cloned from
40 * its parent. This improves scalability otherwise limited by the anon_vma lock.
41 */
42 if ((!anon_vma1 || !anon_vma2) && (!vma ||
43 list_is_singular(&vma->anon_vma_chain)))
44 return true;
45 return anon_vma1 == anon_vma2;
46 }
47
48 /* Are the anon_vma's belonging to each VMA compatible with one another? */
49 static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
50 struct vm_area_struct *vma2)
51 {
52 return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
53 }
54
55 /*
56 * init_multi_vma_prep() - Initializer for struct vma_prepare
57 * @vp: The vma_prepare struct
58 * @vma: The vma that will be altered once locked
59 * @next: The next vma if it is to be adjusted
60 * @remove: The first vma to be removed
61 * @remove2: The second vma to be removed
62 */
63 static void init_multi_vma_prep(struct vma_prepare *vp,
64 struct vm_area_struct *vma,
65 struct vm_area_struct *next,
66 struct vm_area_struct *remove,
67 struct vm_area_struct *remove2)
68 {
69 memset(vp, 0, sizeof(struct vma_prepare));
70 vp->vma = vma;
71 vp->anon_vma = vma->anon_vma;
72 vp->remove = remove;
73 vp->remove2 = remove2;
74 vp->adj_next = next;
75 if (!vp->anon_vma && next)
76 vp->anon_vma = next->anon_vma;
77
78 vp->file = vma->vm_file;
79 if (vp->file)
80 vp->mapping = vma->vm_file->f_mapping;
81
82 }
83
84 /*
85 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
86 * in front of (at a lower virtual address and file offset than) the vma.
87 *
88 * We cannot merge two vmas if they have differently assigned (non-NULL)
89 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
90 *
91 * We don't check here for the merged mmap wrapping around the end of pagecache
92 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
93 * wrap, nor mmaps which cover the final page at index -1UL.
94 *
95 * We assume the vma may be removed as part of the merge.
96 */
97 static bool can_vma_merge_before(struct vma_merge_struct *vmg)
98 {
99 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
100
101 if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
102 is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
103 if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
104 return true;
105 }
106
107 return false;
108 }
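/*
 * Worked example of the offset check above (values are illustrative and not
 * taken from any particular caller): a proposed two-page range gives
 * pglen == 2, so with vmg->pgoff == 10 it can merge in front of the next
 * VMA only if next->vm_pgoff == 12, i.e. the vm_pgoff values remain
 * contiguous across the new boundary.
 */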
109
110 /*
111 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
112 * beyond (at a higher virtual address and file offset than) the vma.
113 *
114 * We cannot merge two vmas if they have differently assigned (non-NULL)
115 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
116 *
117 * We assume that vma is not removed as part of the merge.
118 */
119 static bool can_vma_merge_after(struct vma_merge_struct *vmg)
120 {
121 if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
122 is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
123 if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
124 return true;
125 }
126 return false;
127 }
128
129 static void __vma_link_file(struct vm_area_struct *vma,
130 struct address_space *mapping)
131 {
132 if (vma_is_shared_maywrite(vma))
133 mapping_allow_writable(mapping);
134
135 flush_dcache_mmap_lock(mapping);
136 vma_interval_tree_insert(vma, &mapping->i_mmap);
137 flush_dcache_mmap_unlock(mapping);
138 }
139
140 /*
141 * Requires inode->i_mapping->i_mmap_rwsem
142 */
143 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
144 struct address_space *mapping)
145 {
146 if (vma_is_shared_maywrite(vma))
147 mapping_unmap_writable(mapping);
148
149 flush_dcache_mmap_lock(mapping);
150 vma_interval_tree_remove(vma, &mapping->i_mmap);
151 flush_dcache_mmap_unlock(mapping);
152 }
153
154 /*
155 * vma_prepare() - Helper function for handling locking VMAs prior to altering
156 * @vp: The initialized vma_prepare struct
157 */
158 static void vma_prepare(struct vma_prepare *vp)
159 {
160 if (vp->file) {
161 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
162
163 if (vp->adj_next)
164 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
165 vp->adj_next->vm_end);
166
167 i_mmap_lock_write(vp->mapping);
168 if (vp->insert && vp->insert->vm_file) {
169 /*
170 * Put into interval tree now, so instantiated pages
171 * are visible to arm/parisc __flush_dcache_page
172 * throughout; but we cannot insert into address
173 * space until vma start or end is updated.
174 */
175 __vma_link_file(vp->insert,
176 vp->insert->vm_file->f_mapping);
177 }
178 }
179
180 if (vp->anon_vma) {
181 anon_vma_lock_write(vp->anon_vma);
182 anon_vma_interval_tree_pre_update_vma(vp->vma);
183 if (vp->adj_next)
184 anon_vma_interval_tree_pre_update_vma(vp->adj_next);
185 }
186
187 if (vp->file) {
188 flush_dcache_mmap_lock(vp->mapping);
189 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
190 if (vp->adj_next)
191 vma_interval_tree_remove(vp->adj_next,
192 &vp->mapping->i_mmap);
193 }
194
195 }
196
197 /*
198 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
199 * or for inserting a VMA.
200 *
201 * @vp: The vma_prepare struct
202 * @vmi: The vma iterator
203 * @mm: The mm_struct
204 */
205 static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
206 struct mm_struct *mm)
207 {
208 if (vp->file) {
209 if (vp->adj_next)
210 vma_interval_tree_insert(vp->adj_next,
211 &vp->mapping->i_mmap);
212 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
213 flush_dcache_mmap_unlock(vp->mapping);
214 }
215
216 if (vp->remove && vp->file) {
217 __remove_shared_vm_struct(vp->remove, vp->mapping);
218 if (vp->remove2)
219 __remove_shared_vm_struct(vp->remove2, vp->mapping);
220 } else if (vp->insert) {
221 /*
222 * split_vma has split insert from vma, and needs
223 * us to insert it before dropping the locks
224 * (it may either follow vma or precede it).
225 */
226 vma_iter_store(vmi, vp->insert);
227 mm->map_count++;
228 }
229
230 if (vp->anon_vma) {
231 anon_vma_interval_tree_post_update_vma(vp->vma);
232 if (vp->adj_next)
233 anon_vma_interval_tree_post_update_vma(vp->adj_next);
234 anon_vma_unlock_write(vp->anon_vma);
235 }
236
237 if (vp->file) {
238 i_mmap_unlock_write(vp->mapping);
239 uprobe_mmap(vp->vma);
240
241 if (vp->adj_next)
242 uprobe_mmap(vp->adj_next);
243 }
244
245 if (vp->remove) {
246 again:
247 vma_mark_detached(vp->remove, true);
248 if (vp->file) {
249 uprobe_munmap(vp->remove, vp->remove->vm_start,
250 vp->remove->vm_end);
251 fput(vp->file);
252 }
253 if (vp->remove->anon_vma)
254 anon_vma_merge(vp->vma, vp->remove);
255 mm->map_count--;
256 mpol_put(vma_policy(vp->remove));
257 if (!vp->remove2)
258 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
259 vm_area_free(vp->remove);
260
261 /*
262 * In mprotect's case 6 (see comments on vma_merge),
263 * we are removing both mid and next vmas
264 */
265 if (vp->remove2) {
266 vp->remove = vp->remove2;
267 vp->remove2 = NULL;
268 goto again;
269 }
270 }
271 if (vp->insert && vp->file)
272 uprobe_mmap(vp->insert);
273 }
274
275 /*
276 * init_vma_prep() - Initializer wrapper for vma_prepare struct
277 * @vp: The vma_prepare struct
278 * @vma: The vma that will be altered once locked
279 */
280 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
281 {
282 init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
283 }
284
285 /*
286 * Can the proposed VMA be merged with the left (previous) VMA taking into
287 * account the start position of the proposed range.
288 */
289 static bool can_vma_merge_left(struct vma_merge_struct *vmg)
290
291 {
292 return vmg->prev && vmg->prev->vm_end == vmg->start &&
293 can_vma_merge_after(vmg);
294 }
295
296 /*
297 * Can the proposed VMA be merged with the right (next) VMA taking into
298 * account the end position of the proposed range.
299 *
300 * In addition, if we can merge with the left VMA, ensure that left and right
301 * anon_vma's are also compatible.
302 */
303 static bool can_vma_merge_right(struct vma_merge_struct *vmg,
304 bool can_merge_left)
305 {
306 if (!vmg->next || vmg->end != vmg->next->vm_start ||
307 !can_vma_merge_before(vmg))
308 return false;
309
310 if (!can_merge_left)
311 return true;
312
313 /*
314 * If we can merge with prev (left) and next (right), indicating that
315 * each VMA's anon_vma is compatible with the proposed anon_vma, this
316 * does not mean prev and next are compatible with EACH OTHER.
317 *
318 * We therefore check this in addition to mergeability to either side.
319 */
320 return are_anon_vmas_compatible(vmg->prev, vmg->next);
321 }
322
323 /*
324 * Close a vm structure and free it.
325 */
326 void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
327 {
328 might_sleep();
329 if (!closed && vma->vm_ops && vma->vm_ops->close)
330 vma->vm_ops->close(vma);
331 if (vma->vm_file)
332 fput(vma->vm_file);
333 mpol_put(vma_policy(vma));
334 if (unreachable)
335 __vm_area_free(vma);
336 else
337 vm_area_free(vma);
338 }
339
340 /*
341 * Get rid of page table information in the indicated region.
342 *
343 * Called with the mm semaphore held.
344 */
345 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
346 struct vm_area_struct *prev, struct vm_area_struct *next)
347 {
348 struct mm_struct *mm = vma->vm_mm;
349 struct mmu_gather tlb;
350
351 lru_add_drain();
352 tlb_gather_mmu(&tlb, mm);
353 update_hiwater_rss(mm);
354 unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
355 /* mm_wr_locked = */ true);
356 mas_set(mas, vma->vm_end);
357 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
358 next ? next->vm_start : USER_PGTABLES_CEILING,
359 /* mm_wr_locked = */ true);
360 tlb_finish_mmu(&tlb);
361 }
362
363 /*
364 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
365 * has already been checked or doesn't make sense to fail.
366 * VMA Iterator will point to the original VMA.
367 */
368 static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
369 unsigned long addr, int new_below)
370 {
371 struct vma_prepare vp;
372 struct vm_area_struct *new;
373 int err;
374
375 WARN_ON(vma->vm_start >= addr);
376 WARN_ON(vma->vm_end <= addr);
377
378 if (vma->vm_ops && vma->vm_ops->may_split) {
379 err = vma->vm_ops->may_split(vma, addr);
380 if (err)
381 return err;
382 }
383
384 new = vm_area_dup(vma);
385 if (!new)
386 return -ENOMEM;
387
388 if (new_below) {
389 new->vm_end = addr;
390 } else {
391 new->vm_start = addr;
392 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
393 }
394
395 err = -ENOMEM;
396 vma_iter_config(vmi, new->vm_start, new->vm_end);
397 if (vma_iter_prealloc(vmi, new))
398 goto out_free_vma;
399
400 err = vma_dup_policy(vma, new);
401 if (err)
402 goto out_free_vmi;
403
404 err = anon_vma_clone(new, vma);
405 if (err)
406 goto out_free_mpol;
407
408 if (new->vm_file)
409 get_file(new->vm_file);
410
411 if (new->vm_ops && new->vm_ops->open)
412 new->vm_ops->open(new);
413
414 vma_start_write(vma);
415 vma_start_write(new);
416
417 init_vma_prep(&vp, vma);
418 vp.insert = new;
419 vma_prepare(&vp);
420 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
421
422 if (new_below) {
423 vma->vm_start = addr;
424 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
425 } else {
426 vma->vm_end = addr;
427 }
428
429 /* vma_complete stores the new vma */
430 vma_complete(&vp, vmi, vma->vm_mm);
431 validate_mm(vma->vm_mm);
432
433 /* Success. */
434 if (new_below)
435 vma_next(vmi);
436 else
437 vma_prev(vmi);
438
439 return 0;
440
441 out_free_mpol:
442 mpol_put(vma_policy(new));
443 out_free_vmi:
444 vma_iter_free(vmi);
445 out_free_vma:
446 vm_area_free(new);
447 return err;
448 }
449
450 /*
451 * Split a vma into two pieces at address 'addr', a new vma is allocated
452 * either for the first part or the tail.
453 */
454 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
455 unsigned long addr, int new_below)
456 {
457 if (vma->vm_mm->map_count >= sysctl_max_map_count)
458 return -ENOMEM;
459
460 return __split_vma(vmi, vma, addr, new_below);
461 }
462
463 /*
464 * vma has some anon_vma assigned, and is already inserted on that
465 * anon_vma's interval trees.
466 *
467 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
468 * vma must be removed from the anon_vma's interval trees using
469 * anon_vma_interval_tree_pre_update_vma().
470 *
471 * After the update, the vma will be reinserted using
472 * anon_vma_interval_tree_post_update_vma().
473 *
474 * The entire update must be protected by exclusive mmap_lock and by
475 * the root anon_vma's mutex.
476 */
477 void
478 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
479 {
480 struct anon_vma_chain *avc;
481
482 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
483 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
484 }
485
486 void
487 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
488 {
489 struct anon_vma_chain *avc;
490
491 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
492 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
493 }
494
495 /*
496 * dup_anon_vma() - Helper function to duplicate anon_vma
497 * @dst: The destination VMA
498 * @src: The source VMA
499 * @dup: Pointer to the destination VMA when successful.
500 *
501 * Returns: 0 on success.
502 */
503 static int dup_anon_vma(struct vm_area_struct *dst,
504 struct vm_area_struct *src, struct vm_area_struct **dup)
505 {
506 /*
507 * Easily overlooked: when mprotect shifts the boundary, make sure the
508 * expanding vma has anon_vma set if the shrinking vma had, to cover any
509 * anon pages imported.
510 */
511 if (src->anon_vma && !dst->anon_vma) {
512 int ret;
513
514 vma_assert_write_locked(dst);
515 dst->anon_vma = src->anon_vma;
516 ret = anon_vma_clone(dst, src);
517 if (ret)
518 return ret;
519
520 *dup = dst;
521 }
522
523 return 0;
524 }
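/*
 * Illustrative scenario (not tied to a specific caller): mprotect() changes
 * only the tail of a VMA and that tail merges into the following VMA. The
 * expanding (dst) VMA may have no anon_vma while the shrinking (src) VMA
 * does, so dup_anon_vma() clones the source chain onto the destination
 * before the boundary moves, keeping already-faulted anonymous pages
 * reachable via rmap.
 */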
525
526 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
527 void validate_mm(struct mm_struct *mm)
528 {
529 int bug = 0;
530 int i = 0;
531 struct vm_area_struct *vma;
532 VMA_ITERATOR(vmi, mm, 0);
533
534 mt_validate(&mm->mm_mt);
535 for_each_vma(vmi, vma) {
536 #ifdef CONFIG_DEBUG_VM_RB
537 struct anon_vma *anon_vma = vma->anon_vma;
538 struct anon_vma_chain *avc;
539 #endif
540 unsigned long vmi_start, vmi_end;
541 bool warn = 0;
542
543 vmi_start = vma_iter_addr(&vmi);
544 vmi_end = vma_iter_end(&vmi);
545 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
546 warn = 1;
547
548 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
549 warn = 1;
550
551 if (warn) {
552 pr_emerg("issue in %s\n", current->comm);
553 dump_stack();
554 dump_vma(vma);
555 pr_emerg("tree range: %px start %lx end %lx\n", vma,
556 vmi_start, vmi_end - 1);
557 vma_iter_dump_tree(&vmi);
558 }
559
560 #ifdef CONFIG_DEBUG_VM_RB
561 if (anon_vma) {
562 anon_vma_lock_read(anon_vma);
563 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
564 anon_vma_interval_tree_verify(avc);
565 anon_vma_unlock_read(anon_vma);
566 }
567 #endif
568 i++;
569 }
570 if (i != mm->map_count) {
571 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
572 bug = 1;
573 }
574 VM_BUG_ON_MM(bug, mm);
575 }
576 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
577
578 /* Actually perform the VMA merge operation. */
579 static int commit_merge(struct vma_merge_struct *vmg,
580 struct vm_area_struct *adjust,
581 struct vm_area_struct *remove,
582 struct vm_area_struct *remove2,
583 long adj_start,
584 bool expanded)
585 {
586 struct vma_prepare vp;
587
588 init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);
589
590 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
591 vp.anon_vma != adjust->anon_vma);
592
593 if (expanded) {
594 /* Note: vma iterator must be pointing to 'start'. */
595 vma_iter_config(vmg->vmi, vmg->start, vmg->end);
596 } else {
597 vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
598 adjust->vm_end);
599 }
600
601 if (vma_iter_prealloc(vmg->vmi, vmg->vma))
602 return -ENOMEM;
603
604 vma_prepare(&vp);
605 vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
606 vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
607
608 if (expanded)
609 vma_iter_store(vmg->vmi, vmg->vma);
610
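/*
 * A non-zero adj_start means a neighbouring VMA ('adjust') survives the
 * merge but has its start moved: a positive value shrinks it from the
 * front (vmg->vma has grown over its head), while a negative value grows
 * it downwards so it absorbs the tail trimmed off the VMA being shrunk.
 */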
611 if (adj_start) {
612 adjust->vm_start += adj_start;
613 adjust->vm_pgoff += PHYS_PFN(adj_start);
614 if (adj_start < 0) {
615 WARN_ON(expanded);
616 vma_iter_store(vmg->vmi, adjust);
617 }
618 }
619
620 vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);
621
622 return 0;
623 }
624
625 /* We can only remove VMAs when merging if they do not have a close hook. */
626 static bool can_merge_remove_vma(struct vm_area_struct *vma)
627 {
628 return !vma->vm_ops || !vma->vm_ops->close;
629 }
630
631 /*
632 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
633 * attributes modified.
634 *
635 * @vmg: Describes the modifications being made to a VMA and associated
636 * metadata.
637 *
638 * When the attributes of a range within a VMA change, then it might be possible
639 * for immediately adjacent VMAs to be merged into that VMA due to having
640 * identical properties.
641 *
642 * This function checks for the existence of any such mergeable VMAs and updates
643 * the maple tree describing the @vmg->vma->vm_mm address space to account for
644 * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
645 *
646 * As part of this operation, if a merge occurs, the @vmg object will have its
647 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
648 * calls to this function should reset these fields.
649 *
650 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
651 *
652 * ASSUMPTIONS:
653 * - The caller must assign the VMA to be modified to @vmg->vma.
654 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
655 * - The caller must not set @vmg->next, as we determine this.
656 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
657 * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
658 */
659 static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg)
660 {
661 struct vm_area_struct *vma = vmg->vma;
662 struct vm_area_struct *prev = vmg->prev;
663 struct vm_area_struct *next, *res;
664 struct vm_area_struct *anon_dup = NULL;
665 struct vm_area_struct *adjust = NULL;
666 unsigned long start = vmg->start;
667 unsigned long end = vmg->end;
668 bool left_side = vma && start == vma->vm_start;
669 bool right_side = vma && end == vma->vm_end;
670 int err = 0;
671 long adj_start = 0;
672 bool merge_will_delete_vma, merge_will_delete_next;
673 bool merge_left, merge_right, merge_both;
674 bool expanded;
675
676 mmap_assert_write_locked(vmg->mm);
677 VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */
678 VM_WARN_ON(vmg->next); /* We set this. */
679 VM_WARN_ON(prev && start <= prev->vm_start);
680 VM_WARN_ON(start >= end);
681 /*
682 * If vma == prev, the range may be offset into the VMA; otherwise it
683 * must start at the VMA's start. Either way it must not extend past its end.
684 */
685 VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||
686 vmg->end > vma->vm_end));
687 /* The vmi must be positioned within vmg->vma. */
688 VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
689 vma_iter_addr(vmg->vmi) < vma->vm_end));
690
691 vmg->state = VMA_MERGE_NOMERGE;
692
693 /*
694 * If a special mapping or if the range being modified is neither at the
695 * furthermost left or right side of the VMA, then we have no chance of
696 * merging and should abort.
697 */
698 if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
699 return NULL;
700
701 if (left_side)
702 merge_left = can_vma_merge_left(vmg);
703 else
704 merge_left = false;
705
706 if (right_side) {
707 next = vmg->next = vma_iter_next_range(vmg->vmi);
708 vma_iter_prev_range(vmg->vmi);
709
710 merge_right = can_vma_merge_right(vmg, merge_left);
711 } else {
712 merge_right = false;
713 next = NULL;
714 }
715
716 if (merge_left) /* If merging prev, position iterator there. */
717 vma_prev(vmg->vmi);
718 else if (!merge_right) /* If we have nothing to merge, abort. */
719 return NULL;
720
721 merge_both = merge_left && merge_right;
722 /* If we span the entire VMA, a merge implies it will be deleted. */
723 merge_will_delete_vma = left_side && right_side;
724
725 /*
726 * If we need to remove vma in its entirety but are unable to do so,
727 * we have no sensible recourse but to abort the merge.
728 */
729 if (merge_will_delete_vma && !can_merge_remove_vma(vma))
730 return NULL;
731
732 /*
733 * If we merge both VMAs, then next is also deleted. This implies
734 * merge_will_delete_vma also.
735 */
736 merge_will_delete_next = merge_both;
737
738 /*
739 * If we cannot delete next, then we can reduce the operation to merging
740 * prev and vma (thereby deleting vma).
741 */
742 if (merge_will_delete_next && !can_merge_remove_vma(next)) {
743 merge_will_delete_next = false;
744 merge_right = false;
745 merge_both = false;
746 }
747
748 /* No matter what happens, we will be adjusting vma. */
749 vma_start_write(vma);
750
751 if (merge_left)
752 vma_start_write(prev);
753
754 if (merge_right)
755 vma_start_write(next);
756
757 if (merge_both) {
758 /*
759 * |<----->|
760 * |-------*********-------|
761 * prev vma next
762 * extend delete delete
763 */
764
765 vmg->vma = prev;
766 vmg->start = prev->vm_start;
767 vmg->end = next->vm_end;
768 vmg->pgoff = prev->vm_pgoff;
769
770 /*
771 * We already ensured anon_vma compatibility above, so now it's
772 * simply a case of, if prev has no anon_vma object, which of
773 * next or vma contains the anon_vma we must duplicate.
774 */
775 err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
776 } else if (merge_left) {
777 /*
778 * |<----->| OR
779 * |<--------->|
780 * |-------*************
781 * prev vma
782 * extend shrink/delete
783 */
784
785 vmg->vma = prev;
786 vmg->start = prev->vm_start;
787 vmg->pgoff = prev->vm_pgoff;
788
789 if (!merge_will_delete_vma) {
790 adjust = vma;
791 adj_start = vmg->end - vma->vm_start;
792 }
793
794 err = dup_anon_vma(prev, vma, &anon_dup);
795 } else { /* merge_right */
796 /*
797 * |<----->| OR
798 * |<--------->|
799 * *************-------|
800 * vma next
801 * shrink/delete extend
802 */
803
804 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
805
806 VM_WARN_ON(!merge_right);
807 /* If we are offset into a VMA, then prev must be vma. */
808 VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev);
809
810 if (merge_will_delete_vma) {
811 vmg->vma = next;
812 vmg->end = next->vm_end;
813 vmg->pgoff = next->vm_pgoff - pglen;
814 } else {
815 /*
816 * We shrink vma and expand next.
817 *
818 * IMPORTANT: This is the ONLY case where the final
819 * merged VMA is NOT vmg->vma, but rather vmg->next.
820 */
821
822 vmg->start = vma->vm_start;
823 vmg->end = start;
824 vmg->pgoff = vma->vm_pgoff;
825
826 adjust = next;
827 adj_start = -(vma->vm_end - start);
828 }
829
830 err = dup_anon_vma(next, vma, &anon_dup);
831 }
832
833 if (err)
834 goto abort;
835
836 /*
837 * In nearly all cases, we expand vmg->vma. There is one exception -
838 * merge_right where we partially span the VMA. In this case we shrink
839 * the end of vmg->vma and adjust the start of vmg->next accordingly.
840 */
841 expanded = !merge_right || merge_will_delete_vma;
842
843 if (commit_merge(vmg, adjust,
844 merge_will_delete_vma ? vma : NULL,
845 merge_will_delete_next ? next : NULL,
846 adj_start, expanded)) {
847 if (anon_dup)
848 unlink_anon_vmas(anon_dup);
849
850 vmg->state = VMA_MERGE_ERROR_NOMEM;
851 return NULL;
852 }
853
854 res = merge_left ? prev : next;
855 khugepaged_enter_vma(res, vmg->flags);
856
857 vmg->state = VMA_MERGE_SUCCESS;
858 return res;
859
860 abort:
861 vma_iter_set(vmg->vmi, start);
862 vma_iter_load(vmg->vmi);
863 vmg->state = VMA_MERGE_ERROR_NOMEM;
864 return NULL;
865 }
866
867 /*
868 * vma_merge_new_range - Attempt to merge a new VMA into address space
869 *
870 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
871 * (exclusive), which we try to merge with any adjacent VMAs if possible.
872 *
873 * We are about to add a VMA to the address space starting at @vmg->start and
874 * ending at @vmg->end. There are three different possible scenarios:
875 *
876 * 1. There is a VMA with identical properties immediately adjacent to the
877 * proposed new VMA [@vmg->start, @vmg->end) either before or after it -
878 * EXPAND that VMA:
879 *
880 * Proposed: |-----| or |-----|
881 * Existing: |----| |----|
882 *
883 * 2. There are VMAs with identical properties immediately adjacent to the
884 * proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
885 * EXPAND the former and REMOVE the latter:
886 *
887 * Proposed: |-----|
888 * Existing: |----| |----|
889 *
890 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
891 * VMAs do not have identical attributes - NO MERGE POSSIBLE.
892 *
893 * In instances where we can merge, this function returns the expanded VMA which
894 * will have its range adjusted accordingly and the underlying maple tree also
895 * adjusted.
896 *
897 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
898 * to the VMA we expanded.
899 *
900 * This function adjusts @vmg to provide @vmg->next if not already specified,
901 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
902 *
903 * ASSUMPTIONS:
904 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
905 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
906 * other than VMAs that will be unmapped should the operation succeed.
907 * - The caller must have specified the previous vma in @vmg->prev.
908 * - The caller must have specified the next vma in @vmg->next.
909 * - The caller must have positioned the vmi at or before the gap.
910 */
911 struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
912 {
913 struct vm_area_struct *prev = vmg->prev;
914 struct vm_area_struct *next = vmg->next;
915 unsigned long start = vmg->start;
916 unsigned long end = vmg->end;
917 pgoff_t pgoff = vmg->pgoff;
918 pgoff_t pglen = PHYS_PFN(end - start);
919 bool can_merge_left, can_merge_right;
920 bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;
921
922 mmap_assert_write_locked(vmg->mm);
923 VM_WARN_ON(vmg->vma);
924 /* vmi must point at or before the gap. */
925 VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);
926
927 vmg->state = VMA_MERGE_NOMERGE;
928
929 /* Special VMAs are unmergeable; the same is true if there is neither a prev nor a next. */
930 if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
931 return NULL;
932
933 can_merge_left = can_vma_merge_left(vmg);
934 can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);
935
936 /* If we can merge with the next VMA, adjust vmg accordingly. */
937 if (can_merge_right) {
938 vmg->end = next->vm_end;
939 vmg->vma = next;
940 vmg->pgoff = next->vm_pgoff - pglen;
941 }
942
943 /* If we can merge with the previous VMA, adjust vmg accordingly. */
944 if (can_merge_left) {
945 vmg->start = prev->vm_start;
946 vmg->vma = prev;
947 vmg->pgoff = prev->vm_pgoff;
948
949 /*
950 * If this merge would result in removal of the next VMA but we
951 * are not permitted to do so, reduce the operation to merging
952 * prev and vma.
953 */
954 if (can_merge_right && !can_merge_remove_vma(next))
955 vmg->end = end;
956
957 /* In expand-only case we are already positioned at prev. */
958 if (!just_expand) {
959 /* Equivalent to going to the previous range. */
960 vma_prev(vmg->vmi);
961 }
962 }
963
964 /*
965 * Now try to expand adjacent VMA(s). This takes care of removing the
966 * following VMA if we have VMAs on both sides.
967 */
968 if (vmg->vma && !vma_expand(vmg)) {
969 khugepaged_enter_vma(vmg->vma, vmg->flags);
970 vmg->state = VMA_MERGE_SUCCESS;
971 return vmg->vma;
972 }
973
974 /* If expansion failed, reset state. Allows us to retry merge later. */
975 if (!just_expand) {
976 vmg->vma = NULL;
977 vmg->start = start;
978 vmg->end = end;
979 vmg->pgoff = pgoff;
980 if (vmg->vma == prev)
981 vma_iter_set(vmg->vmi, start);
982 }
983
984 return NULL;
985 }
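/*
 * Hypothetical setup sketch (for exposition only - the caller shape and the
 * vm_area_alloc() fallback are assumptions, not code from this file): an
 * mmap()-style caller describes the new range in a struct vma_merge_struct,
 * fills in prev/next, and allocates a fresh VMA only when no merge is
 * possible, linking it manually afterwards:
 *
 *      vmg.prev = prev;
 *      vmg.next = next;
 *      vma = vma_merge_new_range(&vmg);
 *      if (!vma)
 *              vma = vm_area_alloc(mm);
 */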
986
987 /*
988 * vma_expand - Expand an existing VMA
989 *
990 * @vmg: Describes a VMA expansion operation.
991 *
992 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
993 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
994 * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with
995 * vmg->next needs to be handled by the caller.
996 *
997 * Returns: 0 on success.
998 *
999 * ASSUMPTIONS:
1000 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
1001 * - The caller must have set @vmg->vma and @vmg->next.
1002 */
1003 int vma_expand(struct vma_merge_struct *vmg)
1004 {
1005 struct vm_area_struct *anon_dup = NULL;
1006 bool remove_next = false;
1007 struct vm_area_struct *vma = vmg->vma;
1008 struct vm_area_struct *next = vmg->next;
1009
1010 mmap_assert_write_locked(vmg->mm);
1011
1012 vma_start_write(vma);
1013 if (next && (vma != next) && (vmg->end == next->vm_end)) {
1014 int ret;
1015
1016 remove_next = true;
1017 /* This should already have been checked by this point. */
1018 VM_WARN_ON(!can_merge_remove_vma(next));
1019 vma_start_write(next);
1020 ret = dup_anon_vma(vma, next, &anon_dup);
1021 if (ret)
1022 return ret;
1023 }
1024
1025 /* Not merging but overwriting any part of next is not handled. */
1026 VM_WARN_ON(next && !remove_next &&
1027 next != vma && vmg->end > next->vm_start);
1028 /* Only handles expanding */
1029 VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);
1030
1031 if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
1032 goto nomem;
1033
1034 return 0;
1035
1036 nomem:
1037 vmg->state = VMA_MERGE_ERROR_NOMEM;
1038 if (anon_dup)
1039 unlink_anon_vmas(anon_dup);
1040 return -ENOMEM;
1041 }
1042
1043 /*
1044 * vma_shrink() - Reduce an existing VMA's memory area
1045 * @vmi: The vma iterator
1046 * @vma: The VMA to modify
1047 * @start: The new start
1048 * @end: The new end
1049 *
1050 * Returns: 0 on success, -ENOMEM otherwise
1051 */
1052 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
1053 unsigned long start, unsigned long end, pgoff_t pgoff)
1054 {
1055 struct vma_prepare vp;
1056
1057 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
1058
1059 if (vma->vm_start < start)
1060 vma_iter_config(vmi, vma->vm_start, start);
1061 else
1062 vma_iter_config(vmi, end, vma->vm_end);
1063
1064 if (vma_iter_prealloc(vmi, NULL))
1065 return -ENOMEM;
1066
1067 vma_start_write(vma);
1068
1069 init_vma_prep(&vp, vma);
1070 vma_prepare(&vp);
1071 vma_adjust_trans_huge(vma, start, end, 0);
1072
1073 vma_iter_clear(vmi);
1074 vma_set_range(vma, start, end, pgoff);
1075 vma_complete(&vp, vmi, vma->vm_mm);
1076 validate_mm(vma->vm_mm);
1077 return 0;
1078 }
1079
1080 static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
1081 struct ma_state *mas_detach, bool mm_wr_locked)
1082 {
1083 struct mmu_gather tlb;
1084
1085 if (!vms->clear_ptes) /* Nothing to do */
1086 return;
1087
1088 /*
1089 * We can free page tables without write-locking mmap_lock because VMAs
1090 * were isolated before we downgraded mmap_lock.
1091 */
1092 mas_set(mas_detach, 1);
1093 lru_add_drain();
1094 tlb_gather_mmu(&tlb, vms->vma->vm_mm);
1095 update_hiwater_rss(vms->vma->vm_mm);
1096 unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
1097 vms->vma_count, mm_wr_locked);
1098
1099 mas_set(mas_detach, 1);
1100 /* start and end may be different if there is no prev or next vma. */
1101 free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
1102 vms->unmap_end, mm_wr_locked);
1103 tlb_finish_mmu(&tlb);
1104 vms->clear_ptes = false;
1105 }
1106
1107 void vms_clean_up_area(struct vma_munmap_struct *vms,
1108 struct ma_state *mas_detach)
1109 {
1110 struct vm_area_struct *vma;
1111
1112 if (!vms->nr_pages)
1113 return;
1114
1115 vms_clear_ptes(vms, mas_detach, true);
1116 mas_set(mas_detach, 0);
1117 mas_for_each(mas_detach, vma, ULONG_MAX)
1118 if (vma->vm_ops && vma->vm_ops->close)
1119 vma->vm_ops->close(vma);
1120 vms->closed_vm_ops = true;
1121 }
1122
1123 /*
1124 * vms_complete_munmap_vmas() - Finish the munmap() operation
1125 * @vms: The vma munmap struct
1126 * @mas_detach: The maple state of the detached vmas
1127 *
1128 * This updates the mm_struct, unmaps the region, frees the resources
1129 * used for the munmap() and may downgrade the lock - if requested. It covers
1130 * everything that needs to be done once the vma maple tree has been updated.
1131 */
1132 void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
1133 struct ma_state *mas_detach)
1134 {
1135 struct vm_area_struct *vma;
1136 struct mm_struct *mm;
1137
1138 mm = current->mm;
1139 mm->map_count -= vms->vma_count;
1140 mm->locked_vm -= vms->locked_vm;
1141 if (vms->unlock)
1142 mmap_write_downgrade(mm);
1143
1144 if (!vms->nr_pages)
1145 return;
1146
1147 vms_clear_ptes(vms, mas_detach, !vms->unlock);
1148 /* Update high watermark before we lower total_vm */
1149 update_hiwater_vm(mm);
1150 /* Stat accounting */
1151 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
1152 /* Paranoid bookkeeping */
1153 VM_WARN_ON(vms->exec_vm > mm->exec_vm);
1154 VM_WARN_ON(vms->stack_vm > mm->stack_vm);
1155 VM_WARN_ON(vms->data_vm > mm->data_vm);
1156 mm->exec_vm -= vms->exec_vm;
1157 mm->stack_vm -= vms->stack_vm;
1158 mm->data_vm -= vms->data_vm;
1159
1160 /* Remove and clean up vmas */
1161 mas_set(mas_detach, 0);
1162 mas_for_each(mas_detach, vma, ULONG_MAX)
1163 remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);
1164
1165 vm_unacct_memory(vms->nr_accounted);
1166 validate_mm(mm);
1167 if (vms->unlock)
1168 mmap_read_unlock(mm);
1169
1170 __mt_destroy(mas_detach->tree);
1171 }
1172
1173 /*
1174 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
1175 * for removal at a later date. Handles splitting first and last if necessary
1176 * and marking the vmas as isolated.
1177 *
1178 * @vms: The vma munmap struct
1179 * @mas_detach: The maple state tracking the detached tree
1180 *
1181 * Return: 0 on success, error otherwise
1182 */
1183 int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
1184 struct ma_state *mas_detach)
1185 {
1186 struct vm_area_struct *next = NULL;
1187 int error;
1188
1189 /*
1190 * If we need to split any vma, do it now to save pain later.
1191 * Does it split the first one?
1192 */
1193 if (vms->start > vms->vma->vm_start) {
1194
1195 /*
1196 * Make sure that map_count on return from munmap() will
1197 * not exceed its limit; but let map_count go just above
1198 * its limit temporarily, to help free resources as expected.
1199 */
1200 if (vms->end < vms->vma->vm_end &&
1201 vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
1202 error = -ENOMEM;
1203 goto map_count_exceeded;
1204 }
1205
1206 /* Don't bother splitting the VMA if we can't unmap it anyway */
1207 if (!can_modify_vma(vms->vma)) {
1208 error = -EPERM;
1209 goto start_split_failed;
1210 }
1211
1212 error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
1213 if (error)
1214 goto start_split_failed;
1215 }
1216 vms->prev = vma_prev(vms->vmi);
1217 if (vms->prev)
1218 vms->unmap_start = vms->prev->vm_end;
1219
1220 /*
1221 * Detach a range of VMAs from the mm. Using next as a temp variable as
1222 * it is always overwritten.
1223 */
1224 for_each_vma_range(*(vms->vmi), next, vms->end) {
1225 long nrpages;
1226
1227 if (!can_modify_vma(next)) {
1228 error = -EPERM;
1229 goto modify_vma_failed;
1230 }
1231 /* Does it split the end? */
1232 if (next->vm_end > vms->end) {
1233 error = __split_vma(vms->vmi, next, vms->end, 0);
1234 if (error)
1235 goto end_split_failed;
1236 }
1237 vma_start_write(next);
1238 mas_set(mas_detach, vms->vma_count++);
1239 error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
1240 if (error)
1241 goto munmap_gather_failed;
1242
1243 vma_mark_detached(next, true);
1244 nrpages = vma_pages(next);
1245
1246 vms->nr_pages += nrpages;
1247 if (next->vm_flags & VM_LOCKED)
1248 vms->locked_vm += nrpages;
1249
1250 if (next->vm_flags & VM_ACCOUNT)
1251 vms->nr_accounted += nrpages;
1252
1253 if (is_exec_mapping(next->vm_flags))
1254 vms->exec_vm += nrpages;
1255 else if (is_stack_mapping(next->vm_flags))
1256 vms->stack_vm += nrpages;
1257 else if (is_data_mapping(next->vm_flags))
1258 vms->data_vm += nrpages;
1259
1260 if (unlikely(vms->uf)) {
1261 /*
1262 * If userfaultfd_unmap_prep returns an error the vmas
1263 * will remain split, but userland will get a
1264 * highly unexpected error anyway. This is no
1265 * different than the case where the first of the two
1266 * __split_vma fails, but we don't undo the first
1267 * split, although we could. This failure is unlikely
1268 * enough that it's not worth optimizing for.
1269 */
1270 error = userfaultfd_unmap_prep(next, vms->start,
1271 vms->end, vms->uf);
1272 if (error)
1273 goto userfaultfd_error;
1274 }
1275 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
1276 BUG_ON(next->vm_start < vms->start);
1277 BUG_ON(next->vm_start > vms->end);
1278 #endif
1279 }
1280
1281 vms->next = vma_next(vms->vmi);
1282 if (vms->next)
1283 vms->unmap_end = vms->next->vm_start;
1284
1285 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1286 /* Make sure no VMAs are about to be lost. */
1287 {
1288 MA_STATE(test, mas_detach->tree, 0, 0);
1289 struct vm_area_struct *vma_mas, *vma_test;
1290 int test_count = 0;
1291
1292 vma_iter_set(vms->vmi, vms->start);
1293 rcu_read_lock();
1294 vma_test = mas_find(&test, vms->vma_count - 1);
1295 for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
1296 BUG_ON(vma_mas != vma_test);
1297 test_count++;
1298 vma_test = mas_next(&test, vms->vma_count - 1);
1299 }
1300 rcu_read_unlock();
1301 BUG_ON(vms->vma_count != test_count);
1302 }
1303 #endif
1304
1305 while (vma_iter_addr(vms->vmi) > vms->start)
1306 vma_iter_prev_range(vms->vmi);
1307
1308 vms->clear_ptes = true;
1309 return 0;
1310
1311 userfaultfd_error:
1312 munmap_gather_failed:
1313 end_split_failed:
1314 modify_vma_failed:
1315 reattach_vmas(mas_detach);
1316 start_split_failed:
1317 map_count_exceeded:
1318 return error;
1319 }
1320
1321 /*
1322 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
1323 * @vmi: The vma iterator
1324 * @vma: The starting vm_area_struct
1325 * @mm: The mm_struct
1326 * @start: The aligned start address to munmap.
1327 * @end: The aligned end address to munmap.
1328 * @uf: The userfaultfd list_head
1329 * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
1330 * success.
1331 *
1332 * Return: 0 on success and drops the lock if so directed, error and leaves the
1333 * lock held otherwise.
1334 */
1335 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1336 struct mm_struct *mm, unsigned long start, unsigned long end,
1337 struct list_head *uf, bool unlock)
1338 {
1339 struct maple_tree mt_detach;
1340 MA_STATE(mas_detach, &mt_detach, 0, 0);
1341 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1342 mt_on_stack(mt_detach);
1343 struct vma_munmap_struct vms;
1344 int error;
1345
1346 init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
1347 error = vms_gather_munmap_vmas(&vms, &mas_detach);
1348 if (error)
1349 goto gather_failed;
1350
1351 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
1352 if (error)
1353 goto clear_tree_failed;
1354
1355 /* Point of no return */
1356 vms_complete_munmap_vmas(&vms, &mas_detach);
1357 return 0;
1358
1359 clear_tree_failed:
1360 reattach_vmas(&mas_detach);
1361 gather_failed:
1362 validate_mm(mm);
1363 return error;
1364 }
1365
1366 /*
1367 * do_vmi_munmap() - munmap a given range.
1368 * @vmi: The vma iterator
1369 * @mm: The mm_struct
1370 * @start: The start address to munmap
1371 * @len: The length of the range to munmap
1372 * @uf: The userfaultfd list_head
1373 * @unlock: set to true if the user wants to drop the mmap_lock on success
1374 *
1375 * This function takes a @vmi that is either pointing to the previous VMA or set
1376 * to MA_START and sets it up to remove the mapping(s). The @len will be
1377 * aligned.
1378 *
1379 * Return: 0 on success and drops the lock if so directed, error and leaves the
1380 * lock held otherwise.
1381 */
1382 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
1383 unsigned long start, size_t len, struct list_head *uf,
1384 bool unlock)
1385 {
1386 unsigned long end;
1387 struct vm_area_struct *vma;
1388
1389 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
1390 return -EINVAL;
1391
1392 end = start + PAGE_ALIGN(len);
1393 if (end == start)
1394 return -EINVAL;
1395
1396 /* Find the first overlapping VMA */
1397 vma = vma_find(vmi, end);
1398 if (!vma) {
1399 if (unlock)
1400 mmap_write_unlock(mm);
1401 return 0;
1402 }
1403
1404 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
1405 }
1406
1407 /*
1408 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
1409 * context and anonymous VMA name within the range [start, end).
1410 *
1411 * As a result, we might be able to merge the newly modified VMA range with an
1412 * adjacent VMA with identical properties.
1413 *
1414 * If no merge is possible and the range does not span the entirety of the VMA,
1415 * we then need to split the VMA to accommodate the change.
1416 *
1417 * The function returns either the merged VMA, the original VMA if a split was
1418 * required instead, or an error if the split failed.
1419 */
1420 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
1421 {
1422 struct vm_area_struct *vma = vmg->vma;
1423 struct vm_area_struct *merged;
1424
1425 /* First, try to merge. */
1426 merged = vma_merge_existing_range(vmg);
1427 if (merged)
1428 return merged;
1429
1430 /* Split any preceding portion of the VMA. */
1431 if (vma->vm_start < vmg->start) {
1432 int err = split_vma(vmg->vmi, vma, vmg->start, 1);
1433
1434 if (err)
1435 return ERR_PTR(err);
1436 }
1437
1438 /* Split any trailing portion of the VMA. */
1439 if (vma->vm_end > vmg->end) {
1440 int err = split_vma(vmg->vmi, vma, vmg->end, 0);
1441
1442 if (err)
1443 return ERR_PTR(err);
1444 }
1445
1446 return vma;
1447 }
1448
1449 struct vm_area_struct *vma_modify_flags(
1450 struct vma_iterator *vmi, struct vm_area_struct *prev,
1451 struct vm_area_struct *vma, unsigned long start, unsigned long end,
1452 unsigned long new_flags)
1453 {
1454 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1455
1456 vmg.flags = new_flags;
1457
1458 return vma_modify(&vmg);
1459 }
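/*
 * Hypothetical caller sketch (exposition only; the surrounding logic is an
 * assumption, not code from this file): an mprotect-style caller positions
 * a VMA iterator over the affected range, calls the helper, and treats an
 * ERR_PTR() return as a failed split:
 *
 *      vma = vma_modify_flags(&vmi, prev, vma, start, end, newflags);
 *      if (IS_ERR(vma))
 *              return PTR_ERR(vma);
 */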
1460
1461 struct vm_area_struct
1462 *vma_modify_flags_name(struct vma_iterator *vmi,
1463 struct vm_area_struct *prev,
1464 struct vm_area_struct *vma,
1465 unsigned long start,
1466 unsigned long end,
1467 unsigned long new_flags,
1468 struct anon_vma_name *new_name)
1469 {
1470 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1471
1472 vmg.flags = new_flags;
1473 vmg.anon_name = new_name;
1474
1475 return vma_modify(&vmg);
1476 }
1477
1478 struct vm_area_struct
1479 *vma_modify_policy(struct vma_iterator *vmi,
1480 struct vm_area_struct *prev,
1481 struct vm_area_struct *vma,
1482 unsigned long start, unsigned long end,
1483 struct mempolicy *new_pol)
1484 {
1485 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1486
1487 vmg.policy = new_pol;
1488
1489 return vma_modify(&vmg);
1490 }
1491
1492 struct vm_area_struct
1493 *vma_modify_flags_uffd(struct vma_iterator *vmi,
1494 struct vm_area_struct *prev,
1495 struct vm_area_struct *vma,
1496 unsigned long start, unsigned long end,
1497 unsigned long new_flags,
1498 struct vm_userfaultfd_ctx new_ctx)
1499 {
1500 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1501
1502 vmg.flags = new_flags;
1503 vmg.uffd_ctx = new_ctx;
1504
1505 return vma_modify(&vmg);
1506 }
1507
1508 /*
1509 * Expand vma by delta bytes, potentially merging with an immediately adjacent
1510 * VMA with identical properties.
1511 */
1512 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1513 struct vm_area_struct *vma,
1514 unsigned long delta)
1515 {
1516 VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
1517
1518 vmg.next = vma_iter_next_rewind(vmi, NULL);
1519 vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */
1520
1521 return vma_merge_new_range(&vmg);
1522 }
1523
1524 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1525 {
1526 vb->count = 0;
1527 }
1528
1529 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1530 {
1531 struct address_space *mapping;
1532 int i;
1533
1534 mapping = vb->vmas[0]->vm_file->f_mapping;
1535 i_mmap_lock_write(mapping);
1536 for (i = 0; i < vb->count; i++) {
1537 VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1538 __remove_shared_vm_struct(vb->vmas[i], mapping);
1539 }
1540 i_mmap_unlock_write(mapping);
1541
1542 unlink_file_vma_batch_init(vb);
1543 }
1544
1545 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1546 struct vm_area_struct *vma)
1547 {
1548 if (vma->vm_file == NULL)
1549 return;
1550
1551 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1552 vb->count == ARRAY_SIZE(vb->vmas))
1553 unlink_file_vma_batch_process(vb);
1554
1555 vb->vmas[vb->count] = vma;
1556 vb->count++;
1557 }
1558
1559 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1560 {
1561 if (vb->count > 0)
1562 unlink_file_vma_batch_process(vb);
1563 }
1564
1565 /*
1566 * Unlink a file-based vm structure from its interval tree, to hide
1567 * vma from rmap and vmtruncate before freeing its page tables.
1568 */
1569 void unlink_file_vma(struct vm_area_struct *vma)
1570 {
1571 struct file *file = vma->vm_file;
1572
1573 if (file) {
1574 struct address_space *mapping = file->f_mapping;
1575
1576 i_mmap_lock_write(mapping);
1577 __remove_shared_vm_struct(vma, mapping);
1578 i_mmap_unlock_write(mapping);
1579 }
1580 }
1581
1582 void vma_link_file(struct vm_area_struct *vma)
1583 {
1584 struct file *file = vma->vm_file;
1585 struct address_space *mapping;
1586
1587 if (file) {
1588 mapping = file->f_mapping;
1589 i_mmap_lock_write(mapping);
1590 __vma_link_file(vma, mapping);
1591 i_mmap_unlock_write(mapping);
1592 }
1593 }
1594
1595 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1596 {
1597 VMA_ITERATOR(vmi, mm, 0);
1598
1599 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1600 if (vma_iter_prealloc(&vmi, vma))
1601 return -ENOMEM;
1602
1603 vma_start_write(vma);
1604 vma_iter_store(&vmi, vma);
1605 vma_link_file(vma);
1606 mm->map_count++;
1607 validate_mm(mm);
1608 return 0;
1609 }
1610
1611 /*
1612 * Copy the vma structure to a new location in the same mm,
1613 * prior to moving page table entries, to effect an mremap move.
1614 */
1615 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1616 unsigned long addr, unsigned long len, pgoff_t pgoff,
1617 bool *need_rmap_locks)
1618 {
1619 struct vm_area_struct *vma = *vmap;
1620 unsigned long vma_start = vma->vm_start;
1621 struct mm_struct *mm = vma->vm_mm;
1622 struct vm_area_struct *new_vma;
1623 bool faulted_in_anon_vma = true;
1624 VMA_ITERATOR(vmi, mm, addr);
1625 VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
1626
1627 /*
1628 * If anonymous vma has not yet been faulted, update new pgoff
1629 * to match new location, to increase its chance of merging.
1630 */
1631 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1632 pgoff = addr >> PAGE_SHIFT;
1633 faulted_in_anon_vma = false;
1634 }
1635
1636 new_vma = find_vma_prev(mm, addr, &vmg.prev);
1637 if (new_vma && new_vma->vm_start < addr + len)
1638 return NULL; /* should never get here */
1639
1640 vmg.vma = NULL; /* New VMA range. */
1641 vmg.pgoff = pgoff;
1642 vmg.next = vma_iter_next_rewind(&vmi, NULL);
1643 new_vma = vma_merge_new_range(&vmg);
1644
1645 if (new_vma) {
1646 /*
1647 * Source vma may have been merged into new_vma
1648 */
1649 if (unlikely(vma_start >= new_vma->vm_start &&
1650 vma_start < new_vma->vm_end)) {
1651 /*
1652 * The only way we can get a vma_merge with
1653 * self during an mremap is if the vma hasn't
1654 * been faulted in yet and we were allowed to
1655 * reset the dst vma->vm_pgoff to the
1656 * destination address of the mremap to allow
1657 * the merge to happen. mremap must change the
1658 * vm_pgoff linearity between src and dst vmas
1659 * (in turn preventing a vma_merge) to be
1660 * safe. It is only safe to keep the vm_pgoff
1661 * linear if there are no pages mapped yet.
1662 */
1663 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1664 *vmap = vma = new_vma;
1665 }
1666 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1667 } else {
1668 new_vma = vm_area_dup(vma);
1669 if (!new_vma)
1670 goto out;
1671 vma_set_range(new_vma, addr, addr + len, pgoff);
1672 if (vma_dup_policy(vma, new_vma))
1673 goto out_free_vma;
1674 if (anon_vma_clone(new_vma, vma))
1675 goto out_free_mempol;
1676 if (new_vma->vm_file)
1677 get_file(new_vma->vm_file);
1678 if (new_vma->vm_ops && new_vma->vm_ops->open)
1679 new_vma->vm_ops->open(new_vma);
1680 if (vma_link(mm, new_vma))
1681 goto out_vma_link;
1682 *need_rmap_locks = false;
1683 }
1684 return new_vma;
1685
1686 out_vma_link:
1687 if (new_vma->vm_ops && new_vma->vm_ops->close)
1688 new_vma->vm_ops->close(new_vma);
1689
1690 if (new_vma->vm_file)
1691 fput(new_vma->vm_file);
1692
1693 unlink_anon_vmas(new_vma);
1694 out_free_mempol:
1695 mpol_put(vma_policy(new_vma));
1696 out_free_vma:
1697 vm_area_free(new_vma);
1698 out:
1699 return NULL;
1700 }
1701
1702 /*
1703 * Rough compatibility check to quickly see if it's even worth looking
1704 * at sharing an anon_vma.
1705 *
1706 * They need to have the same vm_file, and the flags can only differ
1707 * in things that mprotect may change.
1708 *
1709 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1710 * we can merge the two vma's. For example, we refuse to merge a vma if
1711 * there is a vm_ops->close() function, because that indicates that the
1712 * driver is doing some kind of reference counting. But that doesn't
1713 * really matter for the anon_vma sharing case.
1714 */
1715 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1716 {
1717 return a->vm_end == b->vm_start &&
1718 mpol_equal(vma_policy(a), vma_policy(b)) &&
1719 a->vm_file == b->vm_file &&
1720 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1721 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1722 }
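/*
 * Example of the vm_pgoff condition above (numbers are illustrative): if
 * 'a' spans three pages starting at pgoff 7 and 'b' begins exactly at
 * a->vm_end, the two are compatible only when b->vm_pgoff == 10, i.e. the
 * two mappings describe one contiguous range of offsets.
 */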

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vmas are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vmas are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
					  struct vm_area_struct *a,
					  struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma. It does this because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;
	struct vm_area_struct *prev, *next;
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);

	/* Try next first. */
	next = vma_iter_load(&vmi);
	if (next) {
		anon_vma = reusable_anon_vma(next, vma, next);
		if (anon_vma)
			return anon_vma;
	}

	prev = vma_prev(&vmi);
	VM_BUG_ON_VMA(prev != vma, vma);
	prev = vma_prev(&vmi);
	/* Try prev next. */
	if (prev)
		anon_vma = reusable_anon_vma(prev, prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}
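
/*
 * Illustrative sketch (not part of the original file): the intended caller
 * pattern resembles __anon_vma_prepare() in mm/rmap.c - prefer a
 * neighbour's anon_vma so a later merge remains possible, and only
 * allocate a fresh one when nothing suitable is found. The example_* name
 * is made up; allocation and the locking needed to attach the anon_vma to
 * the vma are deliberately elided.
 */
static __maybe_unused struct anon_vma *example_pick_anon_vma(struct vm_area_struct *vma)
{
	/* Nothing to do if a page fault already attached one. */
	if (vma->anon_vma)
		return vma->anon_vma;

	/*
	 * Returns NULL when no adjacent, compatible vma has a reusable
	 * (singleton) anon_vma; the real caller then allocates a new one.
	 */
	return find_mergeable_anon_vma(vma);
}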

static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
{
	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
}

static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
		(VM_WRITE | VM_SHARED);
}

static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
	/* No managed pages to writeback. */
	if (vma->vm_flags & VM_PFNMAP)
		return false;

	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}

/*
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
 */
bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
{
	/* Only shared, writable VMAs require dirty tracking. */
	if (!vma_is_shared_writable(vma))
		return false;

	/* Does the filesystem need to be notified? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * Even if the filesystem doesn't indicate a need for writenotify, if
	 * it can writeback, dirty tracking is still required.
	 */
	return vma_fs_can_writeback(vma);
}
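
/*
 * Illustrative sketch (not part of the original file): how a GUP-style
 * caller might use vma_needs_dirty_tracking(). A long-term pin of a page
 * in such a mapping is problematic because the pinned page can be written
 * long after the pin was taken, with no page_mkwrite notification and no
 * chance for the filesystem to observe the dirtying. The example_* name is
 * made up.
 */
static __maybe_unused bool example_longterm_pin_allowed(struct vm_area_struct *vma)
{
	/* Private or read-only mappings, for example, never need dirty tracking. */
	return !vma_needs_dirty_tracking(vma);
}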

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	/* If it was private or non-writable, the write bit is already clear */
	if (!vma_is_shared_writable(vma))
		return false;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * The open routine did something to the protections that
	 * pgprot_modify won't preserve?
	 */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
		return false;

	/*
	 * Do we need to track softdirty? hugetlb does not support softdirty
	 * tracking yet.
	 */
	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
		return true;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_wp(vma))
		return true;

	/* Can the mapping track the dirty pages? */
	return vma_fs_can_writeback(vma);
}
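
/*
 * Illustrative sketch (not part of the original file): this mirrors what
 * vma_set_page_prot() in mm/mmap.c does with the result. When write
 * notifications are wanted, vm_page_prot is recomputed without VM_SHARED,
 * so the first write to each page takes a fault and the filesystem (or
 * softdirty/uffd-wp tracking) gets to see it. The example_* name is made
 * up.
 */
static __maybe_unused void example_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* Downgrade to the private protection so writes fault. */
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}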

static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of the rb_root node pointer can't change from
		 * under us because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
		/*
		 * We can safely set the bit after taking the
		 * anon_vma->root->rwsem. If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need for atomic instructions here; the pointer
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}

static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
	}
}

/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mapping);
 * - all vmas marked locked
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsem locks
 *
 * We can take all locks within these types in any order because the VM
 * code doesn't nest them and we're protected from parallel
 * mm_take_all_locks() by mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);

	mutex_lock(&mm_all_locks_mutex);

	/*
	 * vma_start_write() does not have a complement in mm_drop_all_locks()
	 * because vma_start_write() is always asymmetrical; it marks a VMA as
	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
	 * is reached.
	 */
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		vma_start_write(vma);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
		    is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
		    !is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}

static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of the rb_root node pointer can't change to 0
		 * from under us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need for atomic instructions here; the pointer
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}

static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}

/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for_each_vma(vmi, vma) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}

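/*
 * Illustrative sketch (not part of the original file): the caller pattern
 * described in the comments above, as used for example when registering an
 * mmu notifier. The mmap_lock must be held for writing across the whole
 * take/drop sequence, and -EINTR is returned if a signal arrives while the
 * per-VMA, i_mmap and anon_vma locks are being collected. The example_*
 * name is made up.
 */
static __maybe_unused int example_with_all_locks_held(struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = mm_take_all_locks(mm);
	if (ret)
		goto out;	/* -EINTR: interrupted by a signal */

	/*
	 * At this point no page fault, rmap walk or truncation can run
	 * against this mm; operate on its VMAs and page tables here.
	 */

	mm_drop_all_locks(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}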