1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /*
4 * VMA-specific functions.
5 */
6
7 #include "vma_internal.h"
8 #include "vma.h"
9
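/*
 * State threaded through the stages of a mmap() mapping operation: the
 * requested range, its file/flags, accounting data, and any unmap state
 * needed when existing mappings in the range are replaced.
 */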
10 struct mmap_state {
11 struct mm_struct *mm;
12 struct vma_iterator *vmi;
13
14 unsigned long addr;
15 unsigned long end;
16 pgoff_t pgoff;
17 unsigned long pglen;
18 unsigned long flags;
19 struct file *file;
20
21 unsigned long charged;
22 bool retry_merge;
23
24 struct vm_area_struct *prev;
25 struct vm_area_struct *next;
26
27 /* Unmapping state. */
28 struct vma_munmap_struct vms;
29 struct ma_state mas_detach;
30 struct maple_tree mt_detach;
31 };
32
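/*
 * MMAP_STATE() declares and initialises an mmap_state covering
 * [addr_, addr_ + len_); VMG_MMAP_STATE() derives a vma_merge_struct
 * from it for merge attempts.
 */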
33 #define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, flags_, file_) \
34 struct mmap_state name = { \
35 .mm = mm_, \
36 .vmi = vmi_, \
37 .addr = addr_, \
38 .end = (addr_) + (len_), \
39 .pgoff = pgoff_, \
40 .pglen = PHYS_PFN(len_), \
41 .flags = flags_, \
42 .file = file_, \
43 }
44
45 #define VMG_MMAP_STATE(name, map_, vma_) \
46 struct vma_merge_struct name = { \
47 .mm = (map_)->mm, \
48 .vmi = (map_)->vmi, \
49 .start = (map_)->addr, \
50 .end = (map_)->end, \
51 .flags = (map_)->flags, \
52 .pgoff = (map_)->pgoff, \
53 .file = (map_)->file, \
54 .prev = (map_)->prev, \
55 .vma = vma_, \
56 .next = (vma_) ? NULL : (map_)->next, \
57 .state = VMA_MERGE_START, \
58 .merge_flags = VMG_FLAG_DEFAULT, \
59 }
60
61 static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
62 {
63 struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
64
65 if (!mpol_equal(vmg->policy, vma_policy(vma)))
66 return false;
67 /*
68 * VM_SOFTDIRTY should not prevent VMA merging if the flags match in
69 * everything except the dirty bit -- the caller should mark the
70 * merged VMA as dirty. If the dirty bit were not excluded from the
71 * comparison, we would increase pressure on the memory system by
72 * forcing the kernel to generate new VMAs when the old one could be
73 * extended instead.
74 */
75 if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
76 return false;
77 if (vma->vm_file != vmg->file)
78 return false;
79 if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
80 return false;
81 if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
82 return false;
83 return true;
84 }
85
86 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
87 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
88 {
89 /*
90 * The list_is_singular() test is to avoid merging VMAs cloned from
91 * parents; this improves scalability by reducing anon_vma lock contention.
92 */
93 if ((!anon_vma1 || !anon_vma2) && (!vma ||
94 list_is_singular(&vma->anon_vma_chain)))
95 return true;
96 return anon_vma1 == anon_vma2;
97 }
98
99 /* Are the anon_vma's belonging to each VMA compatible with one another? */
100 static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
101 struct vm_area_struct *vma2)
102 {
103 return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
104 }
105
106 /*
107 * init_multi_vma_prep() - Initializer for struct vma_prepare
108 * @vp: The vma_prepare struct
109 * @vma: The vma that will be altered once locked
110 * @next: The next vma if it is to be adjusted
111 * @remove: The first vma to be removed
112 * @remove2: The second vma to be removed
113 */
114 static void init_multi_vma_prep(struct vma_prepare *vp,
115 struct vm_area_struct *vma,
116 struct vm_area_struct *next,
117 struct vm_area_struct *remove,
118 struct vm_area_struct *remove2)
119 {
120 memset(vp, 0, sizeof(struct vma_prepare));
121 vp->vma = vma;
122 vp->anon_vma = vma->anon_vma;
123 vp->remove = remove;
124 vp->remove2 = remove2;
125 vp->adj_next = next;
126 if (!vp->anon_vma && next)
127 vp->anon_vma = next->anon_vma;
128
129 vp->file = vma->vm_file;
130 if (vp->file)
131 vp->mapping = vma->vm_file->f_mapping;
132
133 }
134
135 /*
136 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
137 * in front of (at a lower virtual address and file offset than) the vma.
138 *
139 * We cannot merge two vmas if they have differently assigned (non-NULL)
140 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
141 *
142 * We don't check here for the merged mmap wrapping around the end of pagecache
143 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
144 * wrap, nor mmaps which cover the final page at index -1UL.
145 *
146 * We assume the vma may be removed as part of the merge.
147 */
148 static bool can_vma_merge_before(struct vma_merge_struct *vmg)
149 {
150 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
151
152 if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
153 is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
154 if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
155 return true;
156 }
157
158 return false;
159 }
160
161 /*
162 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
163 * beyond (at a higher virtual address and file offset than) the vma.
164 *
165 * We cannot merge two vmas if they have differently assigned (non-NULL)
166 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
167 *
168 * We assume that vma is not removed as part of the merge.
169 */
170 static bool can_vma_merge_after(struct vma_merge_struct *vmg)
171 {
172 if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
173 is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
174 if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
175 return true;
176 }
177 return false;
178 }
179
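/*
 * Link a file-backed VMA into its address_space's i_mmap interval tree.
 * The caller must hold the mapping's i_mmap_rwsem for writing.
 */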
180 static void __vma_link_file(struct vm_area_struct *vma,
181 struct address_space *mapping)
182 {
183 if (vma_is_shared_maywrite(vma))
184 mapping_allow_writable(mapping);
185
186 flush_dcache_mmap_lock(mapping);
187 vma_interval_tree_insert(vma, &mapping->i_mmap);
188 flush_dcache_mmap_unlock(mapping);
189 }
190
191 /*
192 * Requires inode->i_mapping->i_mmap_rwsem
193 */
194 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
195 struct address_space *mapping)
196 {
197 if (vma_is_shared_maywrite(vma))
198 mapping_unmap_writable(mapping);
199
200 flush_dcache_mmap_lock(mapping);
201 vma_interval_tree_remove(vma, &mapping->i_mmap);
202 flush_dcache_mmap_unlock(mapping);
203 }
204
205 /*
206 * vma_prepare() - Helper function for handling locking VMAs prior to altering
207 * @vp: The initialized vma_prepare struct
208 */
209 static void vma_prepare(struct vma_prepare *vp)
210 {
211 if (vp->file) {
212 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
213
214 if (vp->adj_next)
215 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
216 vp->adj_next->vm_end);
217
218 i_mmap_lock_write(vp->mapping);
219 if (vp->insert && vp->insert->vm_file) {
220 /*
221 * Put into interval tree now, so instantiated pages
222 * are visible to arm/parisc __flush_dcache_page
223 * throughout; but we cannot insert into address
224 * space until vma start or end is updated.
225 */
226 __vma_link_file(vp->insert,
227 vp->insert->vm_file->f_mapping);
228 }
229 }
230
231 if (vp->anon_vma) {
232 anon_vma_lock_write(vp->anon_vma);
233 anon_vma_interval_tree_pre_update_vma(vp->vma);
234 if (vp->adj_next)
235 anon_vma_interval_tree_pre_update_vma(vp->adj_next);
236 }
237
238 if (vp->file) {
239 flush_dcache_mmap_lock(vp->mapping);
240 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
241 if (vp->adj_next)
242 vma_interval_tree_remove(vp->adj_next,
243 &vp->mapping->i_mmap);
244 }
245
246 }
247
248 /*
249 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
250 * or for inserting a VMA.
251 *
252 * @vp: The vma_prepare struct
253 * @vmi: The vma iterator
254 * @mm: The mm_struct
255 */
256 static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
257 struct mm_struct *mm)
258 {
259 if (vp->file) {
260 if (vp->adj_next)
261 vma_interval_tree_insert(vp->adj_next,
262 &vp->mapping->i_mmap);
263 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
264 flush_dcache_mmap_unlock(vp->mapping);
265 }
266
267 if (vp->remove && vp->file) {
268 __remove_shared_vm_struct(vp->remove, vp->mapping);
269 if (vp->remove2)
270 __remove_shared_vm_struct(vp->remove2, vp->mapping);
271 } else if (vp->insert) {
272 /*
273 * split_vma has split insert from vma, and needs
274 * us to insert it before dropping the locks
275 * (it may either follow vma or precede it).
276 */
277 vma_iter_store(vmi, vp->insert);
278 mm->map_count++;
279 }
280
281 if (vp->anon_vma) {
282 anon_vma_interval_tree_post_update_vma(vp->vma);
283 if (vp->adj_next)
284 anon_vma_interval_tree_post_update_vma(vp->adj_next);
285 anon_vma_unlock_write(vp->anon_vma);
286 }
287
288 if (vp->file) {
289 i_mmap_unlock_write(vp->mapping);
290 uprobe_mmap(vp->vma);
291
292 if (vp->adj_next)
293 uprobe_mmap(vp->adj_next);
294 }
295
296 if (vp->remove) {
297 again:
298 vma_mark_detached(vp->remove, true);
299 if (vp->file) {
300 uprobe_munmap(vp->remove, vp->remove->vm_start,
301 vp->remove->vm_end);
302 fput(vp->file);
303 }
304 if (vp->remove->anon_vma)
305 anon_vma_merge(vp->vma, vp->remove);
306 mm->map_count--;
307 mpol_put(vma_policy(vp->remove));
308 if (!vp->remove2)
309 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
310 vm_area_free(vp->remove);
311
312 /*
313 * In mprotect's case 6 (see comments on vma_merge),
314 * we are removing both mid and next vmas
315 */
316 if (vp->remove2) {
317 vp->remove = vp->remove2;
318 vp->remove2 = NULL;
319 goto again;
320 }
321 }
322 if (vp->insert && vp->file)
323 uprobe_mmap(vp->insert);
324 }
325
326 /*
327 * init_vma_prep() - Initializer wrapper for vma_prepare struct
328 * @vp: The vma_prepare struct
329 * @vma: The vma that will be altered once locked
330 */
331 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
332 {
333 init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
334 }
335
336 /*
337 * Can the proposed VMA be merged with the left (previous) VMA taking into
338 * account the start position of the proposed range.
339 */
340 static bool can_vma_merge_left(struct vma_merge_struct *vmg)
341
342 {
343 return vmg->prev && vmg->prev->vm_end == vmg->start &&
344 can_vma_merge_after(vmg);
345 }
346
347 /*
348 * Can the proposed VMA be merged with the right (next) VMA taking into
349 * account the end position of the proposed range.
350 *
351 * In addition, if we can merge with the left VMA, ensure that left and right
352 * anon_vma's are also compatible.
353 */
354 static bool can_vma_merge_right(struct vma_merge_struct *vmg,
355 bool can_merge_left)
356 {
357 if (!vmg->next || vmg->end != vmg->next->vm_start ||
358 !can_vma_merge_before(vmg))
359 return false;
360
361 if (!can_merge_left)
362 return true;
363
364 /*
365 * If we can merge with prev (left) and next (right), indicating that
366 * each VMA's anon_vma is compatible with the proposed anon_vma, this
367 * does not mean prev and next are compatible with EACH OTHER.
368 *
369 * We therefore check this in addition to mergeability to either side.
370 */
371 return are_anon_vmas_compatible(vmg->prev, vmg->next);
372 }
373
374 /*
375 * Close a vm structure and free it.
376 */
377 void remove_vma(struct vm_area_struct *vma, bool unreachable)
378 {
379 might_sleep();
380 vma_close(vma);
381 if (vma->vm_file)
382 fput(vma->vm_file);
383 mpol_put(vma_policy(vma));
384 if (unreachable)
385 __vm_area_free(vma);
386 else
387 vm_area_free(vma);
388 }
389
390 /*
391 * Get rid of page table information in the indicated region.
392 *
393 * Called with the mm semaphore held.
394 */
395 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
396 struct vm_area_struct *prev, struct vm_area_struct *next)
397 {
398 struct mm_struct *mm = vma->vm_mm;
399 struct mmu_gather tlb;
400
401 lru_add_drain();
402 tlb_gather_mmu(&tlb, mm);
403 update_hiwater_rss(mm);
404 unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
405 /* mm_wr_locked = */ true);
406 mas_set(mas, vma->vm_end);
407 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
408 next ? next->vm_start : USER_PGTABLES_CEILING,
409 /* mm_wr_locked = */ true);
410 tlb_finish_mmu(&tlb);
411 }
412
413 /*
414 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
415 * has already been checked or doesn't make sense to fail.
416 * VMA Iterator will point to the original VMA.
417 */
418 static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
419 unsigned long addr, int new_below)
420 {
421 struct vma_prepare vp;
422 struct vm_area_struct *new;
423 int err;
424
425 WARN_ON(vma->vm_start >= addr);
426 WARN_ON(vma->vm_end <= addr);
427
428 if (vma->vm_ops && vma->vm_ops->may_split) {
429 err = vma->vm_ops->may_split(vma, addr);
430 if (err)
431 return err;
432 }
433
434 new = vm_area_dup(vma);
435 if (!new)
436 return -ENOMEM;
437
438 if (new_below) {
439 new->vm_end = addr;
440 } else {
441 new->vm_start = addr;
442 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
443 }
444
445 err = -ENOMEM;
446 vma_iter_config(vmi, new->vm_start, new->vm_end);
447 if (vma_iter_prealloc(vmi, new))
448 goto out_free_vma;
449
450 err = vma_dup_policy(vma, new);
451 if (err)
452 goto out_free_vmi;
453
454 err = anon_vma_clone(new, vma);
455 if (err)
456 goto out_free_mpol;
457
458 if (new->vm_file)
459 get_file(new->vm_file);
460
461 if (new->vm_ops && new->vm_ops->open)
462 new->vm_ops->open(new);
463
464 vma_start_write(vma);
465 vma_start_write(new);
466
467 init_vma_prep(&vp, vma);
468 vp.insert = new;
469 vma_prepare(&vp);
470 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
471
472 if (new_below) {
473 vma->vm_start = addr;
474 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
475 } else {
476 vma->vm_end = addr;
477 }
478
479 /* vma_complete stores the new vma */
480 vma_complete(&vp, vmi, vma->vm_mm);
481 validate_mm(vma->vm_mm);
482
483 /* Success. */
484 if (new_below)
485 vma_next(vmi);
486 else
487 vma_prev(vmi);
488
489 return 0;
490
491 out_free_mpol:
492 mpol_put(vma_policy(new));
493 out_free_vmi:
494 vma_iter_free(vmi);
495 out_free_vma:
496 vm_area_free(new);
497 return err;
498 }
499
500 /*
501 * Split a vma into two pieces at address 'addr', a new vma is allocated
502 * either for the first part or the tail.
503 */
504 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
505 unsigned long addr, int new_below)
506 {
507 if (vma->vm_mm->map_count >= sysctl_max_map_count)
508 return -ENOMEM;
509
510 return __split_vma(vmi, vma, addr, new_below);
511 }
512
513 /*
514 * vma has some anon_vma assigned, and is already inserted on that
515 * anon_vma's interval trees.
516 *
517 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
518 * vma must be removed from the anon_vma's interval trees using
519 * anon_vma_interval_tree_pre_update_vma().
520 *
521 * After the update, the vma will be reinserted using
522 * anon_vma_interval_tree_post_update_vma().
523 *
524 * The entire update must be protected by exclusive mmap_lock and by
525 * the root anon_vma's mutex.
526 */
527 void
528 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
529 {
530 struct anon_vma_chain *avc;
531
532 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
533 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
534 }
535
536 void
537 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
538 {
539 struct anon_vma_chain *avc;
540
541 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
542 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
543 }
544
545 /*
546 * dup_anon_vma() - Helper function to duplicate anon_vma
547 * @dst: The destination VMA
548 * @src: The source VMA
549 * @dup: Pointer to the destination VMA when successful.
550 *
551 * Returns: 0 on success.
552 */
553 static int dup_anon_vma(struct vm_area_struct *dst,
554 struct vm_area_struct *src, struct vm_area_struct **dup)
555 {
556 /*
557 * Easily overlooked: when mprotect shifts the boundary, make sure the
558 * expanding vma has anon_vma set if the shrinking vma had, to cover any
559 * anon pages imported.
560 */
561 if (src->anon_vma && !dst->anon_vma) {
562 int ret;
563
564 vma_assert_write_locked(dst);
565 dst->anon_vma = src->anon_vma;
566 ret = anon_vma_clone(dst, src);
567 if (ret)
568 return ret;
569
570 *dup = dst;
571 }
572
573 return 0;
574 }
575
576 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
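/*
 * Debug-only consistency check: validate the maple tree, verify that each
 * VMA's range matches what the iterator reports, check anon_vma interval
 * trees where enabled, and ensure the VMA count matches mm->map_count.
 */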
577 void validate_mm(struct mm_struct *mm)
578 {
579 int bug = 0;
580 int i = 0;
581 struct vm_area_struct *vma;
582 VMA_ITERATOR(vmi, mm, 0);
583
584 mt_validate(&mm->mm_mt);
585 for_each_vma(vmi, vma) {
586 #ifdef CONFIG_DEBUG_VM_RB
587 struct anon_vma *anon_vma = vma->anon_vma;
588 struct anon_vma_chain *avc;
589 #endif
590 unsigned long vmi_start, vmi_end;
591 bool warn = 0;
592
593 vmi_start = vma_iter_addr(&vmi);
594 vmi_end = vma_iter_end(&vmi);
595 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
596 warn = 1;
597
598 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
599 warn = 1;
600
601 if (warn) {
602 pr_emerg("issue in %s\n", current->comm);
603 dump_stack();
604 dump_vma(vma);
605 pr_emerg("tree range: %px start %lx end %lx\n", vma,
606 vmi_start, vmi_end - 1);
607 vma_iter_dump_tree(&vmi);
608 }
609
610 #ifdef CONFIG_DEBUG_VM_RB
611 if (anon_vma) {
612 anon_vma_lock_read(anon_vma);
613 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
614 anon_vma_interval_tree_verify(avc);
615 anon_vma_unlock_read(anon_vma);
616 }
617 #endif
618 /* Check for an infinite loop */
619 if (++i > mm->map_count + 10) {
620 i = -1;
621 break;
622 }
623 }
624 if (i != mm->map_count) {
625 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
626 bug = 1;
627 }
628 VM_BUG_ON_MM(bug, mm);
629 }
630 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
631
632 /* Actually perform the VMA merge operation. */
633 static int commit_merge(struct vma_merge_struct *vmg,
634 struct vm_area_struct *adjust,
635 struct vm_area_struct *remove,
636 struct vm_area_struct *remove2,
637 long adj_start,
638 bool expanded)
639 {
640 struct vma_prepare vp;
641
642 init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);
643
644 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
645 vp.anon_vma != adjust->anon_vma);
646
647 if (expanded) {
648 /* Note: vma iterator must be pointing to 'start'. */
649 vma_iter_config(vmg->vmi, vmg->start, vmg->end);
650 } else {
651 vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
652 adjust->vm_end);
653 }
654
655 if (vma_iter_prealloc(vmg->vmi, vmg->vma))
656 return -ENOMEM;
657
658 vma_prepare(&vp);
659 vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
660 vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
661
662 if (expanded)
663 vma_iter_store(vmg->vmi, vmg->vma);
664
665 if (adj_start) {
666 adjust->vm_start += adj_start;
667 adjust->vm_pgoff += PHYS_PFN(adj_start);
668 if (adj_start < 0) {
669 WARN_ON(expanded);
670 vma_iter_store(vmg->vmi, adjust);
671 }
672 }
673
674 vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);
675
676 return 0;
677 }
678
679 /* We can only remove VMAs when merging if they do not have a close hook. */
680 static bool can_merge_remove_vma(struct vm_area_struct *vma)
681 {
682 return !vma->vm_ops || !vma->vm_ops->close;
683 }
684
685 /*
686 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
687 * attributes modified.
688 *
689 * @vmg: Describes the modifications being made to a VMA and associated
690 * metadata.
691 *
692 * When the attributes of a range within a VMA change, it might be possible
693 * for immediately adjacent VMAs to be merged into that VMA due to having
694 * identical properties.
695 *
696 * This function checks for the existence of any such mergeable VMAs and updates
697 * the maple tree describing the @vmg->vma->vm_mm address space to account for
698 * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
699 *
700 * As part of this operation, if a merge occurs, the @vmg object will have its
701 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
702 * calls to this function should reset these fields.
703 *
704 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
705 *
706 * ASSUMPTIONS:
707 * - The caller must assign the VMA to be modified to @vmg->vma.
708 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
709 * - The caller must not set @vmg->next, as we determine this.
710 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
711 * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
712 */
713 static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg)
714 {
715 struct vm_area_struct *vma = vmg->vma;
716 struct vm_area_struct *prev = vmg->prev;
717 struct vm_area_struct *next, *res;
718 struct vm_area_struct *anon_dup = NULL;
719 struct vm_area_struct *adjust = NULL;
720 unsigned long start = vmg->start;
721 unsigned long end = vmg->end;
722 bool left_side = vma && start == vma->vm_start;
723 bool right_side = vma && end == vma->vm_end;
724 int err = 0;
725 long adj_start = 0;
726 bool merge_will_delete_vma, merge_will_delete_next;
727 bool merge_left, merge_right, merge_both;
728 bool expanded;
729
730 mmap_assert_write_locked(vmg->mm);
731 VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */
732 VM_WARN_ON(vmg->next); /* We set this. */
733 VM_WARN_ON(prev && start <= prev->vm_start);
734 VM_WARN_ON(start >= end);
735 /*
736 * If vma == prev, then we are offset into a VMA. Otherwise, we must
737 * span a portion of the VMA.
738 */
739 VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||
740 vmg->end > vma->vm_end));
741 /* The vmi must be positioned within vmg->vma. */
742 VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
743 vma_iter_addr(vmg->vmi) < vma->vm_end));
744
745 vmg->state = VMA_MERGE_NOMERGE;
746
747 /*
748 * If this is a special mapping, or if the range being modified is at
749 * neither the leftmost nor rightmost edge of the VMA, then we have no
750 * chance of merging and should abort.
751 */
752 if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
753 return NULL;
754
755 if (left_side)
756 merge_left = can_vma_merge_left(vmg);
757 else
758 merge_left = false;
759
760 if (right_side) {
761 next = vmg->next = vma_iter_next_range(vmg->vmi);
762 vma_iter_prev_range(vmg->vmi);
763
764 merge_right = can_vma_merge_right(vmg, merge_left);
765 } else {
766 merge_right = false;
767 next = NULL;
768 }
769
770 if (merge_left) /* If merging prev, position iterator there. */
771 vma_prev(vmg->vmi);
772 else if (!merge_right) /* If we have nothing to merge, abort. */
773 return NULL;
774
775 merge_both = merge_left && merge_right;
776 /* If we span the entire VMA, a merge implies it will be deleted. */
777 merge_will_delete_vma = left_side && right_side;
778
779 /*
780 * If we need to remove vma in its entirety but are unable to do so,
781 * we have no sensible recourse but to abort the merge.
782 */
783 if (merge_will_delete_vma && !can_merge_remove_vma(vma))
784 return NULL;
785
786 /*
787 * If we merge both VMAs, then next is also deleted. This implies
788 * merge_will_delete_vma also.
789 */
790 merge_will_delete_next = merge_both;
791
792 /*
793 * If we cannot delete next, then we can reduce the operation to merging
794 * prev and vma (thereby deleting vma).
795 */
796 if (merge_will_delete_next && !can_merge_remove_vma(next)) {
797 merge_will_delete_next = false;
798 merge_right = false;
799 merge_both = false;
800 }
801
802 /* No matter what happens, we will be adjusting vma. */
803 vma_start_write(vma);
804
805 if (merge_left)
806 vma_start_write(prev);
807
808 if (merge_right)
809 vma_start_write(next);
810
811 if (merge_both) {
812 /*
813 * |<----->|
814 * |-------*********-------|
815 * prev vma next
816 * extend delete delete
817 */
818
819 vmg->vma = prev;
820 vmg->start = prev->vm_start;
821 vmg->end = next->vm_end;
822 vmg->pgoff = prev->vm_pgoff;
823
824 /*
825 * We already ensured anon_vma compatibility above, so now it's
826 * simply a case of, if prev has no anon_vma object, which of
827 * next or vma contains the anon_vma we must duplicate.
828 */
829 err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
830 } else if (merge_left) {
831 /*
832 * |<----->| OR
833 * |<--------->|
834 * |-------*************
835 * prev vma
836 * extend shrink/delete
837 */
838
839 vmg->vma = prev;
840 vmg->start = prev->vm_start;
841 vmg->pgoff = prev->vm_pgoff;
842
843 if (!merge_will_delete_vma) {
844 adjust = vma;
845 adj_start = vmg->end - vma->vm_start;
846 }
847
848 err = dup_anon_vma(prev, vma, &anon_dup);
849 } else { /* merge_right */
850 /*
851 * |<----->| OR
852 * |<--------->|
853 * *************-------|
854 * vma next
855 * shrink/delete extend
856 */
857
858 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
859
860 VM_WARN_ON(!merge_right);
861 /* If we are offset into a VMA, then prev must be vma. */
862 VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev);
863
864 if (merge_will_delete_vma) {
865 vmg->vma = next;
866 vmg->end = next->vm_end;
867 vmg->pgoff = next->vm_pgoff - pglen;
868 } else {
869 /*
870 * We shrink vma and expand next.
871 *
872 * IMPORTANT: This is the ONLY case where the final
873 * merged VMA is NOT vmg->vma, but rather vmg->next.
874 */
875
876 vmg->start = vma->vm_start;
877 vmg->end = start;
878 vmg->pgoff = vma->vm_pgoff;
879
880 adjust = next;
881 adj_start = -(vma->vm_end - start);
882 }
883
884 err = dup_anon_vma(next, vma, &anon_dup);
885 }
886
887 if (err)
888 goto abort;
889
890 /*
891 * In nearly all cases, we expand vmg->vma. There is one exception -
892 * merge_right where we partially span the VMA. In this case we shrink
893 * the end of vmg->vma and adjust the start of vmg->next accordingly.
894 */
895 expanded = !merge_right || merge_will_delete_vma;
896
897 if (commit_merge(vmg, adjust,
898 merge_will_delete_vma ? vma : NULL,
899 merge_will_delete_next ? next : NULL,
900 adj_start, expanded)) {
901 if (anon_dup)
902 unlink_anon_vmas(anon_dup);
903
904 vmg->state = VMA_MERGE_ERROR_NOMEM;
905 return NULL;
906 }
907
908 res = merge_left ? prev : next;
909 khugepaged_enter_vma(res, vmg->flags);
910
911 vmg->state = VMA_MERGE_SUCCESS;
912 return res;
913
914 abort:
915 vma_iter_set(vmg->vmi, start);
916 vma_iter_load(vmg->vmi);
917 vmg->state = VMA_MERGE_ERROR_NOMEM;
918 return NULL;
919 }
920
921 /*
922 * vma_merge_new_range - Attempt to merge a new VMA into address space
923 *
924 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
925 * (exclusive), which we try to merge with any adjacent VMAs if possible.
926 *
927 * We are about to add a VMA to the address space starting at @vmg->start and
928 * ending at @vmg->end. There are three different possible scenarios:
929 *
930 * 1. There is a VMA with identical properties immediately adjacent to the
931 * proposed new VMA [@vmg->start, @vmg->end) either before or after it -
932 * EXPAND that VMA:
933 *
934 * Proposed: |-----| or |-----|
935 * Existing: |----| |----|
936 *
937 * 2. There are VMAs with identical properties immediately adjacent to the
938 * proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
939 * EXPAND the former and REMOVE the latter:
940 *
941 * Proposed: |-----|
942 * Existing: |----| |----|
943 *
944 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
945 * VMAs do not have identical attributes - NO MERGE POSSIBLE.
946 *
947 * In instances where we can merge, this function returns the expanded VMA which
948 * will have its range adjusted accordingly and the underlying maple tree also
949 * adjusted.
950 *
951 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
952 * to the VMA we expanded.
953 *
954 * This function adjusts @vmg to provide @vmg->next if not already specified,
955 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
956 *
957 * ASSUMPTIONS:
958 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
959 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
960 *   other than VMAs that will be unmapped should the operation succeed.
961 * - The caller must have specified the previous vma in @vmg->prev.
962 * - The caller must have specified the next vma in @vmg->next.
963 * - The caller must have positioned the vmi at or before the gap.
964 */
965 struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
966 {
967 struct vm_area_struct *prev = vmg->prev;
968 struct vm_area_struct *next = vmg->next;
969 unsigned long end = vmg->end;
970 bool can_merge_left, can_merge_right;
971 bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;
972
973 mmap_assert_write_locked(vmg->mm);
974 VM_WARN_ON(vmg->vma);
975 /* vmi must point at or before the gap. */
976 VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);
977
978 vmg->state = VMA_MERGE_NOMERGE;
979
980 /* Special VMAs are unmergeable, also if no prev/next. */
981 if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
982 return NULL;
983
984 can_merge_left = can_vma_merge_left(vmg);
985 can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);
986
987 /* If we can merge with the next VMA, adjust vmg accordingly. */
988 if (can_merge_right) {
989 vmg->end = next->vm_end;
990 vmg->vma = next;
991 }
992
993 /* If we can merge with the previous VMA, adjust vmg accordingly. */
994 if (can_merge_left) {
995 vmg->start = prev->vm_start;
996 vmg->vma = prev;
997 vmg->pgoff = prev->vm_pgoff;
998
999 /*
1000 * If this merge would result in removal of the next VMA but we
1001 * are not permitted to do so, reduce the operation to merging
1002 * prev and vma.
1003 */
1004 if (can_merge_right && !can_merge_remove_vma(next))
1005 vmg->end = end;
1006
1007 /* In expand-only case we are already positioned at prev. */
1008 if (!just_expand) {
1009 /* Equivalent to going to the previous range. */
1010 vma_prev(vmg->vmi);
1011 }
1012 }
1013
1014 /*
1015 * Now try to expand adjacent VMA(s). This takes care of removing the
1016 * following VMA if we have VMAs on both sides.
1017 */
1018 if (vmg->vma && !vma_expand(vmg)) {
1019 khugepaged_enter_vma(vmg->vma, vmg->flags);
1020 vmg->state = VMA_MERGE_SUCCESS;
1021 return vmg->vma;
1022 }
1023
1024 return NULL;
1025 }
1026
1027 /*
1028 * vma_expand - Expand an existing VMA
1029 *
1030 * @vmg: Describes a VMA expansion operation.
1031 *
1032 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
1033 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
1034 * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with
1035 * vmg->next needs to be handled by the caller.
1036 *
1037 * Returns: 0 on success.
1038 *
1039 * ASSUMPTIONS:
1040 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
1041 * - The caller must have set @vmg->vma and @vmg->next.
1042 */
1043 int vma_expand(struct vma_merge_struct *vmg)
1044 {
1045 struct vm_area_struct *anon_dup = NULL;
1046 bool remove_next = false;
1047 struct vm_area_struct *vma = vmg->vma;
1048 struct vm_area_struct *next = vmg->next;
1049
1050 mmap_assert_write_locked(vmg->mm);
1051
1052 vma_start_write(vma);
1053 if (next && (vma != next) && (vmg->end == next->vm_end)) {
1054 int ret;
1055
1056 remove_next = true;
1057 /* This should already have been checked by this point. */
1058 VM_WARN_ON(!can_merge_remove_vma(next));
1059 vma_start_write(next);
1060 ret = dup_anon_vma(vma, next, &anon_dup);
1061 if (ret)
1062 return ret;
1063 }
1064
1065 /* Not merging but overwriting any part of next is not handled. */
1066 VM_WARN_ON(next && !remove_next &&
1067 next != vma && vmg->end > next->vm_start);
1068 /* Only handles expanding */
1069 VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);
1070
1071 if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
1072 goto nomem;
1073
1074 return 0;
1075
1076 nomem:
1077 vmg->state = VMA_MERGE_ERROR_NOMEM;
1078 if (anon_dup)
1079 unlink_anon_vmas(anon_dup);
1080 return -ENOMEM;
1081 }
1082
1083 /*
1084 * vma_shrink() - Reduce an existing VMAs memory area
1085 * @vmi: The vma iterator
1086 * @vma: The VMA to modify
1087 * @start: The new start
1088 * @end: The new end
1089 *
1090 * Returns: 0 on success, -ENOMEM otherwise
1091 */
1092 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
1093 unsigned long start, unsigned long end, pgoff_t pgoff)
1094 {
1095 struct vma_prepare vp;
1096
1097 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
1098
1099 if (vma->vm_start < start)
1100 vma_iter_config(vmi, vma->vm_start, start);
1101 else
1102 vma_iter_config(vmi, end, vma->vm_end);
1103
1104 if (vma_iter_prealloc(vmi, NULL))
1105 return -ENOMEM;
1106
1107 vma_start_write(vma);
1108
1109 init_vma_prep(&vp, vma);
1110 vma_prepare(&vp);
1111 vma_adjust_trans_huge(vma, start, end, 0);
1112
1113 vma_iter_clear(vmi);
1114 vma_set_range(vma, start, end, pgoff);
1115 vma_complete(&vp, vmi, vma->vm_mm);
1116 validate_mm(vma->vm_mm);
1117 return 0;
1118 }
1119
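/*
 * Unmap the pages and free the page tables covered by the detached VMAs
 * tracked in @mas_detach. Does nothing unless vms->clear_ptes is set, and
 * clears that flag once done.
 */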
1120 static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
1121 struct ma_state *mas_detach, bool mm_wr_locked)
1122 {
1123 struct mmu_gather tlb;
1124
1125 if (!vms->clear_ptes) /* Nothing to do */
1126 return;
1127
1128 /*
1129 * We can free page tables without write-locking mmap_lock because VMAs
1130 * were isolated before we downgraded mmap_lock.
1131 */
1132 mas_set(mas_detach, 1);
1133 lru_add_drain();
1134 tlb_gather_mmu(&tlb, vms->vma->vm_mm);
1135 update_hiwater_rss(vms->vma->vm_mm);
1136 unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
1137 vms->vma_count, mm_wr_locked);
1138
1139 mas_set(mas_detach, 1);
1140 /* start and end may be different if there is no prev or next vma. */
1141 free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
1142 vms->unmap_end, mm_wr_locked);
1143 tlb_finish_mmu(&tlb);
1144 vms->clear_ptes = false;
1145 }
1146
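/*
 * Clear the PTEs of the VMAs gathered in @mas_detach and invoke vma_close()
 * on each of them.
 */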
1147 static void vms_clean_up_area(struct vma_munmap_struct *vms,
1148 struct ma_state *mas_detach)
1149 {
1150 struct vm_area_struct *vma;
1151
1152 if (!vms->nr_pages)
1153 return;
1154
1155 vms_clear_ptes(vms, mas_detach, true);
1156 mas_set(mas_detach, 0);
1157 mas_for_each(mas_detach, vma, ULONG_MAX)
1158 vma_close(vma);
1159 }
1160
1161 /*
1162 * vms_complete_munmap_vmas() - Finish the munmap() operation
1163 * @vms: The vma munmap struct
1164 * @mas_detach: The maple state of the detached vmas
1165 *
1166 * This updates the mm_struct, unmaps the region, frees the resources
1167 * used for the munmap() and may downgrade the lock - if requested. It handles
1168 * everything that needs to be done once the vma maple tree has been updated.
1169 */
1170 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
1171 struct ma_state *mas_detach)
1172 {
1173 struct vm_area_struct *vma;
1174 struct mm_struct *mm;
1175
1176 mm = current->mm;
1177 mm->map_count -= vms->vma_count;
1178 mm->locked_vm -= vms->locked_vm;
1179 if (vms->unlock)
1180 mmap_write_downgrade(mm);
1181
1182 if (!vms->nr_pages)
1183 return;
1184
1185 vms_clear_ptes(vms, mas_detach, !vms->unlock);
1186 /* Update high watermark before we lower total_vm */
1187 update_hiwater_vm(mm);
1188 /* Stat accounting */
1189 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
1190 /* Paranoid bookkeeping */
1191 VM_WARN_ON(vms->exec_vm > mm->exec_vm);
1192 VM_WARN_ON(vms->stack_vm > mm->stack_vm);
1193 VM_WARN_ON(vms->data_vm > mm->data_vm);
1194 mm->exec_vm -= vms->exec_vm;
1195 mm->stack_vm -= vms->stack_vm;
1196 mm->data_vm -= vms->data_vm;
1197
1198 /* Remove and clean up vmas */
1199 mas_set(mas_detach, 0);
1200 mas_for_each(mas_detach, vma, ULONG_MAX)
1201 remove_vma(vma, /* unreachable = */ false);
1202
1203 vm_unacct_memory(vms->nr_accounted);
1204 validate_mm(mm);
1205 if (vms->unlock)
1206 mmap_read_unlock(mm);
1207
1208 __mt_destroy(mas_detach->tree);
1209 }
1210
1211 /*
1212 * reattach_vmas() - Undo any munmap work and free resources
1213 * @mas_detach: The maple state with the detached maple tree
1214 *
1215 * Reattach any detached vmas and free up the maple tree used to track the vmas.
1216 */
1217 static void reattach_vmas(struct ma_state *mas_detach)
1218 {
1219 struct vm_area_struct *vma;
1220
1221 mas_set(mas_detach, 0);
1222 mas_for_each(mas_detach, vma, ULONG_MAX)
1223 vma_mark_detached(vma, false);
1224
1225 __mt_destroy(mas_detach->tree);
1226 }
1227
1228 /*
1229 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
1230 * for removal at a later date. Handles splitting first and last if necessary
1231 * and marking the vmas as isolated.
1232 *
1233 * @vms: The vma munmap struct
1234 * @mas_detach: The maple state tracking the detached tree
1235 *
1236 * Return: 0 on success, error otherwise
1237 */
1238 static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
1239 struct ma_state *mas_detach)
1240 {
1241 struct vm_area_struct *next = NULL;
1242 int error;
1243
1244 /*
1245 * If we need to split any vma, do it now to save pain later.
1246 * Does it split the first one?
1247 */
1248 if (vms->start > vms->vma->vm_start) {
1249
1250 /*
1251 * Make sure that map_count on return from munmap() will
1252 * not exceed its limit; but let map_count go just above
1253 * its limit temporarily, to help free resources as expected.
1254 */
1255 if (vms->end < vms->vma->vm_end &&
1256 vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
1257 error = -ENOMEM;
1258 goto map_count_exceeded;
1259 }
1260
1261 /* Don't bother splitting the VMA if we can't unmap it anyway */
1262 if (!can_modify_vma(vms->vma)) {
1263 error = -EPERM;
1264 goto start_split_failed;
1265 }
1266
1267 error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
1268 if (error)
1269 goto start_split_failed;
1270 }
1271 vms->prev = vma_prev(vms->vmi);
1272 if (vms->prev)
1273 vms->unmap_start = vms->prev->vm_end;
1274
1275 /*
1276 * Detach a range of VMAs from the mm. Using next as a temp variable as
1277 * it is always overwritten.
1278 */
1279 for_each_vma_range(*(vms->vmi), next, vms->end) {
1280 long nrpages;
1281
1282 if (!can_modify_vma(next)) {
1283 error = -EPERM;
1284 goto modify_vma_failed;
1285 }
1286 /* Does it split the end? */
1287 if (next->vm_end > vms->end) {
1288 error = __split_vma(vms->vmi, next, vms->end, 0);
1289 if (error)
1290 goto end_split_failed;
1291 }
1292 vma_start_write(next);
1293 mas_set(mas_detach, vms->vma_count++);
1294 error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
1295 if (error)
1296 goto munmap_gather_failed;
1297
1298 vma_mark_detached(next, true);
1299 nrpages = vma_pages(next);
1300
1301 vms->nr_pages += nrpages;
1302 if (next->vm_flags & VM_LOCKED)
1303 vms->locked_vm += nrpages;
1304
1305 if (next->vm_flags & VM_ACCOUNT)
1306 vms->nr_accounted += nrpages;
1307
1308 if (is_exec_mapping(next->vm_flags))
1309 vms->exec_vm += nrpages;
1310 else if (is_stack_mapping(next->vm_flags))
1311 vms->stack_vm += nrpages;
1312 else if (is_data_mapping(next->vm_flags))
1313 vms->data_vm += nrpages;
1314
1315 if (vms->uf) {
1316 /*
1317 * If userfaultfd_unmap_prep returns an error the vmas
1318 * will remain split, but userland will get a
1319 * highly unexpected error anyway. This is no
1320 * different than the case where the first of the two
1321 * __split_vma fails, but we don't undo the first
1322 * split, even though we could. This failure is
1323 * unlikely enough that it's not worth optimizing for.
1324 */
1325 error = userfaultfd_unmap_prep(next, vms->start,
1326 vms->end, vms->uf);
1327 if (error)
1328 goto userfaultfd_error;
1329 }
1330 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
1331 BUG_ON(next->vm_start < vms->start);
1332 BUG_ON(next->vm_start > vms->end);
1333 #endif
1334 }
1335
1336 vms->next = vma_next(vms->vmi);
1337 if (vms->next)
1338 vms->unmap_end = vms->next->vm_start;
1339
1340 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1341 /* Make sure no VMAs are about to be lost. */
1342 {
1343 MA_STATE(test, mas_detach->tree, 0, 0);
1344 struct vm_area_struct *vma_mas, *vma_test;
1345 int test_count = 0;
1346
1347 vma_iter_set(vms->vmi, vms->start);
1348 rcu_read_lock();
1349 vma_test = mas_find(&test, vms->vma_count - 1);
1350 for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
1351 BUG_ON(vma_mas != vma_test);
1352 test_count++;
1353 vma_test = mas_next(&test, vms->vma_count - 1);
1354 }
1355 rcu_read_unlock();
1356 BUG_ON(vms->vma_count != test_count);
1357 }
1358 #endif
1359
1360 while (vma_iter_addr(vms->vmi) > vms->start)
1361 vma_iter_prev_range(vms->vmi);
1362
1363 vms->clear_ptes = true;
1364 return 0;
1365
1366 userfaultfd_error:
1367 munmap_gather_failed:
1368 end_split_failed:
1369 modify_vma_failed:
1370 reattach_vmas(mas_detach);
1371 start_split_failed:
1372 map_count_exceeded:
1373 return error;
1374 }
1375
1376 /*
1377 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
1378 * @vms: The vma munmap struct
1379 * @vmi: The vma iterator
1380 * @vma: The first vm_area_struct to munmap
1381 * @start: The aligned start address to munmap
1382 * @end: The aligned end address to munmap
1383 * @uf: The userfaultfd list_head
1384 * @unlock: Unlock after the operation. Only unlocked on success
1385 */
1386 static void init_vma_munmap(struct vma_munmap_struct *vms,
1387 struct vma_iterator *vmi, struct vm_area_struct *vma,
1388 unsigned long start, unsigned long end, struct list_head *uf,
1389 bool unlock)
1390 {
1391 vms->vmi = vmi;
1392 vms->vma = vma;
1393 if (vma) {
1394 vms->start = start;
1395 vms->end = end;
1396 } else {
1397 vms->start = vms->end = 0;
1398 }
1399 vms->unlock = unlock;
1400 vms->uf = uf;
1401 vms->vma_count = 0;
1402 vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
1403 vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
1404 vms->unmap_start = FIRST_USER_ADDRESS;
1405 vms->unmap_end = USER_PGTABLES_CEILING;
1406 vms->clear_ptes = false;
1407 }
1408
1409 /*
1410 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
1411 * @vmi: The vma iterator
1412 * @vma: The starting vm_area_struct
1413 * @mm: The mm_struct
1414 * @start: The aligned start address to munmap.
1415 * @end: The aligned end address to munmap.
1416 * @uf: The userfaultfd list_head
1417 * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
1418 * success.
1419 *
1420 * Return: 0 on success and drops the lock if so directed, error and leaves the
1421 * lock held otherwise.
1422 */
1423 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1424 struct mm_struct *mm, unsigned long start, unsigned long end,
1425 struct list_head *uf, bool unlock)
1426 {
1427 struct maple_tree mt_detach;
1428 MA_STATE(mas_detach, &mt_detach, 0, 0);
1429 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1430 mt_on_stack(mt_detach);
1431 struct vma_munmap_struct vms;
1432 int error;
1433
1434 init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
1435 error = vms_gather_munmap_vmas(&vms, &mas_detach);
1436 if (error)
1437 goto gather_failed;
1438
1439 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
1440 if (error)
1441 goto clear_tree_failed;
1442
1443 /* Point of no return */
1444 vms_complete_munmap_vmas(&vms, &mas_detach);
1445 return 0;
1446
1447 clear_tree_failed:
1448 reattach_vmas(&mas_detach);
1449 gather_failed:
1450 validate_mm(mm);
1451 return error;
1452 }
1453
1454 /*
1455 * do_vmi_munmap() - munmap a given range.
1456 * @vmi: The vma iterator
1457 * @mm: The mm_struct
1458 * @start: The start address to munmap
1459 * @len: The length of the range to munmap
1460 * @uf: The userfaultfd list_head
1461 * @unlock: set to true if the user wants to drop the mmap_lock on success
1462 *
1463 * This function takes a @vmi that is either pointing to the previous VMA or set
1464 * to MA_START and sets it up to remove the mapping(s). The @len will be
1465 * aligned.
1466 *
1467 * Return: 0 on success and drops the lock if so directed, error and leaves the
1468 * lock held otherwise.
1469 */
1470 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
1471 unsigned long start, size_t len, struct list_head *uf,
1472 bool unlock)
1473 {
1474 unsigned long end;
1475 struct vm_area_struct *vma;
1476
1477 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
1478 return -EINVAL;
1479
1480 end = start + PAGE_ALIGN(len);
1481 if (end == start)
1482 return -EINVAL;
1483
1484 /* Find the first overlapping VMA */
1485 vma = vma_find(vmi, end);
1486 if (!vma) {
1487 if (unlock)
1488 mmap_write_unlock(mm);
1489 return 0;
1490 }
1491
1492 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
1493 }
1494
1495 /*
1496 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
1497 * context and anonymous VMA name within the range [start, end).
1498 *
1499 * As a result, we might be able to merge the newly modified VMA range with an
1500 * adjacent VMA with identical properties.
1501 *
1502 * If no merge is possible and the range does not span the entirety of the VMA,
1503 * we then need to split the VMA to accommodate the change.
1504 *
1505 * The function returns either the merged VMA, the original VMA if a split was
1506 * required instead, or an error if the split failed.
1507 */
1508 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
1509 {
1510 struct vm_area_struct *vma = vmg->vma;
1511 struct vm_area_struct *merged;
1512
1513 /* First, try to merge. */
1514 merged = vma_merge_existing_range(vmg);
1515 if (merged)
1516 return merged;
1517
1518 /* Split any preceding portion of the VMA. */
1519 if (vma->vm_start < vmg->start) {
1520 int err = split_vma(vmg->vmi, vma, vmg->start, 1);
1521
1522 if (err)
1523 return ERR_PTR(err);
1524 }
1525
1526 /* Split any trailing portion of the VMA. */
1527 if (vma->vm_end > vmg->end) {
1528 int err = split_vma(vmg->vmi, vma, vmg->end, 0);
1529
1530 if (err)
1531 return ERR_PTR(err);
1532 }
1533
1534 return vma;
1535 }
1536
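/*
 * The vma_modify_*() helpers below are thin wrappers around vma_modify():
 * each fills in a vma_merge_struct for the attribute being changed (flags,
 * anon name, mempolicy or userfaultfd context) and delegates to it.
 */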
1537 struct vm_area_struct *vma_modify_flags(
1538 struct vma_iterator *vmi, struct vm_area_struct *prev,
1539 struct vm_area_struct *vma, unsigned long start, unsigned long end,
1540 unsigned long new_flags)
1541 {
1542 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1543
1544 vmg.flags = new_flags;
1545
1546 return vma_modify(&vmg);
1547 }
1548
1549 struct vm_area_struct
1550 *vma_modify_flags_name(struct vma_iterator *vmi,
1551 struct vm_area_struct *prev,
1552 struct vm_area_struct *vma,
1553 unsigned long start,
1554 unsigned long end,
1555 unsigned long new_flags,
1556 struct anon_vma_name *new_name)
1557 {
1558 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1559
1560 vmg.flags = new_flags;
1561 vmg.anon_name = new_name;
1562
1563 return vma_modify(&vmg);
1564 }
1565
1566 struct vm_area_struct
1567 *vma_modify_policy(struct vma_iterator *vmi,
1568 struct vm_area_struct *prev,
1569 struct vm_area_struct *vma,
1570 unsigned long start, unsigned long end,
1571 struct mempolicy *new_pol)
1572 {
1573 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1574
1575 vmg.policy = new_pol;
1576
1577 return vma_modify(&vmg);
1578 }
1579
1580 struct vm_area_struct
1581 *vma_modify_flags_uffd(struct vma_iterator *vmi,
1582 struct vm_area_struct *prev,
1583 struct vm_area_struct *vma,
1584 unsigned long start, unsigned long end,
1585 unsigned long new_flags,
1586 struct vm_userfaultfd_ctx new_ctx)
1587 {
1588 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1589
1590 vmg.flags = new_flags;
1591 vmg.uffd_ctx = new_ctx;
1592
1593 return vma_modify(&vmg);
1594 }
1595
1596 /*
1597 * Expand vma by delta bytes, potentially merging with an immediately adjacent
1598 * VMA with identical properties.
1599 */
1600 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1601 struct vm_area_struct *vma,
1602 unsigned long delta)
1603 {
1604 VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
1605
1606 vmg.next = vma_iter_next_rewind(vmi, NULL);
1607 vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */
1608
1609 return vma_merge_new_range(&vmg);
1610 }
1611
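/*
 * Batched unlinking of file-backed VMAs from their address_space interval
 * tree: VMAs sharing the same file are accumulated in @vb and removed
 * together under one i_mmap write lock acquisition per batch.
 */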
1612 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1613 {
1614 vb->count = 0;
1615 }
1616
1617 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1618 {
1619 struct address_space *mapping;
1620 int i;
1621
1622 mapping = vb->vmas[0]->vm_file->f_mapping;
1623 i_mmap_lock_write(mapping);
1624 for (i = 0; i < vb->count; i++) {
1625 VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1626 __remove_shared_vm_struct(vb->vmas[i], mapping);
1627 }
1628 i_mmap_unlock_write(mapping);
1629
1630 unlink_file_vma_batch_init(vb);
1631 }
1632
1633 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1634 struct vm_area_struct *vma)
1635 {
1636 if (vma->vm_file == NULL)
1637 return;
1638
1639 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1640 vb->count == ARRAY_SIZE(vb->vmas))
1641 unlink_file_vma_batch_process(vb);
1642
1643 vb->vmas[vb->count] = vma;
1644 vb->count++;
1645 }
1646
1647 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1648 {
1649 if (vb->count > 0)
1650 unlink_file_vma_batch_process(vb);
1651 }
1652
1653 /*
1654 * Unlink a file-based vm structure from its interval tree, to hide
1655 * vma from rmap and vmtruncate before freeing its page tables.
1656 */
1657 void unlink_file_vma(struct vm_area_struct *vma)
1658 {
1659 struct file *file = vma->vm_file;
1660
1661 if (file) {
1662 struct address_space *mapping = file->f_mapping;
1663
1664 i_mmap_lock_write(mapping);
1665 __remove_shared_vm_struct(vma, mapping);
1666 i_mmap_unlock_write(mapping);
1667 }
1668 }
1669
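/*
 * If @vma is file-backed, insert it into its mapping's i_mmap interval tree
 * under the i_mmap write lock.
 */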
1670 void vma_link_file(struct vm_area_struct *vma)
1671 {
1672 struct file *file = vma->vm_file;
1673 struct address_space *mapping;
1674
1675 if (file) {
1676 mapping = file->f_mapping;
1677 i_mmap_lock_write(mapping);
1678 __vma_link_file(vma, mapping);
1679 i_mmap_unlock_write(mapping);
1680 }
1681 }
1682
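/*
 * Insert @vma into the mm's maple tree and, if file-backed, into its file's
 * i_mmap tree, bumping mm->map_count. Returns -ENOMEM if maple tree
 * preallocation fails.
 */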
1683 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1684 {
1685 VMA_ITERATOR(vmi, mm, 0);
1686
1687 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1688 if (vma_iter_prealloc(&vmi, vma))
1689 return -ENOMEM;
1690
1691 vma_start_write(vma);
1692 vma_iter_store(&vmi, vma);
1693 vma_link_file(vma);
1694 mm->map_count++;
1695 validate_mm(mm);
1696 return 0;
1697 }
1698
1699 /*
1700 * Copy the vma structure to a new location in the same mm,
1701 * prior to moving page table entries, to effect an mremap move.
1702 */
1703 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1704 unsigned long addr, unsigned long len, pgoff_t pgoff,
1705 bool *need_rmap_locks)
1706 {
1707 struct vm_area_struct *vma = *vmap;
1708 unsigned long vma_start = vma->vm_start;
1709 struct mm_struct *mm = vma->vm_mm;
1710 struct vm_area_struct *new_vma;
1711 bool faulted_in_anon_vma = true;
1712 VMA_ITERATOR(vmi, mm, addr);
1713 VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
1714
1715 /*
1716 * If anonymous vma has not yet been faulted, update new pgoff
1717 * to match new location, to increase its chance of merging.
1718 */
1719 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1720 pgoff = addr >> PAGE_SHIFT;
1721 faulted_in_anon_vma = false;
1722 }
1723
1724 new_vma = find_vma_prev(mm, addr, &vmg.prev);
1725 if (new_vma && new_vma->vm_start < addr + len)
1726 return NULL; /* should never get here */
1727
1728 vmg.vma = NULL; /* New VMA range. */
1729 vmg.pgoff = pgoff;
1730 vmg.next = vma_iter_next_rewind(&vmi, NULL);
1731 new_vma = vma_merge_new_range(&vmg);
1732
1733 if (new_vma) {
1734 /*
1735 * Source vma may have been merged into new_vma
1736 */
1737 if (unlikely(vma_start >= new_vma->vm_start &&
1738 vma_start < new_vma->vm_end)) {
1739 /*
1740 * The only way we can get a vma_merge with
1741 * self during an mremap is if the vma hasn't
1742 * been faulted in yet and we were allowed to
1743 * reset the dst vma->vm_pgoff to the
1744 * destination address of the mremap to allow
1745 * the merge to happen. mremap must change the
1746 * vm_pgoff linearity between src and dst vmas
1747 * (in turn preventing a vma_merge) to be
1748 * safe. It is only safe to keep the vm_pgoff
1749 * linear if there are no pages mapped yet.
1750 */
1751 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1752 *vmap = vma = new_vma;
1753 }
1754 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1755 } else {
1756 new_vma = vm_area_dup(vma);
1757 if (!new_vma)
1758 goto out;
1759 vma_set_range(new_vma, addr, addr + len, pgoff);
1760 if (vma_dup_policy(vma, new_vma))
1761 goto out_free_vma;
1762 if (anon_vma_clone(new_vma, vma))
1763 goto out_free_mempol;
1764 if (new_vma->vm_file)
1765 get_file(new_vma->vm_file);
1766 if (new_vma->vm_ops && new_vma->vm_ops->open)
1767 new_vma->vm_ops->open(new_vma);
1768 if (vma_link(mm, new_vma))
1769 goto out_vma_link;
1770 *need_rmap_locks = false;
1771 }
1772 return new_vma;
1773
1774 out_vma_link:
1775 vma_close(new_vma);
1776
1777 if (new_vma->vm_file)
1778 fput(new_vma->vm_file);
1779
1780 unlink_anon_vmas(new_vma);
1781 out_free_mempol:
1782 mpol_put(vma_policy(new_vma));
1783 out_free_vma:
1784 vm_area_free(new_vma);
1785 out:
1786 return NULL;
1787 }
1788
/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
					  struct vm_area_struct *a,
					  struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma. It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;
	struct vm_area_struct *prev, *next;
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);

	/* Try next first. */
	next = vma_iter_load(&vmi);
	if (next) {
		anon_vma = reusable_anon_vma(next, vma, next);
		if (anon_vma)
			return anon_vma;
	}

	prev = vma_prev(&vmi);
	VM_BUG_ON_VMA(prev != vma, vma);
	prev = vma_prev(&vmi);
	/* Try prev next. */
	if (prev)
		anon_vma = reusable_anon_vma(prev, prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}

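/* Do the vm_ops ask to be notified before a shared page is made writable? */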
static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
{
	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
}

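/* Is this VMA both shared and writable (VM_SHARED | VM_WRITE)? */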
static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
		(VM_WRITE | VM_SHARED);
}

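/* Can the backing mapping write dirty pages back (excluding PFN maps)? */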
static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
	/* No managed pages to writeback. */
	if (vma->vm_flags & VM_PFNMAP)
		return false;

	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}

/*
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
 */
bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
{
	/* Only shared, writable VMAs require dirty tracking. */
	if (!vma_is_shared_writable(vma))
		return false;

	/* Does the filesystem need to be notified? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * Even if the filesystem doesn't indicate a need for writenotify, if it
	 * can writeback, dirty tracking is still required.
	 */
	return vma_fs_can_writeback(vma);
}

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	/* If it was private or non-writable, the write bit is already clear */
	if (!vma_is_shared_writable(vma))
		return false;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
		return false;

	/*
	 * Do we need to track softdirty? hugetlb does not support softdirty
	 * tracking yet.
	 */
	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
		return true;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_wp(vma))
		return true;

	/* Can the mapping track the dirty pages? */
	return vma_fs_can_writeback(vma);
}

static DEFINE_MUTEX(mm_all_locks_mutex);

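/*
 * Take anon_vma->root->rwsem nested under mmap_lock, using the LSB of the
 * root's rb tree as a marker so the same anon_vma is not locked twice by
 * mm_take_all_locks().
 */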
static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem. If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}

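/*
 * Take mapping->i_mmap_rwsem nested under mmap_lock, using AS_MM_ALL_LOCKS
 * in mapping->flags to ensure each address_space is only locked once.
 */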
static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
	}
}

/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mapping);
 * - all vmas marked locked
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsem
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we are protected from parallel mm_take_all_locks()
 * by mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);

	mutex_lock(&mm_all_locks_mutex);

	/*
	 * vma_start_write() does not have a complement in mm_drop_all_locks()
	 * because vma_start_write() is always asymmetrical; it marks a VMA as
	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
	 * is reached.
	 */
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		vma_start_write(vma);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				!is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}

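/* Clear the "all locked" marker bit and release anon_vma->root->rwsem. */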
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}

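/* Drop i_mmap_rwsem and clear AS_MM_ALL_LOCKS for a mapping we locked. */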
static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}

/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for_each_vma(vmi, vma) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return false;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the
 * vmas. If that's not possible because the ptes are cleared (and
 * vm_ops->close() may have been called), then a NULL is written over the vmas
 * and the vmas are removed (munmap() completed).
 */
static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost. Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
	/* Clean up the insertion of the unfortunate gap */
	vms_complete_munmap_vmas(vms, mas_detach);
}

/*
 * __mmap_prepare() - Prepare to gather any overlapping VMAs that need to be
 * unmapped once the map operation is completed, check limits, account mapping
 * and clean up any pre-existing VMAs.
 *
 * @map: Mapping state.
 * @uf: Userfaultfd context list.
 *
 * Returns: 0 on success, error code otherwise.
 */
static int __mmap_prepare(struct mmap_state *map, struct list_head *uf)
{
	int error;
	struct vma_iterator *vmi = map->vmi;
	struct vma_munmap_struct *vms = &map->vms;

	/* Find the first overlapping VMA and initialise unmap state. */
	vms->vma = vma_find(vmi, map->end);
	init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
			/* unlock = */ false);

	/* OK, we have overlapping VMAs - prepare to unmap them. */
	if (vms->vma) {
		mt_init_flags(&map->mt_detach,
			      vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
		mt_on_stack(map->mt_detach);
		mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
		/* Prepare to unmap any existing mapping in the area */
		error = vms_gather_munmap_vmas(vms, &map->mas_detach);
		if (error) {
			/* On error VMAs will already have been reattached. */
			vms->nr_pages = 0;
			return error;
		}

		map->next = vms->next;
		map->prev = vms->prev;
	} else {
		map->next = vma_iter_next_rewind(vmi, &map->prev);
	}

	/* Check against address space limit. */
	if (!may_expand_vm(map->mm, map->flags, map->pglen - vms->nr_pages))
		return -ENOMEM;

	/* Private writable mapping: check memory availability. */
	if (accountable_mapping(map->file, map->flags)) {
		map->charged = map->pglen;
		map->charged -= vms->nr_accounted;
		if (map->charged) {
			error = security_vm_enough_memory_mm(map->mm, map->charged);
			if (error)
				return error;
		}

		vms->nr_accounted = 0;
		map->flags |= VM_ACCOUNT;
	}

	/*
	 * Clear PTEs while the vma is still in the tree so that rmap
	 * cannot race with the freeing later in the truncate scenario.
	 * This is also needed for mmap_file(), which is why the vm_ops
	 * close function is called.
	 */
	vms_clean_up_area(vms, &map->mas_detach);

	return 0;
}

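/*
 * Call the file's mmap hook for a newly allocated VMA, undoing any partial
 * mapping on failure and noting whether the driver changed the VMA flags so
 * that a merge can be retried later.
 */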
static int __mmap_new_file_vma(struct mmap_state *map,
			       struct vm_area_struct *vma)
{
	struct vma_iterator *vmi = map->vmi;
	int error;

	vma->vm_file = get_file(map->file);
	error = mmap_file(vma->vm_file, vma);
	if (error) {
		fput(vma->vm_file);
		vma->vm_file = NULL;

		vma_iter_set(vmi, vma->vm_end);
		/* Undo any partial mapping done by a device driver. */
		unmap_region(&vmi->mas, vma, map->prev, map->next);

		return error;
	}

	/* Drivers cannot alter the address of the VMA. */
	WARN_ON_ONCE(map->addr != vma->vm_start);
	/*
	 * Drivers should not permit writability when previously it was
	 * disallowed.
	 */
	VM_WARN_ON_ONCE(map->flags != vma->vm_flags &&
			!(map->flags & VM_MAYWRITE) &&
			(vma->vm_flags & VM_MAYWRITE));

	/* If the flags change (and are mergeable), let's retry later. */
	map->retry_merge = vma->vm_flags != map->flags && !(vma->vm_flags & VM_SPECIAL);
	map->flags = vma->vm_flags;

	return 0;
}

/*
 * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
 * possible.
 *
 * @map: Mapping state.
 * @vmap: Output pointer for the new VMA.
 *
 * Returns: Zero on success, or an error.
 */
static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
{
	struct vma_iterator *vmi = map->vmi;
	int error = 0;
	struct vm_area_struct *vma;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. The address has already been validated, but
	 * not unmapped; the overlapping maps have, however, been removed
	 * from the list.
	 */
	vma = vm_area_alloc(map->mm);
	if (!vma)
		return -ENOMEM;

	vma_iter_config(vmi, map->addr, map->end);
	vma_set_range(vma, map->addr, map->end, map->pgoff);
	vm_flags_init(vma, map->flags);
	vma->vm_page_prot = vm_get_page_prot(map->flags);

	if (vma_iter_prealloc(vmi, vma)) {
		error = -ENOMEM;
		goto free_vma;
	}

	if (map->file)
		error = __mmap_new_file_vma(map, vma);
	else if (map->flags & VM_SHARED)
		error = shmem_zero_setup(vma);
	else
		vma_set_anonymous(vma);

	if (error)
		goto free_iter_vma;

#ifdef CONFIG_SPARC64
	/* TODO: Fix SPARC ADI! */
	WARN_ON_ONCE(!arch_validate_flags(map->flags));
#endif

	/* Lock the VMA since it is modified after insertion into VMA tree */
	vma_start_write(vma);
	vma_iter_store(vmi, vma);
	map->mm->map_count++;
	vma_link_file(vma);

	/*
	 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
	 * call covers the non-merge case.
	 */
	khugepaged_enter_vma(vma, map->flags);
	ksm_add_vma(vma);
	*vmap = vma;
	return 0;

free_iter_vma:
	vma_iter_free(vmi);
free_vma:
	vm_area_free(vma);
	return error;
}

/*
 * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
 * statistics, handle locking and finalise the VMA.
 *
 * @map: Mapping state.
 * @vma: Merged or newly allocated VMA for the mmap()'d region.
 */
static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
{
	struct mm_struct *mm = map->mm;
	unsigned long vm_flags = vma->vm_flags;

	perf_event_mmap(vma);

	/* Unmap any existing mapping in the area. */
	vms_complete_munmap_vmas(&map->vms, &map->mas_detach);

	vm_stat_account(mm, vma->vm_flags, map->pglen);
	if (vm_flags & VM_LOCKED) {
		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
					is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(mm))
			vm_flags_clear(vma, VM_LOCKED_MASK);
		else
			mm->locked_vm += map->pglen;
	}

	if (vma->vm_file)
		uprobe_mmap(vma);

	/*
	 * A new (or expanded) vma always gets soft dirty status.
	 * Otherwise user-space soft-dirty page tracker won't
	 * be able to distinguish the situation when a vma area is unmapped
	 * and then mapped again in place (which must be treated as
	 * a completely new data area).
	 */
	vm_flags_set(vma, VM_SOFTDIRTY);

	vma_set_page_prot(vma);
}

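/*
 * Map the region described by @addr/@len: unmap any overlapping VMAs, then
 * either merge with an adjacent VMA or allocate a new one, retry the merge if
 * the driver changed the flags, and finalise the mapping. Returns the mapped
 * address on success or a negative error code.
 */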
unsigned long __mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	int error;
	VMA_ITERATOR(vmi, mm, addr);
	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);

	error = __mmap_prepare(&map, uf);
	if (error)
		goto abort_munmap;

	/* Attempt to merge with adjacent VMAs... */
	if (map.prev || map.next) {
		VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);

		vma = vma_merge_new_range(&vmg);
	}

	/* ...but if we can't, allocate a new VMA. */
	if (!vma) {
		error = __mmap_new_vma(&map, &vma);
		if (error)
			goto unacct_error;
	}

	/* If flags changed, we might be able to merge, so try again. */
	if (map.retry_merge) {
		struct vm_area_struct *merged;
		VMG_MMAP_STATE(vmg, &map, vma);

		vma_iter_config(map.vmi, map.addr, map.end);
		merged = vma_merge_existing_range(&vmg);
		if (merged)
			vma = merged;
	}

	__mmap_complete(&map, vma);

	return addr;

	/* Accounting was done by __mmap_prepare(). */
unacct_error:
	if (map.charged)
		vm_unacct_memory(map.charged);
abort_munmap:
	vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
	return error;
}
