1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /*
4 * VMA-specific functions.
5 */
6
7 #include "vma_internal.h"
8 #include "vma.h"
9
10 struct mmap_state {
11 struct mm_struct *mm;
12 struct vma_iterator *vmi;
13
14 unsigned long addr;
15 unsigned long end;
16 pgoff_t pgoff;
17 unsigned long pglen;
18 union {
19 vm_flags_t vm_flags;
20 vma_flags_t vma_flags;
21 };
22 struct file *file;
23 pgprot_t page_prot;
24
25 /* User-defined fields, perhaps updated by .mmap_prepare(). */
26 const struct vm_operations_struct *vm_ops;
27 void *vm_private_data;
28
29 unsigned long charged;
30
31 struct vm_area_struct *prev;
32 struct vm_area_struct *next;
33
34 /* Unmapping state. */
35 struct vma_munmap_struct vms;
36 struct ma_state mas_detach;
37 struct maple_tree mt_detach;
38
39 /* Determine if we can check KSM flags early in mmap() logic. */
40 bool check_ksm_early :1;
41 /* If we are mapping something new, hold the file rmap lock on the mapping. */
42 bool hold_file_rmap_lock :1;
43 /* If .mmap_prepare changed the file, we don't need to pin. */
44 bool file_doesnt_need_get :1;
45 };
46
47 #define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_) \
48 struct mmap_state name = { \
49 .mm = mm_, \
50 .vmi = vmi_, \
51 .addr = addr_, \
52 .end = (addr_) + (len_), \
53 .pgoff = pgoff_, \
54 .pglen = PHYS_PFN(len_), \
55 .vm_flags = vm_flags_, \
56 .file = file_, \
57 .page_prot = vm_get_page_prot(vm_flags_), \
58 }
59
60 #define VMG_MMAP_STATE(name, map_, vma_) \
61 struct vma_merge_struct name = { \
62 .mm = (map_)->mm, \
63 .vmi = (map_)->vmi, \
64 .start = (map_)->addr, \
65 .end = (map_)->end, \
66 .vm_flags = (map_)->vm_flags, \
67 .pgoff = (map_)->pgoff, \
68 .file = (map_)->file, \
69 .prev = (map_)->prev, \
70 .middle = vma_, \
71 .next = (vma_) ? NULL : (map_)->next, \
72 .state = VMA_MERGE_START, \
73 }
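/*
 * Illustrative sketch only (not a call sequence shown in this file): an
 * mmap()-style caller might combine the two helpers above by describing the
 * mapping with MMAP_STATE() and then deriving merge state from it with
 * VMG_MMAP_STATE(). The identifiers mm, vmi, addr, len, pgoff, vm_flags and
 * file stand in for caller-provided values:
 *
 *	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 *
 * Passing NULL as the final (vma_) argument indicates there is no existing
 * middle VMA, so .next is taken from the mmap state itself.
 */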
74
75 /* Was this VMA ever forked from a parent, i.e. maybe contains CoW mappings? */
76 static bool vma_is_fork_child(struct vm_area_struct *vma)
77 {
78 /*
79 * The list_is_singular() test is to avoid merging VMAs cloned from
80 * a parent. This avoids scalability problems caused by contention on
81 * the anon_vma root lock.
82 */
83 return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain);
84 }
85
86 static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
87 {
88 struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
89
90 if (!mpol_equal(vmg->policy, vma_policy(vma)))
91 return false;
92 if ((vma->vm_flags ^ vmg->vm_flags) & ~VM_IGNORE_MERGE)
93 return false;
94 if (vma->vm_file != vmg->file)
95 return false;
96 if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
97 return false;
98 if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
99 return false;
100 return true;
101 }
102
103 static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
104 {
105 struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev;
106 struct vm_area_struct *src = vmg->middle; /* existing merge case. */
107 struct anon_vma *tgt_anon = tgt->anon_vma;
108 struct anon_vma *src_anon = vmg->anon_vma;
109
110 /*
111 * We _can_ have !src, vmg->anon_vma via copy_vma(). In this instance we
112 * will remove the existing VMA's anon_vmas, so there are no scalability
113 * concerns.
114 */
115 VM_WARN_ON(src && src_anon != src->anon_vma);
116
117 /* Case 1 - we will dup_anon_vma() from src into tgt. */
118 if (!tgt_anon && src_anon) {
119 struct vm_area_struct *copied_from = vmg->copied_from;
120
121 if (vma_is_fork_child(src))
122 return false;
123 if (vma_is_fork_child(copied_from))
124 return false;
125
126 return true;
127 }
128 /* Case 2 - we will simply use tgt's anon_vma. */
129 if (tgt_anon && !src_anon)
130 return !vma_is_fork_child(tgt);
131 /* Case 3 - the anon_vma's are already shared. */
132 return src_anon == tgt_anon;
133 }
134
135 /*
136 * init_multi_vma_prep() - Initializer for struct vma_prepare
137 * @vp: The vma_prepare struct
138 * @vma: The vma that will be altered once locked
139 * @vmg: The merge state that will be used to determine adjustment and VMA
140 * removal.
141 */
142 static void init_multi_vma_prep(struct vma_prepare *vp,
143 struct vm_area_struct *vma,
144 struct vma_merge_struct *vmg)
145 {
146 struct vm_area_struct *adjust;
147 struct vm_area_struct **remove = &vp->remove;
148
149 memset(vp, 0, sizeof(struct vma_prepare));
150 vp->vma = vma;
151 vp->anon_vma = vma->anon_vma;
152
153 if (vmg && vmg->__remove_middle) {
154 *remove = vmg->middle;
155 remove = &vp->remove2;
156 }
157 if (vmg && vmg->__remove_next)
158 *remove = vmg->next;
159
160 if (vmg && vmg->__adjust_middle_start)
161 adjust = vmg->middle;
162 else if (vmg && vmg->__adjust_next_start)
163 adjust = vmg->next;
164 else
165 adjust = NULL;
166
167 vp->adj_next = adjust;
168 if (!vp->anon_vma && adjust)
169 vp->anon_vma = adjust->anon_vma;
170
171 VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma &&
172 vp->anon_vma != adjust->anon_vma);
173
174 vp->file = vma->vm_file;
175 if (vp->file)
176 vp->mapping = vma->vm_file->f_mapping;
177
178 if (vmg && vmg->skip_vma_uprobe)
179 vp->skip_vma_uprobe = true;
180 }
181
182 /*
183 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
184 * in front of (at a lower virtual address and file offset than) the vma.
185 *
186 * We cannot merge two vmas if they have differently assigned (non-NULL)
187 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
188 *
189 * We don't check here for the merged mmap wrapping around the end of pagecache
190 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
191 * wrap, nor mmaps which cover the final page at index -1UL.
192 *
193 * We assume the vma may be removed as part of the merge.
194 */
195 static bool can_vma_merge_before(struct vma_merge_struct *vmg)
196 {
197 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
198
199 if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
200 is_mergeable_anon_vma(vmg, /* merge_next = */ true)) {
201 if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
202 return true;
203 }
204
205 return false;
206 }
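/*
 * Worked example of the pgoff check above (illustrative numbers only): if
 * the proposed range spans 4 pages mapping file offsets from vmg->pgoff == 8,
 * then pglen == 4 and merging with next additionally requires
 * next->vm_pgoff == 12, i.e. next must continue the file mapping exactly
 * where the proposed range ends.
 */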
207
208 /*
209 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
210 * beyond (at a higher virtual address and file offset than) the vma.
211 *
212 * We cannot merge two vmas if they have differently assigned (non-NULL)
213 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
214 *
215 * We assume that vma is not removed as part of the merge.
216 */
217 static bool can_vma_merge_after(struct vma_merge_struct *vmg)
218 {
219 if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
220 is_mergeable_anon_vma(vmg, /* merge_next = */ false)) {
221 if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
222 return true;
223 }
224 return false;
225 }
226
227 static void __vma_link_file(struct vm_area_struct *vma,
228 struct address_space *mapping)
229 {
230 if (vma_is_shared_maywrite(vma))
231 mapping_allow_writable(mapping);
232
233 flush_dcache_mmap_lock(mapping);
234 vma_interval_tree_insert(vma, &mapping->i_mmap);
235 flush_dcache_mmap_unlock(mapping);
236 }
237
238 /*
239 * Requires inode->i_mapping->i_mmap_rwsem
240 */
241 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
242 struct address_space *mapping)
243 {
244 if (vma_is_shared_maywrite(vma))
245 mapping_unmap_writable(mapping);
246
247 flush_dcache_mmap_lock(mapping);
248 vma_interval_tree_remove(vma, &mapping->i_mmap);
249 flush_dcache_mmap_unlock(mapping);
250 }
251
252 /*
253 * vma has some anon_vma assigned, and is already inserted on that
254 * anon_vma's interval trees.
255 *
256 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
257 * vma must be removed from the anon_vma's interval trees using
258 * anon_vma_interval_tree_pre_update_vma().
259 *
260 * After the update, the vma will be reinserted using
261 * anon_vma_interval_tree_post_update_vma().
262 *
263 * The entire update must be protected by exclusive mmap_lock and by
264 * the root anon_vma's mutex.
265 */
266 static void
267 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
268 {
269 struct anon_vma_chain *avc;
270
271 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
272 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
273 }
274
275 static void
276 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
277 {
278 struct anon_vma_chain *avc;
279
280 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
281 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
282 }
283
284 /*
285 * vma_prepare() - Helper function for locking VMAs prior to altering them
286 * @vp: The initialized vma_prepare struct
287 */
288 static void vma_prepare(struct vma_prepare *vp)
289 {
290 if (vp->file) {
291 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
292
293 if (vp->adj_next)
294 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
295 vp->adj_next->vm_end);
296
297 i_mmap_lock_write(vp->mapping);
298 if (vp->insert && vp->insert->vm_file) {
299 /*
300 * Put into interval tree now, so instantiated pages
301 * are visible to arm/parisc __flush_dcache_page
302 * throughout; but we cannot insert into address
303 * space until vma start or end is updated.
304 */
305 __vma_link_file(vp->insert,
306 vp->insert->vm_file->f_mapping);
307 }
308 }
309
310 if (vp->anon_vma) {
311 anon_vma_lock_write(vp->anon_vma);
312 anon_vma_interval_tree_pre_update_vma(vp->vma);
313 if (vp->adj_next)
314 anon_vma_interval_tree_pre_update_vma(vp->adj_next);
315 }
316
317 if (vp->file) {
318 flush_dcache_mmap_lock(vp->mapping);
319 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
320 if (vp->adj_next)
321 vma_interval_tree_remove(vp->adj_next,
322 &vp->mapping->i_mmap);
323 }
324
325 }
326
327 /*
328 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
329 * or for inserting a VMA.
330 *
331 * @vp: The vma_prepare struct
332 * @vmi: The vma iterator
333 * @mm: The mm_struct
334 */
335 static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
336 struct mm_struct *mm)
337 {
338 if (vp->file) {
339 if (vp->adj_next)
340 vma_interval_tree_insert(vp->adj_next,
341 &vp->mapping->i_mmap);
342 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
343 flush_dcache_mmap_unlock(vp->mapping);
344 }
345
346 if (vp->remove && vp->file) {
347 __remove_shared_vm_struct(vp->remove, vp->mapping);
348 if (vp->remove2)
349 __remove_shared_vm_struct(vp->remove2, vp->mapping);
350 } else if (vp->insert) {
351 /*
352 * split_vma has split insert from vma, and needs
353 * us to insert it before dropping the locks
354 * (it may either follow vma or precede it).
355 */
356 vma_iter_store_new(vmi, vp->insert);
357 mm->map_count++;
358 }
359
360 if (vp->anon_vma) {
361 anon_vma_interval_tree_post_update_vma(vp->vma);
362 if (vp->adj_next)
363 anon_vma_interval_tree_post_update_vma(vp->adj_next);
364 anon_vma_unlock_write(vp->anon_vma);
365 }
366
367 if (vp->file) {
368 i_mmap_unlock_write(vp->mapping);
369
370 if (!vp->skip_vma_uprobe) {
371 uprobe_mmap(vp->vma);
372
373 if (vp->adj_next)
374 uprobe_mmap(vp->adj_next);
375 }
376 }
377
378 if (vp->remove) {
379 again:
380 vma_mark_detached(vp->remove);
381 if (vp->file) {
382 uprobe_munmap(vp->remove, vp->remove->vm_start,
383 vp->remove->vm_end);
384 fput(vp->file);
385 }
386 if (vp->remove->anon_vma)
387 unlink_anon_vmas(vp->remove);
388 mm->map_count--;
389 mpol_put(vma_policy(vp->remove));
390 if (!vp->remove2)
391 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
392 vm_area_free(vp->remove);
393
394 /*
395 * In mprotect's case 6 (see comments on vma_merge),
396 * we are removing both mid and next vmas
397 */
398 if (vp->remove2) {
399 vp->remove = vp->remove2;
400 vp->remove2 = NULL;
401 goto again;
402 }
403 }
404 if (vp->insert && vp->file)
405 uprobe_mmap(vp->insert);
406 }
407
408 /*
409 * init_vma_prep() - Initializer wrapper for vma_prepare struct
410 * @vp: The vma_prepare struct
411 * @vma: The vma that will be altered once locked
412 */
413 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
414 {
415 init_multi_vma_prep(vp, vma, NULL);
416 }
417
418 /*
419 * Can the proposed VMA be merged with the left (previous) VMA taking into
420 * account the start position of the proposed range.
421 */
422 static bool can_vma_merge_left(struct vma_merge_struct *vmg)
423
424 {
425 return vmg->prev && vmg->prev->vm_end == vmg->start &&
426 can_vma_merge_after(vmg);
427 }
428
429 /*
430 * Can the proposed VMA be merged with the right (next) VMA taking into
431 * account the end position of the proposed range.
432 *
433 * In addition, if we can merge with the left VMA, ensure that left and right
434 * anon_vma's are also compatible.
435 */
436 static bool can_vma_merge_right(struct vma_merge_struct *vmg,
437 bool can_merge_left)
438 {
439 struct vm_area_struct *next = vmg->next;
440 struct vm_area_struct *prev;
441
442 if (!next || vmg->end != next->vm_start || !can_vma_merge_before(vmg))
443 return false;
444
445 if (!can_merge_left)
446 return true;
447
448 /*
449 * If we can merge with prev (left) and next (right), indicating that
450 * each VMA's anon_vma is compatible with the proposed anon_vma, this
451 * does not mean prev and next are compatible with EACH OTHER.
452 *
453 * We therefore check this in addition to mergeability to either side.
454 */
455 prev = vmg->prev;
456 return !prev->anon_vma || !next->anon_vma ||
457 prev->anon_vma == next->anon_vma;
458 }
459
460 /*
461 * Close a vm structure and free it.
462 */
463 void remove_vma(struct vm_area_struct *vma)
464 {
465 might_sleep();
466 vma_close(vma);
467 if (vma->vm_file)
468 fput(vma->vm_file);
469 mpol_put(vma_policy(vma));
470 vm_area_free(vma);
471 }
472
473 /*
474 * Get rid of page table information in the indicated region.
475 *
476 * Called with the mm semaphore held.
477 */
478 void unmap_region(struct unmap_desc *unmap)
479 {
480 struct mm_struct *mm = unmap->first->vm_mm;
481 struct mmu_gather tlb;
482
483 tlb_gather_mmu(&tlb, mm);
484 update_hiwater_rss(mm);
485 unmap_vmas(&tlb, unmap);
486 mas_set(unmap->mas, unmap->tree_reset);
487 free_pgtables(&tlb, unmap);
488 tlb_finish_mmu(&tlb);
489 }
490
491 /*
492 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
493 * has already been checked or doesn't make sense to fail.
494 * VMA Iterator will point to the original VMA.
495 */
496 static __must_check int
497 __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
498 unsigned long addr, int new_below)
499 {
500 struct vma_prepare vp;
501 struct vm_area_struct *new;
502 int err;
503
504 WARN_ON(vma->vm_start >= addr);
505 WARN_ON(vma->vm_end <= addr);
506
507 if (vma->vm_ops && vma->vm_ops->may_split) {
508 err = vma->vm_ops->may_split(vma, addr);
509 if (err)
510 return err;
511 }
512
513 new = vm_area_dup(vma);
514 if (!new)
515 return -ENOMEM;
516
517 if (new_below) {
518 new->vm_end = addr;
519 } else {
520 new->vm_start = addr;
521 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
522 }
523
524 err = -ENOMEM;
525 vma_iter_config(vmi, new->vm_start, new->vm_end);
526 if (vma_iter_prealloc(vmi, new))
527 goto out_free_vma;
528
529 err = vma_dup_policy(vma, new);
530 if (err)
531 goto out_free_vmi;
532
533 err = anon_vma_clone(new, vma, VMA_OP_SPLIT);
534 if (err)
535 goto out_free_mpol;
536
537 if (new->vm_file)
538 get_file(new->vm_file);
539
540 if (new->vm_ops && new->vm_ops->open)
541 new->vm_ops->open(new);
542
543 vma_start_write(vma);
544 vma_start_write(new);
545
546 init_vma_prep(&vp, vma);
547 vp.insert = new;
548 vma_prepare(&vp);
549
550 /*
551 * Get rid of huge pages and shared page tables straddling the split
552 * boundary.
553 */
554 vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL);
555 if (is_vm_hugetlb_page(vma))
556 hugetlb_split(vma, addr);
557
558 if (new_below) {
559 vma->vm_start = addr;
560 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
561 } else {
562 vma->vm_end = addr;
563 }
564
565 /* vma_complete stores the new vma */
566 vma_complete(&vp, vmi, vma->vm_mm);
567 validate_mm(vma->vm_mm);
568
569 /* Success. */
570 if (new_below)
571 vma_next(vmi);
572 else
573 vma_prev(vmi);
574
575 return 0;
576
577 out_free_mpol:
578 mpol_put(vma_policy(new));
579 out_free_vmi:
580 vma_iter_free(vmi);
581 out_free_vma:
582 vm_area_free(new);
583 return err;
584 }
585
586 /*
587 * Split a vma into two pieces at address 'addr', a new vma is allocated
588 * either for the first part or the tail.
589 */
590 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
591 unsigned long addr, int new_below)
592 {
593 if (vma->vm_mm->map_count >= sysctl_max_map_count)
594 return -ENOMEM;
595
596 return __split_vma(vmi, vma, addr, new_below);
597 }
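/*
 * Example of how the split helpers are used (a sketch mirroring vma_modify()
 * later in this file; vmi, vma, start and end are assumed caller state): a
 * caller changing attributes of [start, end) within a larger VMA first clips
 * the VMA to that range:
 *
 *	if (vma->vm_start < start) {
 *		int err = split_vma(vmi, vma, start, 1);
 *
 *		if (err)
 *			return ERR_PTR(err);
 *	}
 *	if (vma->vm_end > end) {
 *		int err = split_vma(vmi, vma, end, 0);
 *
 *		if (err)
 *			return ERR_PTR(err);
 *	}
 */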
598
599 /*
600 * dup_anon_vma() - Helper function to duplicate anon_vma on VMA merge in the
601 * instance that the destination VMA has no anon_vma but the source does.
602 *
603 * @dst: The destination VMA
604 * @src: The source VMA
605 * @dup: Set to the destination VMA if an anon_vma was duplicated.
606 *
607 * Returns: 0 on success.
608 */
609 static int dup_anon_vma(struct vm_area_struct *dst,
610 struct vm_area_struct *src, struct vm_area_struct **dup)
611 {
612 /*
613 * There are three cases to consider for correctly propagating
614 * anon_vma's on merge.
615 *
616 * The first is trivial - neither VMA has anon_vma, we need not do
617 * anything.
618 *
619 * The second where both have anon_vma is also a no-op, as they must
620 * then be the same, so there is simply nothing to copy.
621 *
622 * Here we cover the third - if the destination VMA has no anon_vma,
623 * that is, it is unfaulted, we need to ensure that the newly merged
624 * range is referenced by the anon_vmas of the source.
625 */
626 if (src->anon_vma && !dst->anon_vma) {
627 int ret;
628
629 vma_assert_write_locked(dst);
630 dst->anon_vma = src->anon_vma;
631 ret = anon_vma_clone(dst, src, VMA_OP_MERGE_UNFAULTED);
632 if (ret)
633 return ret;
634
635 *dup = dst;
636 }
637
638 return 0;
639 }
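/*
 * Example of the intended calling pattern (a sketch based on the merge paths
 * below, where prev, middle and vmg are merge-path state): the caller records
 * any VMA whose anon_vma was duplicated so the clone can be undone if a
 * later step fails:
 *
 *	struct vm_area_struct *anon_dup = NULL;
 *	int err;
 *
 *	err = dup_anon_vma(prev, middle, &anon_dup);
 *	if (err || commit_merge(vmg))
 *		goto abort;
 *	...
 * abort:
 *	if (anon_dup)
 *		unlink_anon_vmas(anon_dup);
 */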
640
641 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
642 void validate_mm(struct mm_struct *mm)
643 {
644 int bug = 0;
645 int i = 0;
646 struct vm_area_struct *vma;
647 VMA_ITERATOR(vmi, mm, 0);
648
649 mt_validate(&mm->mm_mt);
650 for_each_vma(vmi, vma) {
651 #ifdef CONFIG_DEBUG_VM_RB
652 struct anon_vma *anon_vma = vma->anon_vma;
653 struct anon_vma_chain *avc;
654 #endif
655 unsigned long vmi_start, vmi_end;
656 bool warn = 0;
657
658 vmi_start = vma_iter_addr(&vmi);
659 vmi_end = vma_iter_end(&vmi);
660 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
661 warn = 1;
662
663 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
664 warn = 1;
665
666 if (warn) {
667 pr_emerg("issue in %s\n", current->comm);
668 dump_stack();
669 dump_vma(vma);
670 pr_emerg("tree range: %px start %lx end %lx\n", vma,
671 vmi_start, vmi_end - 1);
672 vma_iter_dump_tree(&vmi);
673 }
674
675 #ifdef CONFIG_DEBUG_VM_RB
676 if (anon_vma) {
677 anon_vma_lock_read(anon_vma);
678 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
679 anon_vma_interval_tree_verify(avc);
680 anon_vma_unlock_read(anon_vma);
681 }
682 #endif
683 /* Check for an infinite loop */
684 if (++i > mm->map_count + 10) {
685 i = -1;
686 break;
687 }
688 }
689 if (i != mm->map_count) {
690 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
691 bug = 1;
692 }
693 VM_BUG_ON_MM(bug, mm);
694 }
695 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
696
697 /*
698 * Based on the vmg flag indicating whether we need to adjust the vm_start field
699 * for the middle or next VMA, we calculate what the range of the newly adjusted
700 * VMA ought to be, and set the VMA's range accordingly.
701 */
702 static void vmg_adjust_set_range(struct vma_merge_struct *vmg)
703 {
704 struct vm_area_struct *adjust;
705 pgoff_t pgoff;
706
707 if (vmg->__adjust_middle_start) {
708 adjust = vmg->middle;
709 pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start);
710 } else if (vmg->__adjust_next_start) {
711 adjust = vmg->next;
712 pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end);
713 } else {
714 return;
715 }
716
717 vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff);
718 }
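/*
 * Worked example (illustrative numbers): suppose vmg->__adjust_middle_start
 * is set, middle->vm_pgoff == 10 and vmg->end lies 2 pages beyond
 * middle->vm_start. Then pgoff == 10 + PHYS_PFN(2 << PAGE_SHIFT) == 12 and
 * middle is trimmed to [vmg->end, middle->vm_end) with vm_pgoff 12, so the
 * pages it retains keep their original file offsets.
 */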
719
720 /*
721 * Actually perform the VMA merge operation.
722 *
723 * IMPORTANT: We guarantee that, if vmg->give_up_on_oom is set, we will not
724 * modify any VMAs or cause inconsistent state should an OOM condition arise.
725 *
726 * Returns 0 on success, or an error value on failure.
727 */
728 static int commit_merge(struct vma_merge_struct *vmg)
729 {
730 struct vm_area_struct *vma;
731 struct vma_prepare vp;
732
733 if (vmg->__adjust_next_start) {
734 /* We manipulate middle and adjust next, which is the target. */
735 vma = vmg->middle;
736 vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end);
737 } else {
738 vma = vmg->target;
739 /* Note: vma iterator must be pointing to 'start'. */
740 vma_iter_config(vmg->vmi, vmg->start, vmg->end);
741 }
742
743 init_multi_vma_prep(&vp, vma, vmg);
744
745 /*
746 * If vmg->give_up_on_oom is set, we're safe, because we don't actually
747 * manipulate any VMAs until we succeed at preallocation.
748 *
749 * Past this point, we will not return an error.
750 */
751 if (vma_iter_prealloc(vmg->vmi, vma))
752 return -ENOMEM;
753
754 vma_prepare(&vp);
755 /*
756 * THP pages may need to do additional splits if we increase
757 * middle->vm_start.
758 */
759 vma_adjust_trans_huge(vma, vmg->start, vmg->end,
760 vmg->__adjust_middle_start ? vmg->middle : NULL);
761 vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
762 vmg_adjust_set_range(vmg);
763 vma_iter_store_overwrite(vmg->vmi, vmg->target);
764
765 vma_complete(&vp, vmg->vmi, vma->vm_mm);
766
767 return 0;
768 }
769
770 /* We can only remove VMAs when merging if they do not have a close hook. */
771 static bool can_merge_remove_vma(struct vm_area_struct *vma)
772 {
773 return !vma->vm_ops || !vma->vm_ops->close;
774 }
775
776 /*
777 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
778 * attributes modified.
779 *
780 * @vmg: Describes the modifications being made to a VMA and associated
781 * metadata.
782 *
783 * When the attributes of a range within a VMA change, it might be possible
784 * for immediately adjacent VMAs to be merged into that VMA due to having
785 * identical properties.
786 *
787 * This function checks for the existence of any such mergeable VMAs and updates
788 * the maple tree describing the @vmg->middle->vm_mm address space to account
789 * for this, as well as any VMAs shrunk/expanded/deleted as a result of this
790 * merge.
791 *
792 * As part of this operation, if a merge occurs, the @vmg object will have its
793 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
794 * calls to this function should reset these fields.
795 *
796 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
797 *
798 * ASSUMPTIONS:
799 * - The caller must assign the VMA to be modified to @vmg->middle.
800 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
801 * - The caller must not set @vmg->next, as we determine this.
802 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
803 * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end).
804 */
805 static __must_check struct vm_area_struct *vma_merge_existing_range(
806 struct vma_merge_struct *vmg)
807 {
808 vm_flags_t sticky_flags = vmg->vm_flags & VM_STICKY;
809 struct vm_area_struct *middle = vmg->middle;
810 struct vm_area_struct *prev = vmg->prev;
811 struct vm_area_struct *next;
812 struct vm_area_struct *anon_dup = NULL;
813 unsigned long start = vmg->start;
814 unsigned long end = vmg->end;
815 bool left_side = middle && start == middle->vm_start;
816 bool right_side = middle && end == middle->vm_end;
817 int err = 0;
818 bool merge_left, merge_right, merge_both;
819
820 mmap_assert_write_locked(vmg->mm);
821 VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */
822 VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
823 VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
824 VM_WARN_ON_VMG(start >= end, vmg);
825
826 /*
827 * If middle == prev, then we may be offset into the VMA. Otherwise, the range
828 * must start at the beginning of middle; it must never extend past its end.
829 */
830 VM_WARN_ON_VMG(middle &&
831 ((middle != prev && vmg->start != middle->vm_start) ||
832 vmg->end > middle->vm_end), vmg);
833 /* The vmi must be positioned within vmg->middle. */
834 VM_WARN_ON_VMG(middle &&
835 !(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
836 vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
837 /* An existing merge can never be used by the mremap() logic. */
838 VM_WARN_ON_VMG(vmg->copied_from, vmg);
839
840 vmg->state = VMA_MERGE_NOMERGE;
841
842 /*
843 * If this is a special mapping, or if the range being modified is neither
844 * at the leftmost nor the rightmost edge of the VMA, then we have no chance
845 * of merging and should abort.
846 */
847 if (vmg->vm_flags & VM_SPECIAL || (!left_side && !right_side))
848 return NULL;
849
850 if (left_side)
851 merge_left = can_vma_merge_left(vmg);
852 else
853 merge_left = false;
854
855 if (right_side) {
856 next = vmg->next = vma_iter_next_range(vmg->vmi);
857 vma_iter_prev_range(vmg->vmi);
858
859 merge_right = can_vma_merge_right(vmg, merge_left);
860 } else {
861 merge_right = false;
862 next = NULL;
863 }
864
865 if (merge_left) /* If merging prev, position iterator there. */
866 vma_prev(vmg->vmi);
867 else if (!merge_right) /* If we have nothing to merge, abort. */
868 return NULL;
869
870 merge_both = merge_left && merge_right;
871 /* If we span the entire VMA, a merge implies it will be deleted. */
872 vmg->__remove_middle = left_side && right_side;
873
874 /*
875 * If we need to remove middle in its entirety but are unable to do so,
876 * we have no sensible recourse but to abort the merge.
877 */
878 if (vmg->__remove_middle && !can_merge_remove_vma(middle))
879 return NULL;
880
881 /*
882 * If we merge both VMAs, then next is also deleted. This implies that
883 * middle is deleted as well (i.e. __remove_middle is also set).
884 */
885 vmg->__remove_next = merge_both;
886
887 /*
888 * If we cannot delete next, then we can reduce the operation to merging
889 * prev and middle (thereby deleting middle).
890 */
891 if (vmg->__remove_next && !can_merge_remove_vma(next)) {
892 vmg->__remove_next = false;
893 merge_right = false;
894 merge_both = false;
895 }
896
897 /* No matter what happens, we will be adjusting middle. */
898 vma_start_write(middle);
899
900 if (merge_right) {
901 vma_start_write(next);
902 vmg->target = next;
903 sticky_flags |= (next->vm_flags & VM_STICKY);
904 }
905
906 if (merge_left) {
907 vma_start_write(prev);
908 vmg->target = prev;
909 sticky_flags |= (prev->vm_flags & VM_STICKY);
910 }
911
912 if (merge_both) {
913 /*
914 * |<-------------------->|
915 * |-------********-------|
916 * prev middle next
917 * extend delete delete
918 */
919
920 vmg->start = prev->vm_start;
921 vmg->end = next->vm_end;
922 vmg->pgoff = prev->vm_pgoff;
923
924 /*
925 * We already ensured anon_vma compatibility above, so if prev has
926 * no anon_vma object, all that remains is to determine whether next
927 * or middle contains the anon_vma we must duplicate.
928 */
929 err = dup_anon_vma(prev, next->anon_vma ? next : middle,
930 &anon_dup);
931 } else if (merge_left) {
932 /*
933 * |<------------>| OR
934 * |<----------------->|
935 * |-------*************
936 * prev middle
937 * extend shrink/delete
938 */
939
940 vmg->start = prev->vm_start;
941 vmg->pgoff = prev->vm_pgoff;
942
943 if (!vmg->__remove_middle)
944 vmg->__adjust_middle_start = true;
945
946 err = dup_anon_vma(prev, middle, &anon_dup);
947 } else { /* merge_right */
948 /*
949 * |<------------->| OR
950 * |<----------------->|
951 * *************-------|
952 * middle next
953 * shrink/delete extend
954 */
955
956 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
957
958 VM_WARN_ON_VMG(!merge_right, vmg);
959 /* If we are offset into a VMA, then prev must be middle. */
960 VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg);
961
962 if (vmg->__remove_middle) {
963 vmg->end = next->vm_end;
964 vmg->pgoff = next->vm_pgoff - pglen;
965 } else {
966 /* We shrink middle and expand next. */
967 vmg->__adjust_next_start = true;
968 vmg->start = middle->vm_start;
969 vmg->end = start;
970 vmg->pgoff = middle->vm_pgoff;
971 }
972
973 err = dup_anon_vma(next, middle, &anon_dup);
974 }
975
976 if (err || commit_merge(vmg))
977 goto abort;
978
979 vm_flags_set(vmg->target, sticky_flags);
980 khugepaged_enter_vma(vmg->target, vmg->vm_flags);
981 vmg->state = VMA_MERGE_SUCCESS;
982 return vmg->target;
983
984 abort:
985 vma_iter_set(vmg->vmi, start);
986 vma_iter_load(vmg->vmi);
987
988 if (anon_dup)
989 unlink_anon_vmas(anon_dup);
990
991 /*
992 * This means we have failed to clone anon_vmas correctly, but no
993 * actual changes to VMAs have occurred, so no harm no foul - if the
994 * user doesn't want this reported and instead just wants to give up on
995 * the merge, allow it.
996 */
997 if (!vmg->give_up_on_oom)
998 vmg->state = VMA_MERGE_ERROR_NOMEM;
999 return NULL;
1000 }
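/*
 * Example of the expected calling convention (a sketch; vma_modify() below
 * is the in-file user, and new_flags is a stand-in name): the caller fills a
 * vma_merge_struct describing the modification, then distinguishes "no merge
 * possible" from "merge failed due to OOM" via the resulting state:
 *
 *	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
 *	vmg.vm_flags = new_flags;
 *
 *	merged = vma_merge_existing_range(&vmg);
 *	if (!merged && vmg_nomem(&vmg))
 *		return ERR_PTR(-ENOMEM);
 */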
1001
1002 /*
1003 * vma_merge_new_range - Attempt to merge a new VMA into address space
1004 *
1005 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
1006 * (exclusive), which we try to merge with any adjacent VMAs if possible.
1007 *
1008 * We are about to add a VMA to the address space starting at @vmg->start and
1009 * ending at @vmg->end. There are three different possible scenarios:
1010 *
1011 * 1. There is a VMA with identical properties immediately adjacent to the
1012 * proposed new VMA [@vmg->start, @vmg->end) either before or after it -
1013 * EXPAND that VMA:
1014 *
1015 * Proposed: |-----| or |-----|
1016 * Existing: |----| |----|
1017 *
1018 * 2. There are VMAs with identical properties immediately adjacent to the
1019 * proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
1020 * EXPAND the former and REMOVE the latter:
1021 *
1022 * Proposed: |-----|
1023 * Existing: |----| |----|
1024 *
1025 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
1026 * VMAs do not have identical attributes - NO MERGE POSSIBLE.
1027 *
1028 * In instances where we can merge, this function returns the expanded VMA which
1029 * will have its range adjusted accordingly and the underlying maple tree also
1030 * adjusted.
1031 *
1032 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
1033 * to the VMA we expanded.
1034 *
1035 * This function adjusts @vmg to provide @vmg->next if not already specified,
1036 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
1037 *
1038 * ASSUMPTIONS:
1039 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
1040 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
1041 *   other than VMAs that will be unmapped should the operation succeed.
1042 * - The caller must have specified the previous vma in @vmg->prev.
1043 * - The caller must have specified the next vma in @vmg->next.
1044 * - The caller must have positioned the vmi at or before the gap.
1045 */
1046 struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
1047 {
1048 struct vm_area_struct *prev = vmg->prev;
1049 struct vm_area_struct *next = vmg->next;
1050 unsigned long end = vmg->end;
1051 bool can_merge_left, can_merge_right;
1052
1053 mmap_assert_write_locked(vmg->mm);
1054 VM_WARN_ON_VMG(vmg->middle, vmg);
1055 VM_WARN_ON_VMG(vmg->target, vmg);
1056 /* vmi must point at or before the gap. */
1057 VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);
1058
1059 vmg->state = VMA_MERGE_NOMERGE;
1060
1061 /* Special VMAs are unmergeable, also if no prev/next. */
1062 if ((vmg->vm_flags & VM_SPECIAL) || (!prev && !next))
1063 return NULL;
1064
1065 can_merge_left = can_vma_merge_left(vmg);
1066 can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left);
1067
1068 /* If we can merge with the next VMA, adjust vmg accordingly. */
1069 if (can_merge_right) {
1070 vmg->end = next->vm_end;
1071 vmg->target = next;
1072 }
1073
1074 /* If we can merge with the previous VMA, adjust vmg accordingly. */
1075 if (can_merge_left) {
1076 vmg->start = prev->vm_start;
1077 vmg->target = prev;
1078 vmg->pgoff = prev->vm_pgoff;
1079
1080 /*
1081 * If this merge would result in removal of the next VMA but we
1082 * are not permitted to do so, reduce the operation to merging
1083 * prev and vma.
1084 */
1085 if (can_merge_right && !can_merge_remove_vma(next))
1086 vmg->end = end;
1087
1088 /* In expand-only case we are already positioned at prev. */
1089 if (!vmg->just_expand) {
1090 /* Equivalent to going to the previous range. */
1091 vma_prev(vmg->vmi);
1092 }
1093 }
1094
1095 /*
1096 * Now try to expand adjacent VMA(s). This takes care of removing the
1097 * following VMA if we have VMAs on both sides.
1098 */
1099 if (vmg->target && !vma_expand(vmg)) {
1100 khugepaged_enter_vma(vmg->target, vmg->vm_flags);
1101 vmg->state = VMA_MERGE_SUCCESS;
1102 return vmg->target;
1103 }
1104
1105 return NULL;
1106 }
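/*
 * Example (an illustrative sketch assuming an mmap()-style caller whose
 * struct mmap_state "map" already has prev/next filled in and whose iterator
 * is positioned at the gap):
 *
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 *
 *	vma = vma_merge_new_range(&vmg);
 *
 * A NULL return means no merge was possible (vmg.state is then
 * VMA_MERGE_NOMERGE) and the caller must allocate and insert a new VMA
 * itself; on success the returned, expanded VMA is already reflected in the
 * maple tree.
 */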
1107
1108 /*
1109 * vma_merge_copied_range - Attempt to merge a VMA that is being copied by
1110 * mremap()
1111 *
1112 * @vmg: Describes the VMA we are adding, in the copied-to range @vmg->start to
1113 * @vmg->end (exclusive), which we try to merge with any adjacent VMAs if
1114 * possible.
1115 *
1116 * vmg->prev, next, start, end, pgoff should all be relative to the COPIED TO
1117 * range, i.e. the target range for the VMA.
1118 *
1119 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
1120 * to the VMA we expanded.
1121 *
1122 * ASSUMPTIONS: Same as vma_merge_new_range(), except vmg->middle must contain
1123 * the copied-from VMA.
1124 */
1125 static struct vm_area_struct *vma_merge_copied_range(struct vma_merge_struct *vmg)
1126 {
1127 /* We must have a copied-from VMA. */
1128 VM_WARN_ON_VMG(!vmg->middle, vmg);
1129
1130 vmg->copied_from = vmg->middle;
1131 vmg->middle = NULL;
1132 return vma_merge_new_range(vmg);
1133 }
1134
1135 /*
1136 * vma_expand - Expand an existing VMA
1137 *
1138 * @vmg: Describes a VMA expansion operation.
1139 *
1140 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
1141 * Will expand over vmg->next if it's different from vmg->target and vmg->end ==
1142 * vmg->next->vm_end. Checking if the vmg->target can expand and merge with
1143 * vmg->next needs to be handled by the caller.
1144 *
1145 * Returns: 0 on success.
1146 *
1147 * ASSUMPTIONS:
1148 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
1149 * - The caller must have set @vmg->target and @vmg->next.
1150 */
1151 int vma_expand(struct vma_merge_struct *vmg)
1152 {
1153 struct vm_area_struct *anon_dup = NULL;
1154 struct vm_area_struct *target = vmg->target;
1155 struct vm_area_struct *next = vmg->next;
1156 bool remove_next = false;
1157 vm_flags_t sticky_flags;
1158 int ret = 0;
1159
1160 mmap_assert_write_locked(vmg->mm);
1161 vma_start_write(target);
1162
1163 if (next && target != next && vmg->end == next->vm_end)
1164 remove_next = true;
1165
1166 /* We must have a target. */
1167 VM_WARN_ON_VMG(!target, vmg);
1168 /* This should have already been checked by this point. */
1169 VM_WARN_ON_VMG(remove_next && !can_merge_remove_vma(next), vmg);
1170 /* Not merging but overwriting any part of next is not handled. */
1171 VM_WARN_ON_VMG(next && !remove_next &&
1172 next != target && vmg->end > next->vm_start, vmg);
1173 /* Only handles expanding. */
1174 VM_WARN_ON_VMG(target->vm_start < vmg->start ||
1175 target->vm_end > vmg->end, vmg);
1176
1177 sticky_flags = vmg->vm_flags & VM_STICKY;
1178 sticky_flags |= target->vm_flags & VM_STICKY;
1179 if (remove_next)
1180 sticky_flags |= next->vm_flags & VM_STICKY;
1181
1182 /*
1183 * If we are removing the next VMA or copying from a VMA
1184 * (e.g. mremap()'ing), we must propagate anon_vma state.
1185 *
1186 * Note that, by convention, callers ignore OOM for this case, so
1187 * we don't need to account for vmg->give_up_on_oom here.
1188 */
1189 if (remove_next)
1190 ret = dup_anon_vma(target, next, &anon_dup);
1191 if (!ret && vmg->copied_from)
1192 ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
1193 if (ret)
1194 return ret;
1195
1196 if (remove_next) {
1197 vma_start_write(next);
1198 vmg->__remove_next = true;
1199 }
1200 if (commit_merge(vmg))
1201 goto nomem;
1202
1203 vm_flags_set(target, sticky_flags);
1204 return 0;
1205
1206 nomem:
1207 if (anon_dup)
1208 unlink_anon_vmas(anon_dup);
1209 /*
1210 * If the user requests that we just give up on OOM, we are safe to do so
1211 * here, as commit_merge() provides this contract to us. Nothing has been
1212 * changed - no harm no foul, just don't report it.
1213 */
1214 if (!vmg->give_up_on_oom)
1215 vmg->state = VMA_MERGE_ERROR_NOMEM;
1216 return -ENOMEM;
1217 }
1218
1219 /*
1220 * vma_shrink() - Reduce an existing VMA's memory area
1221 * @vmi: The vma iterator
1222 * @vma: The VMA to modify
1223 * @start: The new start
1224 * @end: The new end
1225 *
1226 * Returns: 0 on success, -ENOMEM otherwise
1227 */
1228 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
1229 unsigned long start, unsigned long end, pgoff_t pgoff)
1230 {
1231 struct vma_prepare vp;
1232
1233 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
1234
1235 if (vma->vm_start < start)
1236 vma_iter_config(vmi, vma->vm_start, start);
1237 else
1238 vma_iter_config(vmi, end, vma->vm_end);
1239
1240 if (vma_iter_prealloc(vmi, NULL))
1241 return -ENOMEM;
1242
1243 vma_start_write(vma);
1244
1245 init_vma_prep(&vp, vma);
1246 vma_prepare(&vp);
1247 vma_adjust_trans_huge(vma, start, end, NULL);
1248
1249 vma_iter_clear(vmi);
1250 vma_set_range(vma, start, end, pgoff);
1251 vma_complete(&vp, vmi, vma->vm_mm);
1252 validate_mm(vma->vm_mm);
1253 return 0;
1254 }
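/*
 * Worked example (illustrative addresses): shrinking a VMA spanning
 * [0x1000, 0x5000) down to its head [0x1000, 0x3000) would be:
 *
 *	vma_shrink(vmi, vma, 0x1000, 0x3000, vma->vm_pgoff);
 *
 * Because the start does not move, the page offset is unchanged. If instead
 * only the tail [0x3000, 0x5000) were kept, the caller would pass
 * vma->vm_pgoff + PHYS_PFN(0x3000 - 0x1000) so the retained pages keep their
 * original file offsets.
 */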
1255
1256 static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
1257 struct ma_state *mas_detach, bool mm_wr_locked)
1258 {
1259 struct unmap_desc unmap = {
1260 .mas = mas_detach,
1261 .first = vms->vma,
1262 /* start and end may be different if there is no prev or next vma. */
1263 .pg_start = vms->unmap_start,
1264 .pg_end = vms->unmap_end,
1265 .vma_start = vms->start,
1266 .vma_end = vms->end,
1267 /*
1268 * The tree limits and reset differ from the normal case since it's a
1269 * side-tree
1270 */
1271 .tree_reset = 1,
1272 .tree_end = vms->vma_count,
1273 /*
1274 * We can free page tables without write-locking mmap_lock because VMAs
1275 * were isolated before we downgraded mmap_lock.
1276 */
1277 .mm_wr_locked = mm_wr_locked,
1278 };
1279
1280 if (!vms->clear_ptes) /* Nothing to do */
1281 return;
1282
1283 mas_set(mas_detach, 1);
1284 unmap_region(&unmap);
1285 vms->clear_ptes = false;
1286 }
1287
1288 static void vms_clean_up_area(struct vma_munmap_struct *vms,
1289 struct ma_state *mas_detach)
1290 {
1291 struct vm_area_struct *vma;
1292
1293 if (!vms->nr_pages)
1294 return;
1295
1296 vms_clear_ptes(vms, mas_detach, true);
1297 mas_set(mas_detach, 0);
1298 mas_for_each(mas_detach, vma, ULONG_MAX)
1299 vma_close(vma);
1300 }
1301
1302 /*
1303 * vms_complete_munmap_vmas() - Finish the munmap() operation
1304 * @vms: The vma munmap struct
1305 * @mas_detach: The maple state of the detached vmas
1306 *
1307 * This updates the mm_struct, unmaps the region, frees the resources used
1308 * for the munmap(), and may downgrade the lock if requested - everything
1309 * that needs to be done once the vma maple tree has been updated.
1310 */
1311 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
1312 struct ma_state *mas_detach)
1313 {
1314 struct vm_area_struct *vma;
1315 struct mm_struct *mm;
1316
1317 mm = current->mm;
1318 mm->map_count -= vms->vma_count;
1319 mm->locked_vm -= vms->locked_vm;
1320 if (vms->unlock)
1321 mmap_write_downgrade(mm);
1322
1323 if (!vms->nr_pages)
1324 return;
1325
1326 vms_clear_ptes(vms, mas_detach, !vms->unlock);
1327 /* Update high watermark before we lower total_vm */
1328 update_hiwater_vm(mm);
1329 /* Stat accounting */
1330 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
1331 /* Paranoid bookkeeping */
1332 VM_WARN_ON(vms->exec_vm > mm->exec_vm);
1333 VM_WARN_ON(vms->stack_vm > mm->stack_vm);
1334 VM_WARN_ON(vms->data_vm > mm->data_vm);
1335 mm->exec_vm -= vms->exec_vm;
1336 mm->stack_vm -= vms->stack_vm;
1337 mm->data_vm -= vms->data_vm;
1338
1339 /* Remove and clean up vmas */
1340 mas_set(mas_detach, 0);
1341 mas_for_each(mas_detach, vma, ULONG_MAX)
1342 remove_vma(vma);
1343
1344 vm_unacct_memory(vms->nr_accounted);
1345 validate_mm(mm);
1346 if (vms->unlock)
1347 mmap_read_unlock(mm);
1348
1349 __mt_destroy(mas_detach->tree);
1350 }
1351
1352 /*
1353 * reattach_vmas() - Undo any munmap work and free resources
1354 * @mas_detach: The maple state with the detached maple tree
1355 *
1356 * Reattach any detached vmas and free up the maple tree used to track the vmas.
1357 */
1358 static void reattach_vmas(struct ma_state *mas_detach)
1359 {
1360 struct vm_area_struct *vma;
1361
1362 mas_set(mas_detach, 0);
1363 mas_for_each(mas_detach, vma, ULONG_MAX)
1364 vma_mark_attached(vma);
1365
1366 __mt_destroy(mas_detach->tree);
1367 }
1368
1369 /*
1370 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
1371 * for removal at a later date. Handles splitting the first and last VMAs if
1372 * necessary, and marks the VMAs as isolated.
1373 *
1374 * @vms: The vma munmap struct
1375 * @mas_detach: The maple state tracking the detached tree
1376 *
1377 * Return: 0 on success, error otherwise
1378 */
1379 static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
1380 struct ma_state *mas_detach)
1381 {
1382 struct vm_area_struct *next = NULL;
1383 int error;
1384
1385 /*
1386 * If we need to split any vma, do it now to save pain later.
1387 * Does it split the first one?
1388 */
1389 if (vms->start > vms->vma->vm_start) {
1390
1391 /*
1392 * Make sure that map_count on return from munmap() will
1393 * not exceed its limit; but let map_count go just above
1394 * its limit temporarily, to help free resources as expected.
1395 */
1396 if (vms->end < vms->vma->vm_end &&
1397 vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
1398 error = -ENOMEM;
1399 goto map_count_exceeded;
1400 }
1401
1402 /* Don't bother splitting the VMA if we can't unmap it anyway */
1403 if (vma_is_sealed(vms->vma)) {
1404 error = -EPERM;
1405 goto start_split_failed;
1406 }
1407
1408 error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
1409 if (error)
1410 goto start_split_failed;
1411 }
1412 vms->prev = vma_prev(vms->vmi);
1413 if (vms->prev)
1414 vms->unmap_start = vms->prev->vm_end;
1415
1416 /*
1417 * Detach a range of VMAs from the mm. Using next as a temp variable as
1418 * it is always overwritten.
1419 */
1420 for_each_vma_range(*(vms->vmi), next, vms->end) {
1421 long nrpages;
1422
1423 if (vma_is_sealed(next)) {
1424 error = -EPERM;
1425 goto modify_vma_failed;
1426 }
1427 /* Does it split the end? */
1428 if (next->vm_end > vms->end) {
1429 error = __split_vma(vms->vmi, next, vms->end, 0);
1430 if (error)
1431 goto end_split_failed;
1432 }
1433 vma_start_write(next);
1434 mas_set(mas_detach, vms->vma_count++);
1435 error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
1436 if (error)
1437 goto munmap_gather_failed;
1438
1439 vma_mark_detached(next);
1440 nrpages = vma_pages(next);
1441
1442 vms->nr_pages += nrpages;
1443 if (next->vm_flags & VM_LOCKED)
1444 vms->locked_vm += nrpages;
1445
1446 if (next->vm_flags & VM_ACCOUNT)
1447 vms->nr_accounted += nrpages;
1448
1449 if (is_exec_mapping(next->vm_flags))
1450 vms->exec_vm += nrpages;
1451 else if (is_stack_mapping(next->vm_flags))
1452 vms->stack_vm += nrpages;
1453 else if (is_data_mapping(next->vm_flags))
1454 vms->data_vm += nrpages;
1455
1456 if (vms->uf) {
1457 /*
1458 * If userfaultfd_unmap_prep returns an error the vmas
1459 * will remain split, but userland will get a
1460 * highly unexpected error anyway. This is no
1461 * different than the case where the first of the two
1462 * __split_vma fails, but we don't undo the first
1463 * split, despite we could. This is unlikely enough
1464 * failure that it's not worth optimizing it for.
1465 */
1466 error = userfaultfd_unmap_prep(next, vms->start,
1467 vms->end, vms->uf);
1468 if (error)
1469 goto userfaultfd_error;
1470 }
1471 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
1472 BUG_ON(next->vm_start < vms->start);
1473 BUG_ON(next->vm_start > vms->end);
1474 #endif
1475 }
1476
1477 vms->next = vma_next(vms->vmi);
1478 if (vms->next)
1479 vms->unmap_end = vms->next->vm_start;
1480
1481 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1482 /* Make sure no VMAs are about to be lost. */
1483 {
1484 MA_STATE(test, mas_detach->tree, 0, 0);
1485 struct vm_area_struct *vma_mas, *vma_test;
1486 int test_count = 0;
1487
1488 vma_iter_set(vms->vmi, vms->start);
1489 rcu_read_lock();
1490 vma_test = mas_find(&test, vms->vma_count - 1);
1491 for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
1492 BUG_ON(vma_mas != vma_test);
1493 test_count++;
1494 vma_test = mas_next(&test, vms->vma_count - 1);
1495 }
1496 rcu_read_unlock();
1497 BUG_ON(vms->vma_count != test_count);
1498 }
1499 #endif
1500
1501 while (vma_iter_addr(vms->vmi) > vms->start)
1502 vma_iter_prev_range(vms->vmi);
1503
1504 vms->clear_ptes = true;
1505 return 0;
1506
1507 userfaultfd_error:
1508 munmap_gather_failed:
1509 end_split_failed:
1510 modify_vma_failed:
1511 reattach_vmas(mas_detach);
1512 start_split_failed:
1513 map_count_exceeded:
1514 return error;
1515 }
1516
1517 /*
1518 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
1519 * @vms: The vma munmap struct
1520 * @vmi: The vma iterator
1521 * @vma: The first vm_area_struct to munmap
1522 * @start: The aligned start address to munmap
1523 * @end: The aligned end address to munmap
1524 * @uf: The userfaultfd list_head
1525 * @unlock: Unlock after the operation. Only unlocked on success
1526 */
1527 static void init_vma_munmap(struct vma_munmap_struct *vms,
1528 struct vma_iterator *vmi, struct vm_area_struct *vma,
1529 unsigned long start, unsigned long end, struct list_head *uf,
1530 bool unlock)
1531 {
1532 vms->vmi = vmi;
1533 vms->vma = vma;
1534 if (vma) {
1535 vms->start = start;
1536 vms->end = end;
1537 } else {
1538 vms->start = vms->end = 0;
1539 }
1540 vms->unlock = unlock;
1541 vms->uf = uf;
1542 vms->vma_count = 0;
1543 vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
1544 vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
1545 vms->unmap_start = FIRST_USER_ADDRESS;
1546 vms->unmap_end = USER_PGTABLES_CEILING;
1547 vms->clear_ptes = false;
1548 }
1549
1550 /*
1551 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
1552 * @vmi: The vma iterator
1553 * @vma: The starting vm_area_struct
1554 * @mm: The mm_struct
1555 * @start: The aligned start address to munmap.
1556 * @end: The aligned end address to munmap.
1557 * @uf: The userfaultfd list_head
1558 * @unlock: Set to true to drop the mmap_lock. Unlocking only happens on
1559 * success.
1560 *
1561 * Return: 0 on success and drops the lock if so directed, error and leaves the
1562 * lock held otherwise.
1563 */
1564 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1565 struct mm_struct *mm, unsigned long start, unsigned long end,
1566 struct list_head *uf, bool unlock)
1567 {
1568 struct maple_tree mt_detach;
1569 MA_STATE(mas_detach, &mt_detach, 0, 0);
1570 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1571 mt_on_stack(mt_detach);
1572 struct vma_munmap_struct vms;
1573 int error;
1574
1575 init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
1576 error = vms_gather_munmap_vmas(&vms, &mas_detach);
1577 if (error)
1578 goto gather_failed;
1579
1580 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
1581 if (error)
1582 goto clear_tree_failed;
1583
1584 /* Point of no return */
1585 vms_complete_munmap_vmas(&vms, &mas_detach);
1586 return 0;
1587
1588 clear_tree_failed:
1589 reattach_vmas(&mas_detach);
1590 gather_failed:
1591 validate_mm(mm);
1592 return error;
1593 }
1594
1595 /*
1596 * do_vmi_munmap() - munmap a given range.
1597 * @vmi: The vma iterator
1598 * @mm: The mm_struct
1599 * @start: The start address to munmap
1600 * @len: The length of the range to munmap
1601 * @uf: The userfaultfd list_head
1602 * @unlock: set to true if the user wants to drop the mmap_lock on success
1603 *
1604 * This function takes a @vmi that is either pointing to the previous VMA or
1605 * set to MA_START and sets it up to remove the mapping(s). The @len will be
1606 * page-aligned.
1607 *
1608 * Return: 0 on success and drops the lock if so directed, error and leaves the
1609 * lock held otherwise.
1610 */
1611 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
1612 unsigned long start, size_t len, struct list_head *uf,
1613 bool unlock)
1614 {
1615 unsigned long end;
1616 struct vm_area_struct *vma;
1617
1618 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
1619 return -EINVAL;
1620
1621 end = start + PAGE_ALIGN(len);
1622 if (end == start)
1623 return -EINVAL;
1624
1625 /* Find the first overlapping VMA */
1626 vma = vma_find(vmi, end);
1627 if (!vma) {
1628 if (unlock)
1629 mmap_write_unlock(mm);
1630 return 0;
1631 }
1632
1633 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
1634 }
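/*
 * Example of a typical caller (a sketch, assuming the mmap write lock is
 * already held and uf is a caller-provided userfaultfd list_head):
 *
 *	VMA_ITERATOR(vmi, mm, start);
 *
 *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, false);
 *
 * With unlock == false the mmap_lock is left untouched. Passing true asks
 * for the lock to be downgraded and then dropped on success, in which case
 * the caller must not unlock it again.
 */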
1635
1636 /*
1637 * We are about to modify one or more of a VMA's flags, policy, userfaultfd
1638 * context and anonymous VMA name within the range [start, end).
1639 *
1640 * As a result, we might be able to merge the newly modified VMA range with an
1641 * adjacent VMA with identical properties.
1642 *
1643 * If no merge is possible and the range does not span the entirety of the VMA,
1644 * we then need to split the VMA to accommodate the change.
1645 *
1646 * The function returns either the merged VMA, the original VMA if a split was
1647 * required instead, or an error if the split failed.
1648 */
1649 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
1650 {
1651 struct vm_area_struct *vma = vmg->middle;
1652 unsigned long start = vmg->start;
1653 unsigned long end = vmg->end;
1654 struct vm_area_struct *merged;
1655
1656 /* First, try to merge. */
1657 merged = vma_merge_existing_range(vmg);
1658 if (merged)
1659 return merged;
1660 if (vmg_nomem(vmg))
1661 return ERR_PTR(-ENOMEM);
1662
1663 /*
1664 * Split can fail for reasons other than OOM, so if the user requests
1665 * this it's probably a mistake.
1666 */
1667 VM_WARN_ON(vmg->give_up_on_oom &&
1668 (vma->vm_start != start || vma->vm_end != end));
1669
1670 /* Split any preceding portion of the VMA. */
1671 if (vma->vm_start < start) {
1672 int err = split_vma(vmg->vmi, vma, start, 1);
1673
1674 if (err)
1675 return ERR_PTR(err);
1676 }
1677
1678 /* Split any trailing portion of the VMA. */
1679 if (vma->vm_end > end) {
1680 int err = split_vma(vmg->vmi, vma, end, 0);
1681
1682 if (err)
1683 return ERR_PTR(err);
1684 }
1685
1686 return vma;
1687 }
1688
1689 struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
1690 struct vm_area_struct *prev, struct vm_area_struct *vma,
1691 unsigned long start, unsigned long end,
1692 vm_flags_t *vm_flags_ptr)
1693 {
1694 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1695 const vm_flags_t vm_flags = *vm_flags_ptr;
1696 struct vm_area_struct *ret;
1697
1698 vmg.vm_flags = vm_flags;
1699
1700 ret = vma_modify(&vmg);
1701 if (IS_ERR(ret))
1702 return ret;
1703
1704 /*
1705 * For a merge to succeed, the flags must match those
1706 * requested. However, sticky flags may have been retained, so propagate
1707 * them to the caller.
1708 */
1709 if (vmg.state == VMA_MERGE_SUCCESS)
1710 *vm_flags_ptr = ret->vm_flags;
1711 return ret;
1712 }
1713
1714 struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
1715 struct vm_area_struct *prev, struct vm_area_struct *vma,
1716 unsigned long start, unsigned long end,
1717 struct anon_vma_name *new_name)
1718 {
1719 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1720
1721 vmg.anon_name = new_name;
1722
1723 return vma_modify(&vmg);
1724 }
1725
1726 struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
1727 struct vm_area_struct *prev, struct vm_area_struct *vma,
1728 unsigned long start, unsigned long end,
1729 struct mempolicy *new_pol)
1730 {
1731 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1732
1733 vmg.policy = new_pol;
1734
1735 return vma_modify(&vmg);
1736 }
1737
1738 struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
1739 struct vm_area_struct *prev, struct vm_area_struct *vma,
1740 unsigned long start, unsigned long end, vm_flags_t vm_flags,
1741 struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom)
1742 {
1743 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1744
1745 vmg.vm_flags = vm_flags;
1746 vmg.uffd_ctx = new_ctx;
1747 if (give_up_on_oom)
1748 vmg.give_up_on_oom = true;
1749
1750 return vma_modify(&vmg);
1751 }
1752
1753 /*
1754 * Expand vma by delta bytes, potentially merging with an immediately adjacent
1755 * VMA with identical properties.
1756 */
1757 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1758 struct vm_area_struct *vma,
1759 unsigned long delta)
1760 {
1761 VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
1762
1763 vmg.next = vma_iter_next_rewind(vmi, NULL);
1764 vmg.middle = NULL; /* We use the VMA to populate VMG fields only. */
1765
1766 return vma_merge_new_range(&vmg);
1767 }
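
/*
 * Illustrative only (assumed caller, not taken from this file): expanding a
 * mapping in place, as an mremap()-style path might do, can use
 * vma_merge_extend() to absorb the extra range into an adjacent compatible
 * VMA where possible.
 *
 *	VMA_ITERATOR(vmi, mm, vma->vm_end);
 *	struct vm_area_struct *expanded;
 *
 *	expanded = vma_merge_extend(&vmi, vma, new_len - old_len);
 *	if (!expanded)
 *		... fall back to allocating or moving the mapping ...
 */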
1768
1769 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1770 {
1771 vb->count = 0;
1772 }
1773
1774 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1775 {
1776 struct address_space *mapping;
1777 int i;
1778
1779 mapping = vb->vmas[0]->vm_file->f_mapping;
1780 i_mmap_lock_write(mapping);
1781 for (i = 0; i < vb->count; i++) {
1782 VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1783 __remove_shared_vm_struct(vb->vmas[i], mapping);
1784 }
1785 i_mmap_unlock_write(mapping);
1786
1787 unlink_file_vma_batch_init(vb);
1788 }
1789
1790 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1791 struct vm_area_struct *vma)
1792 {
1793 if (vma->vm_file == NULL)
1794 return;
1795
1796 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1797 vb->count == ARRAY_SIZE(vb->vmas))
1798 unlink_file_vma_batch_process(vb);
1799
1800 vb->vmas[vb->count] = vma;
1801 vb->count++;
1802 }
1803
1804 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1805 {
1806 if (vb->count > 0)
1807 unlink_file_vma_batch_process(vb);
1808 }
1809
1810 static void vma_link_file(struct vm_area_struct *vma, bool hold_rmap_lock)
1811 {
1812 struct file *file = vma->vm_file;
1813 struct address_space *mapping;
1814
1815 if (file) {
1816 mapping = file->f_mapping;
1817 i_mmap_lock_write(mapping);
1818 __vma_link_file(vma, mapping);
1819 if (!hold_rmap_lock)
1820 i_mmap_unlock_write(mapping);
1821 }
1822 }
1823
1824 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1825 {
1826 VMA_ITERATOR(vmi, mm, 0);
1827
1828 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1829 if (vma_iter_prealloc(&vmi, vma))
1830 return -ENOMEM;
1831
1832 vma_start_write(vma);
1833 vma_iter_store_new(&vmi, vma);
1834 vma_link_file(vma, /* hold_rmap_lock= */false);
1835 mm->map_count++;
1836 validate_mm(mm);
1837 return 0;
1838 }
1839
1840 /*
1841 * Copy the vma structure to a new location in the same mm,
1842 * prior to moving page table entries, to effect an mremap move.
1843 */
1844 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1845 unsigned long addr, unsigned long len, pgoff_t pgoff,
1846 bool *need_rmap_locks)
1847 {
1848 struct vm_area_struct *vma = *vmap;
1849 unsigned long vma_start = vma->vm_start;
1850 struct mm_struct *mm = vma->vm_mm;
1851 struct vm_area_struct *new_vma;
1852 bool faulted_in_anon_vma = true;
1853 VMA_ITERATOR(vmi, mm, addr);
1854 VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
1855
1856 /*
1857 * If anonymous vma has not yet been faulted, update new pgoff
1858 * to match new location, to increase its chance of merging.
1859 */
1860 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1861 pgoff = addr >> PAGE_SHIFT;
1862 faulted_in_anon_vma = false;
1863 }
1864
1865 /*
1866 * If the VMA we are copying might contain a uprobe PTE, ensure
1867 * that we do not establish one upon merge. Otherwise, when mremap()
1868 * moves page tables, it will orphan the newly created PTE.
1869 */
1870 if (vma->vm_file)
1871 vmg.skip_vma_uprobe = true;
1872
1873 new_vma = find_vma_prev(mm, addr, &vmg.prev);
1874 if (new_vma && new_vma->vm_start < addr + len)
1875 return NULL; /* should never get here */
1876
1877 vmg.pgoff = pgoff;
1878 vmg.next = vma_iter_next_rewind(&vmi, NULL);
1879 new_vma = vma_merge_copied_range(&vmg);
1880
1881 if (new_vma) {
1882 /*
1883 * Source vma may have been merged into new_vma
1884 */
1885 if (unlikely(vma_start >= new_vma->vm_start &&
1886 vma_start < new_vma->vm_end)) {
1887 /*
1888 * The only way we can get a vma_merge with
1889 * self during an mremap is if the vma hasn't
1890 * been faulted in yet and we were allowed to
1891 * reset the dst vma->vm_pgoff to the
1892 * destination address of the mremap to allow
1893 * the merge to happen. mremap must change the
1894 * vm_pgoff linearity between src and dst vmas
1895 * (in turn preventing a vma_merge) to be
1896 * safe. It is only safe to keep the vm_pgoff
1897 * linear if there are no pages mapped yet.
1898 */
1899 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1900 *vmap = vma = new_vma;
1901 }
1902 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1903 } else {
1904 new_vma = vm_area_dup(vma);
1905 if (!new_vma)
1906 goto out;
1907 vma_set_range(new_vma, addr, addr + len, pgoff);
1908 if (vma_dup_policy(vma, new_vma))
1909 goto out_free_vma;
1910 if (anon_vma_clone(new_vma, vma, VMA_OP_REMAP))
1911 goto out_free_mempol;
1912 if (new_vma->vm_file)
1913 get_file(new_vma->vm_file);
1914 if (new_vma->vm_ops && new_vma->vm_ops->open)
1915 new_vma->vm_ops->open(new_vma);
1916 if (vma_link(mm, new_vma))
1917 goto out_vma_link;
1918 *need_rmap_locks = false;
1919 }
1920 return new_vma;
1921
1922 out_vma_link:
1923 fixup_hugetlb_reservations(new_vma);
1924 vma_close(new_vma);
1925
1926 if (new_vma->vm_file)
1927 fput(new_vma->vm_file);
1928
1929 unlink_anon_vmas(new_vma);
1930 out_free_mempol:
1931 mpol_put(vma_policy(new_vma));
1932 out_free_vma:
1933 vm_area_free(new_vma);
1934 out:
1935 return NULL;
1936 }
1937
1938 /*
1939 * Rough compatibility check to quickly see if it's even worth looking
1940 * at sharing an anon_vma.
1941 *
1942 * They need to have the same vm_file, and the flags can only differ
1943 * in things that mprotect may change.
1944 *
1945 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1946 * we can merge the two vma's. For example, we refuse to merge a vma if
1947 * there is a vm_ops->close() function, because that indicates that the
1948 * driver is doing some kind of reference counting. But that doesn't
1949 * really matter for the anon_vma sharing case.
1950 */
1951 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1952 {
1953 return a->vm_end == b->vm_start &&
1954 mpol_equal(vma_policy(a), vma_policy(b)) &&
1955 a->vm_file == b->vm_file &&
1956 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_IGNORE_MERGE)) &&
1957 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1958 }
1959
1960 /*
1961 * Do some basic sanity checking to see if we can re-use the anon_vma
1962 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1963 * the same as 'old', the other will be the new one that is trying
1964 * to share the anon_vma.
1965 *
1966 * NOTE! This runs with mmap_lock held for reading, so it is possible that
1967 * the anon_vma of 'old' is concurrently in the process of being set up
1968 * by another page fault trying to merge _that_. But that's ok: if it
1969 * is being set up, that automatically means that it will be a singleton
1970 * acceptable for merging, so we can do all of this optimistically. But
1971 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1972 *
1973 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1974 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1975 * is to return an anon_vma that is "complex" due to having gone through
1976 * a fork).
1977 *
1978 * We also make sure that the two vma's are compatible (adjacent,
1979 * and with the same memory policies). That's all stable, even with just
1980 * a read lock on the mmap_lock.
1981 */
1982 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
1983 struct vm_area_struct *a,
1984 struct vm_area_struct *b)
1985 {
1986 if (anon_vma_compatible(a, b)) {
1987 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1988
1989 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1990 return anon_vma;
1991 }
1992 return NULL;
1993 }
1994
1995 /*
1996 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1997 * neighbouring vmas for a suitable anon_vma, before it goes off
1998 * to allocate a new anon_vma. It checks because a repetitive
1999 * sequence of mprotects and faults may otherwise lead to distinct
2000 * anon_vmas being allocated, preventing vma merge in subsequent
2001 * mprotect.
2002 */
2003 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
2004 {
2005 struct anon_vma *anon_vma = NULL;
2006 struct vm_area_struct *prev, *next;
2007 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
2008
2009 /* Try next first. */
2010 next = vma_iter_load(&vmi);
2011 if (next) {
2012 anon_vma = reusable_anon_vma(next, vma, next);
2013 if (anon_vma)
2014 return anon_vma;
2015 }
2016
2017 prev = vma_prev(&vmi);
2018 VM_BUG_ON_VMA(prev != vma, vma);
2019 prev = vma_prev(&vmi);
2020 /* Now try prev. */
2021 if (prev)
2022 anon_vma = reusable_anon_vma(prev, prev, vma);
2023
2024 /*
2025 * We might reach here with anon_vma == NULL if we can't find
2026 * any reusable anon_vma.
2027 * There's no absolute need to look only at touching neighbours:
2028 * we could search further afield for "compatible" anon_vmas.
2029 * But it would probably just be a waste of time searching,
2030 * or lead to too many vmas hanging off the same anon_vma.
2031 * We're trying to allow mprotect remerging later on,
2032 * not trying to minimize memory used for anon_vmas.
2033 */
2034 return anon_vma;
2035 }
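
/*
 * Rough sketch of the consumer (it lives in mm/rmap.c; paraphrased here for
 * context, not verbatim): the first anonymous fault checks for a reusable
 * neighbouring anon_vma before allocating a fresh one.
 *
 *	anon_vma = find_mergeable_anon_vma(vma);
 *	if (!anon_vma) {
 *		anon_vma = anon_vma_alloc();
 *		...
 *	}
 */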
2036
2037 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
2038 {
2039 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
2040 }
2041
2042 static bool vma_is_shared_writable(struct vm_area_struct *vma)
2043 {
2044 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
2045 (VM_WRITE | VM_SHARED);
2046 }
2047
2048 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
2049 {
2050 /* No managed pages to writeback. */
2051 if (vma->vm_flags & VM_PFNMAP)
2052 return false;
2053
2054 return vma->vm_file && vma->vm_file->f_mapping &&
2055 mapping_can_writeback(vma->vm_file->f_mapping);
2056 }
2057
2058 /*
2059 * Does this VMA require the underlying folios to have their dirty state
2060 * tracked?
2061 */
2062 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
2063 {
2064 /* Only shared, writable VMAs require dirty tracking. */
2065 if (!vma_is_shared_writable(vma))
2066 return false;
2067
2068 /* Does the filesystem need to be notified? */
2069 if (vm_ops_needs_writenotify(vma->vm_ops))
2070 return true;
2071
2072 /*
2073 * Even if the filesystem doesn't indicate a need for writenotify, if it
2074 * can writeback, dirty tracking is still required.
2075 */
2076 return vma_fs_can_writeback(vma);
2077 }
2078
2079 /*
2080 * Some shared mappings will want the pages marked read-only
2081 * to track write events. If so, we'll downgrade vm_page_prot
2082 * to the private version (using protection_map[] without the
2083 * VM_SHARED bit).
2084 */
2085 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
2086 {
2087 /* If it was private or non-writable, the write bit is already clear */
2088 if (!vma_is_shared_writable(vma))
2089 return false;
2090
2091 /* The backer wishes to know when pages are first written to? */
2092 if (vm_ops_needs_writenotify(vma->vm_ops))
2093 return true;
2094
2095 /* The open routine did something to the protections that pgprot_modify
2096 * won't preserve? */
2097 if (pgprot_val(vm_page_prot) !=
2098 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
2099 return false;
2100
2101 /*
2102 * Do we need to track softdirty? hugetlb does not support softdirty
2103 * tracking yet.
2104 */
2105 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
2106 return true;
2107
2108 /* Do we need write faults for uffd-wp tracking? */
2109 if (userfaultfd_wp(vma))
2110 return true;
2111
2112 /* Can the mapping track the dirty pages? */
2113 return vma_fs_can_writeback(vma);
2114 }
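
/*
 * For context, a paraphrased sketch of how vma_set_page_prot() (defined
 * elsewhere) consumes this predicate: when write notifications are wanted,
 * the VM_SHARED bit is dropped from the protection calculation so the PTEs
 * start out read-only and the first write traps.
 *
 *	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
 *	if (vma_wants_writenotify(vma, vm_page_prot)) {
 *		vm_flags &= ~VM_SHARED;
 *		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
 *	}
 *	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
 */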
2115
2116 static DEFINE_MUTEX(mm_all_locks_mutex);
2117
2118 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2119 {
2120 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2121 /*
2122 * The LSB of head.next can't change from under us
2123 * because we hold the mm_all_locks_mutex.
2124 */
2125 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
2126 /*
2127 * We can safely modify head.next after taking the
2128 * anon_vma->root->rwsem. If some other vma in this mm shares
2129 * the same anon_vma we won't take it again.
2130 *
2131 * No need of atomic instructions here, head.next
2132 * can't change from under us thanks to the
2133 * anon_vma->root->rwsem.
2134 */
2135 if (__test_and_set_bit(0, (unsigned long *)
2136 &anon_vma->root->rb_root.rb_root.rb_node))
2137 BUG();
2138 }
2139 }
2140
2141 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2142 {
2143 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2144 /*
2145 * AS_MM_ALL_LOCKS can't change from under us because
2146 * we hold the mm_all_locks_mutex.
2147 *
2148 * Operations on ->flags have to be atomic because
2149 * even if AS_MM_ALL_LOCKS is stable thanks to the
2150 * mm_all_locks_mutex, there may be other cpus
2151 * changing other bitflags in parallel to us.
2152 */
2153 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2154 BUG();
2155 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
2156 }
2157 }
2158
2159 /*
2160 * This operation locks against the VM for all pte/vma/mm related
2161 * operations that could ever happen on a certain mm. This includes
2162 * vmtruncate, try_to_unmap, and all page faults.
2163 *
2164 * The caller must take the mmap_lock in write mode before calling
2165 * mm_take_all_locks(). The caller isn't allowed to release the
2166 * mmap_lock until mm_drop_all_locks() returns.
2167 *
2168 * mmap_lock in write mode is required in order to block all operations
2169 * that could modify pagetables and free pages without need of
2170 * altering the vma layout. It's also needed in write mode to prevent new
2171 * anon_vmas from being associated with existing vmas.
2172 *
2173 * A single task can't take more than one mm_take_all_locks() in a row
2174 * or it would deadlock.
2175 *
2176 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
2177 * mapping->flags avoid taking the same lock twice if more than one
2178 * vma in this mm is backed by the same anon_vma or address_space.
2179 *
2180 * We take locks in the following order, according to the comment at the
2181 * beginning of mm/rmap.c:
2182 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
2183 * hugetlb mapping);
2184 * - all vmas marked locked
2185 * - all i_mmap_rwsem locks;
2186 * - all anon_vma->rwsem locks
2187 *
2188 * We can take all locks within these types randomly because the VM code
2189 * doesn't nest them and we are protected from parallel mm_take_all_locks() by
2190 * mm_all_locks_mutex.
2191 *
2192 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2193 * that may have to take thousands of locks.
2194 *
2195 * mm_take_all_locks() can fail if it's interrupted by signals.
2196 */
2197 int mm_take_all_locks(struct mm_struct *mm)
2198 {
2199 struct vm_area_struct *vma;
2200 struct anon_vma_chain *avc;
2201 VMA_ITERATOR(vmi, mm, 0);
2202
2203 mmap_assert_write_locked(mm);
2204
2205 mutex_lock(&mm_all_locks_mutex);
2206
2207 /*
2208 * vma_start_write() does not have a complement in mm_drop_all_locks()
2209 * because vma_start_write() is always asymmetrical; it marks a VMA as
2210 * being written to until mmap_write_unlock() or mmap_write_downgrade()
2211 * is reached.
2212 */
2213 for_each_vma(vmi, vma) {
2214 if (signal_pending(current))
2215 goto out_unlock;
2216 vma_start_write(vma);
2217 }
2218
2219 vma_iter_init(&vmi, mm, 0);
2220 for_each_vma(vmi, vma) {
2221 if (signal_pending(current))
2222 goto out_unlock;
2223 if (vma->vm_file && vma->vm_file->f_mapping &&
2224 is_vm_hugetlb_page(vma))
2225 vm_lock_mapping(mm, vma->vm_file->f_mapping);
2226 }
2227
2228 vma_iter_init(&vmi, mm, 0);
2229 for_each_vma(vmi, vma) {
2230 if (signal_pending(current))
2231 goto out_unlock;
2232 if (vma->vm_file && vma->vm_file->f_mapping &&
2233 !is_vm_hugetlb_page(vma))
2234 vm_lock_mapping(mm, vma->vm_file->f_mapping);
2235 }
2236
2237 vma_iter_init(&vmi, mm, 0);
2238 for_each_vma(vmi, vma) {
2239 if (signal_pending(current))
2240 goto out_unlock;
2241 if (vma->anon_vma)
2242 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2243 vm_lock_anon_vma(mm, avc->anon_vma);
2244 }
2245
2246 return 0;
2247
2248 out_unlock:
2249 mm_drop_all_locks(mm);
2250 return -EINTR;
2251 }
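
/*
 * Typical calling pattern (illustrative; an assumed caller such as KSM or
 * mmu-notifier registration): the mmap_lock must already be write-locked and
 * must stay held until mm_drop_all_locks() has run.
 *
 *	mmap_write_lock(mm);
 *	if (mm_take_all_locks(mm)) {
 *		mmap_write_unlock(mm);
 *		return -EINTR;
 *	}
 *	... operate with all rmap locks held ...
 *	mm_drop_all_locks(mm);
 *	mmap_write_unlock(mm);
 */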
2252
2253 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2254 {
2255 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2256 /*
2257 * The LSB of head.next can't change to 0 from under
2258 * us because we hold the mm_all_locks_mutex.
2259 *
2260 * We must however clear the bitflag before unlocking
2261 * the vma so the users using the anon_vma->rb_root will
2262 * never see our bitflag.
2263 *
2264 * No need of atomic instructions here, head.next
2265 * can't change from under us until we release the
2266 * anon_vma->root->rwsem.
2267 */
2268 if (!__test_and_clear_bit(0, (unsigned long *)
2269 &anon_vma->root->rb_root.rb_root.rb_node))
2270 BUG();
2271 anon_vma_unlock_write(anon_vma);
2272 }
2273 }
2274
2275 static void vm_unlock_mapping(struct address_space *mapping)
2276 {
2277 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2278 /*
2279 * AS_MM_ALL_LOCKS can't change to 0 from under us
2280 * because we hold the mm_all_locks_mutex.
2281 */
2282 i_mmap_unlock_write(mapping);
2283 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2284 &mapping->flags))
2285 BUG();
2286 }
2287 }
2288
2289 /*
2290 * The mmap_lock cannot be released by the caller until
2291 * mm_drop_all_locks() returns.
2292 */
2293 void mm_drop_all_locks(struct mm_struct *mm)
2294 {
2295 struct vm_area_struct *vma;
2296 struct anon_vma_chain *avc;
2297 VMA_ITERATOR(vmi, mm, 0);
2298
2299 mmap_assert_write_locked(mm);
2300 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2301
2302 for_each_vma(vmi, vma) {
2303 if (vma->anon_vma)
2304 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2305 vm_unlock_anon_vma(avc->anon_vma);
2306 if (vma->vm_file && vma->vm_file->f_mapping)
2307 vm_unlock_mapping(vma->vm_file->f_mapping);
2308 }
2309
2310 mutex_unlock(&mm_all_locks_mutex);
2311 }
2312
2313 /*
2314 * We account for memory if it's a private writeable mapping,
2315 * not hugepages and VM_NORESERVE wasn't set.
2316 */
2317 static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
2318 {
2319 /*
2320 * hugetlb has its own accounting separate from the core VM
2321 * VM_HUGETLB may not be set yet so we cannot check for that flag.
2322 */
2323 if (file && is_file_hugepages(file))
2324 return false;
2325
2326 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
2327 }
2328
2329 /*
2330 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
2331 * operation.
2332 * @vms: The vma unmap structure
2333 * @mas_detach: The maple state with the detached maple tree
2334 *
2335 * Reattach any detached vmas, free up the maple tree used to track the vmas.
2336 * If that's not possible because the ptes are cleared (and vm_ops->close() may
2337 * have been called), then a NULL is written over the vmas and the vmas are
2338 * removed (munmap() completed).
2339 */
2340 static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
2341 struct ma_state *mas_detach)
2342 {
2343 struct ma_state *mas = &vms->vmi->mas;
2344
2345 if (!vms->nr_pages)
2346 return;
2347
2348 if (vms->clear_ptes)
2349 return reattach_vmas(mas_detach);
2350
2351 /*
2352 * Aborting cannot just call the vm_ops open() because they are often
2353 * not symmetrical and state data has been lost. Resort to the old
2354 * failure method of leaving a gap where the MAP_FIXED mapping failed.
2355 */
2356 mas_set_range(mas, vms->start, vms->end - 1);
2357 mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
2358 /* Clean up the insertion of the unfortunate gap */
2359 vms_complete_munmap_vmas(vms, mas_detach);
2360 }
2361
2362 static void update_ksm_flags(struct mmap_state *map)
2363 {
2364 map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags);
2365 }
2366
2367 static void set_desc_from_map(struct vm_area_desc *desc,
2368 const struct mmap_state *map)
2369 {
2370 desc->start = map->addr;
2371 desc->end = map->end;
2372
2373 desc->pgoff = map->pgoff;
2374 desc->vm_file = map->file;
2375 desc->vma_flags = map->vma_flags;
2376 desc->page_prot = map->page_prot;
2377 }
2378
2379 /*
2380 * __mmap_setup() - Prepare to gather any overlapping VMAs that need to be
2381 * unmapped once the map operation is completed, check limits, account mapping
2382 * and clean up any pre-existing VMAs.
2383 *
2384 * As a result it sets up the @map and @desc objects.
2385 *
2386 * @map: Mapping state.
2387 * @desc: VMA descriptor
2388 * @uf: Userfaultfd context list.
2389 *
2390 * Returns: 0 on success, error code otherwise.
2391 */
2392 static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc,
2393 struct list_head *uf)
2394 {
2395 int error;
2396 struct vma_iterator *vmi = map->vmi;
2397 struct vma_munmap_struct *vms = &map->vms;
2398
2399 /* Find the first overlapping VMA and initialise unmap state. */
2400 vms->vma = vma_find(vmi, map->end);
2401 init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
2402 /* unlock = */ false);
2403
2404 /* OK, we have overlapping VMAs - prepare to unmap them. */
2405 if (vms->vma) {
2406 mt_init_flags(&map->mt_detach,
2407 vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2408 mt_on_stack(map->mt_detach);
2409 mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
2410 /* Prepare to unmap any existing mapping in the area */
2411 error = vms_gather_munmap_vmas(vms, &map->mas_detach);
2412 if (error) {
2413 /* On error VMAs will already have been reattached. */
2414 vms->nr_pages = 0;
2415 return error;
2416 }
2417
2418 map->next = vms->next;
2419 map->prev = vms->prev;
2420 } else {
2421 map->next = vma_iter_next_rewind(vmi, &map->prev);
2422 }
2423
2424 /* Check against address space limit. */
2425 if (!may_expand_vm(map->mm, map->vm_flags, map->pglen - vms->nr_pages))
2426 return -ENOMEM;
2427
2428 /* Private writable mapping: check memory availability. */
2429 if (accountable_mapping(map->file, map->vm_flags)) {
2430 map->charged = map->pglen;
2431 map->charged -= vms->nr_accounted;
2432 if (map->charged) {
2433 error = security_vm_enough_memory_mm(map->mm, map->charged);
2434 if (error)
2435 return error;
2436 }
2437
2438 vms->nr_accounted = 0;
2439 map->vm_flags |= VM_ACCOUNT;
2440 }
2441
2442 /*
2443 * Clear PTEs while the vma is still in the tree so that rmap
2444 * cannot race with the freeing later in the truncate scenario.
2445 * This is also needed for mmap_file(), which is why vm_ops
2446 * close function is called.
2447 */
2448 vms_clean_up_area(vms, &map->mas_detach);
2449
2450 set_desc_from_map(desc, map);
2451 return 0;
2452 }
2453
2454
2455 static int __mmap_new_file_vma(struct mmap_state *map,
2456 struct vm_area_struct *vma)
2457 {
2458 struct vma_iterator *vmi = map->vmi;
2459 int error;
2460
2461 vma->vm_file = map->file;
2462 if (!map->file_doesnt_need_get)
2463 get_file(map->file);
2464
2465 if (!map->file->f_op->mmap)
2466 return 0;
2467
2468 error = mmap_file(vma->vm_file, vma);
2469 if (error) {
2470 UNMAP_STATE(unmap, vmi, vma, vma->vm_start, vma->vm_end,
2471 map->prev, map->next);
2472 fput(vma->vm_file);
2473 vma->vm_file = NULL;
2474
2475 vma_iter_set(vmi, vma->vm_end);
2476 /* Undo any partial mapping done by a device driver. */
2477 unmap_region(&unmap);
2478 return error;
2479 }
2480
2481 /* Drivers cannot alter the address of the VMA. */
2482 WARN_ON_ONCE(map->addr != vma->vm_start);
2483 /*
2484 * Drivers should not permit writability when previously it was
2485 * disallowed.
2486 */
2487 VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags &&
2488 !(map->vm_flags & VM_MAYWRITE) &&
2489 (vma->vm_flags & VM_MAYWRITE));
2490
2491 map->file = vma->vm_file;
2492 map->vm_flags = vma->vm_flags;
2493
2494 return 0;
2495 }
2496
2497 /*
2498 * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
2499 * possible.
2500 *
2501 * @map: Mapping state.
2502 * @vmap: Output pointer for the new VMA.
2503 *
2504 * Returns: Zero on success, or an error.
2505 */
2506 static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
2507 {
2508 struct vma_iterator *vmi = map->vmi;
2509 int error = 0;
2510 struct vm_area_struct *vma;
2511
2512 /*
2513 * Determine the object being mapped and call the appropriate
2514 * specific mapper. The address has already been validated, but not yet
2515 * unmapped; the overlapping maps, however, have been removed from the list.
2516 */
2517 vma = vm_area_alloc(map->mm);
2518 if (!vma)
2519 return -ENOMEM;
2520
2521 vma_iter_config(vmi, map->addr, map->end);
2522 vma_set_range(vma, map->addr, map->end, map->pgoff);
2523 vm_flags_init(vma, map->vm_flags);
2524 vma->vm_page_prot = map->page_prot;
2525
2526 if (vma_iter_prealloc(vmi, vma)) {
2527 error = -ENOMEM;
2528 goto free_vma;
2529 }
2530
2531 if (map->file)
2532 error = __mmap_new_file_vma(map, vma);
2533 else if (map->vm_flags & VM_SHARED)
2534 error = shmem_zero_setup(vma);
2535 else
2536 vma_set_anonymous(vma);
2537
2538 if (error)
2539 goto free_iter_vma;
2540
2541 if (!map->check_ksm_early) {
2542 update_ksm_flags(map);
2543 vm_flags_init(vma, map->vm_flags);
2544 }
2545
2546 #ifdef CONFIG_SPARC64
2547 /* TODO: Fix SPARC ADI! */
2548 WARN_ON_ONCE(!arch_validate_flags(map->vm_flags));
2549 #endif
2550
2551 /* Lock the VMA since it is modified after insertion into VMA tree */
2552 vma_start_write(vma);
2553 vma_iter_store_new(vmi, vma);
2554 map->mm->map_count++;
2555 vma_link_file(vma, map->hold_file_rmap_lock);
2556
2557 /*
2558 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
2559 * call covers the non-merge case.
2560 */
2561 if (!vma_is_anonymous(vma))
2562 khugepaged_enter_vma(vma, map->vm_flags);
2563 *vmap = vma;
2564 return 0;
2565
2566 free_iter_vma:
2567 vma_iter_free(vmi);
2568 free_vma:
2569 vm_area_free(vma);
2570 return error;
2571 }
2572
2573 /*
2574 * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
2575 * statistics, handle locking and finalise the VMA.
2576 *
2577 * @map: Mapping state.
2578 * @vma: Merged or newly allocated VMA for the mmap()'d region.
2579 */
2580 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
2581 {
2582 struct mm_struct *mm = map->mm;
2583 vm_flags_t vm_flags = vma->vm_flags;
2584
2585 perf_event_mmap(vma);
2586
2587 /* Unmap any existing mapping in the area. */
2588 vms_complete_munmap_vmas(&map->vms, &map->mas_detach);
2589
2590 vm_stat_account(mm, vma->vm_flags, map->pglen);
2591 if (vm_flags & VM_LOCKED) {
2592 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2593 is_vm_hugetlb_page(vma) ||
2594 vma == get_gate_vma(mm))
2595 vm_flags_clear(vma, VM_LOCKED_MASK);
2596 else
2597 mm->locked_vm += map->pglen;
2598 }
2599
2600 if (vma->vm_file)
2601 uprobe_mmap(vma);
2602
2603 /*
2604 * A new (or expanded) vma always gets soft-dirty status.
2605 * Otherwise the user-space soft-dirty page tracker won't
2606 * be able to distinguish the case where a vma area is unmapped
2607 * and then a new one is mapped in place (which must be treated as
2608 * a completely new data area).
2609 */
2610 if (pgtable_supports_soft_dirty())
2611 vm_flags_set(vma, VM_SOFTDIRTY);
2612
2613 vma_set_page_prot(vma);
2614 }
2615
2616 static void call_action_prepare(struct mmap_state *map,
2617 struct vm_area_desc *desc)
2618 {
2619 struct mmap_action *action = &desc->action;
2620
2621 mmap_action_prepare(action, desc);
2622
2623 if (action->hide_from_rmap_until_complete)
2624 map->hold_file_rmap_lock = true;
2625 }
2626
2627 /*
2628 * Invoke the f_op->mmap_prepare() callback for a file-backed mapping that
2629 * specifies it.
2630 *
2631 * This is called prior to any merge attempt, and updates whitelisted fields
2632 * that are permitted to be updated by the caller.
2633 *
2634 * All but user-defined fields will be pre-populated with original values.
2635 *
2636 * Returns 0 on success, or an error code otherwise.
2637 */
2638 static int call_mmap_prepare(struct mmap_state *map,
2639 struct vm_area_desc *desc)
2640 {
2641 int err;
2642
2643 /* Invoke the hook. */
2644 err = vfs_mmap_prepare(map->file, desc);
2645 if (err)
2646 return err;
2647
2648 call_action_prepare(map, desc);
2649
2650 /* Update fields permitted to be changed. */
2651 map->pgoff = desc->pgoff;
2652 if (desc->vm_file != map->file) {
2653 map->file_doesnt_need_get = true;
2654 map->file = desc->vm_file;
2655 }
2656 map->vma_flags = desc->vma_flags;
2657 map->page_prot = desc->page_prot;
2658 /* User-defined fields. */
2659 map->vm_ops = desc->vm_ops;
2660 map->vm_private_data = desc->private_data;
2661
2662 return 0;
2663 }
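
/*
 * A hypothetical driver-side hook, shown purely to illustrate which fields
 * call_mmap_prepare() copies back (the names example_vm_ops and
 * example_private are assumptions, not taken from this file):
 *
 *	static int example_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->vm_ops = &example_vm_ops;
 *		desc->private_data = example_private;
 *		return 0;
 *	}
 *
 * Only pgoff, vm_file, vma_flags, page_prot, vm_ops and private_data are
 * propagated from the descriptor back into the mmap state.
 */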
2664
2665 static void set_vma_user_defined_fields(struct vm_area_struct *vma,
2666 struct mmap_state *map)
2667 {
2668 if (map->vm_ops)
2669 vma->vm_ops = map->vm_ops;
2670 vma->vm_private_data = map->vm_private_data;
2671 }
2672
2673 /*
2674 * Are we guaranteed no driver can change state such as to preclude KSM merging?
2675 * If so, let's set the KSM mergeable flag early so we don't break VMA merging.
2676 */
2677 static bool can_set_ksm_flags_early(struct mmap_state *map)
2678 {
2679 struct file *file = map->file;
2680
2681 /* Anonymous mappings have no driver which can change them. */
2682 if (!file)
2683 return true;
2684
2685 /*
2686 * If .mmap_prepare() is specified, then the driver will have already
2687 * manipulated state prior to updating KSM flags. So no need to worry
2688 * about mmap callbacks modifying VMA flags after the KSM flag has been
2689 * updated here, which could otherwise affect KSM eligibility.
2690 */
2691 if (file->f_op->mmap_prepare)
2692 return true;
2693
2694 /* shmem is safe. */
2695 if (shmem_file(file))
2696 return true;
2697
2698 /* Any other .mmap callback is not safe. */
2699 return false;
2700 }
2701
2702 static int call_action_complete(struct mmap_state *map,
2703 struct vm_area_desc *desc,
2704 struct vm_area_struct *vma)
2705 {
2706 struct mmap_action *action = &desc->action;
2707 int ret;
2708
2709 ret = mmap_action_complete(action, vma);
2710
2711 /* If we held the file rmap lock we need to release it. */
2712 if (map->hold_file_rmap_lock) {
2713 struct file *file = vma->vm_file;
2714
2715 i_mmap_unlock_write(file->f_mapping);
2716 }
2717 return ret;
2718 }
2719
2720 static unsigned long __mmap_region(struct file *file, unsigned long addr,
2721 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2722 struct list_head *uf)
2723 {
2724 struct mm_struct *mm = current->mm;
2725 struct vm_area_struct *vma = NULL;
2726 bool have_mmap_prepare = file && file->f_op->mmap_prepare;
2727 VMA_ITERATOR(vmi, mm, addr);
2728 MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
2729 struct vm_area_desc desc = {
2730 .mm = mm,
2731 .file = file,
2732 .action = {
2733 .type = MMAP_NOTHING, /* Default to no further action. */
2734 },
2735 };
2736 bool allocated_new = false;
2737 int error;
2738
2739 map.check_ksm_early = can_set_ksm_flags_early(&map);
2740
2741 error = __mmap_setup(&map, &desc, uf);
2742 if (!error && have_mmap_prepare)
2743 error = call_mmap_prepare(&map, &desc);
2744 if (error)
2745 goto abort_munmap;
2746
2747 if (map.check_ksm_early)
2748 update_ksm_flags(&map);
2749
2750 /* Attempt to merge with adjacent VMAs... */
2751 if (map.prev || map.next) {
2752 VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);
2753
2754 vma = vma_merge_new_range(&vmg);
2755 }
2756
2757 /* ...but if we can't, allocate a new VMA. */
2758 if (!vma) {
2759 error = __mmap_new_vma(&map, &vma);
2760 if (error)
2761 goto unacct_error;
2762 allocated_new = true;
2763 }
2764
2765 if (have_mmap_prepare)
2766 set_vma_user_defined_fields(vma, &map);
2767
2768 __mmap_complete(&map, vma);
2769
2770 if (have_mmap_prepare && allocated_new) {
2771 error = call_action_complete(&map, &desc, vma);
2772
2773 if (error)
2774 return error;
2775 }
2776
2777 return addr;
2778
2779 /* Accounting was done by __mmap_setup(). */
2780 unacct_error:
2781 if (map.charged)
2782 vm_unacct_memory(map.charged);
2783 abort_munmap:
2784 vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
2785 return error;
2786 }
2787
2788 /**
2789 * mmap_region() - Actually perform the userland mapping of a VMA into
2790 * current->mm with known, aligned and overflow-checked @addr and @len, and
2791 * correctly determined VMA flags @vm_flags and page offset @pgoff.
2792 *
2793 * This is an internal memory management function, and should not be used
2794 * directly.
2795 *
2796 * The caller must write-lock current->mm->mmap_lock.
2797 *
2798 * @file: If a file-backed mapping, a pointer to the struct file describing the
2799 * file to be mapped, otherwise NULL.
2800 * @addr: The page-aligned address at which to perform the mapping.
2801 * @len: The page-aligned, non-zero, length of the mapping.
2802 * @vm_flags: The VMA flags which should be applied to the mapping.
2803 * @pgoff: If @file is specified, the page offset into the file, if not then
2804 * the virtual page offset in memory of the anonymous mapping.
2805 * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap
2806 * events.
2807 *
2808 * Returns: Either an error, or the address at which the requested mapping has
2809 * been performed.
2810 */
2811 unsigned long mmap_region(struct file *file, unsigned long addr,
2812 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2813 struct list_head *uf)
2814 {
2815 unsigned long ret;
2816 bool writable_file_mapping = false;
2817
2818 mmap_assert_write_locked(current->mm);
2819
2820 /* Check to see if MDWE is applicable. */
2821 if (map_deny_write_exec(vm_flags, vm_flags))
2822 return -EACCES;
2823
2824 /* Allow architectures to sanity-check the vm_flags. */
2825 if (!arch_validate_flags(vm_flags))
2826 return -EINVAL;
2827
2828 /* Map writable and ensure this isn't a sealed memfd. */
2829 if (file && is_shared_maywrite_vm_flags(vm_flags)) {
2830 int error = mapping_map_writable(file->f_mapping);
2831
2832 if (error)
2833 return error;
2834 writable_file_mapping = true;
2835 }
2836
2837 ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
2838
2839 /* Clear our write mapping regardless of error. */
2840 if (writable_file_mapping)
2841 mapping_unmap_writable(file->f_mapping);
2842
2843 validate_mm(current->mm);
2844 return ret;
2845 }
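
/*
 * Sketch of the expected calling convention (illustrative; in-tree the
 * primary caller is do_mmap(), which computes the aligned addr/len/pgoff and
 * the vm_flags before calling in):
 *
 *	LIST_HEAD(uf);
 *
 *	mmap_write_lock(current->mm);
 *	addr = mmap_region(file, addr, len, vm_flags, pgoff, &uf);
 *	mmap_write_unlock(current->mm);
 *	userfaultfd_unmap_complete(current->mm, &uf);
 */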
2846
2847 /*
2848 * do_brk_flags() - Increase the brk vma if the flags match.
2849 * @vmi: The vma iterator
2850 * @addr: The start address
2851 * @len: The length of the increase
2852 * @vma: The vma,
2853 * @vm_flags: The VMA Flags
2854 *
2855 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
2856 * do not match then create a new anonymous VMA. Eventually we may be able to
2857 * do some brk-specific accounting here.
2858 */
2859 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
2860 unsigned long addr, unsigned long len, vm_flags_t vm_flags)
2861 {
2862 struct mm_struct *mm = current->mm;
2863
2864 /*
2865 * Check against address space limits by the changed size
2866 * Note: This happens *after* clearing old mappings in some code paths.
2867 */
2868 vm_flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2869 vm_flags = ksm_vma_flags(mm, NULL, vm_flags);
2870 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT))
2871 return -ENOMEM;
2872
2873 if (mm->map_count > sysctl_max_map_count)
2874 return -ENOMEM;
2875
2876 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2877 return -ENOMEM;
2878
2879 /*
2880 * Expand the existing vma if possible; Note that singular lists do not
2881 * occur after forking, so the expand will only happen on new VMAs.
2882 */
2883 if (vma && vma->vm_end == addr) {
2884 VMG_STATE(vmg, mm, vmi, addr, addr + len, vm_flags, PHYS_PFN(addr));
2885
2886 vmg.prev = vma;
2887 /* vmi is positioned at prev, which this mode expects. */
2888 vmg.just_expand = true;
2889
2890 if (vma_merge_new_range(&vmg))
2891 goto out;
2892 else if (vmg_nomem(&vmg))
2893 goto unacct_fail;
2894 }
2895
2896 if (vma)
2897 vma_iter_next_range(vmi);
2898 /* create a vma struct for an anonymous mapping */
2899 vma = vm_area_alloc(mm);
2900 if (!vma)
2901 goto unacct_fail;
2902
2903 vma_set_anonymous(vma);
2904 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
2905 vm_flags_init(vma, vm_flags);
2906 vma->vm_page_prot = vm_get_page_prot(vm_flags);
2907 vma_start_write(vma);
2908 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
2909 goto mas_store_fail;
2910
2911 mm->map_count++;
2912 validate_mm(mm);
2913 out:
2914 perf_event_mmap(vma);
2915 mm->total_vm += len >> PAGE_SHIFT;
2916 mm->data_vm += len >> PAGE_SHIFT;
2917 if (vm_flags & VM_LOCKED)
2918 mm->locked_vm += (len >> PAGE_SHIFT);
2919 if (pgtable_supports_soft_dirty())
2920 vm_flags_set(vma, VM_SOFTDIRTY);
2921 return 0;
2922
2923 mas_store_fail:
2924 vm_area_free(vma);
2925 unacct_fail:
2926 vm_unacct_memory(len >> PAGE_SHIFT);
2927 return -ENOMEM;
2928 }
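
/*
 * Illustrative sketch of the brk() path (paraphrased, assumed caller): the
 * syscall locates the existing brk VMA, if any, and asks for the delta
 * between the old and new break to be added.
 *
 *	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
 *		goto out;
 *	mm->brk = newbrk;
 */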
2929
2930 /**
2931 * unmapped_area() - Find an area between the low_limit and the high_limit with
2932 * the correct alignment and offset, all from @info. Note: current->mm is used
2933 * for the search.
2934 *
2935 * @info: The unmapped area information including the range [low_limit,
2936 * high_limit), the alignment offset and mask.
2937 *
2938 * Return: A memory address or -ENOMEM.
2939 */
2940 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
2941 {
2942 unsigned long length, gap;
2943 unsigned long low_limit, high_limit;
2944 struct vm_area_struct *tmp;
2945 VMA_ITERATOR(vmi, current->mm, 0);
2946
2947 /* Adjust search length to account for worst case alignment overhead */
2948 length = info->length + info->align_mask + info->start_gap;
2949 if (length < info->length)
2950 return -ENOMEM;
2951
2952 low_limit = info->low_limit;
2953 if (low_limit < mmap_min_addr)
2954 low_limit = mmap_min_addr;
2955 high_limit = info->high_limit;
2956 retry:
2957 if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
2958 return -ENOMEM;
2959
2960 /*
2961 * Adjust for the gap first so it doesn't interfere with the later
2962 * alignment. The first step is the minimum needed to fulfill the start
2963 * gap, the next step is the minimum to align that. It is the minimum
2964 * needed to fulfill both.
2965 */
2966 gap = vma_iter_addr(&vmi) + info->start_gap;
2967 gap += (info->align_offset - gap) & info->align_mask;
2968 tmp = vma_next(&vmi);
2969 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
2970 if (vm_start_gap(tmp) < gap + length - 1) {
2971 low_limit = tmp->vm_end;
2972 vma_iter_reset(&vmi);
2973 goto retry;
2974 }
2975 } else {
2976 tmp = vma_prev(&vmi);
2977 if (tmp && vm_end_gap(tmp) > gap) {
2978 low_limit = vm_end_gap(tmp);
2979 vma_iter_reset(&vmi);
2980 goto retry;
2981 }
2982 }
2983
2984 return gap;
2985 }
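
/*
 * Example request (illustrative; the field values are assumptions): ask for
 * a 2MiB-aligned gap large enough for one huge page, searching upwards from
 * the process's mmap base.
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = HPAGE_SIZE;
 *	info.low_limit = current->mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = HPAGE_SIZE - 1;
 *	info.align_offset = 0;
 *	addr = unmapped_area(&info);
 */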
2986
2987 /**
2988 * unmapped_area_topdown() - Find an area between the low_limit and the
2989 * high_limit with the correct alignment and offset at the highest available
2990 * address, all from @info. Note: current->mm is used for the search.
2991 *
2992 * @info: The unmapped area information including the range [low_limit,
2993 * high_limit), the alignment offset and mask.
2994 *
2995 * Return: A memory address or -ENOMEM.
2996 */
2997 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
2998 {
2999 unsigned long length, gap, gap_end;
3000 unsigned long low_limit, high_limit;
3001 struct vm_area_struct *tmp;
3002 VMA_ITERATOR(vmi, current->mm, 0);
3003
3004 /* Adjust search length to account for worst case alignment overhead */
3005 length = info->length + info->align_mask + info->start_gap;
3006 if (length < info->length)
3007 return -ENOMEM;
3008
3009 low_limit = info->low_limit;
3010 if (low_limit < mmap_min_addr)
3011 low_limit = mmap_min_addr;
3012 high_limit = info->high_limit;
3013 retry:
3014 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
3015 return -ENOMEM;
3016
3017 gap = vma_iter_end(&vmi) - info->length;
3018 gap -= (gap - info->align_offset) & info->align_mask;
3019 gap_end = vma_iter_end(&vmi);
3020 tmp = vma_next(&vmi);
3021 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
3022 if (vm_start_gap(tmp) < gap_end) {
3023 high_limit = vm_start_gap(tmp);
3024 vma_iter_reset(&vmi);
3025 goto retry;
3026 }
3027 } else {
3028 tmp = vma_prev(&vmi);
3029 if (tmp && vm_end_gap(tmp) > gap) {
3030 high_limit = tmp->vm_start;
3031 vma_iter_reset(&vmi);
3032 goto retry;
3033 }
3034 }
3035
3036 return gap;
3037 }
3038
3039 /*
3040 * Verify that the stack growth is acceptable and
3041 * update accounting. This is shared with both the
3042 * grow-up and grow-down cases.
3043 */
3044 static int acct_stack_growth(struct vm_area_struct *vma,
3045 unsigned long size, unsigned long grow)
3046 {
3047 struct mm_struct *mm = vma->vm_mm;
3048 unsigned long new_start;
3049
3050 /* address space limit tests */
3051 if (!may_expand_vm(mm, vma->vm_flags, grow))
3052 return -ENOMEM;
3053
3054 /* Stack limit test */
3055 if (size > rlimit(RLIMIT_STACK))
3056 return -ENOMEM;
3057
3058 /* mlock limit tests */
3059 if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, grow << PAGE_SHIFT))
3060 return -ENOMEM;
3061
3062 /* Check to ensure the stack will not grow into a hugetlb-only region */
3063 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
3064 vma->vm_end - size;
3065 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
3066 return -EFAULT;
3067
3068 /*
3069 * Overcommit.. This must be the final test, as it will
3070 * update security statistics.
3071 */
3072 if (security_vm_enough_memory_mm(mm, grow))
3073 return -ENOMEM;
3074
3075 return 0;
3076 }
3077
3078 #if defined(CONFIG_STACK_GROWSUP)
3079 /*
3080 * PA-RISC uses this for its stack.
3081 * vma is the last one with address > vma->vm_end. Have to extend vma.
3082 */
3083 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
3084 {
3085 struct mm_struct *mm = vma->vm_mm;
3086 struct vm_area_struct *next;
3087 unsigned long gap_addr;
3088 int error = 0;
3089 VMA_ITERATOR(vmi, mm, vma->vm_start);
3090
3091 if (!(vma->vm_flags & VM_GROWSUP))
3092 return -EFAULT;
3093
3094 mmap_assert_write_locked(mm);
3095
3096 /* Guard against exceeding limits of the address space. */
3097 address &= PAGE_MASK;
3098 if (address >= (TASK_SIZE & PAGE_MASK))
3099 return -ENOMEM;
3100 address += PAGE_SIZE;
3101
3102 /* Enforce stack_guard_gap */
3103 gap_addr = address + stack_guard_gap;
3104
3105 /* Guard against overflow */
3106 if (gap_addr < address || gap_addr > TASK_SIZE)
3107 gap_addr = TASK_SIZE;
3108
3109 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
3110 if (next && vma_is_accessible(next)) {
3111 if (!(next->vm_flags & VM_GROWSUP))
3112 return -ENOMEM;
3113 /* Check that both stack segments have the same anon_vma? */
3114 }
3115
3116 if (next)
3117 vma_iter_prev_range_limit(&vmi, address);
3118
3119 vma_iter_config(&vmi, vma->vm_start, address);
3120 if (vma_iter_prealloc(&vmi, vma))
3121 return -ENOMEM;
3122
3123 /* We must make sure the anon_vma is allocated. */
3124 if (unlikely(anon_vma_prepare(vma))) {
3125 vma_iter_free(&vmi);
3126 return -ENOMEM;
3127 }
3128
3129 /* Lock the VMA before expanding to prevent concurrent page faults */
3130 vma_start_write(vma);
3131 /* We update the anon VMA tree. */
3132 anon_vma_lock_write(vma->anon_vma);
3133
3134 /* Somebody else might have raced and expanded it already */
3135 if (address > vma->vm_end) {
3136 unsigned long size, grow;
3137
3138 size = address - vma->vm_start;
3139 grow = (address - vma->vm_end) >> PAGE_SHIFT;
3140
3141 error = -ENOMEM;
3142 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
3143 error = acct_stack_growth(vma, size, grow);
3144 if (!error) {
3145 if (vma->vm_flags & VM_LOCKED)
3146 mm->locked_vm += grow;
3147 vm_stat_account(mm, vma->vm_flags, grow);
3148 anon_vma_interval_tree_pre_update_vma(vma);
3149 vma->vm_end = address;
3150 /* Overwrite old entry in mtree. */
3151 vma_iter_store_overwrite(&vmi, vma);
3152 anon_vma_interval_tree_post_update_vma(vma);
3153
3154 perf_event_mmap(vma);
3155 }
3156 }
3157 }
3158 anon_vma_unlock_write(vma->anon_vma);
3159 vma_iter_free(&vmi);
3160 validate_mm(mm);
3161 return error;
3162 }
3163 #endif /* CONFIG_STACK_GROWSUP */
3164
3165 /*
3166 * vma is the first one with address < vma->vm_start. Have to extend vma.
3167 * mmap_lock held for writing.
3168 */
3169 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
3170 {
3171 struct mm_struct *mm = vma->vm_mm;
3172 struct vm_area_struct *prev;
3173 int error = 0;
3174 VMA_ITERATOR(vmi, mm, vma->vm_start);
3175
3176 if (!(vma->vm_flags & VM_GROWSDOWN))
3177 return -EFAULT;
3178
3179 mmap_assert_write_locked(mm);
3180
3181 address &= PAGE_MASK;
3182 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
3183 return -EPERM;
3184
3185 /* Enforce stack_guard_gap */
3186 prev = vma_prev(&vmi);
3187 /* Check that both stack segments have the same anon_vma? */
3188 if (prev) {
3189 if (!(prev->vm_flags & VM_GROWSDOWN) &&
3190 vma_is_accessible(prev) &&
3191 (address - prev->vm_end < stack_guard_gap))
3192 return -ENOMEM;
3193 }
3194
3195 if (prev)
3196 vma_iter_next_range_limit(&vmi, vma->vm_start);
3197
3198 vma_iter_config(&vmi, address, vma->vm_end);
3199 if (vma_iter_prealloc(&vmi, vma))
3200 return -ENOMEM;
3201
3202 /* We must make sure the anon_vma is allocated. */
3203 if (unlikely(anon_vma_prepare(vma))) {
3204 vma_iter_free(&vmi);
3205 return -ENOMEM;
3206 }
3207
3208 /* Lock the VMA before expanding to prevent concurrent page faults */
3209 vma_start_write(vma);
3210 /* We update the anon VMA tree. */
3211 anon_vma_lock_write(vma->anon_vma);
3212
3213 /* Somebody else might have raced and expanded it already */
3214 if (address < vma->vm_start) {
3215 unsigned long size, grow;
3216
3217 size = vma->vm_end - address;
3218 grow = (vma->vm_start - address) >> PAGE_SHIFT;
3219
3220 error = -ENOMEM;
3221 if (grow <= vma->vm_pgoff) {
3222 error = acct_stack_growth(vma, size, grow);
3223 if (!error) {
3224 if (vma->vm_flags & VM_LOCKED)
3225 mm->locked_vm += grow;
3226 vm_stat_account(mm, vma->vm_flags, grow);
3227 anon_vma_interval_tree_pre_update_vma(vma);
3228 vma->vm_start = address;
3229 vma->vm_pgoff -= grow;
3230 /* Overwrite old entry in mtree. */
3231 vma_iter_store_overwrite(&vmi, vma);
3232 anon_vma_interval_tree_post_update_vma(vma);
3233
3234 perf_event_mmap(vma);
3235 }
3236 }
3237 }
3238 anon_vma_unlock_write(vma->anon_vma);
3239 vma_iter_free(&vmi);
3240 validate_mm(mm);
3241 return error;
3242 }
3243
3244 int __vm_munmap(unsigned long start, size_t len, bool unlock)
3245 {
3246 int ret;
3247 struct mm_struct *mm = current->mm;
3248 LIST_HEAD(uf);
3249 VMA_ITERATOR(vmi, mm, start);
3250
3251 if (mmap_write_lock_killable(mm))
3252 return -EINTR;
3253
3254 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
3255 if (ret || !unlock)
3256 mmap_write_unlock(mm);
3257
3258 userfaultfd_unmap_complete(mm, &uf);
3259 return ret;
3260 }
3261
3262 /* Insert vm structure into process list sorted by address
3263 * and into the inode's i_mmap tree. If vm_file is non-NULL
3264 * then i_mmap_rwsem is taken here.
3265 */
3266 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3267 {
3268 unsigned long charged = vma_pages(vma);
3269
3270
3271 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3272 return -ENOMEM;
3273
3274 if ((vma->vm_flags & VM_ACCOUNT) &&
3275 security_vm_enough_memory_mm(mm, charged))
3276 return -ENOMEM;
3277
3278 /*
3279 * The vm_pgoff of a purely anonymous vma should be irrelevant
3280 * until its first write fault, when page's anon_vma and index
3281 * are set. But now set the vm_pgoff it will almost certainly
3282 * end up with (unless mremap moves it elsewhere before that
3283 * first write fault), so /proc/pid/maps tells a consistent story.
3284 *
3285 * By setting it to reflect the virtual start address of the
3286 * vma, merges and splits can happen in a seamless way, just
3287 * using the existing file pgoff checks and manipulations.
3288 * Similarly in do_mmap and in do_brk_flags.
3289 */
3290 if (vma_is_anonymous(vma)) {
3291 BUG_ON(vma->anon_vma);
3292 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3293 }
3294
3295 if (vma_link(mm, vma)) {
3296 if (vma->vm_flags & VM_ACCOUNT)
3297 vm_unacct_memory(charged);
3298 return -ENOMEM;
3299 }
3300
3301 return 0;
3302 }
3303