mm/mmap.c: d61f0d59683d9c899211c300254d4140c482a6c0 vs 49b1b8d6f6831026cb105b0eafa18f13db612d86
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * mm/mmap.c
4 *
5 * Written by obz.
6 *
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
8 */

--- 62 unchanged lines hidden ---

71const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
72const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
73int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
74#endif
75
76static bool ignore_rlimit_data;
77core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
78
79static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
80 struct vm_area_struct *vma, struct vm_area_struct *prev,
81 struct vm_area_struct *next, unsigned long start,
82 unsigned long end, unsigned long tree_end, bool mm_wr_locked);
83
84static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
85{
86 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
87}
88
89/* Update vma->vm_page_prot to reflect vma->vm_flags. */
90void vma_set_page_prot(struct vm_area_struct *vma)
91{
92 unsigned long vm_flags = vma->vm_flags;
93 pgprot_t vm_page_prot;
94
95 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
96 if (vma_wants_writenotify(vma, vm_page_prot)) {
97 vm_flags &= ~VM_SHARED;
98 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
99 }
100 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
101 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
102}
103
104/*
105 * Requires inode->i_mapping->i_mmap_rwsem
106 */
107static void __remove_shared_vm_struct(struct vm_area_struct *vma,
108 struct address_space *mapping)
109{
110 if (vma_is_shared_maywrite(vma))
111 mapping_unmap_writable(mapping);
112
113 flush_dcache_mmap_lock(mapping);
114 vma_interval_tree_remove(vma, &mapping->i_mmap);
115 flush_dcache_mmap_unlock(mapping);
116}
117
118/*
119 * Unlink a file-based vm structure from its interval tree, to hide
120 * vma from rmap and vmtruncate before freeing its page tables.
121 */
122void unlink_file_vma(struct vm_area_struct *vma)
123{
124 struct file *file = vma->vm_file;
125
126 if (file) {
127 struct address_space *mapping = file->f_mapping;
128 i_mmap_lock_write(mapping);
129 __remove_shared_vm_struct(vma, mapping);
130 i_mmap_unlock_write(mapping);
131 }
132}
133
134void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
135{
136 vb->count = 0;
137}
138
139static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
140{
141 struct address_space *mapping;
142 int i;
143
144 mapping = vb->vmas[0]->vm_file->f_mapping;
145 i_mmap_lock_write(mapping);
146 for (i = 0; i < vb->count; i++) {
147 VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
148 __remove_shared_vm_struct(vb->vmas[i], mapping);
149 }
150 i_mmap_unlock_write(mapping);
151
152 unlink_file_vma_batch_init(vb);
153}
154
155void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
156 struct vm_area_struct *vma)
157{
158 if (vma->vm_file == NULL)
159 return;
160
161 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
162 vb->count == ARRAY_SIZE(vb->vmas))
163 unlink_file_vma_batch_process(vb);
164
165 vb->vmas[vb->count] = vma;
166 vb->count++;
167}
168
169void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
170{
171 if (vb->count > 0)
172 unlink_file_vma_batch_process(vb);
173}
174
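/*
 * Standalone userspace sketch (not kernel code): the batching pattern of
 * unlink_file_vma_batch_add() above, i.e. accumulate items that share a
 * key and flush whenever the key changes or the buffer fills. All names
 * (batch_add() and friends) are invented for the demo; build with cc.
 */
#include <stdio.h>

#define BATCH_SIZE 4

struct batch {
        const char *key;                /* stands in for the shared f_mapping */
        int items[BATCH_SIZE];
        int count;
};

static void batch_process(struct batch *b)
{
        printf("flush %d item(s) for key %s\n", b->count, b->key);
        b->count = 0;                   /* like unlink_file_vma_batch_init() */
}

static void batch_add(struct batch *b, const char *key, int item)
{
        /* flush if the key changes or the batch is full, as the vma code does */
        if ((b->count > 0 && b->key != key) || b->count == BATCH_SIZE)
                batch_process(b);
        b->key = key;
        b->items[b->count++] = item;
}

static void batch_final(struct batch *b)
{
        if (b->count > 0)
                batch_process(b);
}

int main(void)
{
        static const char *file_a = "a", *file_b = "b";
        struct batch b = { 0 };
        int i;

        for (i = 0; i < 6; i++)         /* three items per key */
                batch_add(&b, i < 3 ? file_a : file_b, i);
        batch_final(&b);
        return 0;
}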
175/*
176 * Close a vm structure and free it.
177 */
178static void remove_vma(struct vm_area_struct *vma, bool unreachable)
179{
180 might_sleep();
181 if (vma->vm_ops && vma->vm_ops->close)
182 vma->vm_ops->close(vma);
183 if (vma->vm_file)
184 fput(vma->vm_file);
185 mpol_put(vma_policy(vma));
186 if (unreachable)
187 __vm_area_free(vma);
188 else
189 vm_area_free(vma);
190}
191
192static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
193 unsigned long min)
194{
195 return mas_prev(&vmi->mas, min);
196}
197
198/*
199 * check_brk_limits() - Use platform-specific check of range & verify mlock
200 * limits.
201 * @addr: The address to check
202 * @len: The size of increase.
203 *
204 * Return: 0 on success.
205 */
206static int check_brk_limits(unsigned long addr, unsigned long len)

--- 106 unchanged lines hidden ---

313 return brk;
314
315out:
316 mm->brk = origbrk;
317 mmap_write_unlock(mm);
318 return origbrk;
319}
320
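/*
 * Standalone userspace demo (not kernel code): the brk() path elided above
 * is what moves the program break. A minimal sketch, assuming Linux with a
 * glibc-style sbrk(); build with cc and run it to watch the break move.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        void *before = sbrk(0);                 /* current program break */

        if (sbrk(4096) == (void *)-1) {         /* grow: the kernel's brk path */
                perror("sbrk");
                return 1;
        }
        printf("break moved from %p to %p\n", before, sbrk(0));
        sbrk(-4096);                            /* shrink it back */
        return 0;
}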
321#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
322static void validate_mm(struct mm_struct *mm)
323{
324 int bug = 0;
325 int i = 0;
326 struct vm_area_struct *vma;
327 VMA_ITERATOR(vmi, mm, 0);
328
329 mt_validate(&mm->mm_mt);
330 for_each_vma(vmi, vma) {
331#ifdef CONFIG_DEBUG_VM_RB
332 struct anon_vma *anon_vma = vma->anon_vma;
333 struct anon_vma_chain *avc;
334#endif
335 unsigned long vmi_start, vmi_end;
336 bool warn = 0;
337
338 vmi_start = vma_iter_addr(&vmi);
339 vmi_end = vma_iter_end(&vmi);
340 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
341 warn = 1;
342
343 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
344 warn = 1;
345
346 if (warn) {
347 pr_emerg("issue in %s\n", current->comm);
348 dump_stack();
349 dump_vma(vma);
350 pr_emerg("tree range: %px start %lx end %lx\n", vma,
351 vmi_start, vmi_end - 1);
352 vma_iter_dump_tree(&vmi);
353 }
354
355#ifdef CONFIG_DEBUG_VM_RB
356 if (anon_vma) {
357 anon_vma_lock_read(anon_vma);
358 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
359 anon_vma_interval_tree_verify(avc);
360 anon_vma_unlock_read(anon_vma);
361 }
362#endif
363 i++;
364 }
365 if (i != mm->map_count) {
366 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
367 bug = 1;
368 }
369 VM_BUG_ON_MM(bug, mm);
370}
371
372#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
373#define validate_mm(mm) do { } while (0)
374#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
375
376/*
377 * vma has some anon_vma assigned, and is already inserted on that
378 * anon_vma's interval trees.
379 *
380 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
381 * vma must be removed from the anon_vma's interval trees using
382 * anon_vma_interval_tree_pre_update_vma().
383 *
384 * After the update, the vma will be reinserted using
385 * anon_vma_interval_tree_post_update_vma().
386 *
387 * The entire update must be protected by exclusive mmap_lock and by
388 * the root anon_vma's mutex.
389 */
390static inline void
391anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
392{
393 struct anon_vma_chain *avc;
394
395 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
396 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
397}
398
399static inline void
400anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
401{
402 struct anon_vma_chain *avc;
403
404 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
405 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
406}
407
408static unsigned long count_vma_pages_range(struct mm_struct *mm,
409 unsigned long addr, unsigned long end)
410{
411 VMA_ITERATOR(vmi, mm, addr);
412 struct vm_area_struct *vma;
413 unsigned long nr_pages = 0;
414
415 for_each_vma_range(vmi, vma, end) {
416 unsigned long vm_start = max(addr, vma->vm_start);
417 unsigned long vm_end = min(end, vma->vm_end);
418
419 nr_pages += PHYS_PFN(vm_end - vm_start);
420 }
421
422 return nr_pages;
423}
424
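/*
 * Standalone userspace sketch (not kernel code): the clamp-and-sum logic of
 * count_vma_pages_range() above, over a plain sorted array. The vma_like
 * type and page-shift constant are invented for the demo.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

struct vma_like { unsigned long vm_start, vm_end; };

static unsigned long count_pages_range(const struct vma_like *v, int n,
                                       unsigned long addr, unsigned long end)
{
        unsigned long nr_pages = 0;
        int i;

        for (i = 0; i < n; i++) {
                /* clamp each vma to [addr, end) before counting its pages */
                unsigned long s = v[i].vm_start > addr ? v[i].vm_start : addr;
                unsigned long e = v[i].vm_end < end ? v[i].vm_end : end;

                if (s < e)
                        nr_pages += (e - s) >> DEMO_PAGE_SHIFT;
        }
        return nr_pages;
}

int main(void)
{
        struct vma_like vmas[] = {
                { 0x1000, 0x4000 },     /* three pages */
                { 0x6000, 0x8000 },     /* two pages, after a hole */
        };

        /* [0x2000, 0x7000) covers two pages of the first vma, one of the second */
        printf("%lu pages\n", count_pages_range(vmas, 2, 0x2000, 0x7000));
        return 0;
}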
425static void __vma_link_file(struct vm_area_struct *vma,
426 struct address_space *mapping)
427{
428 if (vma_is_shared_maywrite(vma))
429 mapping_allow_writable(mapping);
430
431 flush_dcache_mmap_lock(mapping);
432 vma_interval_tree_insert(vma, &mapping->i_mmap);
433 flush_dcache_mmap_unlock(mapping);
434}
435
436static void vma_link_file(struct vm_area_struct *vma)
437{
438 struct file *file = vma->vm_file;
439 struct address_space *mapping;
440
441 if (file) {
442 mapping = file->f_mapping;
443 i_mmap_lock_write(mapping);
444 __vma_link_file(vma, mapping);
445 i_mmap_unlock_write(mapping);
446 }
447}
448
449static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
450{
451 VMA_ITERATOR(vmi, mm, 0);
452
453 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
454 if (vma_iter_prealloc(&vmi, vma))
455 return -ENOMEM;
456
457 vma_start_write(vma);
458 vma_iter_store(&vmi, vma);
459 vma_link_file(vma);
460 mm->map_count++;
461 validate_mm(mm);
462 return 0;
463}
464
465/*
466 * init_multi_vma_prep() - Initializer for struct vma_prepare
467 * @vp: The vma_prepare struct
468 * @vma: The vma that will be altered once locked
469 * @next: The next vma if it is to be adjusted
470 * @remove: The first vma to be removed
471 * @remove2: The second vma to be removed
472 */
473static inline void init_multi_vma_prep(struct vma_prepare *vp,
474 struct vm_area_struct *vma, struct vm_area_struct *next,
475 struct vm_area_struct *remove, struct vm_area_struct *remove2)
476{
477 memset(vp, 0, sizeof(struct vma_prepare));
478 vp->vma = vma;
479 vp->anon_vma = vma->anon_vma;
480 vp->remove = remove;
481 vp->remove2 = remove2;
482 vp->adj_next = next;
483 if (!vp->anon_vma && next)
484 vp->anon_vma = next->anon_vma;
485
486 vp->file = vma->vm_file;
487 if (vp->file)
488 vp->mapping = vma->vm_file->f_mapping;
489
490}
491
492/*
493 * init_vma_prep() - Initializer wrapper for vma_prepare struct
494 * @vp: The vma_prepare struct
495 * @vma: The vma that will be altered once locked
496 */
497static inline void init_vma_prep(struct vma_prepare *vp,
498 struct vm_area_struct *vma)
499{
500 init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
501}
502
503
504/*
505 * vma_prepare() - Helper function for handling locking VMAs prior to altering
506 * @vp: The initialized vma_prepare struct
507 */
508static inline void vma_prepare(struct vma_prepare *vp)
509{
510 if (vp->file) {
511 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
512
513 if (vp->adj_next)
514 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
515 vp->adj_next->vm_end);
516
517 i_mmap_lock_write(vp->mapping);
518 if (vp->insert && vp->insert->vm_file) {
519 /*
520 * Put into interval tree now, so instantiated pages
521 * are visible to arm/parisc __flush_dcache_page
522 * throughout; but we cannot insert into address
523 * space until vma start or end is updated.
524 */
525 __vma_link_file(vp->insert,
526 vp->insert->vm_file->f_mapping);
527 }
528 }
529
530 if (vp->anon_vma) {
531 anon_vma_lock_write(vp->anon_vma);
532 anon_vma_interval_tree_pre_update_vma(vp->vma);
533 if (vp->adj_next)
534 anon_vma_interval_tree_pre_update_vma(vp->adj_next);
535 }
536
537 if (vp->file) {
538 flush_dcache_mmap_lock(vp->mapping);
539 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
540 if (vp->adj_next)
541 vma_interval_tree_remove(vp->adj_next,
542 &vp->mapping->i_mmap);
543 }
544
545}
546
547/*
548 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
549 * or for inserting a VMA.
550 *
551 * @vp: The vma_prepare struct
552 * @vmi: The vma iterator
553 * @mm: The mm_struct
554 */
555static inline void vma_complete(struct vma_prepare *vp,
556 struct vma_iterator *vmi, struct mm_struct *mm)
557{
558 if (vp->file) {
559 if (vp->adj_next)
560 vma_interval_tree_insert(vp->adj_next,
561 &vp->mapping->i_mmap);
562 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
563 flush_dcache_mmap_unlock(vp->mapping);
564 }
565
566 if (vp->remove && vp->file) {
567 __remove_shared_vm_struct(vp->remove, vp->mapping);
568 if (vp->remove2)
569 __remove_shared_vm_struct(vp->remove2, vp->mapping);
570 } else if (vp->insert) {
571 /*
572 * split_vma has split insert from vma, and needs
573 * us to insert it before dropping the locks
574 * (it may either follow vma or precede it).
575 */
576 vma_iter_store(vmi, vp->insert);
577 mm->map_count++;
578 }
579
580 if (vp->anon_vma) {
581 anon_vma_interval_tree_post_update_vma(vp->vma);
582 if (vp->adj_next)
583 anon_vma_interval_tree_post_update_vma(vp->adj_next);
584 anon_vma_unlock_write(vp->anon_vma);
585 }
586
587 if (vp->file) {
588 i_mmap_unlock_write(vp->mapping);
589 uprobe_mmap(vp->vma);
590
591 if (vp->adj_next)
592 uprobe_mmap(vp->adj_next);
593 }
594
595 if (vp->remove) {
596again:
597 vma_mark_detached(vp->remove, true);
598 if (vp->file) {
599 uprobe_munmap(vp->remove, vp->remove->vm_start,
600 vp->remove->vm_end);
601 fput(vp->file);
602 }
603 if (vp->remove->anon_vma)
604 anon_vma_merge(vp->vma, vp->remove);
605 mm->map_count--;
606 mpol_put(vma_policy(vp->remove));
607 if (!vp->remove2)
608 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
609 vm_area_free(vp->remove);
610
611 /*
612 * In mprotect's case 6 (see comments on vma_merge),
613 * we are removing both mid and next vmas
614 */
615 if (vp->remove2) {
616 vp->remove = vp->remove2;
617 vp->remove2 = NULL;
618 goto again;
619 }
620 }
621 if (vp->insert && vp->file)
622 uprobe_mmap(vp->insert);
623 validate_mm(mm);
624}
625
626/*
627 * dup_anon_vma() - Helper function to duplicate anon_vma
628 * @dst: The destination VMA
629 * @src: The source VMA
630 * @dup: Pointer to the destination VMA when successful.
631 *
632 * Returns: 0 on success.
633 */
634static inline int dup_anon_vma(struct vm_area_struct *dst,
635 struct vm_area_struct *src, struct vm_area_struct **dup)
636{
637 /*
638 * Easily overlooked: when mprotect shifts the boundary, make sure the
639 * expanding vma has anon_vma set if the shrinking vma had, to cover any
640 * anon pages imported.
641 */
642 if (src->anon_vma && !dst->anon_vma) {
643 int ret;
644
645 vma_assert_write_locked(dst);
646 dst->anon_vma = src->anon_vma;
647 ret = anon_vma_clone(dst, src);
648 if (ret)
649 return ret;
650
651 *dup = dst;
652 }
653
654 return 0;
655}
656
657/*
658 * vma_expand - Expand an existing VMA
659 *
660 * @vmi: The vma iterator
661 * @vma: The vma to expand
662 * @start: The start of the vma
663 * @end: The exclusive end of the vma
664 * @pgoff: The page offset of vma
665 * @next: The vma that currently follows @vma, if any.
666 *
667 * Expand @vma to @start and @end. Can expand off the start and end. Will
668 * expand over @next if it's different from @vma and @end == @next->vm_end.
669 * Checking if the @vma can expand and merge with @next needs to be handled by
670 * the caller.
671 *
672 * Returns: 0 on success
673 */
674int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
675 unsigned long start, unsigned long end, pgoff_t pgoff,
676 struct vm_area_struct *next)
677{
678 struct vm_area_struct *anon_dup = NULL;
679 bool remove_next = false;
680 struct vma_prepare vp;
681
682 vma_start_write(vma);
683 if (next && (vma != next) && (end == next->vm_end)) {
684 int ret;
685
686 remove_next = true;
687 vma_start_write(next);
688 ret = dup_anon_vma(vma, next, &anon_dup);
689 if (ret)
690 return ret;
691 }
692
693 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
694 /* Not merging but overwriting any part of next is not handled. */
695 VM_WARN_ON(next && !vp.remove &&
696 next != vma && end > next->vm_start);
697 /* Only handles expanding */
698 VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
699
700 /* Note: vma iterator must be pointing to 'start' */
701 vma_iter_config(vmi, start, end);
702 if (vma_iter_prealloc(vmi, vma))
703 goto nomem;
704
705 vma_prepare(&vp);
706 vma_adjust_trans_huge(vma, start, end, 0);
707 vma_set_range(vma, start, end, pgoff);
708 vma_iter_store(vmi, vma);
709
710 vma_complete(&vp, vmi, vma->vm_mm);
711 return 0;
712
713nomem:
714 if (anon_dup)
715 unlink_anon_vmas(anon_dup);
716 return -ENOMEM;
717}
718
719/*
720 * vma_shrink() - Reduce an existing VMAs memory area
721 * @vmi: The vma iterator
722 * @vma: The VMA to modify
723 * @start: The new start
724 * @end: The new end
725 *
726 * Returns: 0 on success, -ENOMEM otherwise
727 */
728int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
729 unsigned long start, unsigned long end, pgoff_t pgoff)
730{
731 struct vma_prepare vp;
732
733 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
734
735 if (vma->vm_start < start)
736 vma_iter_config(vmi, vma->vm_start, start);
737 else
738 vma_iter_config(vmi, end, vma->vm_end);
739
740 if (vma_iter_prealloc(vmi, NULL))
741 return -ENOMEM;
742
743 vma_start_write(vma);
744
745 init_vma_prep(&vp, vma);
746 vma_prepare(&vp);
747 vma_adjust_trans_huge(vma, start, end, 0);
748
749 vma_iter_clear(vmi);
750 vma_set_range(vma, start, end, pgoff);
751 vma_complete(&vp, vmi, vma->vm_mm);
752 return 0;
753}
754
755/*
756 * If the vma has a ->close operation then the driver probably needs to release
757 * per-vma resources, so we don't attempt to merge those if the caller indicates
758 * the current vma may be removed as part of the merge.
759 */
760static inline bool is_mergeable_vma(struct vm_area_struct *vma,
761 struct file *file, unsigned long vm_flags,
762 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
763 struct anon_vma_name *anon_name, bool may_remove_vma)
764{
765 /*
766 * VM_SOFTDIRTY should not prevent from VMA merging, if we
767 * match the flags but dirty bit -- the caller should mark
768 * merged VMA as dirty. If dirty bit won't be excluded from
769 * comparison, we increase pressure on the memory system forcing
770 * the kernel to generate new VMAs when old one could be
771 * extended instead.
772 */
773 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
774 return false;
775 if (vma->vm_file != file)
776 return false;
777 if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
778 return false;
779 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
780 return false;
781 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
782 return false;
783 return true;
784}
785
786static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
787 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
788{
789 /*
790 * The list_is_singular() test is to avoid merging VMA cloned from
791 * parents. This can improve scalability by reducing anon_vma lock contention.
792 */
793 if ((!anon_vma1 || !anon_vma2) && (!vma ||
794 list_is_singular(&vma->anon_vma_chain)))
795 return true;
796 return anon_vma1 == anon_vma2;
797}
798
799/*
800 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
801 * in front of (at a lower virtual address and file offset than) the vma.
802 *
803 * We cannot merge two vmas if they have differently assigned (non-NULL)
804 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
805 *
806 * We don't check here for the merged mmap wrapping around the end of pagecache
807 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
808 * wrap, nor mmaps which cover the final page at index -1UL.
809 *
810 * We assume the vma may be removed as part of the merge.
811 */
812static bool
813can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
814 struct anon_vma *anon_vma, struct file *file,
815 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
816 struct anon_vma_name *anon_name)
817{
818 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
819 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
820 if (vma->vm_pgoff == vm_pgoff)
821 return true;
822 }
823 return false;
824}
825
826/*
827 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
828 * beyond (at a higher virtual address and file offset than) the vma.
829 *
830 * We cannot merge two vmas if they have differently assigned (non-NULL)
831 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
832 *
833 * We assume that vma is not removed as part of the merge.
834 */
835static bool
836can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
837 struct anon_vma *anon_vma, struct file *file,
838 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
839 struct anon_vma_name *anon_name)
840{
841 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
842 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
843 pgoff_t vm_pglen;
844 vm_pglen = vma_pages(vma);
845 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
846 return true;
847 }
848 return false;
849}
850
851/*
852 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
853 * figure out whether that can be merged with its predecessor or its
854 * successor. Or both (it neatly fills a hole).
855 *
856 * In most cases - when called for mmap, brk or mremap - [addr,end) is
857 * certain not to be mapped by the time vma_merge is called; but when
858 * called for mprotect, it is certain to be already mapped (either at
859 * an offset within prev, or at the start of next), and the flags of
860 * this area are about to be changed to vm_flags - and the no-change
861 * case has already been eliminated.
862 *
863 * The following mprotect cases have to be considered, where **** is
864 * the area passed down from mprotect_fixup, never extending beyond one
865 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
866 * at the same address as **** and is of the same or larger span, and
867 * NNNN the next vma after ****:
868 *
869 * **** **** ****
870 * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC
871 * cannot merge might become might become
872 * PPNNNNNNNNNN PPPPPPPPPPCC
873 * mmap, brk or case 4 below case 5 below
874 * mremap move:
875 * **** ****
876 * PPPP NNNN PPPPCCCCNNNN
877 * might become might become
878 * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or
879 * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or
880 * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8
881 *
882 * It is important for case 8 that the vma CCCC overlapping the
883 * region **** is never going to be extended over NNNN. Instead NNNN must
884 * be extended in region **** and CCCC must be removed. This way in
885 * all cases where vma_merge succeeds, the moment vma_merge drops the
886 * rmap_locks, the properties of the merged vma will be already
887 * correct for the whole merged range. Some of those properties like
888 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
889 * be correct for the whole merged range immediately after the
890 * rmap_locks are released. Otherwise if NNNN would be removed and
891 * CCCC would be extended over the NNNN range, remove_migration_ptes
892 * or other rmap walkers (if working on addresses beyond the "end"
893 * parameter) may establish ptes with the wrong permissions of CCCC
894 * instead of the right permissions of NNNN.
895 *
896 * In the code below:
897 * PPPP is represented by *prev
898 * CCCC is represented by *curr or not represented at all (NULL)
899 * NNNN is represented by *next or not represented at all (NULL)
900 * **** is not represented - it will be merged and the vma containing the
901 * area is returned, or the function will return NULL
902 */
903static struct vm_area_struct
904*vma_merge(struct vma_iterator *vmi, struct vm_area_struct *prev,
905 struct vm_area_struct *src, unsigned long addr, unsigned long end,
906 unsigned long vm_flags, pgoff_t pgoff, struct mempolicy *policy,
907 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
908 struct anon_vma_name *anon_name)
909{
910 struct mm_struct *mm = src->vm_mm;
911 struct anon_vma *anon_vma = src->anon_vma;
912 struct file *file = src->vm_file;
913 struct vm_area_struct *curr, *next, *res;
914 struct vm_area_struct *vma, *adjust, *remove, *remove2;
915 struct vm_area_struct *anon_dup = NULL;
916 struct vma_prepare vp;
917 pgoff_t vma_pgoff;
918 int err = 0;
919 bool merge_prev = false;
920 bool merge_next = false;
921 bool vma_expanded = false;
922 unsigned long vma_start = addr;
923 unsigned long vma_end = end;
924 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
925 long adj_start = 0;
926
927 /*
928 * We later require that vma->vm_flags == vm_flags,
929 * so this tests vma->vm_flags & VM_SPECIAL, too.
930 */
931 if (vm_flags & VM_SPECIAL)
932 return NULL;
933
934 /* Does the input range span an existing VMA? (cases 5 - 8) */
935 curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
936
937 if (!curr || /* cases 1 - 4 */
938 end == curr->vm_end) /* cases 6 - 8, adjacent VMA */
939 next = vma_lookup(mm, end);
940 else
941 next = NULL; /* case 5 */
942
943 if (prev) {
944 vma_start = prev->vm_start;
945 vma_pgoff = prev->vm_pgoff;
946
947 /* Can we merge the predecessor? */
948 if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
949 && can_vma_merge_after(prev, vm_flags, anon_vma, file,
950 pgoff, vm_userfaultfd_ctx, anon_name)) {
951 merge_prev = true;
952 vma_prev(vmi);
953 }
954 }
955
956 /* Can we merge the successor? */
957 if (next && mpol_equal(policy, vma_policy(next)) &&
958 can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
959 vm_userfaultfd_ctx, anon_name)) {
960 merge_next = true;
961 }
962
963 /* Verify some invariants that must be enforced by the caller. */
964 VM_WARN_ON(prev && addr <= prev->vm_start);
965 VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
966 VM_WARN_ON(addr >= end);
967
968 if (!merge_prev && !merge_next)
969 return NULL; /* Not mergeable. */
970
971 if (merge_prev)
972 vma_start_write(prev);
973
974 res = vma = prev;
975 remove = remove2 = adjust = NULL;
976
977 /* Can we merge both the predecessor and the successor? */
978 if (merge_prev && merge_next &&
979 is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
980 vma_start_write(next);
981 remove = next; /* case 1 */
982 vma_end = next->vm_end;
983 err = dup_anon_vma(prev, next, &anon_dup);
984 if (curr) { /* case 6 */
985 vma_start_write(curr);
986 remove = curr;
987 remove2 = next;
988 /*
989 * Note that the dup_anon_vma below cannot overwrite err
990 * since the first caller would do nothing unless next
991 * has an anon_vma.
992 */
993 if (!next->anon_vma)
994 err = dup_anon_vma(prev, curr, &anon_dup);
995 }
996 } else if (merge_prev) { /* case 2 */
997 if (curr) {
998 vma_start_write(curr);
999 if (end == curr->vm_end) { /* case 7 */
1000 /*
1001 * can_vma_merge_after() assumed we would not be
1002 * removing prev vma, so it skipped the check
1003 * for vm_ops->close, but we are removing curr
1004 */
1005 if (curr->vm_ops && curr->vm_ops->close)
1006 err = -EINVAL;
1007 remove = curr;
1008 } else { /* case 5 */
1009 adjust = curr;
1010 adj_start = (end - curr->vm_start);
1011 }
1012 if (!err)
1013 err = dup_anon_vma(prev, curr, &anon_dup);
1014 }
1015 } else { /* merge_next */
1016 vma_start_write(next);
1017 res = next;
1018 if (prev && addr < prev->vm_end) { /* case 4 */
1019 vma_start_write(prev);
1020 vma_end = addr;
1021 adjust = next;
1022 adj_start = -(prev->vm_end - addr);
1023 err = dup_anon_vma(next, prev, &anon_dup);
1024 } else {
1025 /*
1026 * Note that cases 3 and 8 are the ONLY ones where prev
1027 * is permitted to be (but is not necessarily) NULL.
1028 */
1029 vma = next; /* case 3 */
1030 vma_start = addr;
1031 vma_end = next->vm_end;
1032 vma_pgoff = next->vm_pgoff - pglen;
1033 if (curr) { /* case 8 */
1034 vma_pgoff = curr->vm_pgoff;
1035 vma_start_write(curr);
1036 remove = curr;
1037 err = dup_anon_vma(next, curr, &anon_dup);
1038 }
1039 }
1040 }
1041
1042 /* Error in anon_vma clone. */
1043 if (err)
1044 goto anon_vma_fail;
1045
1046 if (vma_start < vma->vm_start || vma_end > vma->vm_end)
1047 vma_expanded = true;
1048
1049 if (vma_expanded) {
1050 vma_iter_config(vmi, vma_start, vma_end);
1051 } else {
1052 vma_iter_config(vmi, adjust->vm_start + adj_start,
1053 adjust->vm_end);
1054 }
1055
1056 if (vma_iter_prealloc(vmi, vma))
1057 goto prealloc_fail;
1058
1059 init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
1060 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
1061 vp.anon_vma != adjust->anon_vma);
1062
1063 vma_prepare(&vp);
1064 vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
1065 vma_set_range(vma, vma_start, vma_end, vma_pgoff);
1066
1067 if (vma_expanded)
1068 vma_iter_store(vmi, vma);
1069
1070 if (adj_start) {
1071 adjust->vm_start += adj_start;
1072 adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
1073 if (adj_start < 0) {
1074 WARN_ON(vma_expanded);
1075 vma_iter_store(vmi, next);
1076 }
1077 }
1078
1079 vma_complete(&vp, vmi, mm);
1080 khugepaged_enter_vma(res, vm_flags);
1081 return res;
1082
1083prealloc_fail:
1084 if (anon_dup)
1085 unlink_anon_vmas(anon_dup);
1086
1087anon_vma_fail:
1088 vma_iter_set(vmi, addr);
1089 vma_iter_load(vmi);
1090 return NULL;
1091}
1092
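/*
 * Standalone userspace demo (not kernel code): vma_merge() is why two
 * adjacent anonymous mappings with identical properties appear as a single
 * region in /proc/self/maps. A sketch, assuming Linux: reserve a two-page
 * window, map each page separately, then list the covering maps entries
 * (expect one line, not two).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long ps = sysconf(_SC_PAGESIZE);
        FILE *f;
        char line[256];
        char *base = mmap(NULL, 2 * ps, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (base == MAP_FAILED)
                return 1;
        munmap(base, 2 * ps);
        /* two separate mmap() calls with identical flags land adjacently */
        mmap(base, ps, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        mmap(base + ps, ps, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        f = fopen("/proc/self/maps", "r");
        while (fgets(line, sizeof(line), f)) {
                unsigned long s, e;

                if (sscanf(line, "%lx-%lx", &s, &e) == 2 &&
                    s < (unsigned long)(base + 2 * ps) && e > (unsigned long)base)
                        fputs(line, stdout);    /* the merged vma: one line */
        }
        fclose(f);
        return 0;
}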
1093/*
1094 * Rough compatibility check to quickly see if it's even worth looking
1095 * at sharing an anon_vma.
1096 *
1097 * They need to have the same vm_file, and the flags can only differ
1098 * in things that mprotect may change.
1099 *
1100 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1101 * we can merge the two vma's. For example, we refuse to merge a vma if
1102 * there is a vm_ops->close() function, because that indicates that the
1103 * driver is doing some kind of reference counting. But that doesn't
1104 * really matter for the anon_vma sharing case.
1105 */
1106static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1107{
1108 return a->vm_end == b->vm_start &&
1109 mpol_equal(vma_policy(a), vma_policy(b)) &&
1110 a->vm_file == b->vm_file &&
1111 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1112 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1113}
1114
1115/*
1116 * Do some basic sanity checking to see if we can re-use the anon_vma
1117 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1118 * the same as 'old', the other will be the new one that is trying
1119 * to share the anon_vma.
1120 *
1121 * NOTE! This runs with mmap_lock held for reading, so it is possible that
1122 * the anon_vma of 'old' is concurrently in the process of being set up
1123 * by another page fault trying to merge _that_. But that's ok: if it
1124 * is being set up, that automatically means that it will be a singleton
1125 * acceptable for merging, so we can do all of this optimistically. But
1126 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1127 *
1128 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1129 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1130 * is to return an anon_vma that is "complex" due to having gone through
1131 * a fork).
1132 *
1133 * We also make sure that the two vma's are compatible (adjacent,
1134 * and with the same memory policies). That's all stable, even with just
1135 * a read lock on the mmap_lock.
1136 */
1137static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1138{
1139 if (anon_vma_compatible(a, b)) {
1140 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1141
1142 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1143 return anon_vma;
1144 }
1145 return NULL;
1146}
1147
1148/*
1149 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1150 * neighbouring vmas for a suitable anon_vma, before it goes off
1151 * to allocate a new anon_vma. It checks because a repetitive
1152 * sequence of mprotects and faults may otherwise lead to distinct
1153 * anon_vmas being allocated, preventing vma merge in subsequent
1154 * mprotect.
1155 */
1156struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1157{
1158 struct anon_vma *anon_vma = NULL;
1159 struct vm_area_struct *prev, *next;
1160 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
1161
1162 /* Try next first. */
1163 next = vma_iter_load(&vmi);
1164 if (next) {
1165 anon_vma = reusable_anon_vma(next, vma, next);
1166 if (anon_vma)
1167 return anon_vma;
1168 }
1169
1170 prev = vma_prev(&vmi);
1171 VM_BUG_ON_VMA(prev != vma, vma);
1172 prev = vma_prev(&vmi);
1173 /* Try prev next. */
1174 if (prev)
1175 anon_vma = reusable_anon_vma(prev, prev, vma);
1176
1177 /*
1178 * We might reach here with anon_vma == NULL if we can't find
1179 * any reusable anon_vma.
1180 * There's no absolute need to look only at touching neighbours:
1181 * we could search further afield for "compatible" anon_vmas.
1182 * But it would probably just be a waste of time searching,
1183 * or lead to too many vmas hanging off the same anon_vma.
1184 * We're trying to allow mprotect remerging later on,
1185 * not trying to minimize memory used for anon_vmas.
1186 */
1187 return anon_vma;
1188}
1189
1190/*
1191 * If a hint addr is less than mmap_min_addr change hint to be as
1192 * low as possible but still greater than mmap_min_addr
1193 */
1194static inline unsigned long round_hint_to_min(unsigned long hint)
1195{
1196 hint &= PAGE_MASK;
1197 if (((void *)hint != NULL) &&
1198 (hint < mmap_min_addr))

--- 345 unchanged lines hidden ---

1544 if (offset_in_page(a.offset))
1545 return -EINVAL;
1546
1547 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1548 a.offset >> PAGE_SHIFT);
1549}
1550#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1551
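/*
 * Standalone userspace demo (not kernel code): the offset_in_page() check
 * above is visible from userspace, since a file offset that is not
 * page-aligned makes mmap() fail with EINVAL. Sketch assuming Linux with
 * memfd_create() (glibc 2.27 or later).
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long ps = sysconf(_SC_PAGESIZE);
        int fd = memfd_create("demo", 0);
        void *p;

        if (fd < 0 || ftruncate(fd, 2 * ps) < 0)
                return 1;
        /* offset 1 is not a multiple of the page size */
        p = mmap(NULL, ps, PROT_READ, MAP_PRIVATE, fd, 1);
        if (p == MAP_FAILED)
                printf("mmap(offset=1): %s\n", strerror(errno)); /* EINVAL */
        return 0;
}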
1552static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1553{
1554 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1555}
1556
1557static bool vma_is_shared_writable(struct vm_area_struct *vma)
1558{
1559 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1560 (VM_WRITE | VM_SHARED);
1561}
1562
1563static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1564{
1565 /* No managed pages to writeback. */
1566 if (vma->vm_flags & VM_PFNMAP)
1567 return false;
1568
1569 return vma->vm_file && vma->vm_file->f_mapping &&
1570 mapping_can_writeback(vma->vm_file->f_mapping);
1571}
1572
1573/*
1574 * Does this VMA require the underlying folios to have their dirty state
1575 * tracked?
1576 */
1577bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1578{
1579 /* Only shared, writable VMAs require dirty tracking. */
1580 if (!vma_is_shared_writable(vma))
1581 return false;
1582
1583 /* Does the filesystem need to be notified? */
1584 if (vm_ops_needs_writenotify(vma->vm_ops))
1585 return true;
1586
1587 /*
1588 * Even if the filesystem doesn't indicate a need for writenotify, if it
1589 * can writeback, dirty tracking is still required.
1590 */
1591 return vma_fs_can_writeback(vma);
1592}
1593
1594/*
1595 * Some shared mappings will want the pages marked read-only
1596 * to track write events. If so, we'll downgrade vm_page_prot
1597 * to the private version (using protection_map[] without the
1598 * VM_SHARED bit).
1599 */
1600bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1601{
1602 /* If it was private or non-writable, the write bit is already clear */
1603 if (!vma_is_shared_writable(vma))
1604 return false;
1605
1606 /* The backer wishes to know when pages are first written to? */
1607 if (vm_ops_needs_writenotify(vma->vm_ops))
1608 return true;
1609
1610 /* The open routine did something to the protections that pgprot_modify
1611 * won't preserve? */
1612 if (pgprot_val(vm_page_prot) !=
1613 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1614 return false;
1615
1616 /*
1617 * Do we need to track softdirty? hugetlb does not support softdirty
1618 * tracking yet.
1619 */
1620 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1621 return true;
1622
1623 /* Do we need write faults for uffd-wp tracking? */
1624 if (userfaultfd_wp(vma))
1625 return true;
1626
1627 /* Can the mapping track the dirty pages? */
1628 return vma_fs_can_writeback(vma);
1629}
1630
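/*
 * Standalone userspace sketch (not kernel code): the write-notify idea
 * above, i.e. keep pages mapped read-only so the first write faults, record
 * the dirty page, then allow writes, can be imitated with mprotect() and a
 * SIGSEGV handler. A single-page analogue, assuming Linux semantics:
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static char *tracked;
static long page_size;

static void first_write_handler(int sig, siginfo_t *si, void *uc)
{
        (void)sig; (void)uc;
        if ((char *)si->si_addr < tracked ||
            (char *)si->si_addr >= tracked + page_size)
                _exit(1);               /* a genuine crash, not our page */
        /* "writenotify": page is now known dirty; let the write proceed */
        write(1, "first write detected\n", 21);
        mprotect(tracked, page_size, PROT_READ | PROT_WRITE);
}

int main(void)
{
        struct sigaction sa = { 0 };

        sa.sa_sigaction = first_write_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &sa, NULL);

        page_size = sysconf(_SC_PAGESIZE);
        tracked = mmap(NULL, page_size, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        tracked[0] = 'x';               /* faults once, then succeeds */
        printf("page contents: %c\n", tracked[0]);
        return 0;
}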
1631/*
1632 * We account for memory if it's a private writeable mapping,
1633 * not hugepages and VM_NORESERVE wasn't set.
1634 */
1635static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
1636{
1637 /*
1638 * hugetlb has its own accounting separate from the core VM
1639 * VM_HUGETLB may not be set yet so we cannot check for that flag.

--- 748 unchanged lines hidden ---

2388 mmap_write_unlock(mm);
2389 return NULL;
2390
2391success:
2392 mmap_write_downgrade(mm);
2393 return vma;
2394}
2395
2396/*
2397 * Ok - we have the memory areas we should free on a maple tree so release them,
2398 * and do the vma updates.
2399 *
2400 * Called with the mm semaphore held.
2401 */
2402static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
2403{
2404 unsigned long nr_accounted = 0;
2405 struct vm_area_struct *vma;
2406
2407 /* Update high watermark before we lower total_vm */
2408 update_hiwater_vm(mm);
2409 mas_for_each(mas, vma, ULONG_MAX) {
2410 long nrpages = vma_pages(vma);
2411
2412 if (vma->vm_flags & VM_ACCOUNT)
2413 nr_accounted += nrpages;
2414 vm_stat_account(mm, vma->vm_flags, -nrpages);
2415 remove_vma(vma, false);
2416 }
2417 vm_unacct_memory(nr_accounted);
2418}
2419
2420/*
2421 * Get rid of page table information in the indicated region.
2422 *
2423 * Called with the mm semaphore held.
2424 */
2425static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
2426 struct vm_area_struct *vma, struct vm_area_struct *prev,
2427 struct vm_area_struct *next, unsigned long start,
2428 unsigned long end, unsigned long tree_end, bool mm_wr_locked)
2429{
2430 struct mmu_gather tlb;
2431 unsigned long mt_start = mas->index;
2432
2433 lru_add_drain();
2434 tlb_gather_mmu(&tlb, mm);
2435 update_hiwater_rss(mm);
2436 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
2437 mas_set(mas, mt_start);
2438 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2439 next ? next->vm_start : USER_PGTABLES_CEILING,
2440 mm_wr_locked);
2441 tlb_finish_mmu(&tlb);
2442}
2443
2444/*
2445 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
2446 * has already been checked or doesn't make sense to fail.
2447 * VMA Iterator will point to the end VMA.
2448 */
2449static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2450 unsigned long addr, int new_below)
2451{
2452 struct vma_prepare vp;
2453 struct vm_area_struct *new;
2454 int err;
2455
2456 WARN_ON(vma->vm_start >= addr);
2457 WARN_ON(vma->vm_end <= addr);
2458
2459 if (vma->vm_ops && vma->vm_ops->may_split) {
2460 err = vma->vm_ops->may_split(vma, addr);
2461 if (err)
2462 return err;
2463 }
2464
2465 new = vm_area_dup(vma);
2466 if (!new)
2467 return -ENOMEM;
2468
2469 if (new_below) {
2470 new->vm_end = addr;
2471 } else {
2472 new->vm_start = addr;
2473 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2474 }
2475
2476 err = -ENOMEM;
2477 vma_iter_config(vmi, new->vm_start, new->vm_end);
2478 if (vma_iter_prealloc(vmi, new))
2479 goto out_free_vma;
2480
2481 err = vma_dup_policy(vma, new);
2482 if (err)
2483 goto out_free_vmi;
2484
2485 err = anon_vma_clone(new, vma);
2486 if (err)
2487 goto out_free_mpol;
2488
2489 if (new->vm_file)
2490 get_file(new->vm_file);
2491
2492 if (new->vm_ops && new->vm_ops->open)
2493 new->vm_ops->open(new);
2494
2495 vma_start_write(vma);
2496 vma_start_write(new);
2497
2498 init_vma_prep(&vp, vma);
2499 vp.insert = new;
2500 vma_prepare(&vp);
2501 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
2502
2503 if (new_below) {
2504 vma->vm_start = addr;
2505 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
2506 } else {
2507 vma->vm_end = addr;
2508 }
2509
2510 /* vma_complete stores the new vma */
2511 vma_complete(&vp, vmi, vma->vm_mm);
2512
2513 /* Success. */
2514 if (new_below)
2515 vma_next(vmi);
2516 return 0;
2517
2518out_free_mpol:
2519 mpol_put(vma_policy(new));
2520out_free_vmi:
2521 vma_iter_free(vmi);
2522out_free_vma:
2523 vm_area_free(new);
2524 return err;
2525}
2526
2527/*
2528 * Split a vma into two pieces at address 'addr', a new vma is allocated
2529 * either for the first part or the tail.
2530 */
2531static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2532 unsigned long addr, int new_below)
2533{
2534 if (vma->vm_mm->map_count >= sysctl_max_map_count)
2535 return -ENOMEM;
2536
2537 return __split_vma(vmi, vma, addr, new_below);
2538}
2539
2540/*
2541 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
2542 * context and anonymous VMA name within the range [start, end).
2543 *
2544 * As a result, we might be able to merge the newly modified VMA range with an
2545 * adjacent VMA with identical properties.
2546 *
2547 * If no merge is possible and the range does not span the entirety of the VMA,
2548 * we then need to split the VMA to accommodate the change.
2549 *
2550 * The function returns either the merged VMA, the original VMA if a split was
2551 * required instead, or an error if the split failed.
2552 */
2553struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
2554 struct vm_area_struct *prev,
2555 struct vm_area_struct *vma,
2556 unsigned long start, unsigned long end,
2557 unsigned long vm_flags,
2558 struct mempolicy *policy,
2559 struct vm_userfaultfd_ctx uffd_ctx,
2560 struct anon_vma_name *anon_name)
2561{
2562 pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
2563 struct vm_area_struct *merged;
2564
2565 merged = vma_merge(vmi, prev, vma, start, end, vm_flags,
2566 pgoff, policy, uffd_ctx, anon_name);
2567 if (merged)
2568 return merged;
2569
2570 if (vma->vm_start < start) {
2571 int err = split_vma(vmi, vma, start, 1);
2572
2573 if (err)
2574 return ERR_PTR(err);
2575 }
2576
2577 if (vma->vm_end > end) {
2578 int err = split_vma(vmi, vma, end, 0);
2579
2580 if (err)
2581 return ERR_PTR(err);
2582 }
2583
2584 return vma;
2585}
2586
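/*
 * Standalone userspace demo (not kernel code): vma_modify() in action.
 * mprotect() on the middle of a mapping splits one VMA into three, and
 * undoing the change lets vma_merge() fold them back into one. Counts
 * assume the fresh mapping did not itself merge with a neighbour.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static int count_regions(char *lo, char *hi)
{
        FILE *f = fopen("/proc/self/maps", "r");
        char line[256];
        int n = 0;

        while (fgets(line, sizeof(line), f)) {
                unsigned long s, e;

                if (sscanf(line, "%lx-%lx", &s, &e) == 2 &&
                    s < (unsigned long)hi && e > (unsigned long)lo)
                        n++;
        }
        fclose(f);
        return n;
}

int main(void)
{
        long ps = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * ps, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        printf("after mmap:            %d region(s)\n",
               count_regions(p, p + 3 * ps));
        mprotect(p + ps, ps, PROT_READ);        /* split: prev/changed/next */
        printf("after mprotect middle: %d region(s)\n",
               count_regions(p, p + 3 * ps));
        mprotect(p + ps, ps, PROT_READ | PROT_WRITE);   /* remerge */
        printf("after restoring prot:  %d region(s)\n",
               count_regions(p, p + 3 * ps));
        return 0;
}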
2587/*
2588 * Attempt to merge a newly mapped VMA with those adjacent to it. The caller
2589 * must ensure that [start, end) does not overlap any existing VMA.
2590 */
2591static struct vm_area_struct
2592*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
2593 struct vm_area_struct *vma, unsigned long start,
2594 unsigned long end, pgoff_t pgoff)
2595{
2596 return vma_merge(vmi, prev, vma, start, end, vma->vm_flags, pgoff,
2597 vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma));
2598}
2599
2600/*
2601 * Expand vma by delta bytes, potentially merging with an immediately adjacent
2602 * VMA with identical properties.
2603 */
2604struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
2605 struct vm_area_struct *vma,
2606 unsigned long delta)
2607{
2608 pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma);
2609
2610 /* vma is specified as prev, so case 1 or 2 will apply. */
2611 return vma_merge(vmi, vma, vma, vma->vm_end, vma->vm_end + delta,
2612 vma->vm_flags, pgoff, vma_policy(vma),
2613 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
2614}
2615
2616/*
2617 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
2618 * @vmi: The vma iterator
2619 * @vma: The starting vm_area_struct
2620 * @mm: The mm_struct
2621 * @start: The aligned start address to munmap.
2622 * @end: The aligned end address to munmap.
2623 * @uf: The userfaultfd list_head
2624 * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
2625 * success.
2626 *
2627 * Return: 0 on success and drops the lock if so directed, error and leaves the
2628 * lock held otherwise.
2629 */
2630static int
2631do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
2632 struct mm_struct *mm, unsigned long start,
2633 unsigned long end, struct list_head *uf, bool unlock)
2634{
2635 struct vm_area_struct *prev, *next = NULL;
2636 struct maple_tree mt_detach;
2637 int count = 0;
2638 int error = -ENOMEM;
2639 unsigned long locked_vm = 0;
2640 MA_STATE(mas_detach, &mt_detach, 0, 0);
2641 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2642 mt_on_stack(mt_detach);
2643
2644 /*
2645 * If we need to split any vma, do it now to save pain later.
2646 *
2647 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2648 * unmapped vm_area_struct will remain in use: so lower split_vma
2649 * places tmp vma above, and higher split_vma places tmp vma below.
2650 */
2651
2652 /* Does it split the first one? */
2653 if (start > vma->vm_start) {
2654
2655 /*
2656 * Make sure that map_count on return from munmap() will
2657 * not exceed its limit; but let map_count go just above
2658 * its limit temporarily, to help free resources as expected.
2659 */
2660 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2661 goto map_count_exceeded;
2662
2663 error = __split_vma(vmi, vma, start, 1);
2664 if (error)
2665 goto start_split_failed;
2666 }
2667
2668 /*
2669 * Detach a range of VMAs from the mm. Using next as a temp variable as
2670 * it is always overwritten.
2671 */
2672 next = vma;
2673 do {
2674 /* Does it split the end? */
2675 if (next->vm_end > end) {
2676 error = __split_vma(vmi, next, end, 0);
2677 if (error)
2678 goto end_split_failed;
2679 }
2680 vma_start_write(next);
2681 mas_set(&mas_detach, count);
2682 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
2683 if (error)
2684 goto munmap_gather_failed;
2685 vma_mark_detached(next, true);
2686 if (next->vm_flags & VM_LOCKED)
2687 locked_vm += vma_pages(next);
2688
2689 count++;
2690 if (unlikely(uf)) {
2691 /*
2692 * If userfaultfd_unmap_prep returns an error the vmas
2693 * will remain split, but userland will get a
2694 * highly unexpected error anyway. This is no
2695 * different than the case where the first of the two
2696 * __split_vma fails, but we don't undo the first
2697 * split, despite we could. This is unlikely enough
2698 * failure that it's not worth optimizing it for.
2699 */
2700 error = userfaultfd_unmap_prep(next, start, end, uf);
2701
2702 if (error)
2703 goto userfaultfd_error;
2704 }
2705#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
2706 BUG_ON(next->vm_start < start);
2707 BUG_ON(next->vm_start > end);
2708#endif
2709 } for_each_vma_range(*vmi, next, end);
2710
2711#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
2712 /* Make sure no VMAs are about to be lost. */
2713 {
2714 MA_STATE(test, &mt_detach, 0, 0);
2715 struct vm_area_struct *vma_mas, *vma_test;
2716 int test_count = 0;
2717
2718 vma_iter_set(vmi, start);
2719 rcu_read_lock();
2720 vma_test = mas_find(&test, count - 1);
2721 for_each_vma_range(*vmi, vma_mas, end) {
2722 BUG_ON(vma_mas != vma_test);
2723 test_count++;
2724 vma_test = mas_next(&test, count - 1);
2725 }
2726 rcu_read_unlock();
2727 BUG_ON(count != test_count);
2728 }
2729#endif
2730
2731 while (vma_iter_addr(vmi) > start)
2732 vma_iter_prev_range(vmi);
2733
2734 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
2735 if (error)
2736 goto clear_tree_failed;
2737
2738 /* Point of no return */
2739 mm->locked_vm -= locked_vm;
2740 mm->map_count -= count;
2741 if (unlock)
2742 mmap_write_downgrade(mm);
2743
2744 prev = vma_iter_prev_range(vmi);
2745 next = vma_next(vmi);
2746 if (next)
2747 vma_iter_prev_range(vmi);
2748
2749 /*
2750 * We can free page tables without write-locking mmap_lock because VMAs
2751 * were isolated before we downgraded mmap_lock.
2752 */
2753 mas_set(&mas_detach, 1);
2754 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
2755 !unlock);
2756 /* Statistics and freeing VMAs */
2757 mas_set(&mas_detach, 0);
2758 remove_mt(mm, &mas_detach);
2759 validate_mm(mm);
2760 if (unlock)
2761 mmap_read_unlock(mm);
2762
2763 __mt_destroy(&mt_detach);
2764 return 0;
2765
2766clear_tree_failed:
2767userfaultfd_error:
2768munmap_gather_failed:
2769end_split_failed:
2770 mas_set(&mas_detach, 0);
2771 mas_for_each(&mas_detach, next, end)
2772 vma_mark_detached(next, false);
2773
2774 __mt_destroy(&mt_detach);
2775start_split_failed:
2776map_count_exceeded:
2777 validate_mm(mm);
2778 return error;
2779}
2780
2781/*
2782 * do_vmi_munmap() - munmap a given range.
2783 * @vmi: The vma iterator
2784 * @mm: The mm_struct
2785 * @start: The start address to munmap
2786 * @len: The length of the range to munmap
2787 * @uf: The userfaultfd list_head
2788 * @unlock: set to true if the user wants to drop the mmap_lock on success
2789 *
2790 * This function takes a @vmi that is either pointing to the previous VMA or set
2791 * to MA_START and sets it up to remove the mapping(s). The @len will be
2792 * aligned and any arch_unmap work will be performed.
2793 *
2794 * Return: 0 on success and drops the lock if so directed, error and leaves the
2795 * lock held otherwise.
2796 */
2797int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
2798 unsigned long start, size_t len, struct list_head *uf,
2799 bool unlock)
2800{
2801 unsigned long end;
2802 struct vm_area_struct *vma;
2803
2804 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2805 return -EINVAL;
2806
2807 end = start + PAGE_ALIGN(len);
2808 if (end == start)
2809 return -EINVAL;
2810
2811 /*
2812 * Check if memory is sealed before arch_unmap.
2813 * Prevent unmapping a sealed VMA.
2814 * can_modify_mm assumes we have acquired the lock on MM.
2815 */
2816 if (unlikely(!can_modify_mm(mm, start, end)))
2817 return -EPERM;
2818
2819 /* arch_unmap() might do unmaps itself. */
2820 arch_unmap(mm, start, end);
2821
2822 /* Find the first overlapping VMA */
2823 vma = vma_find(vmi, end);
2824 if (!vma) {
2825 if (unlock)
2826 mmap_write_unlock(mm);
2827 return 0;
2828 }
2829
2830 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
2831}
2832
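/*
 * Standalone userspace demo (not kernel code): unmapping the middle of a
 * mapping exercises the __split_vma() path above and leaves two VMAs
 * behind. Assuming Linux; count the /proc/self/maps entries covering the
 * window before and after.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static int count_regions(char *lo, char *hi)
{
        FILE *f = fopen("/proc/self/maps", "r");
        char line[256];
        int n = 0;

        while (fgets(line, sizeof(line), f)) {
                unsigned long s, e;

                if (sscanf(line, "%lx-%lx", &s, &e) == 2 &&
                    s < (unsigned long)hi && e > (unsigned long)lo)
                        n++;
        }
        fclose(f);
        return n;
}

int main(void)
{
        long ps = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * ps, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        printf("before munmap: %d region(s)\n", count_regions(p, p + 3 * ps));
        munmap(p + ps, ps);             /* punch a hole in the middle */
        printf("after munmap:  %d region(s)\n", count_regions(p, p + 3 * ps));
        return 0;
}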
2833/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
2834 * @mm: The mm_struct
2835 * @start: The start address to munmap
2836 * @len: The length to be munmapped.
2837 * @uf: The userfaultfd list_head
2838 *
2839 * Return: 0 on success, error otherwise.
2840 */

--- 645 unchanged lines hidden ---

3486 vm_unacct_memory(charged);
3487 return -ENOMEM;
3488 }
3489
3490 return 0;
3491}
3492
3493/*
3494 * Copy the vma structure to a new location in the same mm,
3495 * prior to moving page table entries, to effect an mremap move.
3496 */
3497struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3498 unsigned long addr, unsigned long len, pgoff_t pgoff,
3499 bool *need_rmap_locks)
3500{
3501 struct vm_area_struct *vma = *vmap;
3502 unsigned long vma_start = vma->vm_start;
3503 struct mm_struct *mm = vma->vm_mm;
3504 struct vm_area_struct *new_vma, *prev;
3505 bool faulted_in_anon_vma = true;
3506 VMA_ITERATOR(vmi, mm, addr);
3507
3508 /*
3509 * If anonymous vma has not yet been faulted, update new pgoff
3510 * to match new location, to increase its chance of merging.
3511 */
3512 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3513 pgoff = addr >> PAGE_SHIFT;
3514 faulted_in_anon_vma = false;
3515 }
3516
3517 new_vma = find_vma_prev(mm, addr, &prev);
3518 if (new_vma && new_vma->vm_start < addr + len)
3519 return NULL; /* should never get here */
3520
3521 new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff);
3522 if (new_vma) {
3523 /*
3524 * Source vma may have been merged into new_vma
3525 */
3526 if (unlikely(vma_start >= new_vma->vm_start &&
3527 vma_start < new_vma->vm_end)) {
3528 /*
3529 * The only way we can get a vma_merge with
3530 * self during an mremap is if the vma hasn't
3531 * been faulted in yet and we were allowed to
3532 * reset the dst vma->vm_pgoff to the
3533 * destination address of the mremap to allow
3534 * the merge to happen. mremap must change the
3535 * vm_pgoff linearity between src and dst vmas
3536 * (in turn preventing a vma_merge) to be
3537 * safe. It is only safe to keep the vm_pgoff
3538 * linear if there are no pages mapped yet.
3539 */
3540 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3541 *vmap = vma = new_vma;
3542 }
3543 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3544 } else {
3545 new_vma = vm_area_dup(vma);
3546 if (!new_vma)
3547 goto out;
3548 vma_set_range(new_vma, addr, addr + len, pgoff);
3549 if (vma_dup_policy(vma, new_vma))
3550 goto out_free_vma;
3551 if (anon_vma_clone(new_vma, vma))
3552 goto out_free_mempol;
3553 if (new_vma->vm_file)
3554 get_file(new_vma->vm_file);
3555 if (new_vma->vm_ops && new_vma->vm_ops->open)
3556 new_vma->vm_ops->open(new_vma);
3557 if (vma_link(mm, new_vma))
3558 goto out_vma_link;
3559 *need_rmap_locks = false;
3560 }
3561 return new_vma;
3562
3563out_vma_link:
3564 if (new_vma->vm_ops && new_vma->vm_ops->close)
3565 new_vma->vm_ops->close(new_vma);
3566
3567 if (new_vma->vm_file)
3568 fput(new_vma->vm_file);
3569
3570 unlink_anon_vmas(new_vma);
3571out_free_mempol:
3572 mpol_put(vma_policy(new_vma));
3573out_free_vma:
3574 vm_area_free(new_vma);
3575out:
3576 return NULL;
3577}
3578
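/*
 * Standalone userspace demo (not kernel code): copy_vma() above runs under
 * mremap() when a mapping has to move. A sketch assuming Linux; the grow
 * may or may not force a move, and the contents follow either way.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long ps = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, ps, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        char *q;

        strcpy(p, "survives the move");
        q = mremap(p, ps, 4 * ps, MREMAP_MAYMOVE);
        if (q == MAP_FAILED) {
                perror("mremap");
                return 1;
        }
        printf("%p -> %p: \"%s\"\n", (void *)p, (void *)q, q);
        return 0;
}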
3579/*
3580 * Return true if the calling process may expand its vm space by the passed
3581 * number of pages
3582 */
3583bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3584{
3585 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3586 return false;
3587

--- 180 unchanged lines hidden ---

3768{
3769 struct vm_area_struct *vma = __install_special_mapping(
3770 mm, addr, len, vm_flags, (void *)pages,
3771 &legacy_special_mapping_vmops);
3772
3773 return PTR_ERR_OR_ZERO(vma);
3774}
3775
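/*
 * Standalone userspace demo (not kernel code): may_expand_vm() above is
 * where RLIMIT_AS bites. Cap the address space and watch a large mmap()
 * fail with ENOMEM; assumes current usage is well below the 256 MiB cap.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl = { 256UL << 20, 256UL << 20 };
        void *p;

        setrlimit(RLIMIT_AS, &rl);
        p = mmap(NULL, 1UL << 30, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                printf("1 GiB mmap: %s\n", strerror(errno)); /* ENOMEM */
        return 0;
}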
3776static DEFINE_MUTEX(mm_all_locks_mutex);
3777
3778static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3779{
3780 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3781 /*
3782 * The LSB of head.next can't change from under us
3783 * because we hold the mm_all_locks_mutex.
3784 */
3785 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
3786 /*
3787 * We can safely modify head.next after taking the
3788 * anon_vma->root->rwsem. If some other vma in this mm shares
3789 * the same anon_vma we won't take it again.
3790 *
3791 * No need of atomic instructions here, head.next
3792 * can't change from under us thanks to the
3793 * anon_vma->root->rwsem.
3794 */
3795 if (__test_and_set_bit(0, (unsigned long *)
3796 &anon_vma->root->rb_root.rb_root.rb_node))
3797 BUG();
3798 }
3799}
3800
3801static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3802{
3803 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3804 /*
3805 * AS_MM_ALL_LOCKS can't change from under us because
3806 * we hold the mm_all_locks_mutex.
3807 *
3808 * Operations on ->flags have to be atomic because
3809 * even if AS_MM_ALL_LOCKS is stable thanks to the
3810 * mm_all_locks_mutex, there may be other cpus
3811 * changing other bitflags in parallel to us.
3812 */
3813 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3814 BUG();
3815 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
3816 }
3817}
3818
3819/*
3820 * This operation locks against the VM for all pte/vma/mm related
3821 * operations that could ever happen on a certain mm. This includes
3822 * vmtruncate, try_to_unmap, and all page faults.
3823 *
3824 * The caller must take the mmap_lock in write mode before calling
3825 * mm_take_all_locks(). The caller isn't allowed to release the
3826 * mmap_lock until mm_drop_all_locks() returns.
3827 *
3828 * mmap_lock in write mode is required in order to block all operations
3829 * that could modify pagetables and free pages without need of
3830 * altering the vma layout. It's also needed in write mode to avoid new
3831 * anon_vmas to be associated with existing vmas.
3832 *
3833 * A single task can't take more than one mm_take_all_locks() in a row
3834 * or it would deadlock.
3835 *
3836 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3837 * mapping->flags avoid taking the same lock twice, if more than one
3838 * vma in this mm is backed by the same anon_vma or address_space.
3839 *
3840 * We take locks in the following order, according to the comment at the beginning
3841 * of mm/rmap.c:
3842 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3843 * hugetlb mapping);
3844 * - all vmas marked locked
3845 * - all i_mmap_rwsem locks;
3846 * - all anon_vma->rwsem locks
3847 *
3848 * We can take all locks within these types randomly because the VM code
3849 * doesn't nest them and we protected from parallel mm_take_all_locks() by
3850 * mm_all_locks_mutex.
3851 *
3852 * mm_take_all_locks() and mm_drop_all_locks are expensive operations
3853 * that may have to take thousands of locks.
3854 *
3855 * mm_take_all_locks() can fail if it's interrupted by signals.
3856 */
3857int mm_take_all_locks(struct mm_struct *mm)
3858{
3859 struct vm_area_struct *vma;
3860 struct anon_vma_chain *avc;
3861 VMA_ITERATOR(vmi, mm, 0);
3862
3863 mmap_assert_write_locked(mm);
3864
3865 mutex_lock(&mm_all_locks_mutex);
3866
3867 /*
3868 * vma_start_write() does not have a complement in mm_drop_all_locks()
3869 * because vma_start_write() is always asymmetrical; it marks a VMA as
3870 * being written to until mmap_write_unlock() or mmap_write_downgrade()
3871 * is reached.
3872 */
3873 for_each_vma(vmi, vma) {
3874 if (signal_pending(current))
3875 goto out_unlock;
3876 vma_start_write(vma);
3877 }
3878
3879 vma_iter_init(&vmi, mm, 0);
3880 for_each_vma(vmi, vma) {
3881 if (signal_pending(current))
3882 goto out_unlock;
3883 if (vma->vm_file && vma->vm_file->f_mapping &&
3884 is_vm_hugetlb_page(vma))
3885 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3886 }
3887
3888 vma_iter_init(&vmi, mm, 0);
3889 for_each_vma(vmi, vma) {
3890 if (signal_pending(current))
3891 goto out_unlock;
3892 if (vma->vm_file && vma->vm_file->f_mapping &&
3893 !is_vm_hugetlb_page(vma))
3894 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3895 }
3896
3897 vma_iter_init(&vmi, mm, 0);
3898 for_each_vma(vmi, vma) {
3899 if (signal_pending(current))
3900 goto out_unlock;
3901 if (vma->anon_vma)
3902 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3903 vm_lock_anon_vma(mm, avc->anon_vma);
3904 }
3905
3906 return 0;
3907
3908out_unlock:
3909 mm_drop_all_locks(mm);
3910 return -EINTR;
3911}
3912
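/*
 * Standalone userspace sketch (not kernel code): the pattern used by
 * vm_lock_anon_vma()/vm_lock_mapping() above, i.e. mark each shared lock
 * while holding a global mutex so it is taken exactly once even when many
 * vmas reference it. Modelled with pthreads and a plain flag instead of a
 * pointer LSB; build with cc -pthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t all_locks_mutex = PTHREAD_MUTEX_INITIALIZER;

struct mapping_like {
        pthread_rwlock_t rwsem;
        bool marked;            /* analogue of AS_MM_ALL_LOCKS / the LSB */
};

struct vma_like { struct mapping_like *mapping; };

static void lock_mapping_once(struct mapping_like *m)
{
        if (!m->marked) {       /* stable: we hold all_locks_mutex */
                m->marked = true;
                pthread_rwlock_wrlock(&m->rwsem);
        }
}

static void take_all_locks(struct vma_like *v, int n)
{
        int i;

        pthread_mutex_lock(&all_locks_mutex);
        for (i = 0; i < n; i++)
                lock_mapping_once(v[i].mapping);
}

static void drop_all_locks(struct vma_like *v, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (v[i].mapping->marked) {     /* clear, then unlock */
                        v[i].mapping->marked = false;
                        pthread_rwlock_unlock(&v[i].mapping->rwsem);
                }
        }
        pthread_mutex_unlock(&all_locks_mutex);
}

int main(void)
{
        struct mapping_like shared = { PTHREAD_RWLOCK_INITIALIZER, false };
        struct vma_like vmas[] = { { &shared }, { &shared } };

        take_all_locks(vmas, 2);        /* rwsem taken once, not twice */
        drop_all_locks(vmas, 2);
        puts("each shared mapping locked exactly once");
        return 0;
}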
3913static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3914{
3915 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3916 /*
3917 * The LSB of head.next can't change to 0 from under
3918 * us because we hold the mm_all_locks_mutex.
3919 *
3920 * We must however clear the bitflag before unlocking
3921 * the vma so the users using the anon_vma->rb_root will
3922 * never see our bitflag.
3923 *
3924 * No need of atomic instructions here, head.next
3925 * can't change from under us until we release the
3926 * anon_vma->root->rwsem.
3927 */
3928 if (!__test_and_clear_bit(0, (unsigned long *)
3929 &anon_vma->root->rb_root.rb_root.rb_node))
3930 BUG();
3931 anon_vma_unlock_write(anon_vma);
3932 }
3933}
3934
3935static void vm_unlock_mapping(struct address_space *mapping)
3936{
3937 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3938 /*
3939 * AS_MM_ALL_LOCKS can't change to 0 from under us
3940 * because we hold the mm_all_locks_mutex.
3941 */
3942 i_mmap_unlock_write(mapping);
3943 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3944 &mapping->flags))
3945 BUG();
3946 }
3947}
3948
3949/*
3950 * The mmap_lock cannot be released by the caller until
3951 * mm_drop_all_locks() returns.
3952 */
3953void mm_drop_all_locks(struct mm_struct *mm)
3954{
3955 struct vm_area_struct *vma;
3956 struct anon_vma_chain *avc;
3957 VMA_ITERATOR(vmi, mm, 0);
3958
3959 mmap_assert_write_locked(mm);
3960 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3961
3962 for_each_vma(vmi, vma) {
3963 if (vma->anon_vma)
3964 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3965 vm_unlock_anon_vma(avc->anon_vma);
3966 if (vma->vm_file && vma->vm_file->f_mapping)
3967 vm_unlock_mapping(vma->vm_file->f_mapping);
3968 }
3969
3970 mutex_unlock(&mm_all_locks_mutex);
3971}
3972
3973/*
3974 * initialise the percpu counter for VM
3975 */
3976void __init mmap_init(void)
3977{
3978 int ret;
3979
3980 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3981 VM_BUG_ON(ret);

--- 190 unchanged lines hidden ---