vma.c: cacded5e42b9609b07b22d80c10f0076d439f7d1 (old) → 25d3925fa51ddeec9d5a2c9b89140f5218ec3ef4 (new)
// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

--- 90 unchanged lines hidden ---

 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmaps which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
-bool
-can_vma_merge_before(struct vma_merge_struct *vmg)
+static bool can_vma_merge_before(struct vma_merge_struct *vmg)
{
        pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

        if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
            is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
                if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
                        return true;
        }

--- 5 unchanged lines hidden ---

 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
-bool can_vma_merge_after(struct vma_merge_struct *vmg)
+static bool can_vma_merge_after(struct vma_merge_struct *vmg)
{
        if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
            is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
                if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
                        return true;
        }
        return false;
}

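/*
 * Worked example (editorial, assuming 4KiB pages): if vmg->prev maps
 * [0x1000, 0x3000) with vm_pgoff 0x10, it spans two pages, so a new
 * range can only merge after it when vmg->start == 0x3000 and
 * vmg->pgoff == 0x12; any other pgoff would leave a hole or an overlap
 * in file-offset space.
 */
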
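/*
 * Descriptive comment (added editorially): link @vma into @mapping's
 * i_mmap interval tree so reverse-map walks can find it. The caller
 * must hold @mapping->i_mmap_rwsem for writing, as vma_prepare()
 * below does before calling this.
 */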
static void __vma_link_file(struct vm_area_struct *vma,
                            struct address_space *mapping)
{
        if (vma_is_shared_maywrite(vma))
                mapping_allow_writable(mapping);

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_insert(vma, &mapping->i_mmap);
        flush_dcache_mmap_unlock(mapping);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                                      struct address_space *mapping)
{
        if (vma_is_shared_maywrite(vma))
                mapping_unmap_writable(mapping);

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_remove(vma, &mapping->i_mmap);
        flush_dcache_mmap_unlock(mapping);
}

/*
 * vma_prepare() - Helper function for locking VMAs prior to altering them.
 * @vp: The initialized vma_prepare struct
 */
-void vma_prepare(struct vma_prepare *vp)
+static void vma_prepare(struct vma_prepare *vp)
{
        if (vp->file) {
                uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

                if (vp->adj_next)
                        uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
                                      vp->adj_next->vm_end);

                i_mmap_lock_write(vp->mapping);
                if (vp->insert && vp->insert->vm_file) {
                        /*
                         * Put into interval tree now, so instantiated pages
                         * are visible to arm/parisc __flush_dcache_page
                         * throughout; but we cannot insert into address
                         * space until vma start or end is updated.
                         */
                        __vma_link_file(vp->insert,
                                        vp->insert->vm_file->f_mapping);
                }
        }

        if (vp->anon_vma) {
                anon_vma_lock_write(vp->anon_vma);
                anon_vma_interval_tree_pre_update_vma(vp->vma);
                if (vp->adj_next)
                        anon_vma_interval_tree_pre_update_vma(vp->adj_next);
        }

        if (vp->file) {
                flush_dcache_mmap_lock(vp->mapping);
                vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
                if (vp->adj_next)
                        vma_interval_tree_remove(vp->adj_next,
                                                 &vp->mapping->i_mmap);
        }
}
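
/*
 * Note the ordering above: when a VMA has both file and anon rmap, the
 * file's i_mmap_rwsem is taken before the anon_vma lock; vma_complete()
 * below drops them in the reverse order.
 */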

/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
-void vma_complete(struct vma_prepare *vp,
-                 struct vma_iterator *vmi, struct mm_struct *mm)
+static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
+                        struct mm_struct *mm)
{
        if (vp->file) {
                if (vp->adj_next)
                        vma_interval_tree_insert(vp->adj_next,
                                                 &vp->mapping->i_mmap);
                vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
                flush_dcache_mmap_unlock(vp->mapping);
        }

        if (vp->remove && vp->file) {
                __remove_shared_vm_struct(vp->remove, vp->mapping);
                if (vp->remove2)
                        __remove_shared_vm_struct(vp->remove2, vp->mapping);
        } else if (vp->insert) {
                /*
                 * split_vma has split insert from vma, and needs
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
                vma_iter_store(vmi, vp->insert);
                mm->map_count++;
        }

        if (vp->anon_vma) {
                anon_vma_interval_tree_post_update_vma(vp->vma);
                if (vp->adj_next)
                        anon_vma_interval_tree_post_update_vma(vp->adj_next);
                anon_vma_unlock_write(vp->anon_vma);
        }

        if (vp->file) {
                i_mmap_unlock_write(vp->mapping);
                uprobe_mmap(vp->vma);

                if (vp->adj_next)
                        uprobe_mmap(vp->adj_next);
        }

        if (vp->remove) {
again:
                vma_mark_detached(vp->remove, true);
                if (vp->file) {
                        uprobe_munmap(vp->remove, vp->remove->vm_start,
                                      vp->remove->vm_end);
                        fput(vp->file);
                }
                if (vp->remove->anon_vma)
                        anon_vma_merge(vp->vma, vp->remove);
                mm->map_count--;
                mpol_put(vma_policy(vp->remove));
                if (!vp->remove2)
                        WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
                vm_area_free(vp->remove);

                /*
                 * In mprotect's case 6 (see comments on vma_merge),
                 * we are removing both mid and next vmas
                 */
                if (vp->remove2) {
                        vp->remove = vp->remove2;
                        vp->remove2 = NULL;
                        goto again;
                }
        }
        if (vp->insert && vp->file)
                uprobe_mmap(vp->insert);
}

/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
-void init_vma_prep(struct vma_prepare *vp,
-                  struct vm_area_struct *vma)
+static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
{
        init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}
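
/*
 * Editorial sketch, not part of either revision: how the helpers above
 * are typically bracketed by a caller that resizes a VMA in place. The
 * function name is hypothetical; the sequence mirrors the
 * vma_shrink()-style tail visible further below.
 */
static int example_resize_vma(struct vma_iterator *vmi,
                              struct vm_area_struct *vma,
                              unsigned long start, unsigned long end,
                              pgoff_t pgoff)
{
        struct vma_prepare vp;

        init_vma_prep(&vp, vma);        /* track only @vma */
        vma_prepare(&vp);               /* lock rmaps, unhook @vma from trees */
        vma_iter_clear(vmi);            /* drop the old span from the maple tree */
        vma_set_range(vma, start, end, pgoff);
        vma_complete(&vp, vmi, vma->vm_mm);     /* reinsert into rmap trees, unlock */
        return 0;
}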

/*
 * Can the proposed VMA be merged with the left (previous) VMA taking into
 * account the start position of the proposed range.
 */
static bool can_vma_merge_left(struct vma_merge_struct *vmg)
{
        return vmg->prev && vmg->prev->vm_end == vmg->start &&
               can_vma_merge_after(vmg);

--- 162 unchanged lines hidden ---

{
        if (vma->vm_mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;

        return __split_vma(vmi, vma, addr, new_below);
}
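
/*
 * Hedged usage note (editorial): callers that must change only part of
 * a VMA split it first, e.g. split_vma(vmi, vma, addr, 1) to keep the
 * new VMA below @addr; the check above caps a process at
 * sysctl_max_map_count mappings, so a split can fail with -ENOMEM.
 */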
/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using

--- 15 unchanged lines hidden ---

anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc;

        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

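/*
 * Sketch of the bracket described above (editorial; names illustrative):
 *
 *      anon_vma_interval_tree_pre_update_vma(vma);
 *      vma_set_range(vma, new_start, new_end, new_pgoff);
 *      anon_vma_interval_tree_post_update_vma(vma);
 *
 * vma_prepare() and vma_complete() perform exactly this pairing for the
 * VMAs they track.
 */
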
/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,

--- 272 unchanged lines hidden ---


        vma_iter_clear(vmi);
        vma_set_range(vma, start, end, pgoff);
        vma_complete(&vp, vmi, vma->vm_mm);
        validate_mm(vma->vm_mm);
        return 0;
}

static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
                                  struct ma_state *mas_detach, bool mm_wr_locked)
{
        struct mmu_gather tlb;

        if (!vms->clear_ptes) /* Nothing to do */
                return;


--- 1225 unchanged lines hidden ---