vma.h: 94f59ea591f17d5fb77f68e820b27522596a7e9e (old) → f8d112a4e657c65c888e6b8a8435ef61a66e4ab8 (new), rendered as a unified diff
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * vma.h
  *
  * Core VMA manipulation API implemented in vma.c.
  */
 #ifndef __MM_VMA_H
 #define __MM_VMA_H

--- 34 unchanged lines hidden ---

 	int vma_count;			/* Number of vmas that will be removed */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
 	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
 	unsigned long exec_vm;
 	unsigned long stack_vm;
 	unsigned long data_vm;
 	bool unlock;			/* Unlock after the munmap */
+	bool clear_ptes;		/* If there are outstanding PTEs to be cleared */
+	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
 };

 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
 void validate_mm(struct mm_struct *mm);
 #else
 #define validate_mm(mm) do { } while (0)
 #endif
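The two new booleans record how far teardown has progressed, so an abort can undo exactly the right amount: clear_ptes notes that page-table entries in the region still need clearing, and closed_vm_ops notes that vm_ops->close() may already have run (it pairs with the new `closed` arguments to abort_munmap_vmas() and remove_vma() below). The surrounding counters exist so accounting can be applied in one batch once the unmap commits; a minimal sketch of that batch, assuming a completion path like vms_complete_munmap_vmas() (the helper name is hypothetical; the mm_struct fields and vm_unacct_memory() are real kernel APIs):

	/* Hypothetical helper: fold the gathered totals back into the mm. */
	static inline void vms_stat_discount(struct mm_struct *mm,
			struct vma_munmap_struct *vms)
	{
		mm->total_vm -= vms->nr_pages;		/* all unmapped pages */
		mm->locked_vm -= vms->locked_vm;	/* mlock()ed pages */
		mm->exec_vm -= vms->exec_vm;
		mm->stack_vm -= vms->stack_vm;
		mm->data_vm -= vms->data_vm;
		vm_unacct_memory(vms->nr_accounted);	/* release the VM_ACCOUNT charge */
	}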

--- 32 unchanged lines hidden ---

  * @uf: The userfaultfd list_head
  * @unlock: Unlock after the operation. Only unlocked on success
  */
 static inline void init_vma_munmap(struct vma_munmap_struct *vms,
 		struct vma_iterator *vmi, struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, struct list_head *uf,
 		bool unlock)
 {
+	vms->mm = current->mm;
 	vms->vmi = vmi;
 	vms->vma = vma;
 	if (vma) {
-		vms->mm = vma->vm_mm;
 		vms->start = start;
 		vms->end = end;
 	} else {
-		vms->mm = NULL;
 		vms->start = vms->end = 0;
 	}
 	vms->unlock = unlock;
 	vms->uf = uf;
 	vms->vma_count = 0;
 	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
 	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
 	vms->unmap_start = FIRST_USER_ADDRESS;
 	vms->unmap_end = USER_PGTABLES_CEILING;
+	vms->clear_ptes = false;
+	vms->closed_vm_ops = false;
 }
 #endif
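Note the initialization change at the top of the function: vms->mm is now taken from current->mm unconditionally, instead of vma->vm_mm when a vma exists and NULL otherwise, so the struct stays usable even when no vma covers the requested range. A hypothetical caller sketch (the surrounding names are illustrative, not from this diff), preparing to unmap [start, end) from the current task:

	VMA_ITERATOR(vmi, current->mm, start);
	struct vma_munmap_struct vms;
	struct vm_area_struct *vma;
	LIST_HEAD(uf);			/* userfaultfd unmap list */

	vma = vma_find(&vmi, end);	/* first vma overlapping the range, if any */
	init_vma_munmap(&vms, &vmi, vma, start, end, &uf, /* unlock = */ false);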

 int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		struct ma_state *mas_detach);

 void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 		struct ma_state *mas_detach);

+void vms_clean_up_area(struct vma_munmap_struct *vms,
+		struct ma_state *mas_detach, bool mm_wr_locked);
+
 /*
  * abort_munmap_vmas - Undo any munmap work and free resources
  *
  * Reattach any detached vmas and free up the maple tree used to track the vmas.
  */
-static inline void abort_munmap_vmas(struct ma_state *mas_detach)
+static inline void abort_munmap_vmas(struct ma_state *mas_detach, bool closed)
 {
 	struct vm_area_struct *vma;

 	mas_set(mas_detach, 0);
-	mas_for_each(mas_detach, vma, ULONG_MAX)
+	mas_for_each(mas_detach, vma, ULONG_MAX) {
 		vma_mark_detached(vma, false);
+		if (closed && vma->vm_ops && vma->vm_ops->open)
+			vma->vm_ops->open(vma);
+	}

 	__mt_destroy(mas_detach->tree);
 }
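The new `closed` argument handles aborts that happen after the detached vmas have already had vm_ops->close() called on them: re-invoking vm_ops->open() restores the open/close pairing before the vmas are reattached. A sketch of how a caller might drive this, assuming the gather/complete flow above (the wrapper itself is hypothetical; passing closed_vm_ops matches the flag's description in the struct):

	/* Hypothetical wrapper around the gather/complete API. */
	static int munmap_or_abort(struct vma_munmap_struct *vms,
			struct ma_state *mas_detach)
	{
		int error;

		error = vms_gather_munmap_vmas(vms, mas_detach);
		if (error) {
			/* Reattach vmas; re-open only those whose close() ran. */
			abort_munmap_vmas(mas_detach, vms->closed_vm_ops);
			return error;
		}

		vms_complete_munmap_vmas(vms, mas_detach);
		return 0;
	}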

 int
 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		struct mm_struct *mm, unsigned long start,
 		unsigned long end, struct list_head *uf, bool unlock);

 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 		unsigned long start, size_t len, struct list_head *uf,
 		bool unlock);

-void remove_vma(struct vm_area_struct *vma, bool unreachable);
+void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);
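remove_vma() grows the same kind of flag: `closed` tells it whether vm_ops->close() has already been called for this vma, so the callback is not invoked twice. A guess at the guard this implies in vma.c (an assumption, not part of this diff):

	if (!closed && vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);	/* skip if close() already ran */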

 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct vm_area_struct *next);

 /* Required by mmap_region(). */
 bool
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
 		struct anon_vma *anon_vma, struct file *file,

--- 97 unchanged lines hidden ---

 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

 bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

 int mm_take_all_locks(struct mm_struct *mm);
 void mm_drop_all_locks(struct mm_struct *mm);
 unsigned long count_vma_pages_range(struct mm_struct *mm,
-		unsigned long addr, unsigned long end);
+		unsigned long addr, unsigned long end,
+		unsigned long *nr_accounted);
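count_vma_pages_range() now also reports, through the new nr_accounted out-parameter, how many of the pages it counted belong to VM_ACCOUNT vmas, so callers get both totals from a single tree walk instead of two. A short caller sketch (variable names are illustrative):

	unsigned long nr_accounted = 0;
	unsigned long nr_pages;

	/* One walk over [addr, end) yields both totals. */
	nr_pages = count_vma_pages_range(mm, addr, end, &nr_accounted);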

 static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
 {
 	/*
 	 * We want to check manually if we can change individual PTEs writable
 	 * if we can't do that automatically for all PTEs in a mapping. For
 	 * private mappings, that's always the case when we have write
 	 * permissions as we properly have to handle COW.

--- 207 unchanged lines hidden ---