vma.h: diff from f8d112a4e657c65c888e6b8a8435ef61a66e4ab8 (old) to 4f87153e82c4906e917d273ab7accd0d540aab35 (new)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

--- 68 unchanged lines hidden ---

int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff,
	       struct vm_area_struct *next);

int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

+static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
+		struct vm_area_struct *vma, gfp_t gfp)
+{
+	if (vmi->mas.status != ma_start &&
+	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
+		vma_iter_invalidate(vmi);
+
+	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
+	mas_store_gfp(&vmi->mas, vma, gfp);
+	if (unlikely(mas_is_err(&vmi->mas)))
+		return -ENOMEM;
+
+	return 0;
+}
+
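A minimal usage sketch (not part of vma.h): vma_iter_store_gfp() writes @vma over [vm_start, vm_end - 1] in the mm's maple tree and returns -ENOMEM when tree node allocation fails, so a caller only has to propagate that error. The wrapper name example_insert_vma() is invented for this illustration.

static int example_insert_vma(struct vma_iterator *vmi,
			      struct vm_area_struct *vma)
{
	int err;

	/* Store the VMA; under memory pressure node allocation can fail. */
	err = vma_iter_store_gfp(vmi, vma, GFP_KERNEL);
	if (err)
		return err;	/* -ENOMEM: the tree was not modified */

	return 0;
}
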
#ifdef CONFIG_MMU
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap

--- 31 unchanged lines hidden ---

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked);

/*
- * abort_munmap_vmas - Undo any munmap work and free resources
+ * reattach_vmas() - Undo any munmap work and free resources
+ * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
-static inline void abort_munmap_vmas(struct ma_state *mas_detach, bool closed)
+static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
-	mas_for_each(mas_detach, vma, ULONG_MAX) {
+	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);
-		if (closed && vma->vm_ops && vma->vm_ops->open)
-			vma->vm_ops->open(vma);
-	}

	__mt_destroy(mas_detach->tree);
}

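An illustrative sketch (not taken from this file) of the pattern reattach_vmas() serves: the unmap path collects the VMAs it detaches into a private maple tree, and if it must bail out before anything destructive has happened it hands that tree back so every VMA is marked attached again and the tree is freed. example_undo_detach() is invented for the sketch; MA_STATE(), mt_init_flags() and MT_FLAGS_LOCK_MASK are standard maple tree helpers, and the gather step is only hinted at in a comment.

static void example_undo_detach(struct vma_iterator *vmi)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);

	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);

	/* ... VMAs removed from the mm's tree would be stored in mt_detach ... */

	/* Abort: mark every collected VMA attached again and free mt_detach. */
	reattach_vmas(&mas_detach);
}
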
+/*
+ * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
+ * operation.
+ * @vms: The vma unmap structure
+ * @mas_detach: The maple state with the detached maple tree
+ *
+ * Reattach any detached vmas and free up the maple tree used to track the
+ * vmas.  If that's not possible because the ptes are cleared (and
+ * vm_ops->close() may have been called), then a NULL is written over the vmas
+ * and the vmas are removed (munmap() completed).
+ */
+static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
+		struct ma_state *mas_detach)
+{
+	struct ma_state *mas = &vms->vmi->mas;
+
+	if (!vms->nr_pages)
+		return;
+
+	if (vms->clear_ptes)
+		return reattach_vmas(mas_detach);
+
+	/*
+	 * Aborting cannot just call the vm_ops open() because they are often
+	 * not symmetrical and state data has been lost.  Resort to the old
+	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
+	 */
+	mas_set_range(mas, vms->start, vms->end - 1);
+	if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
+		pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
+			     current->comm, current->pid);
+		/* Leaving vmas detached and in-tree may hamper recovery */
+		reattach_vmas(mas_detach);
+	} else {
+		/* Clean up the insertion of the unfortunate gap */
+		vms_complete_munmap_vmas(vms, mas_detach);
+	}
+}

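A sketch of how the complete/abort pair might split an unmap that can still fail (illustrative only; example_finish_munmap(), its failed flag and the -ENOMEM return are invented, and the earlier step that fills @vms and @mas_detach is assumed to have run already):

static int example_finish_munmap(struct vma_munmap_struct *vms,
				 struct ma_state *mas_detach, bool failed)
{
	if (failed) {
		/* Put the VMAs back, or leave a gap if the ptes are already gone. */
		vms_abort_munmap_vmas(vms, mas_detach);
		return -ENOMEM;
	}

	/* Point of no return: release the detached VMAs and finish the unmap. */
	vms_complete_munmap_vmas(vms, mas_detach);
	return 0;
}
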
int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

--- 136 unchanged lines hidden ---

#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

-static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
-		struct vm_area_struct *vma, gfp_t gfp)
-{
-	if (vmi->mas.status != ma_start &&
-	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
-		vma_iter_invalidate(vmi);
-
-	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
-	mas_store_gfp(&vmi->mas, vma, gfp);
-	if (unlikely(mas_is_err(&vmi->mas)))
-		return -ENOMEM;
-
-	return 0;
-}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)

--- 164 unchanged lines hidden ---