/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTEs to be cleared */
	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
	/* 1 byte hole */
	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

enum vma_merge_flags {
	VMG_FLAG_DEFAULT = 0,
	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	VMG_FLAG_JUST_EXPAND = 1 << 0,
};

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next;	/* Modified by vma_merge(). */
	struct vm_area_struct *vma;	/* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_flags merge_flags;
	enum vma_merge_state state;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
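
/*
 * Worked example (an illustrative sketch, not taken from vma.c): for a VMA
 * with vm_pgoff == 10, vma_pgoff_offset(vma, vma->vm_start + 2 * PAGE_SIZE)
 * evaluates to 10 + PHYS_PFN(2 * PAGE_SIZE) == 12, i.e. the file page
 * backing that address.
 */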

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
		.merge_flags = VMG_FLAG_DEFAULT,			\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
		.merge_flags = VMG_FLAG_DEFAULT,		\
	}
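
/*
 * Usage sketch (hypothetical caller, abbreviated): describe the proposed
 * range in a vma_merge_struct, then ask the merge code to fold it into an
 * adjacent VMA.  Real callers in mmap.c/vma.c also position the VMA
 * iterator and fill in ->prev/->next as each helper requires.
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma && vmg_nomem(&vmg))
 *		return -ENOMEM;
 */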

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

int vma_expand(struct vma_merge_struct *vmg);
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
				     struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

#ifdef CONFIG_MMU
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation. Only unlocked on success
 */
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
	vms->closed_vm_ops = false;
}
#endif
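
/*
 * How the munmap helpers below fit together (an abbreviated, illustrative
 * sketch only; real callers such as do_vmi_align_munmap() in vma.c add
 * locking, accounting and more precise error handling):
 *
 *	struct vma_munmap_struct vms;
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	if (vms_gather_munmap_vmas(&vms, &mas_detach))
 *		goto failed;
 *
 *	// ... detach the range from the VMA tree; if that step fails,
 *	// vms_abort_munmap_vmas(&vms, &mas_detach) undoes what it can ...
 *
 *	vms_complete_munmap_vmas(&vms, &mas_detach);	// point of no return
 */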

int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas, free up the maple tree used to track the vmas.
 * If that's not possible because the ptes are cleared (and vm_ops->close() may
 * have been called), then a NULL is written over the vmas and the vmas are
 * removed (munmap() completed).
 */
static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost. Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
	/* Clean up the insertion of the unfortunate gap */
	vms_complete_munmap_vmas(vms, mas_detach);
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx);

struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
		unsigned long addr, unsigned long len, pgoff_t pgoff,
		bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
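
/*
 * Illustrative mapping of typical VMAs to the helpers above (only the
 * deciding flag bits matter); vm_stat_account() uses these to maintain
 * mm->exec_vm, mm->stack_vm and mm->data_vm:
 *
 *	file-backed PROT_READ|PROT_EXEC text	-> is_exec_mapping()
 *	MAP_GROWSDOWN / shadow stack		-> is_stack_mapping()
 *	private anonymous PROT_READ|PROT_WRITE	-> is_data_mapping()
 */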

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to end of the previous VMA, or
 * if no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
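
/*
 * Example (illustrative): with VMAs A = [0x1000, 0x2000) and
 * B = [0x3000, 0x4000) and the iterator positioned in the gap at 0x2800,
 * vma_iter_next_rewind(vmi, &prev) returns B, sets prev to A, and leaves
 * the iterator over the gap between A and B.
 */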

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a VMA is sealed against modification.
 * Returns true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif
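
/*
 * Typical caller pattern (an illustrative sketch): paths about to unmap,
 * remap or change protections on a range bail out early when a VMA is
 * sealed, e.g.:
 *
 *	if (!can_modify_vma(vma))
 *		return -EPERM;
 */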

#endif	/* __MM_VMA_H */