xref: /linux/mm/vma.h (revision cacded5e42b9609b07b22d80c10f0076d439f7d1)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
	/* 1 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next; /* Modified by vma_merge(). */
	struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}

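/*
 * Illustrative sketch, not part of the original header: vma_pgoff_offset()
 * simply adds the page distance of @addr from vm_start to the VMA's starting
 * file offset.  example_addr_to_file_pgoff() is a hypothetical wrapper that
 * only restates that arithmetic.
 */
static inline pgoff_t example_addr_to_file_pgoff(struct vm_area_struct *vma,
						 unsigned long addr)
{
	/* e.g. vm_pgoff == 16 and addr three pages past vm_start gives 19 */
	return vma_pgoff_offset(vma, addr);
}
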
#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_prepare(struct vma_prepare *vp);

/* Required for do_brk_flags(). */
void init_vma_prep(struct vma_prepare *vp,
		   struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_complete(struct vma_prepare *vp,
		  struct vma_iterator *vmi, struct mm_struct *mm);

int vma_expand(struct vma_merge_struct *vmg);
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

#ifdef CONFIG_MMU
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
	vms->closed_vm_ops = false;
}
#endif

int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma munmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track them.
 * If that's not possible because the ptes have been cleared (and
 * vm_ops->close() may have been called), then NULL is written over the vmas
 * and the vmas are removed, i.e. the munmap() is completed.
 */
static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot simply call vm_ops->open() again, because open() and
	 * close() are often not symmetrical and state data has already been
	 * lost.  Resort to the old failure method of leaving a gap where the
	 * MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
		pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
			     current->comm, current->pid);
		/* Leaving vmas detached and in-tree may hamper recovery */
		reattach_vmas(mas_detach);
	} else {
		/* Clean up the insertion of the unfortunate gap */
		vms_complete_munmap_vmas(vms, mas_detach);
	}
}

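/*
 * Illustrative sketch, not part of the original header (CONFIG_MMU assumed):
 * the intended call sequence for the munmap helpers above, loosely following
 * do_vmi_align_munmap().  example_munmap_range() is a hypothetical name, the
 * detached-tree setup is abridged, and locking/validation are omitted.
 */
static inline int example_munmap_range(struct vma_iterator *vmi,
		struct vm_area_struct *vma, unsigned long start,
		unsigned long end, struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	struct vma_munmap_struct vms;
	int error;

	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);

	/* Detach the affected VMAs into mas_detach; nothing is freed yet. */
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		return error;	/* gather is expected to undo its own work */

	/* Take the range out of the mm's tree before the point of no return. */
	mas_set_range(&vmi->mas, start, end - 1);
	error = mas_store_gfp(&vmi->mas, NULL, GFP_KERNEL);
	if (error) {
		/* Nothing destructive has happened yet; put the VMAs back. */
		reattach_vmas(&mas_detach);
		return error;
	}

	/* Point of no return: tear down page tables and free the VMAs. */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;
}
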
int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/*
 * Can we merge the VMA described by vmg into the following VMA vmg->next?
 *
 * Required by mmap_region().
 */
bool can_vma_merge_before(struct vma_merge_struct *vmg);

/*
 * Can we merge the VMA described by vmg into the preceding VMA vmg->prev?
 *
 * Required by mmap_region() and do_brk_flags().
 */
bool can_vma_merge_after(struct vma_merge_struct *vmg);

/* We are about to modify the VMA's flags. */
struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx);

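/*
 * Illustrative sketch, not part of the original header: the pattern an
 * mprotect-style caller is expected to follow.  vma_modify_flags() splits
 * and/or merges as needed and returns the VMA covering the range, or an
 * ERR_PTR() on failure.  example_set_flags() is a hypothetical name and the
 * page-table update that normally follows is omitted.
 */
static inline int example_set_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags)
{
	vma = vma_modify_flags(vmi, prev, vma, start, end, new_flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* The returned VMA covers [start, end), possibly more after a merge. */
	vm_flags_reset(vma, new_flags);
	return 0;
}
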
struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

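/*
 * Illustrative sketch, not part of the original header: the batched unlink
 * pattern used when tearing down many file-backed VMAs in one go (in the
 * spirit of free_pgtables()), which amortises the i_mmap locking.
 * example_unlink_all() is a hypothetical name.
 */
static inline void example_unlink_all(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	struct unlink_vma_file_batch vb;

	unlink_file_vma_batch_init(&vb);
	do {
		/* Queue each VMA; file-less VMAs are simply skipped. */
		unlink_file_vma_batch_add(&vb, vma);
	} while ((vma = vma_next(vmi)) != NULL);
	unlink_file_vma_batch_final(&vb);
}
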
void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually whether we can make individual PTEs
	 * writable if we cannot do so automatically for all PTEs in a mapping.
	 * For private mappings, that is always the case when we have write
	 * permission, as we have to handle COW properly.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

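/*
 * Illustrative sketch, not part of the original header: how a protection
 * changing path consumes the helper above, in the spirit of mprotect_fixup().
 * example_cp_flags() is a hypothetical name and the change_protection() call
 * that would use the result is omitted.
 */
static inline unsigned long example_cp_flags(struct vm_area_struct *vma)
{
	unsigned long mm_cp_flags = 0;

	/* Only ask for per-PTE write upgrades when it cannot be done in bulk. */
	if (vma_wants_manual_pte_write_upgrade(vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;

	return mm_cp_flags;
}
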
#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

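/*
 * Illustrative sketch, not part of the original header: how the three
 * classifiers above feed the per-mm counters, in the spirit of
 * vm_stat_account().  example_stat_account() is a hypothetical name and the
 * total_vm update is omitted.
 */
static inline void example_stat_account(struct mm_struct *mm,
					vm_flags_t flags, long npages)
{
	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}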

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

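/*
 * Illustrative sketch, not part of the original header: a bottom-up search
 * for a free gap of @length bytes, the way the get-unmapped-area paths use
 * these wrappers.  example_find_lowest_gap() is a hypothetical name and
 * alignment handling is omitted.
 */
static inline unsigned long example_find_lowest_gap(struct mm_struct *mm,
		unsigned long low_limit, unsigned long high_limit,
		unsigned long length)
{
	VMA_ITERATOR(vmi, mm, 0);

	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	/* On success the iterator is positioned over the gap that was found. */
	return vmi.mas.index;	/* i.e. vma_iter_addr(&vmi), defined below */
}
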
/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

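/*
 * Illustrative sketch, not part of the original header: the preallocate/
 * prepare/store/complete protocol for growing an existing VMA, loosely
 * following vma_expand().  example_expand_vma() is a hypothetical name; the
 * anon_vma and hugepage fixups and the "remove the next VMA" case are
 * omitted, and vma_set_range() is assumed to be available from mm/internal.h.
 */
static inline int example_expand_vma(struct vma_iterator *vmi,
		struct vm_area_struct *vma, unsigned long start,
		unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	/* Reserve maple tree nodes up front so the store below cannot fail. */
	vma_iter_config(vmi, start, end);
	if (vma_iter_prealloc(vmi, vma))
		return -ENOMEM;

	/* Take the rmap/file/anon_vma locks covering the change. */
	init_vma_prep(&vp, vma);
	vma_prepare(&vp);

	vma_set_range(vma, start, end, pgoff);
	vma_iter_store(vmi, vma);

	/* Drop the locks and fix up rmap and file state. */
	vma_complete(&vp, vmi, vma->vm_mm);
	return 0;
}
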
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if there is no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}

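/*
 * Illustrative sketch, not part of the original header: combining
 * vma_iter_next_rewind() with the VMG_STATE() descriptor and
 * vma_merge_new_range(), in the spirit of mmap_region().
 * example_map_new_range() is a hypothetical name; allocating and linking a
 * brand new VMA when no merge is possible is omitted.
 */
static inline struct vm_area_struct *example_map_new_range(struct mm_struct *mm,
		struct vma_iterator *vmi, unsigned long addr,
		unsigned long end, unsigned long vm_flags, pgoff_t pgoff,
		int *errp)
{
	struct vm_area_struct *vma;

	VMG_STATE(vmg, mm, vmi, addr, end, vm_flags, pgoff);

	/* Pick up the VMAs bracketing the gap; the iterator ends up on it. */
	vmg.next = vma_iter_next_rewind(vmi, &vmg.prev);

	vma = vma_merge_new_range(&vmg);
	if (vma) {
		*errp = 0;
		return vma;	/* an adjacent VMA absorbed [addr, end) */
	}

	*errp = vmg_nomem(&vmg) ? -ENOMEM : 0;
	return NULL;		/* the caller must allocate a new VMA */
}
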
#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check if a vma is sealed for modification.
 * Return true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

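/*
 * Illustrative sketch, not part of the original header: the gate that
 * munmap()/mprotect()-style paths are expected to apply before modifying a
 * VMA, so that sealed mappings (mseal(2), 64-bit only) are refused with
 * -EPERM.  example_check_sealed() is a hypothetical name.
 */
static inline int example_check_sealed(struct vm_area_struct *vma)
{
	if (unlikely(!can_modify_vma(vma)))
		return -EPERM;

	return 0;
}
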
#endif	/* __MM_VMA_H */