/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
	/* 1 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next; /* Modified by vma_merge(). */
	struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
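
/*
 * Illustrative example (made-up values): for a VMA with vm_pgoff == 0x10 and
 * vm_start == 0x7f0000000000, an address two pages past vm_start yields
 * 0x10 + PHYS_PFN(2 * PAGE_SIZE) == 0x12, i.e. the page offset within the
 * backing object for that address.
 */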

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}
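
/*
 * Illustrative sketch of how the initializers above are meant to be used
 * (condensed, not copied from a real caller): code mapping a new range could
 * build a descriptor with VMG_STATE() and hand it to vma_merge_new_range(),
 * declared below:
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma) {
 *		/* No merge was possible - fall back to allocating a new VMA. */
 *	}
 *
 * VMG_VMA_STATE() is the analogous initializer when an existing @vma is being
 * modified and its current attributes should seed the descriptor.
 */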

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

int vma_expand(struct vma_merge_struct *vmg);
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

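/*
 * Store @vma over [vma->vm_start, vma->vm_end) in the tree, allocating any
 * needed nodes with @gfp.  A stale iterator position that does not contain
 * vm_start is invalidated first.  Returns 0 on success or -ENOMEM if the
 * maple tree allocation fails.
 */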
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

#ifdef CONFIG_MMU
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
	vms->closed_vm_ops = false;
}
#endif

int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);
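
/*
 * Sketch of the intended munmap flow, condensed from the helpers above and
 * below rather than copied from a specific caller (illustrative only):
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (error)
 *		reattach_vmas(&mas_detach);
 *	else
 *		vms_complete_munmap_vmas(&vms, &mas_detach);
 */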

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas, free up the maple tree used to track the vmas.
 * If that's not possible because the ptes are cleared (and vm_ops->close() may
 * have been called), then a NULL is written over the vmas and the vmas are
 * removed (munmap() completed).
 */
static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost.  Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
		pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
			     current->comm, current->pid);
		/* Leaving vmas detached and in-tree may hamper recovery */
		reattach_vmas(mas_detach);
	} else {
		/* Clean up the insertion of the unfortunate gap */
		vms_complete_munmap_vmas(vms, mas_detach);
	}
}

int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx);

struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
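
/*
 * Example reading of the helper above: a MAP_PRIVATE mapping created with
 * PROT_READ | PROT_WRITE has VM_WRITE set and VM_SHARED clear, so it returns
 * true and callers know individual PTEs may still need to be checked before
 * being made writable (COW).  Shared mappings defer to
 * vma_wants_writenotify() instead.
 */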

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
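
/*
 * Worked example (illustrative): a MAP_PRIVATE, PROT_READ | PROT_EXEC file
 * mapping carries VM_EXEC without VM_WRITE, VM_SHARED or VM_STACK, so only
 * is_exec_mapping() is true; a MAP_PRIVATE, PROT_READ | PROT_WRITE mapping
 * is counted by is_data_mapping() instead.
 */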

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}
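
/*
 * Note on the +1/-1 conversions above and in vma_iter_config(): the maple
 * tree stores inclusive [index, last] ranges while VMAs use exclusive
 * [vm_start, vm_end) ranges, so a VMA spanning 0x1000-0x2000 occupies tree
 * slots 0x1000-0x1fff and vma_iter_end() reports 0x2000 again.
 */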

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or to index 0 if there is no previous VMA.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
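
/*
 * Illustrative use (sketch, not from a specific caller): a lookup that needs
 * both neighbours of an address can do
 *
 *	vma_iter_set(&vmi, addr);
 *	next = vma_iter_next_rewind(&vmi, &prev);
 *
 * after which @prev/@next bracket @addr and the iterator sits just after
 * @prev, as described above.
 */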

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a VMA is sealed against modification.
 * Returns true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);
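
/*
 * Callers are expected to abort the operation when can_modify_vma() reports
 * a sealed VMA (sealed mappings fail with -EPERM); on !CONFIG_64BIT kernels
 * the stubs below make the sealing checks a no-op.
 */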

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#endif	/* __MM_VMA_H */