/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
	/* 1 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next; /* Modified by vma_merge(). */
	struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
};

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
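
/*
 * Worked example (illustrative values only): with 4KiB pages, a VMA whose
 * vm_pgoff is 0x10 and a lookup address three pages past vm_start
 * (addr - vm_start == 0x3000) gives 0x10 + PHYS_PFN(0x3000) == 0x13, i.e.
 * the page offset within the backing object that corresponds to @addr.
 */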

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
	}
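
/*
 * Sketch of typical use (illustrative, not lifted from vma.c): a caller about
 * to change flags on part of an existing VMA can seed the merge state from
 * that VMA and override only the field being changed before probing whether
 * the range can instead be absorbed by a neighbour:
 *
 *	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
 *	vmg.flags = new_flags;
 *	if (can_vma_merge_after(&vmg))
 *		... expand prev over [start, end) instead of splitting ...
 */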

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_prepare(struct vma_prepare *vp);

/* Required for do_brk_flags(). */
void init_vma_prep(struct vma_prepare *vp,
		   struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_complete(struct vma_prepare *vp,
		  struct vma_iterator *vmi, struct mm_struct *mm);

int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff,
	       struct vm_area_struct *next);

int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

#ifdef CONFIG_MMU
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
	vms->closed_vm_ops = false;
}
#endif

int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);
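
/*
 * Sketch of the intended call sequence on the success path (illustrative
 * only; real callers add locking, accounting and error handling, set up
 * @mas_detach over a separate detached maple tree, and use reattach_vmas()
 * or vms_abort_munmap_vmas() below to back out on failure):
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (!error)
 *		vms_complete_munmap_vmas(&vms, &mas_detach);
 */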

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas, free up the maple tree used to track the vmas.
 * If that's not possible because the ptes are cleared (and vm_ops->close() may
 * have been called), then a NULL is written over the vmas and the vmas are
 * removed (munmap() completed).
 */
static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost.  Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
		pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
			     current->comm, current->pid);
		/* Leaving vmas detached and in-tree may hamper recovery */
		reattach_vmas(mas_detach);
	} else {
		/* Clean up the insertion of the unfortunate gap */
		vms_complete_munmap_vmas(vms, mas_detach);
	}
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/*
 * Can we merge the VMA described by vmg into the following VMA vmg->next?
 *
 * Required by mmap_region().
 */
bool can_vma_merge_before(struct vma_merge_struct *vmg);

/*
 * Can we merge the VMA described by vmg into the preceding VMA vmg->prev?
 *
 * Required by mmap_region() and do_brk_flags().
 */
bool can_vma_merge_after(struct vma_merge_struct *vmg);
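
/*
 * Worked example (illustrative values, not from the kernel sources): if
 * vmg->prev spans [0x1000, 0x3000) and vmg describes a new anonymous range
 * [0x3000, 0x5000) with the same vm_flags, mempolicy, uffd context and
 * anon_vma_name, the proposed range starts exactly where prev ends and is
 * attribute-compatible, so can_vma_merge_after() reports that prev can
 * simply be extended to 0x5000 rather than creating a second VMA.
 */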

/* We are about to modify the VMA's flags. */
struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx);

struct vm_area_struct
*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
		   struct vm_area_struct *vma, unsigned long start,
		   unsigned long end, pgoff_t pgoff);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
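
/*
 * Illustrative classification (hypothetical flag values): a PROT_READ|PROT_EXEC
 * private file mapping (VM_READ | VM_EXEC) is counted by is_exec_mapping(); a
 * MAP_PRIVATE PROT_READ|PROT_WRITE anonymous mapping (VM_READ | VM_WRITE) is
 * counted by is_data_mapping(); the main thread stack (VM_STACK set) or a
 * shadow stack (VM_SHADOW_STACK) is counted by is_stack_mapping().  A shared
 * writable mapping (VM_SHARED | VM_WRITE) matches none of the three.
 */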

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}
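
/*
 * Note that @last above is exclusive, in the same [start, end) convention as
 * vm_start/vm_end, and is converted to the maple tree's inclusive last index:
 * vma_iter_config(vmi, vma->vm_start, vma->vm_end) leaves vmi->mas covering
 * vm_start through vm_end - 1.
 */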

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last <  vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a vma is sealed for modification.
 * Return true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}
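
/*
 * Typical use (sketch; the caller and error code are illustrative): paths
 * that are about to unmap or otherwise modify a VMA bail out early when it
 * is sealed:
 *
 *	if (!can_modify_vma(vma))
 *		return -EPERM;
 */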

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#endif	/* __MM_VMA_H */