/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;

	bool skip_vma_uprobe :1;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the merge operation, so no guarantees
 * are made about the contents of this structure after a merge operation has
 * completed.
 */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	/*
	 * Adjacent VMAs, any of which may be NULL if not present:
	 *
	 * |------|--------|------|
	 * | prev | middle | next |
	 * |------|--------|------|
	 *
	 * middle may not yet exist in the case of a proposed new VMA being
	 * merged, or it may be an existing VMA.
	 *
	 * next may be assigned by the caller.
	 */
	struct vm_area_struct *prev;
	struct vm_area_struct *middle;
	struct vm_area_struct *next;
	/* This is the VMA we ultimately target to become the merged VMA. */
	struct vm_area_struct *target;
	/*
	 * Initially, the start, end, pgoff fields are provided by the caller
	 * and describe the proposed new VMA range, whether modifying an
	 * existing VMA (which will be 'middle'), or adding a new one.
	 *
	 * During the merge process these fields are updated to describe the new
	 * range _including those VMAs which will be merged_.
	 */
	unsigned long start;
	unsigned long end;
	pgoff_t pgoff;

	vm_flags_t vm_flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;

	/* Flags which callers can use to modify merge behaviour: */

	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	bool just_expand :1;

	/*
	 * If a merge is possible but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;

	/*
	 * If set, skip uprobe_mmap() on the merged VMA.
	 */
	bool skip_vma_uprobe :1;

	/* Internal flags set during merge process: */

	/*
	 * Internal flag indicating the merge increases vmg->middle->vm_start
	 * (and thereby, vmg->prev->vm_end).
	 */
	bool __adjust_middle_start :1;
	/*
	 * Internal flag indicating the merge decreases vmg->next->vm_start
	 * (and thereby, vmg->middle->vm_end).
	 */
	bool __adjust_next_start :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->middle.
	 */
	bool __remove_middle :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->next.
	 */
	bool __remove_next :1;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
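
/*
 * Worked example (illustrative only): for a VMA with vm_start == 0x10000
 * and vm_pgoff == 3, an address of 0x12000 yields a page offset of
 * 3 + PHYS_PFN(0x2000) == 3 + 2 == 5, assuming a 4KiB PAGE_SIZE.
 */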

#define VMG_STATE(name, mm_, vmi_, start_, end_, vm_flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.vm_flags = vm_flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.middle = vma_,					\
		.next = NULL,					\
		.start = start_,				\
		.end = end_,					\
		.vm_flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}
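
/*
 * Example usage (a hedged sketch of the typical pattern, not a definitive
 * in-tree caller): propose a new range and attempt to merge it with its
 * neighbours via vma_merge_new_range(), declared below:
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma && vmg_nomem(&vmg))
 *		return -ENOMEM;
 */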

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	vma_mark_attached(vma);
	return 0;
}
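
/*
 * Example (sketch only, assuming the iterator already points at or near the
 * target range): insert a fully initialised VMA, allocating maple tree nodes
 * on demand rather than via vma_iter_prealloc():
 *
 *	if (vma_iter_store_gfp(&vmi, vma, GFP_KERNEL))
 *		return -ENOMEM;
 */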

/*
 * Temporary helper function for stacked mmap handlers which specify
 * f_op->mmap() but which might have an underlying file system that implements
 * f_op->mmap_prepare().
 */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	/*
	 * Since we're invoking .mmap_prepare() despite having a partially
	 * established VMA, we must take care to handle setting fields
	 * correctly.
	 */

	/* Mutable fields. Populated with initial state. */
	vma->vm_pgoff = desc->pgoff;
	if (desc->vm_file != vma->vm_file)
		vma_set_file(vma, desc->vm_file);
	if (desc->vm_flags != vma->vm_flags)
		vm_flags_set(vma, desc->vm_flags);
	vma->vm_page_prot = desc->page_prot;

	/* User-defined fields. */
	vma->vm_ops = desc->vm_ops;
	vma->vm_private_data = desc->private_data;
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
__must_check struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		vm_flags_t vm_flags);

/* We are about to modify the VMA's anon_name. */
__must_check struct vm_area_struct
*vma_modify_name(struct vma_iterator *vmi,
		 struct vm_area_struct *prev,
		 struct vm_area_struct *vma,
		 unsigned long start,
		 unsigned long end,
		 struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
__must_check struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
__must_check struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       vm_flags_t vm_flags,
		       struct vm_userfaultfd_ctx new_ctx,
		       bool give_up_on_oom);

__must_check struct vm_area_struct
*vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct
*vma_merge_extend(struct vma_iterator *vmi,
		  struct vm_area_struct *vma,
		  unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);
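
/*
 * Example (sketch of the intended pattern, assuming the caller holds the
 * locks that unlinking requires): batch file-backed VMAs so per-file rmap
 * locking can be amortised over groups of up to 8 VMAs rather than paid
 * once per VMA:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */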

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		 unsigned long addr, unsigned long request, unsigned long flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually whether we can make individual PTEs
	 * writable if we can't do that automatically for all PTEs in a
	 * mapping. For private mappings, that's always the case when we have
	 * write permissions, as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, vm_flags_t vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
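
/*
 * Worked examples (illustrative only): a VM_READ | VM_EXEC text mapping
 * satisfies is_exec_mapping(); a private VM_READ | VM_WRITE heap mapping
 * satisfies is_data_mapping(); a mapping carrying VM_STACK (or
 * VM_SHADOW_STACK) satisfies is_stack_mapping().
 */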

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}
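
/*
 * Example (illustrative): VMA ranges are half-open while maple tree ranges
 * are inclusive, so configuring the iterator for a VMA spanning
 * [0x1000, 0x3000) stores the inclusive range [0x1000, 0x2fff]:
 *
 *	vma_iter_config(&vmi, 0x1000, 0x3000);
 */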

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
					    struct vm_area_struct *vma)
{
	vma_assert_attached(vma);

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline void vma_iter_store_new(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vma_mark_attached(vma);
	vma_iter_store_overwrite(vmi, vma);
}
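
/*
 * Example (sketch of the expected pairing, assuming the mmap lock is held
 * for write): preallocate maple tree nodes up front so the store itself
 * cannot fail, then write the new VMA into the tree:
 *
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store_new(&vmi, vma);
 */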

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if there is no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
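
/*
 * Example (sketch): locate the VMA at or after addr while also learning its
 * predecessor, leaving the iterator positioned before next:
 *
 *	vma_iter_set(&vmi, addr);
 *	next = vma_iter_next_rewind(&vmi, &prev);
 */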

#ifdef CONFIG_64BIT
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}
#else
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return false;
}
#endif

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma);

/* vma_init.c, shared between CONFIG_MMU and nommu. */
void __init vma_state_init(void);
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig);
void vm_area_free(struct vm_area_struct *vma);

/* vma_exec.c */
#ifdef CONFIG_MMU
int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
			  unsigned long *top_mem_p);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
#endif

#endif	/* __MM_VMA_H */