/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;

	bool skip_vma_uprobe :1;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the merge operation, so no guarantees
 * are made about the contents of this structure after a merge operation has
 * completed.
 */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	/*
	 * Adjacent VMAs, any of which may be NULL if not present:
	 *
	 * |------|--------|------|
	 * | prev | middle | next |
	 * |------|--------|------|
	 *
	 * middle may not yet exist in the case of a proposed new VMA being
	 * merged, or it may be an existing VMA.
	 *
	 * next may be assigned by the caller.
	 */
	struct vm_area_struct *prev;
	struct vm_area_struct *middle;
	struct vm_area_struct *next;
	/* This is the VMA we ultimately target to become the merged VMA. */
	struct vm_area_struct *target;
	/*
	 * Initially, the start, end, pgoff fields are provided by the caller
	 * and describe the proposed new VMA range, whether modifying an
	 * existing VMA (which will be 'middle'), or adding a new one.
	 *
	 * During the merge process these fields are updated to describe the new
	 * range _including those VMAs which will be merged_.
	 */
	unsigned long start;
	unsigned long end;
	pgoff_t pgoff;

	vm_flags_t vm_flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;

	/* If copied (i.e. mremap()'d), the VMA from which we are copying. */
	struct vm_area_struct *copied_from;

	/* Flags which callers can use to modify merge behaviour: */

	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	bool just_expand :1;

	/*
	 * If a merge is possible, but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;

	/*
	 * If set, skip uprobe_mmap upon merged vma.
	 */
	bool skip_vma_uprobe :1;

	/* Internal flags set during merge process: */

	/*
	 * Internal flag indicating the merge increases vmg->middle->vm_start
	 * (and thereby, vmg->prev->vm_end).
	 */
	bool __adjust_middle_start :1;
	/*
	 * Internal flag indicating the merge decreases vmg->next->vm_start
	 * (and thereby, vmg->middle->vm_end).
	 */
	bool __adjust_next_start :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->middle.
	 */
	bool __remove_middle :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->next.
	 */
	bool __remove_next :1;

};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
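
/*
 * Example (illustrative only, not part of the API): when splitting a VMA at
 * an address @addr within it, the page offset of the new fragment beginning
 * at @addr can be derived from the original VMA:
 *
 *	new->vm_pgoff = vma_pgoff_offset(vma, addr);
 */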

#define VMG_STATE(name, mm_, vmi_, start_, end_, vm_flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.vm_flags = vm_flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.middle = vma_,					\
		.next = NULL,					\
		.start = start_,				\
		.end = end_,					\
		.vm_flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}
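
/*
 * Illustrative sketch (simplified, hypothetical caller): a caller proposing a
 * brand new mapping typically fills in a VMG_STATE() descriptor, attempts a
 * merge with its neighbours, and falls back to allocating a fresh VMA if no
 * merge is possible:
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *	vmg.file = file;
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma) {
 *		if (vmg_nomem(&vmg))
 *			return -ENOMEM;
 *		... allocate and insert a new VMA instead ...
 *	}
 */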

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	vma_mark_attached(vma);
	return 0;
}
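
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * a caller inserting an already-initialised VMA into the tree under the mmap
 * write lock can store it and handle allocation failure directly:
 *
 *	if (vma_iter_store_gfp(&vmi, vma, GFP_KERNEL))
 *		return -ENOMEM;
 */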

/*
 * Temporary helper function for stacked mmap handlers which specify
 * f_op->mmap() but which might have an underlying file system which implements
 * f_op->mmap_prepare().
 */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	/*
	 * Since we're invoking .mmap_prepare() despite having a partially
	 * established VMA, we must take care to handle setting fields
	 * correctly.
	 */

	/* Mutable fields. Populated with initial state. */
	vma->vm_pgoff = desc->pgoff;
	if (desc->vm_file != vma->vm_file)
		vma_set_file(vma, desc->vm_file);
	if (desc->vm_flags != vma->vm_flags)
		vm_flags_set(vma, desc->vm_flags);
	vma->vm_page_prot = desc->page_prot;

	/* User-defined fields. */
	vma->vm_ops = desc->vm_ops;
	vma->vm_private_data = desc->private_data;
}
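
/*
 * Illustrative sketch (simplified, hypothetical shim): such a compatibility
 * helper would populate a struct vm_area_desc from the partially established
 * VMA, invoke the underlying filesystem's f_op->mmap_prepare(), and on
 * success copy the results back into the VMA:
 *
 *	err = file->f_op->mmap_prepare(&desc);
 *	if (err)
 *		return err;
 *	set_vma_from_desc(vma, &desc);
 */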

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/**
 * vma_modify_flags() - Perform any necessary split/merge in preparation for
 * setting VMA flags to *@vm_flags_ptr in the range @start to @end contained
 * within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @vm_flags_ptr: A pointer to the VMA flags that the @start to @end range is
 * about to be set to. On merge, this will be updated to include sticky flags.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * In order to account for sticky VMA flags, the @vm_flags_ptr parameter points
 * to the requested flags, which are then updated so that the caller, should
 * they overwrite any existing flags, correctly retains these.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * flags altered to *@vm_flags_ptr.
 */
__must_check struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		vm_flags_t *vm_flags_ptr);
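
/*
 * Illustrative sketch (loosely mirroring an mprotect-style caller; names are
 * hypothetical): the helper only splits/merges, so the caller applies the new
 * flags itself afterwards:
 *
 *	vma = vma_modify_flags(&vmi, prev, vma, start, end, &newflags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	vm_flags_reset(vma, newflags);
 */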

/**
 * vma_modify_name() - Perform any necessary split/merge in preparation for
 * setting the anonymous VMA name to @new_name in the range @start to @end
 * contained within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @new_name: The anonymous VMA name that the @start to @end range is about to
 * be set to.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * anonymous VMA name changed to @new_name.
 */
__must_check struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct anon_vma_name *new_name);

/**
 * vma_modify_policy() - Perform any necessary split/merge in preparation for
 * setting the NUMA policy to @new_pol in the range @start to @end contained
 * within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @new_pol: The NUMA policy that the @start to @end range is about to be set
 * to.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * NUMA policy changed to @new_pol.
 */
__must_check struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev, struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/**
 * vma_modify_flags_uffd() - Perform any necessary split/merge in preparation
 * for setting VMA flags to @vm_flags and the UFFD context to @new_ctx in the
 * range @start to @end contained within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @vm_flags: The VMA flags that the @start to @end range is about to be set to.
 * @new_ctx: The userfaultfd context that the @start to @end range is about to
 * be set to.
 * @give_up_on_oom: If an out-of-memory condition occurs on merge, simply give
 * up on it and treat the merge as best-effort.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its VMA
 * flags changed to @vm_flags and its userfaultfd context changed to @new_ctx.
 */
__must_check struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, vm_flags_t vm_flags,
		struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom);

__must_check struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
		  struct vm_area_struct *vma, unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		 unsigned long addr, unsigned long request, unsigned long flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, vm_flags_t vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
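
/*
 * Illustrative sketch of how these classifiers feed the per-mm accounting
 * counters (loosely mirroring vm_stat_account(); simplified):
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 */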

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
					    struct vm_area_struct *vma)
{
	vma_assert_attached(vma);

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last <  vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline void vma_iter_store_new(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vma_mark_attached(vma);
	vma_iter_store_overwrite(vmi, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if there is no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
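
/*
 * Illustrative sketch (hypothetical caller): when mapping a fresh range, a
 * caller positioned on the gap can pick up both neighbours in one step:
 *
 *	next = vma_iter_next_rewind(&vmi, &prev);
 *
 * After this, prev and next bracket the proposed range (either may be NULL)
 * and the iterator has been rewound as described above.
 */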

#ifdef CONFIG_64BIT
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}
#else
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return false;
}
#endif

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma);

/* vma_init.h, shared between CONFIG_MMU and nommu. */
void __init vma_state_init(void);
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig);
void vm_area_free(struct vm_area_struct *vma);

/* vma_exec.c */
#ifdef CONFIG_MMU
int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
			  unsigned long *top_mem_p);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
#endif

#endif	/* __MM_VMA_H */