/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;

	bool skip_vma_uprobe :1;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the merge operation, so no guarantees
 * are made as to the contents of this structure after a merge operation has
 * completed.
 */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	/*
	 * Adjacent VMAs, any of which may be NULL if not present:
	 *
	 * |------|--------|------|
	 * | prev | middle | next |
	 * |------|--------|------|
	 *
	 * middle may not yet exist in the case of a proposed new VMA being
	 * merged, or it may be an existing VMA.
	 *
	 * next may be assigned by the caller.
	 */
	struct vm_area_struct *prev;
	struct vm_area_struct *middle;
	struct vm_area_struct *next;
	/* This is the VMA we ultimately target to become the merged VMA. */
	struct vm_area_struct *target;
	/*
	 * Initially, the start, end, pgoff fields are provided by the caller
	 * and describe the proposed new VMA range, whether modifying an
	 * existing VMA (which will be 'middle'), or adding a new one.
	 *
	 * During the merge process these fields are updated to describe the new
	 * range _including those VMAs which will be merged_.
	 */
	unsigned long start;
	unsigned long end;
	pgoff_t pgoff;

	vm_flags_t vm_flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;

	/* If copied from (i.e. mremap()'d), the VMA from which we are copying. */
	struct vm_area_struct *copied_from;

	/* Flags which callers can use to modify merge behaviour: */

	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	bool just_expand :1;

	/*
	 * If a merge is possible, but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;

	/*
	 * If set, skip uprobe_mmap() on the merged VMA.
	 */
	bool skip_vma_uprobe :1;

	/* Internal flags set during merge process: */

	/*
	 * Internal flag indicating the merge increases vmg->middle->vm_start
	 * (and thereby, vmg->prev->vm_end).
	 */
	bool __adjust_middle_start :1;
	/*
	 * Internal flag indicating the merge decreases vmg->next->vm_start
	 * (and thereby, vmg->middle->vm_end).
	 */
	bool __adjust_next_start :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->middle.
	 */
	bool __remove_middle :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->next.
	 */
	bool __remove_next :1;
};

struct unmap_desc {
	struct ma_state *mas;         /* The maple state pointing to the first vma */
	struct vm_area_struct *first; /* The first vma */
	unsigned long pg_start;       /* The first page table address to free (floor) */
	unsigned long pg_end;         /* The last page table address to free (ceiling) */
	unsigned long vma_start;      /* The min vma address */
	unsigned long vma_end;        /* The max vma address */
	unsigned long tree_end;       /* Maximum for the vma tree search */
	unsigned long tree_reset;     /* Where to reset the vma tree walk */
	bool mm_wr_locked;            /* If the mmap write lock is held */
};

/*
 * unmap_all_init() - Initialize unmap_desc to remove all vmas, pointing
 * pg_start and pg_end at a safe location.
 */
static inline void unmap_all_init(struct unmap_desc *unmap,
		struct vma_iterator *vmi, struct vm_area_struct *vma)
{
	unmap->mas = &vmi->mas;
	unmap->first = vma;
	unmap->pg_start = FIRST_USER_ADDRESS;
	unmap->pg_end = USER_PGTABLES_CEILING;
	unmap->vma_start = 0;
	unmap->vma_end = ULONG_MAX;
	unmap->tree_end = ULONG_MAX;
	unmap->tree_reset = vma->vm_end;
	unmap->mm_wr_locked = false;
}

/*
 * unmap_pgtable_init() - Initialize unmap_desc to remove all page tables within
 * the user range.
 *
 * ARM can have mappings outside of vmas.
 * See: e2cdef8c847b4 ("[PATCH] freepgt: free_pgtables from FIRST_USER_ADDRESS")
 *
 * ARM LPAE uses page table mappings beyond the USER_PGTABLES_CEILING
 * See: CONFIG_ARM_LPAE in arch/arm/include/asm/pgtable.h
 */
static inline void unmap_pgtable_init(struct unmap_desc *unmap,
				      struct vma_iterator *vmi)
{
	vma_iter_set(vmi, unmap->tree_reset);
	unmap->vma_start = FIRST_USER_ADDRESS;
	unmap->vma_end = USER_PGTABLES_CEILING;
	unmap->tree_end = USER_PGTABLES_CEILING;
}

#define UNMAP_STATE(name, _vmi, _vma, _vma_start, _vma_end, _prev, _next)      \
	struct unmap_desc name = {                                             \
		.mas = &(_vmi)->mas,                                           \
		.first = _vma,                                                 \
		.pg_start = _prev ? ((struct vm_area_struct *)_prev)->vm_end : \
			FIRST_USER_ADDRESS,                                    \
		.pg_end = _next ? ((struct vm_area_struct *)_next)->vm_start : \
			USER_PGTABLES_CEILING,                                 \
		.vma_start = _vma_start,                                       \
		.vma_end = _vma_end,                                           \
		.tree_end = _next ?                                            \
			((struct vm_area_struct *)_next)->vm_start :           \
			USER_PGTABLES_CEILING,                                 \
		.tree_reset = _vma->vm_end,                                    \
		.mm_wr_locked = true,                                          \
	}
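
/*
 * Illustrative sketch (not a verbatim caller; the variable names are
 * hypothetical): a munmap-style path which has already looked up vma, prev
 * and next can build its descriptor with UNMAP_STATE(), letting prev/next
 * bound the page table range that may safely be freed, and then hand it to
 * unmap_region():
 *
 *	UNMAP_STATE(unmap, &vmi, vma, start, end, prev, next);
 *	unmap_region(&unmap);
 *
 * See the callers of unmap_region() in vma.c for the real usage.
 */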

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
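
/*
 * Example (illustrative, assuming 4KiB pages): for a VMA with
 * vm_start == 0x100000 and vm_pgoff == 10, vma_pgoff_offset(vma, 0x102000)
 * returns 10 + PHYS_PFN(0x2000) == 12.
 */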

#define VMG_STATE(name, mm_, vmi_, start_, end_, vm_flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.vm_flags = vm_flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.middle = vma_,					\
		.next = NULL,					\
		.start = start_,				\
		.end = end_,					\
		.vm_flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}
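
/*
 * Minimal usage sketch (illustrative; assumes the caller has positioned the
 * iterator and looked up prev, and the local names are hypothetical):
 * propose a new range and ask the merge machinery whether an adjacent VMA
 * can absorb it:
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *	vmg.prev = prev;
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma)
 *		... insert a fresh VMA instead, checking vmg_nomem() to
 *		    distinguish OOM from "no merge possible" ...
 *
 * vma_merge_new_range() is declared later in this header.
 */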

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	vma_mark_attached(vma);
	return 0;
}

/*
 * Temporary helper function for stacked mmap handlers which specify
 * f_op->mmap() but which might have an underlying file system which implements
 * f_op->mmap_prepare().
 */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	/*
	 * Since we're invoking .mmap_prepare() despite having a partially
	 * established VMA, we must take care to handle setting fields
	 * correctly.
	 */

	/* Mutable fields. Populated with initial state. */
	vma->vm_pgoff = desc->pgoff;
	if (desc->vm_file != vma->vm_file)
		vma_set_file(vma, desc->vm_file);
	if (desc->vm_flags != vma->vm_flags)
		vm_flags_set(vma, desc->vm_flags);
	vma->vm_page_prot = desc->page_prot;

	/* User-defined fields. */
	vma->vm_ops = desc->vm_ops;
	vma->vm_private_data = desc->private_data;
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma);
void unmap_region(struct unmap_desc *unmap);

/**
 * vma_modify_flags() - Perform any necessary split/merge in preparation for
 * setting VMA flags to *@vm_flags_ptr in the range @start to @end contained
 * within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @vm_flags_ptr: A pointer to the VMA flags that the @start to @end range is
 * about to be set to. On merge, this will be updated to include sticky flags.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * In order to account for sticky VMA flags, the @vm_flags_ptr parameter points
 * to the requested flags, which are then updated so that the caller, should
 * they overwrite any existing flags, correctly retains them.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * flags altered to *@vm_flags_ptr.
 */
__must_check struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		vm_flags_t *vm_flags_ptr);
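
/*
 * Illustrative mprotect-style usage (a sketch, not a verbatim caller, and
 * assuming the ERR_PTR() error convention used by these helpers): the helper
 * only splits/merges so that the returned VMA covers exactly @start..@end;
 * applying the new flags afterwards remains the caller's job:
 *
 *	vm_flags_t vm_flags = new_flags;	// hypothetical requested flags
 *
 *	vma = vma_modify_flags(&vmi, prev, vma, start, end, &vm_flags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... now apply vm_flags (which may have gained sticky flags) to vma ...
 */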

/**
 * vma_modify_name() - Perform any necessary split/merge in preparation for
 * setting the anonymous VMA name to @new_name in the range @start to @end
 * contained within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @new_name: The anonymous VMA name that the @start to @end range is about to
 * be set to.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * anonymous VMA name changed to @new_name.
 */
__must_check struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct anon_vma_name *new_name);

/**
 * vma_modify_policy() - Perform any necessary split/merge in preparation for
 * setting the NUMA policy to @new_pol in the range @start to @end contained
 * within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @new_pol: The NUMA policy that the @start to @end range is about to be set
 * to.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * NUMA policy changed to @new_pol.
 */
__must_check struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev, struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/**
 * vma_modify_flags_uffd() - Perform any necessary split/merge in preparation
 * for setting VMA flags to @vm_flags and the UFFD context to @new_ctx in the
 * range @start to @end contained within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @vm_flags: The VMA flags that the @start to @end range is about to be set to.
 * @new_ctx: The userfaultfd context that the @start to @end range is about to
 * be set to.
 * @give_up_on_oom: If an out of memory condition occurs on merge, simply give
 * up on it and treat the merge as best-effort.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its VMA
 * flags changed to @vm_flags and its userfaultfd context changed to @new_ctx.
 */
__must_check struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, vm_flags_t vm_flags,
		struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom);

__must_check struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
		  struct vm_area_struct *vma, unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);
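
/*
 * Typical batched teardown pattern (an illustrative sketch; the loop and
 * names are hypothetical): _add() buffers VMAs up to the size of the
 * unlink_vma_file_batch array before flushing, and _final() flushes whatever
 * remains:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */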

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		 unsigned long addr, unsigned long request, unsigned long flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
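
/*
 * Example (illustrative): a private VM_READ | VM_WRITE mapping returns true
 * here, since COW means PTEs may be installed read-only and upgraded to
 * writable lazily, whereas a shared mapping defers to vma_wants_writenotify().
 */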

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, vm_flags_t vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
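
/*
 * Example classifications (illustrative): a typical program text segment
 * (VM_READ | VM_EXEC) is an exec mapping, a private anonymous VM_STACK region
 * is a stack mapping, and a private VM_READ | VM_WRITE heap region is a data
 * mapping; a shared writable file mapping matches none of the three.
 */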

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
					    struct vm_area_struct *vma)
{
	vma_assert_attached(vma);

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last <  vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline void vma_iter_store_new(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vma_mark_attached(vma);
	vma_iter_store_overwrite(vmi, vma);
}
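
/*
 * Preallocation pattern (an illustrative sketch): callers that must not fail
 * at store time preallocate maple tree nodes first, then use the store
 * helpers, which consume the preallocation:
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store_new(&vmi, vma);
 */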

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if there is no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}

#ifdef CONFIG_64BIT
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}
#else
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return false;
}
#endif

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma);

/* vma_init.c, shared between CONFIG_MMU and nommu. */
void __init vma_state_init(void);
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig);
void vm_area_free(struct vm_area_struct *vma);

/* vma_exec.c */
#ifdef CONFIG_MMU
int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
			  unsigned long *top_mem_p);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
#endif

#endif	/* __MM_VMA_H */