/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;

	bool skip_vma_uprobe :1;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the merge operation, so no guarantees are
 * made to the contents of this structure after a merge operation has completed.
 */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	/*
	 * Adjacent VMAs, any of which may be NULL if not present:
	 *
	 * |------|--------|------|
	 * | prev | middle | next |
	 * |------|--------|------|
	 *
	 * middle may not yet exist in the case of a proposed new VMA being
	 * merged, or it may be an existing VMA.
	 *
	 * next may be assigned by the caller.
	 */
	struct vm_area_struct *prev;
	struct vm_area_struct *middle;
	struct vm_area_struct *next;
	/* This is the VMA we ultimately target to become the merged VMA. */
	struct vm_area_struct *target;
	/*
	 * Initially, the start, end, pgoff fields are provided by the caller
	 * and describe the proposed new VMA range, whether modifying an
	 * existing VMA (which will be 'middle'), or adding a new one.
	 *
	 * During the merge process these fields are updated to describe the new
	 * range _including those VMAs which will be merged_.
	 */
	unsigned long start;
	unsigned long end;
	pgoff_t pgoff;

	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;

	/* Flags which callers can use to modify merge behaviour: */

	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	bool just_expand :1;

	/*
	 * If a merge is possible, but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;

	/*
	 * If set, skip uprobe_mmap upon merged vma.
	 */
	bool skip_vma_uprobe :1;

	/* Internal flags set during merge process: */

	/*
	 * Internal flag indicating the merge increases vmg->middle->vm_start
	 * (and thereby, vmg->prev->vm_end).
	 */
	bool __adjust_middle_start :1;
	/*
	 * Internal flag indicating the merge decreases vmg->next->vm_start
	 * (and thereby, vmg->middle->vm_end).
	 */
	bool __adjust_next_start :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->middle.
	 */
	bool __remove_middle :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->next.
	 */
	bool __remove_next :1;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
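
/*
 * Illustrative example only (not part of the original header), assuming
 * 4 KiB pages: for a VMA with vm_start = 0x7f0000001000 and vm_pgoff = 2,
 * an address one page into the mapping resolves as
 *
 *	vma_pgoff_offset(vma, 0x7f0000002000)
 *		== vma->vm_pgoff + PHYS_PFN(0x1000)
 *		== 2 + 1 == 3
 *
 * i.e. the page offset into the backing object corresponding to addr.
 */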

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.middle = vma_,					\
		.next = NULL,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}
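
/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * header): a caller mapping a brand new range might set up merge state with
 * VMG_STATE() and attempt a merge roughly as follows (error handling
 * elided):
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma && vmg_nomem(&vmg))
 *		return -ENOMEM;
 *
 * On VMA_MERGE_SUCCESS the returned VMA already covers the requested range;
 * otherwise the caller has to allocate and insert a new VMA itself.
 */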

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	vma_mark_attached(vma);
	return 0;
}

/*
 * Temporary helper functions for file systems which wrap an invocation of
 * f_op->mmap() but which might have an underlying file system which implements
 * f_op->mmap_prepare().
 */

static inline struct vm_area_desc *vma_to_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	desc->mm = vma->vm_mm;
	desc->start = vma->vm_start;
	desc->end = vma->vm_end;

	desc->pgoff = vma->vm_pgoff;
	desc->file = vma->vm_file;
	desc->vm_flags = vma->vm_flags;
	desc->page_prot = vma->vm_page_prot;

	desc->vm_ops = NULL;
	desc->private_data = NULL;

	return desc;
}

static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	/*
	 * Since we're invoking .mmap_prepare() despite having a partially
	 * established VMA, we must take care to handle setting fields
	 * correctly.
	 */

	/* Mutable fields. Populated with initial state. */
	vma->vm_pgoff = desc->pgoff;
	if (vma->vm_file != desc->file)
		vma_set_file(vma, desc->file);
	if (vma->vm_flags != desc->vm_flags)
		vm_flags_set(vma, desc->vm_flags);
	vma->vm_page_prot = desc->page_prot;

	/* User-defined fields. */
	vma->vm_ops = desc->vm_ops;
	vma->vm_private_data = desc->private_data;
}
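
/*
 * Illustrative sketch only (hypothetical wrapper, not part of this header):
 * a file system whose f_op->mmap() forwards to an underlying file that
 * implements f_op->mmap_prepare() could bridge the two roughly as follows:
 *
 *	struct vm_area_desc desc;
 *	int err;
 *
 *	err = underlying_file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
 *	if (err)
 *		return err;
 *	set_vma_from_desc(vma, &desc);
 *
 * i.e. current VMA state is snapshotted into a descriptor, the underlying
 * hook mutates the descriptor, and the result is copied back into the VMA.
 */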

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
__must_check struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
__must_check struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
__must_check struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
__must_check struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx,
		       bool give_up_on_oom);

__must_check struct vm_area_struct
*vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct
*vma_merge_extend(struct vma_iterator *vmi,
		  struct vm_area_struct *vma,
		  unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		 unsigned long addr, unsigned long request, unsigned long flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
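
/*
 * Illustrative examples only (not part of the original header): a private
 * read/write anonymous mapping (VM_READ | VM_WRITE) is counted as a data
 * mapping, a read/exec file mapping of library text (VM_READ | VM_EXEC) as
 * an exec mapping, and a thread stack (VM_READ | VM_WRITE | VM_STACK) as a
 * stack mapping. Note that a writable executable mapping is deliberately
 * *not* counted by is_exec_mapping().
 */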

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}
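
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * searching for the lowest free gap able to hold @len bytes between
 * @low_limit and @high_limit might look roughly like:
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, len))
 *		return -ENOMEM;
 *	addr = vma_iter_addr(&vmi);
 *
 * Note the [min, max) convention: these helpers subtract one from @max
 * before handing the range to the maple tree, which works on inclusive
 * ranges.
 */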

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
					    struct vm_area_struct *vma)
{
	vma_assert_attached(vma);

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last <  vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline void vma_iter_store_new(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vma_mark_attached(vma);
	vma_iter_store_overwrite(vmi, vma);
}
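
/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * header): the usual pattern is to preallocate maple tree nodes up front so
 * the store itself cannot fail, e.g. when inserting a freshly allocated VMA:
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store_new(&vmi, vma);
 *
 * vma_iter_store_new() marks the VMA attached before writing it into the
 * tree, whereas vma_iter_store_overwrite() expects an already-attached VMA.
 */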

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if no previous VMA exists, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
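
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * with the iterator positioned in the gap where a new mapping will go, both
 * neighbours can be picked up in one step:
 *
 *	next = vma_iter_next_rewind(&vmi, &prev);
 *
 * leaving @prev and @next as the surrounding VMAs (either may be NULL)
 * while keeping the iterator usable for the caller's subsequent work on
 * that range.
 */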

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a VMA is sealed against modification.
 * Return true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif
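
/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * header): operations that alter a mapping are expected to bail out early
 * on sealed VMAs, e.g.:
 *
 *	if (!can_modify_vma(vma))
 *		return -EPERM;
 *
 * On !CONFIG_64BIT kernels sealing is unsupported, so the check is a
 * constant true and the error path compiles away.
 */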

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma);

/* vma_init.h, shared between CONFIG_MMU and nommu. */
void __init vma_state_init(void);
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig);
void vm_area_free(struct vm_area_struct *vma);

/* vma_exec.c */
#ifdef CONFIG_MMU
int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
			  unsigned long *top_mem_p);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
#endif

#endif	/* __MM_VMA_H */