/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;

	bool skip_vma_uprobe :1;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the merge operation, so no guarantees are
 * made to the contents of this structure after a merge operation has completed.
 */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	/*
	 * Adjacent VMAs, any of which may be NULL if not present:
	 *
	 * |------|--------|------|
	 * | prev | middle | next |
	 * |------|--------|------|
	 *
	 * middle may not yet exist in the case of a proposed new VMA being
	 * merged, or it may be an existing VMA.
	 *
	 * next may be assigned by the caller.
	 */
	struct vm_area_struct *prev;
	struct vm_area_struct *middle;
	struct vm_area_struct *next;
	/* This is the VMA we ultimately target to become the merged VMA. */
	struct vm_area_struct *target;
	/*
	 * Initially, the start, end, pgoff fields are provided by the caller
	 * and describe the proposed new VMA range, whether modifying an
	 * existing VMA (which will be 'middle'), or adding a new one.
	 *
	 * During the merge process these fields are updated to describe the new
	 * range _including those VMAs which will be merged_.
	 */
	unsigned long start;
	unsigned long end;
	pgoff_t pgoff;

	vm_flags_t vm_flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;

	/* Flags which callers can use to modify merge behaviour: */

	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	bool just_expand :1;

	/*
	 * If a merge is possible, but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;

	/*
	 * If set, skip uprobe_mmap upon merged vma.
	 */
	bool skip_vma_uprobe :1;

	/* Internal flags set during merge process: */

	/*
	 * Internal flag indicating the merge increases vmg->middle->vm_start
	 * (and thereby, vmg->prev->vm_end).
	 */
	bool __adjust_middle_start :1;
	/*
	 * Internal flag indicating the merge decreases vmg->next->vm_start
	 * (and thereby, vmg->middle->vm_end).
	 */
	bool __adjust_next_start :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->middle.
	 */
	bool __remove_middle :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->next.
	 */
	bool __remove_next :1;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
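
/*
 * Worked example (illustrative, assuming 4KiB pages): for a VMA with
 * vm_start == 0x10000 and vm_pgoff == 10, vma_pgoff_offset(vma, 0x13000)
 * yields 10 + PHYS_PFN(0x3000) == 13, i.e. the page offset within the
 * backing object that corresponds to addr.
 */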

#define VMG_STATE(name, mm_, vmi_, start_, end_, vm_flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.vm_flags = vm_flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.middle = vma_,					\
		.next = NULL,					\
		.start = start_,				\
		.end = end_,					\
		.vm_flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}
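
/*
 * Illustrative sketch (not a definitive recipe): a caller proposing a
 * brand-new mapping might initialise a merge attempt roughly as follows,
 * assuming vmi, prev, addr, len, vm_flags and pgoff are already
 * established and the appropriate mmap lock is held:
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *	vmg.prev = prev;
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma)
 *		... allocate and insert a fresh VMA instead ...
 */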

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	vma_mark_attached(vma);
	return 0;
}
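
/*
 * Usage sketch (illustrative): callers treat a non-zero return as an
 * allocation failure, e.g.:
 *
 *	if (vma_iter_store_gfp(&vmi, vma, GFP_KERNEL))
 *		goto nomem;
 */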

/*
 * Temporary helper functions for file systems which wrap an invocation of
 * f_op->mmap() but which might have an underlying file system which implements
 * f_op->mmap_prepare().
 */

static inline struct vm_area_desc *vma_to_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	desc->mm = vma->vm_mm;
	desc->start = vma->vm_start;
	desc->end = vma->vm_end;

	desc->pgoff = vma->vm_pgoff;
	desc->file = vma->vm_file;
	desc->vm_flags = vma->vm_flags;
	desc->page_prot = vma->vm_page_prot;

	desc->vm_ops = NULL;
	desc->private_data = NULL;

	return desc;
}

static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	/*
	 * Since we're invoking .mmap_prepare() despite having a partially
	 * established VMA, we must take care to handle setting fields
	 * correctly.
	 */

	/* Mutable fields. Populated with initial state. */
	vma->vm_pgoff = desc->pgoff;
	if (vma->vm_file != desc->file)
		vma_set_file(vma, desc->file);
	if (vma->vm_flags != desc->vm_flags)
		vm_flags_set(vma, desc->vm_flags);
	vma->vm_page_prot = desc->page_prot;

	/* User-defined fields. */
	vma->vm_ops = desc->vm_ops;
	vma->vm_private_data = desc->private_data;
}
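
/*
 * Illustrative sketch of the intended wrap pattern (assumptions: 'file' is
 * the underlying file providing .mmap_prepare(), and the caller performs
 * whatever additional setup it needs):
 *
 *	struct vm_area_desc desc;
 *	int err;
 *
 *	err = file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
 *	if (!err)
 *		set_vma_from_desc(vma, &desc);
 *	return err;
 */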

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
__must_check struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		vm_flags_t vm_flags);

/* We are about to modify the VMA's anon_name. */
__must_check struct vm_area_struct
*vma_modify_name(struct vma_iterator *vmi,
		 struct vm_area_struct *prev,
		 struct vm_area_struct *vma,
		 unsigned long start,
		 unsigned long end,
		 struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
__must_check struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
__must_check struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       vm_flags_t vm_flags,
		       struct vm_userfaultfd_ctx new_ctx,
		       bool give_up_on_oom);

__must_check struct vm_area_struct
*vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct
*vma_merge_extend(struct vma_iterator *vmi,
		  struct vm_area_struct *vma,
		  unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);
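
/*
 * Typical batched-unlink pattern (illustrative sketch; the caller is
 * assumed to hold the locks the unbatched unlink_file_vma() requires):
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for each vma to be unlinked:
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */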

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		 unsigned long addr, unsigned long request, unsigned long flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, vm_flags_t vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
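
/*
 * For example (illustrative): a PROT_READ|PROT_EXEC file mapping
 * (VM_READ | VM_EXEC) satisfies is_exec_mapping(), a private
 * PROT_READ|PROT_WRITE anonymous mapping (VM_READ | VM_WRITE) satisfies
 * is_data_mapping(), and a shared writable mapping satisfies neither.
 */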

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}
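
/*
 * Note: 'last' is exclusive here and converted to the maple tree's
 * inclusive end, so e.g. vma_iter_config(&vmi, vma->vm_start, vma->vm_end)
 * configures the iterator over exactly the VMA's range.
 */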

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}
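
/*
 * Usage sketch (illustrative): unmapped-area searches treat a non-zero
 * return as "no suitable gap"; on success the iterator is positioned on
 * the gap found, e.g.:
 *
 *	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
 *		return -ENOMEM;
 *	gap_start = vma_iter_addr(&vmi);
 */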

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
					    struct vm_area_struct *vma)
{
	vma_assert_attached(vma);

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline void vma_iter_store_new(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vma_mark_attached(vma);
	vma_iter_store_overwrite(vmi, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or if there is no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check if a VMA is sealed against modification.
 * Returns true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}
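
/*
 * Illustrative caller-side pattern (a sketch, not lifted from this file):
 * paths that are about to mutate a VMA typically bail out early, e.g.:
 *
 *	if (!can_modify_vma(vma))
 *		return -EPERM;
 */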

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma);

/* vma_init.c, shared between CONFIG_MMU and nommu. */
void __init vma_state_init(void);
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig);
void vm_area_free(struct vm_area_struct *vma);

/* vma_exec.c */
#ifdef CONFIG_MMU
int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
			  unsigned long *top_mem_p);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
#endif

#endif	/* __MM_VMA_H */