/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_prepare(struct vma_prepare *vp);

/* Required for do_brk_flags(). */
void init_vma_prep(struct vma_prepare *vp,
		   struct vm_area_struct *vma);

/* Required for do_brk_flags(). */
void vma_complete(struct vma_prepare *vp,
		  struct vma_iterator *vmi, struct mm_struct *mm);
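
/*
 * Illustrative sketch only (not code from vma.c): a simple in-place VMA
 * modification typically describes the affected VMA in a struct
 * vma_prepare, takes the relevant locks with vma_prepare(), performs the
 * change, and finishes the bookkeeping with vma_complete().  vmi/vma and
 * the new range below are assumed to be set up by the caller.
 *
 *	struct vma_prepare vp;
 *
 *	vma_iter_config(vmi, new_start, new_end);
 *	if (vma_iter_prealloc(vmi, vma))
 *		return -ENOMEM;
 *
 *	init_vma_prep(&vp, vma);
 *	vma_prepare(&vp);
 *	... update vma->vm_start, vma->vm_end, vma->vm_pgoff ...
 *	vma_iter_store(vmi, vma);
 *	vma_complete(&vp, vmi, vma->vm_mm);
 */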

int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff,
	       struct vm_area_struct *next);

int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);
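
/*
 * Hypothetical usage sketch (not lifted from the kernel): unmapping a range
 * with mmap_lock already held for write.  VMA_ITERATOR() and
 * userfaultfd_unmap_complete() come from linux/mm.h and
 * linux/userfaultfd_k.h; start/len are assumed to be caller-provided.
 *
 *	VMA_ITERATOR(vmi, mm, start);
 *	LIST_HEAD(uf);
 *	int ret;
 *
 *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, false);
 *	userfaultfd_unmap_complete(mm, &uf);
 *	if (ret)
 *		return ret;
 */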

void remove_vma(struct vm_area_struct *vma, bool unreachable);

void unmap_region(struct mm_struct *mm, struct ma_state *mas,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		struct vm_area_struct *next, unsigned long start,
		unsigned long end, unsigned long tree_end, bool mm_wr_locked);

/* Required by mmap_region(). */
bool
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name);

/* Required by mmap_region() and do_brk_flags(). */
bool
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name);

struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
				  struct vm_area_struct *prev,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  unsigned long vm_flags,
				  struct mempolicy *policy,
				  struct vm_userfaultfd_ctx uffd_ctx,
				  struct anon_vma_name *anon_name);

/* We are about to modify the VMA's flags. */
static inline struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		  struct vm_area_struct *prev,
		  struct vm_area_struct *vma,
		  unsigned long start, unsigned long end,
		  unsigned long new_flags)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx,
			  anon_vma_name(vma));
}

/* We are about to modify the VMA's flags and/or anon_name. */
static inline struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
}

/* We are about to modify the VMA's memory policy. */
static inline struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
			  new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
}

/* We are about to modify the VMA's flags and/or uffd context. */
static inline struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), new_ctx, anon_vma_name(vma));
}
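
/*
 * Rough usage sketch of the vma_modify*() wrappers, for illustration only
 * (the real callers live in the mprotect/madvise paths): the caller walks
 * to the VMA covering [start, end), asks vma_modify_flags() for a VMA that
 * exactly spans that range (merging or splitting as required), and only
 * then applies the new flags.  vmi/prev/vma/start/end/newflags are assumed
 * to be set up by the caller.
 *
 *	vma = vma_modify_flags(vmi, prev, vma, start, end, newflags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	vm_flags_reset(vma, newflags);
 */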

struct vm_area_struct
*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
		   struct vm_area_struct *vma, unsigned long start,
		   unsigned long end, pgoff_t pgoff);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);
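
/*
 * Sketch of the intended batching pattern (illustrative, not copied from a
 * real caller): code tearing down many VMAs initialises a batch on the
 * stack, adds VMAs as it walks them, and flushes once at the end.  The
 * batch also flushes itself when its vmas[] array fills up.
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */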

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);
unsigned long count_vma_pages_range(struct mm_struct *mm,
				    unsigned long addr, unsigned long end);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
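
/*
 * Minimal sketch of using vma_iter_store_gfp() (assumed caller context, not
 * taken from vma.c): unlike vma_iter_store(), this variant allocates maple
 * tree nodes on demand and can therefore fail, so the return value must be
 * checked.
 *
 *	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
 *		return -ENOMEM;
 */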


/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
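
/*
 * Worked examples (illustrative only) of how typical mappings fall out of
 * the classifiers above:
 *
 *   - a PROT_READ|PROT_EXEC file mapping of a library's text
 *     (VM_READ | VM_EXEC | ...)			-> is_exec_mapping()
 *   - the main process stack
 *     (VM_READ | VM_WRITE | VM_GROWSDOWN | ...)	-> is_stack_mapping()
 *   - a MAP_PRIVATE, PROT_READ|PROT_WRITE anonymous mapping such as the heap
 *     (VM_READ | VM_WRITE | ...)			-> is_data_mapping()
 *
 * A MAP_SHARED writable mapping matches none of the three; it still counts
 * toward the task's total mapped size.
 */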


static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}
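
/*
 * Note the off-by-one convention: VMA ranges are half-open [vm_start,
 * vm_end) while the maple tree stores inclusive [index, last] ranges, so
 * the helper subtracts one from 'last'.  A hypothetical caller configuring
 * the iterator for an existing VMA therefore passes the exclusive end:
 *
 *	vma_iter_config(vmi, vma->vm_start, vma->vm_end);
 */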

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last <  vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}
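
/*
 * The usual pairing (a sketch assuming caller-provided vmi/vma, not code
 * copied from vma.c): reserve maple tree nodes up front with
 * vma_iter_prealloc() while it is still possible to fail cleanly, then
 * perform the store with vma_iter_store(), which cannot fail.
 *
 *	vma_iter_config(vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(vmi, vma))
 *		return -ENOMEM;
 *	...
 *	vma_iter_store(vmi, vma);
 */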

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a VMA is sealed against modification.
 * Return true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif
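
/*
 * Illustrative check (not copied from a real caller) of how the sealing
 * predicate is meant to gate address-space modification: paths such as
 * munmap/mprotect/mremap bail out with -EPERM before touching a sealed VMA.
 *
 *	if (!can_modify_vma(vma))
 *		return -EPERM;
 */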

#endif	/* __MM_VMA_H */