// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

struct mmap_state {
	struct mm_struct *mm;
	struct vma_iterator *vmi;

	unsigned long addr;
	unsigned long end;
	pgoff_t pgoff;
	unsigned long pglen;
	unsigned long flags;
	struct file *file;

	unsigned long charged;
	bool retry_merge;

	struct vm_area_struct *prev;
	struct vm_area_struct *next;

	/* Unmapping state. */
	struct vma_munmap_struct vms;
	struct ma_state mas_detach;
	struct maple_tree mt_detach;
};

#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, flags_, file_) \
	struct mmap_state name = {					\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.addr = addr_,						\
		.end = (addr_) + (len_),				\
		.pgoff = pgoff_,					\
		.pglen = PHYS_PFN(len_),				\
		.flags = flags_,					\
		.file = file_,						\
	}

#define VMG_MMAP_STATE(name, map_, vma_)				\
	struct vma_merge_struct name = {				\
		.mm = (map_)->mm,					\
		.vmi = (map_)->vmi,					\
		.start = (map_)->addr,					\
		.end = (map_)->end,					\
		.flags = (map_)->flags,					\
		.pgoff = (map_)->pgoff,					\
		.file = (map_)->file,					\
		.prev = (map_)->prev,					\
		.vma = vma_,						\
		.next = (vma_) ? NULL : (map_)->next,			\
		.state = VMA_MERGE_START,				\
		.merge_flags = VMG_FLAG_DEFAULT,			\
	}

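/*
 * Check that the attributes of the proposed VMA described by @vmg (flags,
 * file, mempolicy, userfaultfd context and anon VMA name) are compatible
 * with those of the existing prev/next VMA selected by @merge_next.
 */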
static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
{
	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;

	if (!mpol_equal(vmg->policy, vma_policy(vma)))
		return false;
	/*
	 * VM_SOFTDIRTY should not prevent VMA merging if we match the flags
	 * except for the dirty bit -- the caller should mark the merged VMA
	 * as dirty. If the dirty bit were not excluded from the comparison,
	 * we would increase pressure on the memory system, forcing the
	 * kernel to generate new VMAs when old ones could be extended
	 * instead.
	 */
	if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != vmg->file)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
		return false;
	return true;
}

static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
		struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMAs cloned from
	 * parents; this improves scalability by reducing anon_vma lock
	 * contention.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
	     list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}

/* Are the anon_vma's belonging to each VMA compatible with one another? */
static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
					    struct vm_area_struct *vma2)
{
	return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
}

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static void init_multi_vma_prep(struct vma_prepare *vp,
				struct vm_area_struct *vma,
				struct vm_area_struct *next,
				struct vm_area_struct *remove,
				struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;

}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but the offsets are
 * incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
static bool can_vma_merge_before(struct vma_merge_struct *vmg)
{
	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
			return true;
	}

	return false;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but the offsets are
 * incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
static bool can_vma_merge_after(struct vma_merge_struct *vmg)
{
	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
			return true;
	}
	return false;
}

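/*
 * Insert a file-backed VMA into its address_space's interval tree. If the VMA
 * is a shared mapping that may be written to, also mark the mapping as
 * allowing writable mappings.
 */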
static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering them
 * @vp: The initialized vma_prepare struct
 */
static void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}

}

/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
			 struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);
		uprobe_mmap(vp->vma);

		if (vp->adj_next)
			uprobe_mmap(vp->adj_next);
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove, true);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas.
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}

/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}

/*
 * Can the proposed VMA be merged with the left (previous) VMA taking into
 * account the start position of the proposed range.
 */
static bool can_vma_merge_left(struct vma_merge_struct *vmg)

{
	return vmg->prev && vmg->prev->vm_end == vmg->start &&
		can_vma_merge_after(vmg);
}

/*
 * Can the proposed VMA be merged with the right (next) VMA taking into
 * account the end position of the proposed range.
 *
 * In addition, if we can merge with the left VMA, ensure that left and right
 * anon_vma's are also compatible.
 */
static bool can_vma_merge_right(struct vma_merge_struct *vmg,
				bool can_merge_left)
{
	if (!vmg->next || vmg->end != vmg->next->vm_start ||
	    !can_vma_merge_before(vmg))
		return false;

	if (!can_merge_left)
		return true;

	/*
	 * If we can merge with prev (left) and next (right), indicating that
	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
	 * does not mean prev and next are compatible with EACH OTHER.
	 *
	 * We therefore check this in addition to mergeability to either side.
	 */
	return are_anon_vmas_compatible(vmg->prev, vmg->next);
}

/*
 * Close a vm structure and free it.
 */
void remove_vma(struct vm_area_struct *vma, bool unreachable)
{
	might_sleep();
	vma_close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
		   /* mm_wr_locked = */ true);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
}

/*
 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
 * has already been checked or doesn't make sense to fail.
 * VMA Iterator will point to the original VMA.
 */
static __must_check int
__split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
	    unsigned long addr, int new_below)
{
	struct vma_prepare vp;
	struct vm_area_struct *new;
	int err;

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);

	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = -ENOMEM;
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, new))
		goto out_free_vma;

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vmi;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	vma_start_write(vma);
	vma_start_write(new);

	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete stores the new vma */
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);

	/* Success. */
	if (new_below)
		vma_next(vmi);
	else
		vma_prev(vmi);

	return 0;

out_free_mpol:
	mpol_put(vma_policy(new));
out_free_vmi:
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	return err;
}

/*
 * Split a vma into two pieces at address 'addr'; a new vma is allocated
 * either for the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	if (vma->vm_mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(vmi, vma, addr, new_below);
}

/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,
			struct vm_area_struct *src, struct vm_area_struct **dup)
{
	/*
	 * Easily overlooked: when mprotect shifts the boundary, make sure the
	 * expanding vma has anon_vma set if the shrinking vma had one, to
	 * cover any anon pages imported.
	 */
	if (src->anon_vma && !dst->anon_vma) {
		int ret;

		vma_assert_write_locked(dst);
		dst->anon_vma = src->anon_vma;
		ret = anon_vma_clone(dst, src);
		if (ret)
			return ret;

		*dup = dst;
	}

	return 0;
}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
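/*
 * Sanity-check the VMA maple tree against the VMAs it tracks, warning and
 * dumping state on any inconsistency.
 */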
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		/* Check for an infinite loop */
		if (++i > mm->map_count + 10) {
			i = -1;
			break;
		}
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

/* Actually perform the VMA merge operation. */
static int commit_merge(struct vma_merge_struct *vmg,
			struct vm_area_struct *adjust,
			struct vm_area_struct *remove,
			struct vm_area_struct *remove2,
			long adj_start,
			bool expanded)
{
	struct vma_prepare vp;

	init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);

	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
		   vp.anon_vma != adjust->anon_vma);

	if (expanded) {
		/* Note: vma iterator must be pointing to 'start'. */
		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
	} else {
		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
				adjust->vm_end);
	}

	if (vma_iter_prealloc(vmg->vmi, vmg->vma))
		return -ENOMEM;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);

	if (expanded)
		vma_iter_store(vmg->vmi, vmg->vma);

	if (adj_start) {
		adjust->vm_start += adj_start;
		adjust->vm_pgoff += PHYS_PFN(adj_start);
		if (adj_start < 0) {
			WARN_ON(expanded);
			vma_iter_store(vmg->vmi, adjust);
		}
	}

	vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);

	return 0;
}

/* We can only remove VMAs when merging if they do not have a close hook. */
static bool can_merge_remove_vma(struct vm_area_struct *vma)
{
	return !vma->vm_ops || !vma->vm_ops->close;
}

/*
 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
 * attributes modified.
 *
 * @vmg: Describes the modifications being made to a VMA and associated
 *       metadata.
 *
 * When the attributes of a range within a VMA change, it might be possible
 * for immediately adjacent VMAs to be merged into that VMA due to having
 * identical properties.
 *
 * This function checks for the existence of any such mergeable VMAs and updates
 * the maple tree describing the @vmg->vma->vm_mm address space to account for
 * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
 *
 * As part of this operation, if a merge occurs, the @vmg object will have its
 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
 * calls to this function should reset these fields.
 *
 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
 *
 * ASSUMPTIONS:
 * - The caller must assign the VMA to be modified to @vmg->vma.
 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
 * - The caller must not set @vmg->next, as we determine this.
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
 */
static __must_check struct vm_area_struct *vma_merge_existing_range(
		struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next, *res;
	struct vm_area_struct *anon_dup = NULL;
	struct vm_area_struct *adjust = NULL;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	bool left_side = vma && start == vma->vm_start;
	bool right_side = vma && end == vma->vm_end;
	int err = 0;
	long adj_start = 0;
	bool merge_will_delete_vma, merge_will_delete_next;
	bool merge_left, merge_right, merge_both;
	bool expanded;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */
	VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
	VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
	VM_WARN_ON_VMG(start >= end, vmg);

	/*
	 * If vma == prev, then we are offset into a VMA. Otherwise, the range
	 * must begin at the start of the VMA and span a portion of it.
	 */
	VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) ||
			       vmg->end > vma->vm_end), vmg);
	/* The vmi must be positioned within vmg->vma. */
	VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
				vma_iter_addr(vmg->vmi) < vma->vm_end), vmg);

	vmg->state = VMA_MERGE_NOMERGE;

	/*
	 * If this is a special mapping, or if the range being modified is at
	 * neither the leftmost nor the rightmost edge of the VMA, then we
	 * have no chance of merging and should abort.
	 */
	if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
		return NULL;

	if (left_side)
		merge_left = can_vma_merge_left(vmg);
	else
		merge_left = false;

	if (right_side) {
		next = vmg->next = vma_iter_next_range(vmg->vmi);
		vma_iter_prev_range(vmg->vmi);

		merge_right = can_vma_merge_right(vmg, merge_left);
	} else {
		merge_right = false;
		next = NULL;
	}

	if (merge_left)		/* If merging prev, position iterator there. */
		vma_prev(vmg->vmi);
	else if (!merge_right)	/* If we have nothing to merge, abort. */
		return NULL;

	merge_both = merge_left && merge_right;
	/* If we span the entire VMA, a merge implies it will be deleted. */
	merge_will_delete_vma = left_side && right_side;

	/*
	 * If we need to remove vma in its entirety but are unable to do so,
	 * we have no sensible recourse but to abort the merge.
	 */
	if (merge_will_delete_vma && !can_merge_remove_vma(vma))
		return NULL;

	/*
	 * If we merge both VMAs, then next is also deleted. This implies
	 * merge_will_delete_vma also.
	 */
	merge_will_delete_next = merge_both;

	/*
	 * If we cannot delete next, then we can reduce the operation to merging
	 * prev and vma (thereby deleting vma).
	 */
	if (merge_will_delete_next && !can_merge_remove_vma(next)) {
		merge_will_delete_next = false;
		merge_right = false;
		merge_both = false;
	}

	/* No matter what happens, we will be adjusting vma. */
	vma_start_write(vma);

	if (merge_left)
		vma_start_write(prev);

	if (merge_right)
		vma_start_write(next);

	if (merge_both) {
		/*
		 *         |<----->|
		 * |-------*********-------|
		 *   prev     vma     next
		 *  extend  delete   delete
		 */

		vmg->vma = prev;
		vmg->start = prev->vm_start;
		vmg->end = next->vm_end;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * We already ensured anon_vma compatibility above, so now it's
		 * simply a case of, if prev has no anon_vma object, which of
		 * next or vma contains the anon_vma we must duplicate.
		 */
		err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
	} else if (merge_left) {
		/*
		 *         |<----->| OR
		 *         |<--------->|
		 * |-------*************
		 *   prev       vma
		 *  extend  shrink/delete
		 */

		vmg->vma = prev;
		vmg->start = prev->vm_start;
		vmg->pgoff = prev->vm_pgoff;

		if (!merge_will_delete_vma) {
			adjust = vma;
			adj_start = vmg->end - vma->vm_start;
		}

		err = dup_anon_vma(prev, vma, &anon_dup);
	} else { /* merge_right */
		/*
		 *     |<----->| OR
		 * |<--------->|
		 * *************-------|
		 *      vma       next
		 * shrink/delete extend
		 */

		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

		VM_WARN_ON_VMG(!merge_right, vmg);
		/* If we are offset into a VMA, then prev must be vma. */
		VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg);

		if (merge_will_delete_vma) {
			vmg->vma = next;
			vmg->end = next->vm_end;
			vmg->pgoff = next->vm_pgoff - pglen;
		} else {
			/*
			 * We shrink vma and expand next.
			 *
			 * IMPORTANT: This is the ONLY case where the final
			 * merged VMA is NOT vmg->vma, but rather vmg->next.
			 */

			vmg->start = vma->vm_start;
			vmg->end = start;
			vmg->pgoff = vma->vm_pgoff;

			adjust = next;
			adj_start = -(vma->vm_end - start);
		}

		err = dup_anon_vma(next, vma, &anon_dup);
	}

	if (err)
		goto abort;

	/*
	 * In nearly all cases, we expand vmg->vma. There is one exception -
	 * merge_right where we partially span the VMA. In this case we shrink
	 * the end of vmg->vma and adjust the start of vmg->next accordingly.
	 */
	expanded = !merge_right || merge_will_delete_vma;

	if (commit_merge(vmg, adjust,
			 merge_will_delete_vma ? vma : NULL,
			 merge_will_delete_next ? next : NULL,
			 adj_start, expanded)) {
		if (anon_dup)
			unlink_anon_vmas(anon_dup);

		vmg->state = VMA_MERGE_ERROR_NOMEM;
		return NULL;
	}

	res = merge_left ? prev : next;
	khugepaged_enter_vma(res, vmg->flags);

	vmg->state = VMA_MERGE_SUCCESS;
	return res;

abort:
	vma_iter_set(vmg->vmi, start);
	vma_iter_load(vmg->vmi);
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	return NULL;
}

/*
 * vma_merge_new_range - Attempt to merge a new VMA into address space
 *
 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
 *       (exclusive), which we try to merge with any adjacent VMAs if possible.
 *
 * We are about to add a VMA to the address space starting at @vmg->start and
 * ending at @vmg->end. There are three different possible scenarios:
 *
 * 1. There is a VMA with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
 *    EXPAND that VMA:
 *
 * Proposed:       |-----|  or  |-----|
 * Existing:  |----|            |----|
 *
 * 2. There are VMAs with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
 *    EXPAND the former and REMOVE the latter:
 *
 * Proposed:       |-----|
 * Existing:  |----|     |----|
 *
 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
 *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
 *
 * In instances where we can merge, this function returns the expanded VMA which
 * will have its range adjusted accordingly and the underlying maple tree also
 * adjusted.
 *
 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
 *          to the VMA we expanded.
 *
 * This function adjusts @vmg to provide @vmg->next if not already specified,
 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
 *   other than VMAs that will be unmapped should the operation succeed.
 * - The caller must have specified the previous vma in @vmg->prev.
 * - The caller must have specified the next vma in @vmg->next.
 * - The caller must have positioned the vmi at or before the gap.
 */
struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next = vmg->next;
	unsigned long end = vmg->end;
	bool can_merge_left, can_merge_right;
	bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON_VMG(vmg->vma, vmg);
	/* vmi must point at or before the gap. */
	VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);

	vmg->state = VMA_MERGE_NOMERGE;

	/* Special VMAs are unmergeable, also if no prev/next. */
	if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
		return NULL;

	can_merge_left = can_vma_merge_left(vmg);
	can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);

	/* If we can merge with the next VMA, adjust vmg accordingly. */
	if (can_merge_right) {
		vmg->end = next->vm_end;
		vmg->vma = next;
	}

	/* If we can merge with the previous VMA, adjust vmg accordingly. */
	if (can_merge_left) {
		vmg->start = prev->vm_start;
		vmg->vma = prev;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * If this merge would result in removal of the next VMA but we
		 * are not permitted to do so, reduce the operation to merging
		 * prev and vma.
		 */
		if (can_merge_right && !can_merge_remove_vma(next))
			vmg->end = end;

		/* In expand-only case we are already positioned at prev. */
		if (!just_expand) {
			/* Equivalent to going to the previous range. */
			vma_prev(vmg->vmi);
		}
	}

	/*
	 * Now try to expand adjacent VMA(s). This takes care of removing the
	 * following VMA if we have VMAs on both sides.
	 */
	if (vmg->vma && !vma_expand(vmg)) {
		khugepaged_enter_vma(vmg->vma, vmg->flags);
		vmg->state = VMA_MERGE_SUCCESS;
		return vmg->vma;
	}

	return NULL;
}

/*
 * vma_expand - Expand an existing VMA
 *
 * @vmg: Describes a VMA expansion operation.
 *
 * Expand @vma to vmg->start and vmg->end.  Can expand off the start and end.
 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
 * vmg->next->vm_end.  Checking if the vmg->vma can expand and merge with
 * vmg->next needs to be handled by the caller.
 *
 * Returns: 0 on success.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
 * - The caller must have set @vmg->vma and @vmg->next.
 */
int vma_expand(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *anon_dup = NULL;
	bool remove_next = false;
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *next = vmg->next;

	mmap_assert_write_locked(vmg->mm);

	vma_start_write(vma);
	if (next && (vma != next) && (vmg->end == next->vm_end)) {
		int ret;

		remove_next = true;
		/* This should already have been checked by this point. */
		VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
		vma_start_write(next);
		ret = dup_anon_vma(vma, next, &anon_dup);
		if (ret)
			return ret;
	}

	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON_VMG(next && !remove_next &&
		       next != vma && vmg->end > next->vm_start, vmg);
	/* Only handles expanding */
	VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg);

	if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
		goto nomem;

	return 0;

nomem:
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	return -ENOMEM;
}

/*
 * vma_shrink() - Reduce an existing VMA's memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;
}

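/*
 * vms_clear_ptes() - Unmap the page range tracked by @vms, walking the
 * detached VMAs in @mas_detach, and free the associated page tables. Does
 * nothing unless a prior gather pass marked the range as requiring clearing.
 */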
static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/*
	 * We can free page tables without write-locking mmap_lock because VMAs
	 * were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
	update_hiwater_rss(vms->vma->vm_mm);
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
		   vms->vma_count, mm_wr_locked);

	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
		      vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
}

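/*
 * vms_clean_up_area() - Clear the PTEs for the munmap range and call
 * vma_close() on each detached VMA ahead of the final teardown.
 */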
static void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, true);
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_close(vma);
}

/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources
 * used for the munmap() and may downgrade the lock - if requested.  Everything
 * needed to be done once the vma maple tree is updated.
 */
static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	mm = current->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, !vms->unlock);
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);
	mm->exec_vm -= vms->exec_vm;
	mm->stack_vm -= vms->stack_vm;
	mm->data_vm -= vms->data_vm;

	/* Remove and clean up vmas */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, /* unreachable = */ false);

	vm_unacct_memory(vms->nr_accounted);
	validate_mm(mm);
	if (vms->unlock)
		mmap_read_unlock(mm);

	__mt_destroy(mas_detach->tree);
}

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
 * for removal at a later date.  Handles splitting first and last if necessary
 * and marking the vmas as isolated.
 *
 * @vms: The vma munmap struct
 * @mas_detach: The maple state tracking the detached tree
 *
 * Return: 0 on success, error otherwise
 */
static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *next = NULL;
	int error;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 * Does it split the first one?
	 */
	if (vms->start > vms->vma->vm_start) {

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (vms->end < vms->vma->vm_end &&
		    vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
			error = -ENOMEM;
			goto map_count_exceeded;
		}

		/* Don't bother splitting the VMA if we can't unmap it anyway */
		if (!can_modify_vma(vms->vma)) {
			error = -EPERM;
			goto start_split_failed;
		}

		error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
		if (error)
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
	if (vms->prev)
		vms->unmap_start = vms->prev->vm_end;

	/*
	 * Detach a range of VMAs from the mm. Using next as a temp variable as
	 * it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;

		if (!can_modify_vma(next)) {
			error = -EPERM;
			goto modify_vma_failed;
		}
		/* Does it split the end? */
		if (next->vm_end > vms->end) {
			error = __split_vma(vms->vmi, next, vms->end, 0);
			if (error)
				goto end_split_failed;
		}
		vma_start_write(next);
		mas_set(mas_detach, vms->vma_count++);
		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
		if (error)
			goto munmap_gather_failed;

		vma_mark_detached(next, true);
		nrpages = vma_pages(next);

		vms->nr_pages += nrpages;
		if (next->vm_flags & VM_LOCKED)
			vms->locked_vm += nrpages;

		if (next->vm_flags & VM_ACCOUNT)
			vms->nr_accounted += nrpages;

		if (is_exec_mapping(next->vm_flags))
			vms->exec_vm += nrpages;
		else if (is_stack_mapping(next->vm_flags))
			vms->stack_vm += nrpages;
		else if (is_data_mapping(next->vm_flags))
			vms->data_vm += nrpages;

		if (vms->uf) {
			/*
			 * If userfaultfd_unmap_prep returns an error the vmas
			 * will remain split, but userland will get a
			 * highly unexpected error anyway. This is no
			 * different than the case where the first of the two
			 * __split_vma fails, but we don't undo the first
			 * split, even though we could. This is an unlikely
			 * enough failure that it's not worth optimizing for.
			 */
			error = userfaultfd_unmap_prep(next, vms->start,
						       vms->end, vms->uf);
			if (error)
				goto userfaultfd_error;
		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
	if (vms->next)
		vms->unmap_end = vms->next->vm_start;

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;

		vma_iter_set(vms->vmi, vms->start);
		rcu_read_lock();
		vma_test = mas_find(&test, vms->vma_count - 1);
		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
			BUG_ON(vma_mas != vma_test);
			test_count++;
			vma_test = mas_next(&test, vms->vma_count - 1);
		}
		rcu_read_unlock();
		BUG_ON(vms->vma_count != test_count);
	}
#endif

	while (vma_iter_addr(vms->vmi) > vms->start)
		vma_iter_prev_range(vms->vmi);

	vms->clear_ptes = true;
	return 0;

userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
	reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
	return error;
}

/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
}

/*
 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
 * @vmi: The vma iterator
 * @vma: The starting vm_area_struct
 * @mm: The mm_struct
 * @start: The aligned start address to munmap.
 * @end: The aligned end address to munmap.
 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
 *          success.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct mm_struct *mm, unsigned long start, unsigned long end,
		struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);
	struct vma_munmap_struct vms;
	int error;

	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		goto gather_failed;

	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		goto clear_tree_failed;

	/* Point of no return */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;

clear_tree_failed:
	reattach_vmas(&mas_detach);
gather_failed:
	validate_mm(mm);
	return error;
}

/*
 * do_vmi_munmap() - munmap a given range.
 * @vmi: The vma iterator
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length of the range to munmap
 * @uf: The userfaultfd list_head
 * @unlock: set to true if the user wants to drop the mmap_lock on success
 *
 * This function takes a @vmi that is either pointing to the previous VMA or set
 * to MA_START and sets it up to remove the mapping(s).  The @len will be
 * aligned.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock)
{
	unsigned long end;
	struct vm_area_struct *vma;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	end = start + PAGE_ALIGN(len);
	if (end == start)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = vma_find(vmi, end);
	if (!vma) {
		if (unlock)
			mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}

/*
 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
 * context and anonymous VMA name within the range [start, end).
 *
 * As a result, we might be able to merge the newly modified VMA range with an
 * adjacent VMA with identical properties.
 *
 * If no merge is possible and the range does not span the entirety of the VMA,
 * we then need to split the VMA to accommodate the change.
 *
 * The function returns either the merged VMA, the original VMA if a split was
 * required instead, or an error if the split failed.
 */
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	struct vm_area_struct *merged;

	/* First, try to merge. */
	merged = vma_merge_existing_range(vmg);
	if (merged)
		return merged;
	if (vmg_nomem(vmg))
		return ERR_PTR(-ENOMEM);

	/* Split any preceding portion of the VMA. */
	if (vma->vm_start < start) {
		int err = split_vma(vmg->vmi, vma, start, 1);

		if (err)
			return ERR_PTR(err);
	}

	/* Split any trailing portion of the VMA. */
	if (vma->vm_end > end) {
		int err = split_vma(vmg->vmi, vma, end, 0);

		if (err)
			return ERR_PTR(err);
	}

	return vma;
}

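/*
 * The vma_modify_*() helpers below wrap vma_modify() for specific attribute
 * changes (flags, anon VMA name, mempolicy, userfaultfd context), setting up
 * the vma_merge_struct before attempting the merge/split.
 */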
struct vm_area_struct *vma_modify_flags(
		struct vma_iterator *vmi, struct vm_area_struct *prev,
		struct vm_area_struct *vma, unsigned long start, unsigned long end,
		unsigned long new_flags)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.anon_name = new_name;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.policy = new_pol;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.uffd_ctx = new_ctx;

	return vma_modify(&vmg);
}

/*
 * Expand vma by delta bytes, potentially merging with an immediately adjacent
 * VMA with identical properties.
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta)
{
	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);

	vmg.next = vma_iter_next_rewind(vmi, NULL);
	vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */

	return vma_merge_new_range(&vmg);
}

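/*
 * The unlink_file_vma_batch_*() helpers batch removal of file-backed VMAs
 * from their address_space interval trees so the i_mmap lock is taken once
 * per batch of VMAs sharing a file rather than once per VMA.
 */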
void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
{
	vb->count = 0;
}

static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
{
	struct address_space *mapping;
	int i;

	mapping = vb->vmas[0]->vm_file->f_mapping;
	i_mmap_lock_write(mapping);
	for (i = 0; i < vb->count; i++) {
		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
		__remove_shared_vm_struct(vb->vmas[i], mapping);
	}
	i_mmap_unlock_write(mapping);

	unlink_file_vma_batch_init(vb);
}

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma)
{
	if (vma->vm_file == NULL)
		return;

	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
	    vb->count == ARRAY_SIZE(vb->vmas))
		unlink_file_vma_batch_process(vb);

	vb->vmas[vb->count] = vma;
	vb->count++;
}

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
{
	if (vb->count > 0)
		unlink_file_vma_batch_process(vb);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;

		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

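/* Insert a file-backed VMA into its address_space's interval tree. */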
void vma_link_file(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping;

	if (file) {
		mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

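/*
 * Link a new VMA into the mm: store it in the VMA maple tree, link any file
 * mapping, and bump the mm's map count.  Returns -ENOMEM if maple tree
 * preallocation fails.
 */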
vma_link(struct mm_struct * mm,struct vm_area_struct * vma)1688 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1689 {
1690 VMA_ITERATOR(vmi, mm, 0);
1691
1692 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1693 if (vma_iter_prealloc(&vmi, vma))
1694 return -ENOMEM;
1695
1696 vma_start_write(vma);
1697 vma_iter_store(&vmi, vma);
1698 vma_link_file(vma);
1699 mm->map_count++;
1700 validate_mm(mm);
1701 return 0;
1702 }
1703
1704 /*
1705 * Copy the vma structure to a new location in the same mm,
1706 * prior to moving page table entries, to effect an mremap move.
1707 */
copy_vma(struct vm_area_struct ** vmap,unsigned long addr,unsigned long len,pgoff_t pgoff,bool * need_rmap_locks)1708 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1709 unsigned long addr, unsigned long len, pgoff_t pgoff,
1710 bool *need_rmap_locks)
1711 {
1712 struct vm_area_struct *vma = *vmap;
1713 unsigned long vma_start = vma->vm_start;
1714 struct mm_struct *mm = vma->vm_mm;
1715 struct vm_area_struct *new_vma;
1716 bool faulted_in_anon_vma = true;
1717 VMA_ITERATOR(vmi, mm, addr);
1718 VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
1719
1720 /*
1721 * If anonymous vma has not yet been faulted, update new pgoff
1722 * to match new location, to increase its chance of merging.
1723 */
1724 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1725 pgoff = addr >> PAGE_SHIFT;
1726 faulted_in_anon_vma = false;
1727 }
1728
1729 new_vma = find_vma_prev(mm, addr, &vmg.prev);
1730 if (new_vma && new_vma->vm_start < addr + len)
1731 return NULL; /* should never get here */
1732
1733 vmg.vma = NULL; /* New VMA range. */
1734 vmg.pgoff = pgoff;
1735 vmg.next = vma_iter_next_rewind(&vmi, NULL);
1736 new_vma = vma_merge_new_range(&vmg);
1737
1738 if (new_vma) {
1739 /*
1740 * Source vma may have been merged into new_vma
1741 */
1742 if (unlikely(vma_start >= new_vma->vm_start &&
1743 vma_start < new_vma->vm_end)) {
1744 /*
1745 * The only way we can get a vma_merge with
1746 * self during an mremap is if the vma hasn't
1747 * been faulted in yet and we were allowed to
1748 * reset the dst vma->vm_pgoff to the
1749 * destination address of the mremap to allow
1750 * the merge to happen. mremap must change the
1751 * vm_pgoff linearity between src and dst vmas
1752 * (in turn preventing a vma_merge) to be
1753 * safe. It is only safe to keep the vm_pgoff
1754 * linear if there are no pages mapped yet.
1755 */
1756 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1757 *vmap = vma = new_vma;
1758 }
1759 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1760 } else {
1761 new_vma = vm_area_dup(vma);
1762 if (!new_vma)
1763 goto out;
1764 vma_set_range(new_vma, addr, addr + len, pgoff);
1765 if (vma_dup_policy(vma, new_vma))
1766 goto out_free_vma;
1767 if (anon_vma_clone(new_vma, vma))
1768 goto out_free_mempol;
1769 if (new_vma->vm_file)
1770 get_file(new_vma->vm_file);
1771 if (new_vma->vm_ops && new_vma->vm_ops->open)
1772 new_vma->vm_ops->open(new_vma);
1773 if (vma_link(mm, new_vma))
1774 goto out_vma_link;
1775 *need_rmap_locks = false;
1776 }
1777 return new_vma;
1778
1779 out_vma_link:
1780 vma_close(new_vma);
1781
1782 if (new_vma->vm_file)
1783 fput(new_vma->vm_file);
1784
1785 unlink_anon_vmas(new_vma);
1786 out_free_mempol:
1787 mpol_put(vma_policy(new_vma));
1788 out_free_vma:
1789 vm_area_free(new_vma);
1790 out:
1791 return NULL;
1792 }
1793
1794 /*
1795 * Rough compatibility check to quickly see if it's even worth looking
1796 * at sharing an anon_vma.
1797 *
1798 * They need to have the same vm_file, and the flags can only differ
1799 * in things that mprotect may change.
1800 *
1801 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1802 * we can merge the two vma's. For example, we refuse to merge a vma if
1803 * there is a vm_ops->close() function, because that indicates that the
1804 * driver is doing some kind of reference counting. But that doesn't
1805 * really matter for the anon_vma sharing case.
1806 */
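/*
 * Worked example with illustrative numbers: if 'a' spans 0x1000-0x5000
 * with vm_pgoff 10, a vma 'b' starting at 0x5000 is only compatible when
 * b->vm_pgoff == 10 + ((0x5000 - 0x1000) >> PAGE_SHIFT) == 14 (with 4KiB
 * pages), i.e. the two vmas map the file contiguously.
 */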
1807 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1808 {
1809 return a->vm_end == b->vm_start &&
1810 mpol_equal(vma_policy(a), vma_policy(b)) &&
1811 a->vm_file == b->vm_file &&
1812 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1813 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1814 }
1815
1816 /*
1817 * Do some basic sanity checking to see if we can re-use the anon_vma
1818 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1819 * the same as 'old', the other will be the new one that is trying
1820 * to share the anon_vma.
1821 *
1822 * NOTE! This runs with mmap_lock held for reading, so it is possible that
1823 * the anon_vma of 'old' is concurrently in the process of being set up
1824 * by another page fault trying to merge _that_. But that's ok: if it
1825 * is being set up, that automatically means that it will be a singleton
1826 * acceptable for merging, so we can do all of this optimistically. But
1827 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1828 *
1829 * IOW: the "list_is_singular()" test on the anon_vma_chain only
1830 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1831 * is to return an anon_vma that is "complex" due to having gone through
1832 * a fork).
1833 *
1834 * We also make sure that the two vma's are compatible (adjacent,
1835 * and with the same memory policies). That's all stable, even with just
1836 * a read lock on the mmap_lock.
1837 */
1838 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
1839 struct vm_area_struct *a,
1840 struct vm_area_struct *b)
1841 {
1842 if (anon_vma_compatible(a, b)) {
1843 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1844
1845 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1846 return anon_vma;
1847 }
1848 return NULL;
1849 }
1850
1851 /*
1852 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1853 * neighbouring vmas for a suitable anon_vma, before it goes off
1854 * to allocate a new anon_vma. It checks the neighbours because a repetitive
1855 * sequence of mprotects and faults may otherwise lead to distinct
1856 * anon_vmas being allocated, preventing vma merge in subsequent
1857 * mprotect.
1858 */
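/*
 * Minimal sketch of the expected call site (an assumption based on the
 * anon_vma allocation path in mm/rmap.c, shown only for orientation):
 *
 *	anon_vma = find_mergeable_anon_vma(vma);
 *	if (!anon_vma)
 *		anon_vma = anon_vma_alloc();
 */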
1859 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1860 {
1861 struct anon_vma *anon_vma = NULL;
1862 struct vm_area_struct *prev, *next;
1863 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
1864
1865 /* Try next first. */
1866 next = vma_iter_load(&vmi);
1867 if (next) {
1868 anon_vma = reusable_anon_vma(next, vma, next);
1869 if (anon_vma)
1870 return anon_vma;
1871 }
1872
1873 prev = vma_prev(&vmi);
1874 VM_BUG_ON_VMA(prev != vma, vma);
1875 prev = vma_prev(&vmi);
1876 /* Now try prev. */
1877 if (prev)
1878 anon_vma = reusable_anon_vma(prev, prev, vma);
1879
1880 /*
1881 * We might reach here with anon_vma == NULL if we can't find
1882 * any reusable anon_vma.
1883 * There's no absolute need to look only at touching neighbours:
1884 * we could search further afield for "compatible" anon_vmas.
1885 * But it would probably just be a waste of time searching,
1886 * or lead to too many vmas hanging off the same anon_vma.
1887 * We're trying to allow mprotect remerging later on,
1888 * not trying to minimize memory used for anon_vmas.
1889 */
1890 return anon_vma;
1891 }
1892
1893 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1894 {
1895 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1896 }
1897
1898 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1899 {
1900 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1901 (VM_WRITE | VM_SHARED);
1902 }
1903
1904 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1905 {
1906 /* No managed pages to writeback. */
1907 if (vma->vm_flags & VM_PFNMAP)
1908 return false;
1909
1910 return vma->vm_file && vma->vm_file->f_mapping &&
1911 mapping_can_writeback(vma->vm_file->f_mapping);
1912 }
1913
1914 /*
1915 * Does this VMA require the underlying folios to have their dirty state
1916 * tracked?
1917 */
1918 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1919 {
1920 /* Only shared, writable VMAs require dirty tracking. */
1921 if (!vma_is_shared_writable(vma))
1922 return false;
1923
1924 /* Does the filesystem need to be notified? */
1925 if (vm_ops_needs_writenotify(vma->vm_ops))
1926 return true;
1927
1928 /*
1929 * Even if the filesystem doesn't indicate a need for writenotify, if it
1930 * can writeback, dirty tracking is still required.
1931 */
1932 return vma_fs_can_writeback(vma);
1933 }
1934
1935 /*
1936 * Some shared mappings will want the pages marked read-only
1937 * to track write events. If so, we'll downgrade vm_page_prot
1938 * to the private version (using protection_map[] without the
1939 * VM_SHARED bit).
1940 */
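/*
 * Sketch of how a caller typically reacts (an assumption mirroring
 * vma_set_page_prot(); shown only for illustration): when this returns
 * true, the shared mapping is downgraded to the private protection so
 * that the first write faults and can be tracked:
 *
 *	if (vma_wants_writenotify(vma, vma->vm_page_prot))
 *		vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
 *						     vma->vm_flags & ~VM_SHARED);
 */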
1941 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1942 {
1943 /* If it was private or non-writable, the write bit is already clear */
1944 if (!vma_is_shared_writable(vma))
1945 return false;
1946
1947 /* The backer wishes to know when pages are first written to? */
1948 if (vm_ops_needs_writenotify(vma->vm_ops))
1949 return true;
1950
1951 /* The open routine did something to the protections that pgprot_modify
1952 * won't preserve? */
1953 if (pgprot_val(vm_page_prot) !=
1954 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1955 return false;
1956
1957 /*
1958 * Do we need to track softdirty? hugetlb does not support softdirty
1959 * tracking yet.
1960 */
1961 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1962 return true;
1963
1964 /* Do we need write faults for uffd-wp tracking? */
1965 if (userfaultfd_wp(vma))
1966 return true;
1967
1968 /* Can the mapping track the dirty pages? */
1969 return vma_fs_can_writeback(vma);
1970 }
1971
1972 static DEFINE_MUTEX(mm_all_locks_mutex);
1973
1974 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
1975 {
1976 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
1977 /*
1978 * The LSB of head.next can't change from under us
1979 * because we hold the mm_all_locks_mutex.
1980 */
1981 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
1982 /*
1983 * We can safely modify head.next after taking the
1984 * anon_vma->root->rwsem. If some other vma in this mm shares
1985 * the same anon_vma we won't take it again.
1986 *
1987 * No need of atomic instructions here, head.next
1988 * can't change from under us thanks to the
1989 * anon_vma->root->rwsem.
1990 */
1991 if (__test_and_set_bit(0, (unsigned long *)
1992 &anon_vma->root->rb_root.rb_root.rb_node))
1993 BUG();
1994 }
1995 }
1996
1997 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
1998 {
1999 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2000 /*
2001 * AS_MM_ALL_LOCKS can't change from under us because
2002 * we hold the mm_all_locks_mutex.
2003 *
2004 * Operations on ->flags have to be atomic because
2005 * even if AS_MM_ALL_LOCKS is stable thanks to the
2006 * mm_all_locks_mutex, there may be other cpus
2007 * changing other bitflags in parallel to us.
2008 */
2009 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2010 BUG();
2011 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
2012 }
2013 }
2014
2015 /*
2016 * This operation locks against the VM for all pte/vma/mm related
2017 * operations that could ever happen on a certain mm. This includes
2018 * vmtruncate, try_to_unmap, and all page faults.
2019 *
2020 * The caller must take the mmap_lock in write mode before calling
2021 * mm_take_all_locks(). The caller isn't allowed to release the
2022 * mmap_lock until mm_drop_all_locks() returns.
2023 *
2024 * mmap_lock in write mode is required in order to block all operations
2025 * that could modify pagetables and free pages without needing to
2026 * alter the vma layout. It's also needed in write mode to prevent new
2027 * anon_vmas from being associated with existing vmas.
2028 *
2029 * A single task can't take more than one mm_take_all_locks() in a row
2030 * or it would deadlock.
2031 *
2032 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
2033 * mapping->flags avoid taking the same lock twice if more than one
2034 * vma in this mm is backed by the same anon_vma or address_space.
2035 *
2036 * We take locks in the following order, according to the comment at the
2037 * beginning of mm/rmap.c:
2038 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
2039 * hugetlb mapping);
2040 * - all vmas marked locked;
2041 * - all i_mmap_rwsem locks;
2042 * - all anon_vma->rwsem locks.
2043 *
2044 * We can take all locks within these types randomly because the VM code
2045 * doesn't nest them and we are protected from parallel mm_take_all_locks() by
2046 * mm_all_locks_mutex.
2047 *
2048 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2049 * that may have to take thousands of locks.
2050 *
2051 * mm_take_all_locks() can fail if it's interrupted by signals.
2052 */
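/*
 * Typical usage, as an illustrative sketch only:
 *
 *	mmap_write_lock(mm);
 *	if (mm_take_all_locks(mm))
 *		return -EINTR;	(a pending signal interrupted us)
 *	... operate on every vma, anon_vma and address_space of mm ...
 *	mm_drop_all_locks(mm);
 *	mmap_write_unlock(mm);
 */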
2053 int mm_take_all_locks(struct mm_struct *mm)
2054 {
2055 struct vm_area_struct *vma;
2056 struct anon_vma_chain *avc;
2057 VMA_ITERATOR(vmi, mm, 0);
2058
2059 mmap_assert_write_locked(mm);
2060
2061 mutex_lock(&mm_all_locks_mutex);
2062
2063 /*
2064 * vma_start_write() does not have a complement in mm_drop_all_locks()
2065 * because vma_start_write() is always asymmetrical; it marks a VMA as
2066 * being written to until mmap_write_unlock() or mmap_write_downgrade()
2067 * is reached.
2068 */
2069 for_each_vma(vmi, vma) {
2070 if (signal_pending(current))
2071 goto out_unlock;
2072 vma_start_write(vma);
2073 }
2074
2075 vma_iter_init(&vmi, mm, 0);
2076 for_each_vma(vmi, vma) {
2077 if (signal_pending(current))
2078 goto out_unlock;
2079 if (vma->vm_file && vma->vm_file->f_mapping &&
2080 is_vm_hugetlb_page(vma))
2081 vm_lock_mapping(mm, vma->vm_file->f_mapping);
2082 }
2083
2084 vma_iter_init(&vmi, mm, 0);
2085 for_each_vma(vmi, vma) {
2086 if (signal_pending(current))
2087 goto out_unlock;
2088 if (vma->vm_file && vma->vm_file->f_mapping &&
2089 !is_vm_hugetlb_page(vma))
2090 vm_lock_mapping(mm, vma->vm_file->f_mapping);
2091 }
2092
2093 vma_iter_init(&vmi, mm, 0);
2094 for_each_vma(vmi, vma) {
2095 if (signal_pending(current))
2096 goto out_unlock;
2097 if (vma->anon_vma)
2098 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2099 vm_lock_anon_vma(mm, avc->anon_vma);
2100 }
2101
2102 return 0;
2103
2104 out_unlock:
2105 mm_drop_all_locks(mm);
2106 return -EINTR;
2107 }
2108
2109 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2110 {
2111 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2112 /*
2113 * The LSB of head.next can't change to 0 from under
2114 * us because we hold the mm_all_locks_mutex.
2115 *
2116 * We must however clear the bitflag before unlocking
2117 * the vma so the users using the anon_vma->rb_root will
2118 * never see our bitflag.
2119 *
2120 * No need of atomic instructions here, head.next
2121 * can't change from under us until we release the
2122 * anon_vma->root->rwsem.
2123 */
2124 if (!__test_and_clear_bit(0, (unsigned long *)
2125 &anon_vma->root->rb_root.rb_root.rb_node))
2126 BUG();
2127 anon_vma_unlock_write(anon_vma);
2128 }
2129 }
2130
2131 static void vm_unlock_mapping(struct address_space *mapping)
2132 {
2133 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2134 /*
2135 * AS_MM_ALL_LOCKS can't change to 0 from under us
2136 * because we hold the mm_all_locks_mutex.
2137 */
2138 i_mmap_unlock_write(mapping);
2139 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2140 &mapping->flags))
2141 BUG();
2142 }
2143 }
2144
2145 /*
2146 * The mmap_lock cannot be released by the caller until
2147 * mm_drop_all_locks() returns.
2148 */
2149 void mm_drop_all_locks(struct mm_struct *mm)
2150 {
2151 struct vm_area_struct *vma;
2152 struct anon_vma_chain *avc;
2153 VMA_ITERATOR(vmi, mm, 0);
2154
2155 mmap_assert_write_locked(mm);
2156 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2157
2158 for_each_vma(vmi, vma) {
2159 if (vma->anon_vma)
2160 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2161 vm_unlock_anon_vma(avc->anon_vma);
2162 if (vma->vm_file && vma->vm_file->f_mapping)
2163 vm_unlock_mapping(vma->vm_file->f_mapping);
2164 }
2165
2166 mutex_unlock(&mm_all_locks_mutex);
2167 }
2168
2169 /*
2170 * We account for memory if it's a private writable mapping that is
2171 * not backed by hugepages and VM_NORESERVE wasn't set.
2172 */
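/*
 * For example (illustrative summary of the check below): a
 * MAP_PRIVATE | PROT_WRITE anonymous mapping is charged against the
 * overcommit limit, while shared, read-only, VM_NORESERVE or
 * hugetlbfs-backed mappings are not.
 */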
2173 static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
2174 {
2175 /*
2176 * hugetlb has its own accounting separate from the core VM.
2177 * VM_HUGETLB may not be set yet, so we cannot check for that flag.
2178 */
2179 if (file && is_file_hugepages(file))
2180 return false;
2181
2182 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
2183 }
2184
2185 /*
2186 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
2187 * operation.
2188 * @vms: The vma unmap structure
2189 * @mas_detach: The maple state with the detached maple tree
2190 *
2191 * Reattach any detached vmas and free up the maple tree used to track them.
2192 * If that's not possible because the ptes are cleared (and vm_ops->close() may
2193 * have been called), then a NULL is written over the vmas and the vmas are
2194 * removed (munmap() completed).
2195 */
2196 static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
2197 struct ma_state *mas_detach)
2198 {
2199 struct ma_state *mas = &vms->vmi->mas;
2200
2201 if (!vms->nr_pages)
2202 return;
2203
2204 if (vms->clear_ptes)
2205 return reattach_vmas(mas_detach);
2206
2207 /*
2208 * Aborting cannot just call the vm_ops open() because they are often
2209 * not symmetrical and state data has been lost. Resort to the old
2210 * failure method of leaving a gap where the MAP_FIXED mapping failed.
2211 */
2212 mas_set_range(mas, vms->start, vms->end - 1);
2213 mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
2214 /* Clean up the insertion of the unfortunate gap */
2215 vms_complete_munmap_vmas(vms, mas_detach);
2216 }
2217
2218 /*
2219 * __mmap_prepare() - Prepare to gather any overlapping VMAs that need to be
2220 * unmapped once the map operation is completed, check limits, account mapping
2221 * and clean up any pre-existing VMAs.
2222 *
2223 * @map: Mapping state.
2224 * @uf: Userfaultfd context list.
2225 *
2226 * Returns: 0 on success, error code otherwise.
2227 */
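/*
 * Together with __mmap_new_vma() and __mmap_complete() below, this forms
 * the __mmap_region() sequence: gather and prepare any overlapping VMAs,
 * then merge or allocate the new VMA, then complete the unmap and update
 * the statistics.
 */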
2228 static int __mmap_prepare(struct mmap_state *map, struct list_head *uf)
2229 {
2230 int error;
2231 struct vma_iterator *vmi = map->vmi;
2232 struct vma_munmap_struct *vms = &map->vms;
2233
2234 /* Find the first overlapping VMA and initialise unmap state. */
2235 vms->vma = vma_find(vmi, map->end);
2236 init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
2237 /* unlock = */ false);
2238
2239 /* If there are overlapping VMAs, prepare to unmap them. */
2240 if (vms->vma) {
2241 mt_init_flags(&map->mt_detach,
2242 vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2243 mt_on_stack(map->mt_detach);
2244 mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
2245 /* Prepare to unmap any existing mapping in the area */
2246 error = vms_gather_munmap_vmas(vms, &map->mas_detach);
2247 if (error) {
2248 /* On error VMAs will already have been reattached. */
2249 vms->nr_pages = 0;
2250 return error;
2251 }
2252
2253 map->next = vms->next;
2254 map->prev = vms->prev;
2255 } else {
2256 map->next = vma_iter_next_rewind(vmi, &map->prev);
2257 }
2258
2259 /* Check against address space limit. */
2260 if (!may_expand_vm(map->mm, map->flags, map->pglen - vms->nr_pages))
2261 return -ENOMEM;
2262
2263 /* Private writable mapping: check memory availability. */
2264 if (accountable_mapping(map->file, map->flags)) {
2265 map->charged = map->pglen;
2266 map->charged -= vms->nr_accounted;
2267 if (map->charged) {
2268 error = security_vm_enough_memory_mm(map->mm, map->charged);
2269 if (error)
2270 return error;
2271 }
2272
2273 vms->nr_accounted = 0;
2274 map->flags |= VM_ACCOUNT;
2275 }
2276
2277 /*
2278 * Clear PTEs while the vma is still in the tree so that rmap
2279 * cannot race with the freeing later in the truncate scenario.
2280 * This is also needed for mmap_file(), which is why the vm_ops
2281 * close() function is called.
2282 */
2283 vms_clean_up_area(vms, &map->mas_detach);
2284
2285 return 0;
2286 }
2287
2288
2289 static int __mmap_new_file_vma(struct mmap_state *map,
2290 struct vm_area_struct *vma)
2291 {
2292 struct vma_iterator *vmi = map->vmi;
2293 int error;
2294
2295 vma->vm_file = get_file(map->file);
2296 error = mmap_file(vma->vm_file, vma);
2297 if (error) {
2298 fput(vma->vm_file);
2299 vma->vm_file = NULL;
2300
2301 vma_iter_set(vmi, vma->vm_end);
2302 /* Undo any partial mapping done by a device driver. */
2303 unmap_region(&vmi->mas, vma, map->prev, map->next);
2304
2305 return error;
2306 }
2307
2308 /* Drivers cannot alter the address of the VMA. */
2309 WARN_ON_ONCE(map->addr != vma->vm_start);
2310 /*
2311 * Drivers should not permit writability when previously it was
2312 * disallowed.
2313 */
2314 VM_WARN_ON_ONCE(map->flags != vma->vm_flags &&
2315 !(map->flags & VM_MAYWRITE) &&
2316 (vma->vm_flags & VM_MAYWRITE));
2317
2318 /* If the flags change (and are mergeable), let's retry later. */
2319 map->retry_merge = vma->vm_flags != map->flags && !(vma->vm_flags & VM_SPECIAL);
2320 map->flags = vma->vm_flags;
2321
2322 return 0;
2323 }
2324
2325 /*
2326 * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
2327 * possible.
2328 *
2329 * @map: Mapping state.
2330 * @vmap: Output pointer for the new VMA.
2331 *
2332 * Returns: Zero on success, or an error.
2333 */
2334 static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
2335 {
2336 struct vma_iterator *vmi = map->vmi;
2337 int error = 0;
2338 struct vm_area_struct *vma;
2339
2340 /*
2341 * Determine the object being mapped and call the appropriate
2342 * specific mapper. The address has already been validated, but
2343 * not yet unmapped; the overlapping maps have been removed from the list.
2344 */
2345 vma = vm_area_alloc(map->mm);
2346 if (!vma)
2347 return -ENOMEM;
2348
2349 vma_iter_config(vmi, map->addr, map->end);
2350 vma_set_range(vma, map->addr, map->end, map->pgoff);
2351 vm_flags_init(vma, map->flags);
2352 vma->vm_page_prot = vm_get_page_prot(map->flags);
2353
2354 if (vma_iter_prealloc(vmi, vma)) {
2355 error = -ENOMEM;
2356 goto free_vma;
2357 }
2358
2359 if (map->file)
2360 error = __mmap_new_file_vma(map, vma);
2361 else if (map->flags & VM_SHARED)
2362 error = shmem_zero_setup(vma);
2363 else
2364 vma_set_anonymous(vma);
2365
2366 if (error)
2367 goto free_iter_vma;
2368
2369 #ifdef CONFIG_SPARC64
2370 /* TODO: Fix SPARC ADI! */
2371 WARN_ON_ONCE(!arch_validate_flags(map->flags));
2372 #endif
2373
2374 /* Lock the VMA since it is modified after insertion into VMA tree */
2375 vma_start_write(vma);
2376 vma_iter_store(vmi, vma);
2377 map->mm->map_count++;
2378 vma_link_file(vma);
2379
2380 /*
2381 * vma_merge_new_range() calls khugepaged_enter_vma() too; the call
2382 * below covers the non-merge case.
2383 */
2384 khugepaged_enter_vma(vma, map->flags);
2385 ksm_add_vma(vma);
2386 *vmap = vma;
2387 return 0;
2388
2389 free_iter_vma:
2390 vma_iter_free(vmi);
2391 free_vma:
2392 vm_area_free(vma);
2393 return error;
2394 }
2395
2396 /*
2397 * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
2398 * statistics, handle locking and finalise the VMA.
2399 *
2400 * @map: Mapping state.
2401 * @vma: Merged or newly allocated VMA for the mmap()'d region.
2402 */
2403 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
2404 {
2405 struct mm_struct *mm = map->mm;
2406 unsigned long vm_flags = vma->vm_flags;
2407
2408 perf_event_mmap(vma);
2409
2410 /* Unmap any existing mapping in the area. */
2411 vms_complete_munmap_vmas(&map->vms, &map->mas_detach);
2412
2413 vm_stat_account(mm, vma->vm_flags, map->pglen);
2414 if (vm_flags & VM_LOCKED) {
2415 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2416 is_vm_hugetlb_page(vma) ||
2417 vma == get_gate_vma(mm))
2418 vm_flags_clear(vma, VM_LOCKED_MASK);
2419 else
2420 mm->locked_vm += map->pglen;
2421 }
2422
2423 if (vma->vm_file)
2424 uprobe_mmap(vma);
2425
2426 /*
2427 * A new (or expanded) vma always gets soft-dirty status.
2428 * Otherwise the user-space soft-dirty page tracker won't
2429 * be able to distinguish the situation where a vma area was unmapped
2430 * and then a new one mapped in place (which must be treated as
2431 * a completely new data area).
2432 */
2433 vm_flags_set(vma, VM_SOFTDIRTY);
2434
2435 vma_set_page_prot(vma);
2436 }
2437
2438 static unsigned long __mmap_region(struct file *file, unsigned long addr,
2439 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2440 struct list_head *uf)
2441 {
2442 struct mm_struct *mm = current->mm;
2443 struct vm_area_struct *vma = NULL;
2444 int error;
2445 VMA_ITERATOR(vmi, mm, addr);
2446 MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
2447
2448 error = __mmap_prepare(&map, uf);
2449 if (error)
2450 goto abort_munmap;
2451
2452 /* Attempt to merge with adjacent VMAs... */
2453 if (map.prev || map.next) {
2454 VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);
2455
2456 vma = vma_merge_new_range(&vmg);
2457 }
2458
2459 /* ...but if we can't, allocate a new VMA. */
2460 if (!vma) {
2461 error = __mmap_new_vma(&map, &vma);
2462 if (error)
2463 goto unacct_error;
2464 }
2465
2466 /* If flags changed, we might be able to merge, so try again. */
2467 if (map.retry_merge) {
2468 struct vm_area_struct *merged;
2469 VMG_MMAP_STATE(vmg, &map, vma);
2470
2471 vma_iter_config(map.vmi, map.addr, map.end);
2472 merged = vma_merge_existing_range(&vmg);
2473 if (merged)
2474 vma = merged;
2475 }
2476
2477 __mmap_complete(&map, vma);
2478
2479 return addr;
2480
2481 /* Accounting was done by __mmap_prepare(). */
2482 unacct_error:
2483 if (map.charged)
2484 vm_unacct_memory(map.charged);
2485 abort_munmap:
2486 vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
2487 return error;
2488 }
2489
2490 /**
2491 * mmap_region() - Actually perform the userland mapping of a VMA into
2492 * current->mm with known, aligned and overflow-checked @addr and @len, and
2493 * correctly determined VMA flags @vm_flags and page offset @pgoff.
2494 *
2495 * This is an internal memory management function, and should not be used
2496 * directly.
2497 *
2498 * The caller must write-lock current->mm->mmap_lock.
2499 *
2500 * @file: If a file-backed mapping, a pointer to the struct file describing the
2501 * file to be mapped, otherwise NULL.
2502 * @addr: The page-aligned address at which to perform the mapping.
2503 * @len: The page-aligned, non-zero, length of the mapping.
2504 * @vm_flags: The VMA flags which should be applied to the mapping.
2505 * @pgoff: If @file is specified, the page offset into the file, if not then
2506 * the virtual page offset in memory of the anonymous mapping.
2507 * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap
2508 * events.
2509 *
2510 * Returns: Either an error, or the address at which the requested mapping has
2511 * been performed.
2512 */
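/*
 * Minimal call sketch (illustrative only; the real entry point is the
 * mmap() syscall path via do_mmap()):
 *
 *	mmap_write_lock(current->mm);
 *	addr = mmap_region(file, addr, len, vm_flags, pgoff, &uf);
 *	mmap_write_unlock(current->mm);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;	(an errno such as -ENOMEM or -EACCES)
 */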
2513 unsigned long mmap_region(struct file *file, unsigned long addr,
2514 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2515 struct list_head *uf)
2516 {
2517 unsigned long ret;
2518 bool writable_file_mapping = false;
2519
2520 mmap_assert_write_locked(current->mm);
2521
2522 /* Check to see if MDWE is applicable. */
2523 if (map_deny_write_exec(vm_flags, vm_flags))
2524 return -EACCES;
2525
2526 /* Allow architectures to sanity-check the vm_flags. */
2527 if (!arch_validate_flags(vm_flags))
2528 return -EINVAL;
2529
2530 /* Map writable and ensure this isn't a sealed memfd. */
2531 if (file && is_shared_maywrite(vm_flags)) {
2532 int error = mapping_map_writable(file->f_mapping);
2533
2534 if (error)
2535 return error;
2536 writable_file_mapping = true;
2537 }
2538
2539 ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
2540
2541 /* Clear our write mapping regardless of error. */
2542 if (writable_file_mapping)
2543 mapping_unmap_writable(file->f_mapping);
2544
2545 validate_mm(current->mm);
2546 return ret;
2547 }
2548
2549 /*
2550 * do_brk_flags() - Increase the brk vma if the flags match.
2551 * @vmi: The vma iterator
2552 * @addr: The start address
2553 * @len: The length of the increase
2554 * @vma: The vma
2555 * @flags: The VMA Flags
2556 *
2557 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
2558 * do not match then create a new anonymous VMA. Eventually we may be able to
2559 * do some brk-specific accounting here.
2560 */
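/*
 * Sketch of the expected usage from the brk() path (an assumption; only
 * do_brk_flags() itself is taken from this file, the surrounding names
 * are illustrative):
 *
 *	(brkvma is the existing brk vma, or NULL if there is none)
 *	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
 *		goto out;
 *	mm->brk = newbrk;
 */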
2561 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
2562 unsigned long addr, unsigned long len, unsigned long flags)
2563 {
2564 struct mm_struct *mm = current->mm;
2565
2566 /*
2567 * Check against address space limits by the changed size.
2568 * Note: This happens *after* clearing old mappings in some code paths.
2569 */
2570 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2571 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
2572 return -ENOMEM;
2573
2574 if (mm->map_count > sysctl_max_map_count)
2575 return -ENOMEM;
2576
2577 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2578 return -ENOMEM;
2579
2580 /*
2581 * Expand the existing vma if possible; note that singular lists do not
2582 * occur after forking, so the expand will only happen on new VMAs.
2583 */
2584 if (vma && vma->vm_end == addr) {
2585 VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
2586
2587 vmg.prev = vma;
2588 /* vmi is positioned at prev, which this mode expects. */
2589 vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
2590
2591 if (vma_merge_new_range(&vmg))
2592 goto out;
2593 else if (vmg_nomem(&vmg))
2594 goto unacct_fail;
2595 }
2596
2597 if (vma)
2598 vma_iter_next_range(vmi);
2599 /* create a vma struct for an anonymous mapping */
2600 vma = vm_area_alloc(mm);
2601 if (!vma)
2602 goto unacct_fail;
2603
2604 vma_set_anonymous(vma);
2605 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
2606 vm_flags_init(vma, flags);
2607 vma->vm_page_prot = vm_get_page_prot(flags);
2608 vma_start_write(vma);
2609 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
2610 goto mas_store_fail;
2611
2612 mm->map_count++;
2613 validate_mm(mm);
2614 ksm_add_vma(vma);
2615 out:
2616 perf_event_mmap(vma);
2617 mm->total_vm += len >> PAGE_SHIFT;
2618 mm->data_vm += len >> PAGE_SHIFT;
2619 if (flags & VM_LOCKED)
2620 mm->locked_vm += (len >> PAGE_SHIFT);
2621 vm_flags_set(vma, VM_SOFTDIRTY);
2622 return 0;
2623
2624 mas_store_fail:
2625 vm_area_free(vma);
2626 unacct_fail:
2627 vm_unacct_memory(len >> PAGE_SHIFT);
2628 return -ENOMEM;
2629 }
2630
2631 /**
2632 * unmapped_area() - Find an area between the low_limit and the high_limit with
2633 * the correct alignment and offset, all from @info. Note: current->mm is used
2634 * for the search.
2635 *
2636 * @info: The unmapped area information including the range [low_limit -
2637 * high_limit), the alignment offset and mask.
2638 *
2639 * Return: A memory address or -ENOMEM.
2640 */
2641 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
2642 {
2643 unsigned long length, gap;
2644 unsigned long low_limit, high_limit;
2645 struct vm_area_struct *tmp;
2646 VMA_ITERATOR(vmi, current->mm, 0);
2647
2648 /* Adjust search length to account for worst case alignment overhead */
2649 length = info->length + info->align_mask + info->start_gap;
2650 if (length < info->length)
2651 return -ENOMEM;
2652
2653 low_limit = info->low_limit;
2654 if (low_limit < mmap_min_addr)
2655 low_limit = mmap_min_addr;
2656 high_limit = info->high_limit;
2657 retry:
2658 if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
2659 return -ENOMEM;
2660
2661 /*
2662 * Adjust for the gap first so it doesn't interfere with the
2663 * later alignment. The first step is the minimum needed to
2664 * fulfil the start gap, the next step is the minimum to align
2665 * that. It is the minimum needed to fulfil both.
2666 */
2667 gap = vma_iter_addr(&vmi) + info->start_gap;
2668 gap += (info->align_offset - gap) & info->align_mask;
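	/*
	 * Worked example with illustrative numbers: if the lowest suitable
	 * gap starts at 0x10000, start_gap is 0, align_mask is 0xffff
	 * (64KiB alignment) and align_offset is 0x3000, the second step
	 * above rounds gap up from 0x10000 to 0x13000, the next address
	 * >= 0x10000 that is congruent to align_offset modulo the
	 * alignment.
	 */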
2669 tmp = vma_next(&vmi);
2670 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
2671 if (vm_start_gap(tmp) < gap + length - 1) {
2672 low_limit = tmp->vm_end;
2673 vma_iter_reset(&vmi);
2674 goto retry;
2675 }
2676 } else {
2677 tmp = vma_prev(&vmi);
2678 if (tmp && vm_end_gap(tmp) > gap) {
2679 low_limit = vm_end_gap(tmp);
2680 vma_iter_reset(&vmi);
2681 goto retry;
2682 }
2683 }
2684
2685 return gap;
2686 }
2687
2688 /**
2689 * unmapped_area_topdown() - Find an area between the low_limit and the
2690 * high_limit with the correct alignment and offset at the highest available
2691 * address, all from @info. Note: current->mm is used for the search.
2692 *
2693 * @info: The unmapped area information including the range [low_limit -
2694 * high_limit), the alignment offset and mask.
2695 *
2696 * Return: A memory address or -ENOMEM.
2697 */
2698 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
2699 {
2700 unsigned long length, gap, gap_end;
2701 unsigned long low_limit, high_limit;
2702 struct vm_area_struct *tmp;
2703 VMA_ITERATOR(vmi, current->mm, 0);
2704
2705 /* Adjust search length to account for worst case alignment overhead */
2706 length = info->length + info->align_mask + info->start_gap;
2707 if (length < info->length)
2708 return -ENOMEM;
2709
2710 low_limit = info->low_limit;
2711 if (low_limit < mmap_min_addr)
2712 low_limit = mmap_min_addr;
2713 high_limit = info->high_limit;
2714 retry:
2715 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
2716 return -ENOMEM;
2717
2718 gap = vma_iter_end(&vmi) - info->length;
2719 gap -= (gap - info->align_offset) & info->align_mask;
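	/*
	 * Worked example with illustrative numbers: if the highest suitable
	 * gap ends at 0x26000, length is 0x2000, align_mask is 0xffff and
	 * align_offset is 0x3000, then gap starts at 0x24000 and the second
	 * step above rounds it down to 0x23000, the highest address
	 * <= 0x24000 that is congruent to align_offset modulo the alignment.
	 */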
2720 gap_end = vma_iter_end(&vmi);
2721 tmp = vma_next(&vmi);
2722 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
2723 if (vm_start_gap(tmp) < gap_end) {
2724 high_limit = vm_start_gap(tmp);
2725 vma_iter_reset(&vmi);
2726 goto retry;
2727 }
2728 } else {
2729 tmp = vma_prev(&vmi);
2730 if (tmp && vm_end_gap(tmp) > gap) {
2731 high_limit = tmp->vm_start;
2732 vma_iter_reset(&vmi);
2733 goto retry;
2734 }
2735 }
2736
2737 return gap;
2738 }
2739
2740 /*
2741 * Verify that the stack growth is acceptable and
2742 * update accounting. This is shared with both the
2743 * grow-up and grow-down cases.
2744 */
2745 static int acct_stack_growth(struct vm_area_struct *vma,
2746 unsigned long size, unsigned long grow)
2747 {
2748 struct mm_struct *mm = vma->vm_mm;
2749 unsigned long new_start;
2750
2751 /* address space limit tests */
2752 if (!may_expand_vm(mm, vma->vm_flags, grow))
2753 return -ENOMEM;
2754
2755 /* Stack limit test */
2756 if (size > rlimit(RLIMIT_STACK))
2757 return -ENOMEM;
2758
2759 /* mlock limit tests */
2760 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
2761 return -ENOMEM;
2762
2763 /* Check to ensure the stack will not grow into a hugetlb-only region */
2764 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2765 vma->vm_end - size;
2766 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2767 return -EFAULT;
2768
2769 /*
2770 * Overcommit... This must be the final test, as it will
2771 * update security statistics.
2772 */
2773 if (security_vm_enough_memory_mm(mm, grow))
2774 return -ENOMEM;
2775
2776 return 0;
2777 }
2778
2779 #if defined(CONFIG_STACK_GROWSUP)
2780 /*
2781 * PA-RISC uses this for its stack.
2782 * vma is the last one with address > vma->vm_end. Have to extend vma.
2783 */
2784 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2785 {
2786 struct mm_struct *mm = vma->vm_mm;
2787 struct vm_area_struct *next;
2788 unsigned long gap_addr;
2789 int error = 0;
2790 VMA_ITERATOR(vmi, mm, vma->vm_start);
2791
2792 if (!(vma->vm_flags & VM_GROWSUP))
2793 return -EFAULT;
2794
2795 mmap_assert_write_locked(mm);
2796
2797 /* Guard against exceeding limits of the address space. */
2798 address &= PAGE_MASK;
2799 if (address >= (TASK_SIZE & PAGE_MASK))
2800 return -ENOMEM;
2801 address += PAGE_SIZE;
2802
2803 /* Enforce stack_guard_gap */
2804 gap_addr = address + stack_guard_gap;
2805
2806 /* Guard against overflow */
2807 if (gap_addr < address || gap_addr > TASK_SIZE)
2808 gap_addr = TASK_SIZE;
2809
2810 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
2811 if (next && vma_is_accessible(next)) {
2812 if (!(next->vm_flags & VM_GROWSUP))
2813 return -ENOMEM;
2814 /* Check that both stack segments have the same anon_vma? */
2815 }
2816
2817 if (next)
2818 vma_iter_prev_range_limit(&vmi, address);
2819
2820 vma_iter_config(&vmi, vma->vm_start, address);
2821 if (vma_iter_prealloc(&vmi, vma))
2822 return -ENOMEM;
2823
2824 /* We must make sure the anon_vma is allocated. */
2825 if (unlikely(anon_vma_prepare(vma))) {
2826 vma_iter_free(&vmi);
2827 return -ENOMEM;
2828 }
2829
2830 /* Lock the VMA before expanding to prevent concurrent page faults */
2831 vma_start_write(vma);
2832 /* We update the anon VMA tree. */
2833 anon_vma_lock_write(vma->anon_vma);
2834
2835 /* Somebody else might have raced and expanded it already */
2836 if (address > vma->vm_end) {
2837 unsigned long size, grow;
2838
2839 size = address - vma->vm_start;
2840 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2841
2842 error = -ENOMEM;
2843 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2844 error = acct_stack_growth(vma, size, grow);
2845 if (!error) {
2846 if (vma->vm_flags & VM_LOCKED)
2847 mm->locked_vm += grow;
2848 vm_stat_account(mm, vma->vm_flags, grow);
2849 anon_vma_interval_tree_pre_update_vma(vma);
2850 vma->vm_end = address;
2851 /* Overwrite old entry in mtree. */
2852 vma_iter_store(&vmi, vma);
2853 anon_vma_interval_tree_post_update_vma(vma);
2854
2855 perf_event_mmap(vma);
2856 }
2857 }
2858 }
2859 anon_vma_unlock_write(vma->anon_vma);
2860 vma_iter_free(&vmi);
2861 validate_mm(mm);
2862 return error;
2863 }
2864 #endif /* CONFIG_STACK_GROWSUP */
2865
2866 /*
2867 * vma is the first one with address < vma->vm_start. Have to extend vma.
2868 * mmap_lock held for writing.
2869 */
2870 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2871 {
2872 struct mm_struct *mm = vma->vm_mm;
2873 struct vm_area_struct *prev;
2874 int error = 0;
2875 VMA_ITERATOR(vmi, mm, vma->vm_start);
2876
2877 if (!(vma->vm_flags & VM_GROWSDOWN))
2878 return -EFAULT;
2879
2880 mmap_assert_write_locked(mm);
2881
2882 address &= PAGE_MASK;
2883 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
2884 return -EPERM;
2885
2886 /* Enforce stack_guard_gap */
2887 prev = vma_prev(&vmi);
2888 /* Check that both stack segments have the same anon_vma? */
2889 if (prev) {
2890 if (!(prev->vm_flags & VM_GROWSDOWN) &&
2891 vma_is_accessible(prev) &&
2892 (address - prev->vm_end < stack_guard_gap))
2893 return -ENOMEM;
2894 }
2895
2896 if (prev)
2897 vma_iter_next_range_limit(&vmi, vma->vm_start);
2898
2899 vma_iter_config(&vmi, address, vma->vm_end);
2900 if (vma_iter_prealloc(&vmi, vma))
2901 return -ENOMEM;
2902
2903 /* We must make sure the anon_vma is allocated. */
2904 if (unlikely(anon_vma_prepare(vma))) {
2905 vma_iter_free(&vmi);
2906 return -ENOMEM;
2907 }
2908
2909 /* Lock the VMA before expanding to prevent concurrent page faults */
2910 vma_start_write(vma);
2911 /* We update the anon VMA tree. */
2912 anon_vma_lock_write(vma->anon_vma);
2913
2914 /* Somebody else might have raced and expanded it already */
2915 if (address < vma->vm_start) {
2916 unsigned long size, grow;
2917
2918 size = vma->vm_end - address;
2919 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2920
2921 error = -ENOMEM;
2922 if (grow <= vma->vm_pgoff) {
2923 error = acct_stack_growth(vma, size, grow);
2924 if (!error) {
2925 if (vma->vm_flags & VM_LOCKED)
2926 mm->locked_vm += grow;
2927 vm_stat_account(mm, vma->vm_flags, grow);
2928 anon_vma_interval_tree_pre_update_vma(vma);
2929 vma->vm_start = address;
2930 vma->vm_pgoff -= grow;
2931 /* Overwrite old entry in mtree. */
2932 vma_iter_store(&vmi, vma);
2933 anon_vma_interval_tree_post_update_vma(vma);
2934
2935 perf_event_mmap(vma);
2936 }
2937 }
2938 }
2939 anon_vma_unlock_write(vma->anon_vma);
2940 vma_iter_free(&vmi);
2941 validate_mm(mm);
2942 return error;
2943 }
2944
2945 int __vm_munmap(unsigned long start, size_t len, bool unlock)
2946 {
2947 int ret;
2948 struct mm_struct *mm = current->mm;
2949 LIST_HEAD(uf);
2950 VMA_ITERATOR(vmi, mm, start);
2951
2952 if (mmap_write_lock_killable(mm))
2953 return -EINTR;
2954
2955 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2956 if (ret || !unlock)
2957 mmap_write_unlock(mm);
2958
2959 userfaultfd_unmap_complete(mm, &uf);
2960 return ret;
2961 }
2962