// SPDX-License-Identifier: GPL-2.0
/*
 * mm/mremap.c
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"

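/*
 * Walk the existing page tables and return the PUD entry mapping @addr in
 * @mm, or NULL if any intermediate level is absent or bad.
 */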
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

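/* As above, but descend one more level; returns NULL if no PMD is present. */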
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

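/*
 * Find or allocate the PUD that will map @addr in the destination area,
 * allocating any missing P4D level on the way. Returns NULL on allocation
 * failure.
 */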
static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

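/* As above, but also allocate the destination PMD if it is not present. */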
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

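/*
 * Take/drop the rmap locks (file i_mmap_rwsem and anon_vma lock) so that a
 * concurrent rmap walk cannot miss the ptes while they are being moved.
 */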
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

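/*
 * Move the ptes covering [old_addr, old_end) from the old PMD to the new PMD,
 * one entry at a time, under both page table locks. Returns 0 on success, or
 * -EAGAIN if either pte page could not be mapped, in which case the caller
 * retries.
 */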
static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	pmd_t dummy_pmdval;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;
	int err = 0;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	if (!old_pte) {
		err = -EAGAIN;
		goto out;
	}
	/*
	 * Now new_pte is none, so hpage_collapse_scan_file() path can not find
	 * this by traversing file->f_mapping, so there is no concurrency with
	 * retract_page_tables(). In addition, we already hold the exclusive
	 * mmap_lock, so this new_pte page is stable, so there is no need to get
	 * pmdval and do pmd_same() check.
	 */
	new_pte = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
					   &new_ptl);
	if (!new_pte) {
		pte_unmap_unlock(old_pte, old_ptl);
		err = -EAGAIN;
		goto out;
	}
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(ptep_get(old_pte)))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with folio_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
out:
	if (need_rmap_locks)
		drop_rmap_locks(vma);
	return err;
}

#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	bool res = false;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	pmd = *old_pmd;

	/* Racing with collapse? */
	if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
		goto out_unlock;
	/* Clear the pmd */
	pmd_clear(old_pmd);
	res = true;

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
out_unlock:
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return res;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif

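/* Which level of page table entry move_pgt_entry() is being asked to move. */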
enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

/*
 * Attempts to speed up the move by moving the entry at the level corresponding
 * to pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

/*
 * A helper to check if aligning down is OK. The aligned address should fall
 * on *no mapping*. For the stack moving down, that's a special move within
 * the VMA that is created to span the source and destination of the move,
 * so we make an exception for it.
 */
static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
			   unsigned long mask, bool for_stack)
{
	unsigned long addr_masked = addr_to_align & mask;

	/*
	 * If @addr_to_align of either source or destination is not the beginning
	 * of the corresponding VMA, we can't align down or we will destroy part
	 * of the current mapping.
	 */
	if (!for_stack && vma->vm_start != addr_to_align)
		return false;

	/* In the stack case we explicitly permit in-VMA alignment. */
	if (for_stack && addr_masked >= vma->vm_start)
		return true;

	/*
	 * Make sure the realignment doesn't cause the address to fall on an
	 * existing mapping.
	 */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}

/* Opportunistically realign to specified boundary for faster copy. */
static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma,
			     unsigned long *new_addr, struct vm_area_struct *new_vma,
			     unsigned long mask, bool for_stack)
{
	/* Skip if the addresses are already aligned. */
	if ((*old_addr & ~mask) == 0)
		return;

	/* Only realign if the new and old addresses are mutually aligned. */
	if ((*old_addr & ~mask) != (*new_addr & ~mask))
		return;

	/* Ensure realignment doesn't cause overlap with existing mappings. */
	if (!can_align_down(old_vma, *old_addr, mask, for_stack) ||
	    !can_align_down(new_vma, *new_addr, mask, for_stack))
		return;

	*old_addr = *old_addr & mask;
	*new_addr = *new_addr & mask;
}

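/*
 * Move the page tables covering [old_addr, old_addr + len) from @vma to
 * @new_vma at @new_addr, moving a whole PUD or PMD at a time where the
 * architecture and alignment allow it, and falling back to copying ptes
 * one at a time otherwise. Returns the number of bytes successfully moved.
 */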
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks, bool for_stack)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;

	if (!len)
		return 0;

	old_end = old_addr + len;

	if (is_vm_hugetlb_page(vma))
		return move_hugetlb_page_tables(vma, new_vma, old_addr,
						new_addr, len);

	/*
	 * If possible, realign addresses to PMD boundary for faster copy.
	 * Only realign if the mremap copying hits a PMD boundary.
	 */
	if (len >= PMD_SIZE - (old_addr & ~PMD_MASK))
		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
				 for_stack);

	flush_cache_range(vma, old_addr, old_end);
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);

		old_pud = get_old_pud(vma->vm_mm, old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
					       old_pud, new_pud, need_rmap_locks);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
again:
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}
		if (pmd_none(*old_pmd))
			continue;
		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			      new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
			goto again;
	}

	mmu_notifier_invalidate_range_end(&range);

	/*
	 * Prevent negative return values when {old,new}_addr was realigned
	 * but we broke out of the above loop for the first PMD itself.
	 */
	if (old_addr < old_end - len)
		return 0;

	return len + old_addr - old_end;	/* how much done */
}

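/*
 * Copy @vma to @new_addr, move its page tables across, and unmap the old
 * range (unless MREMAP_DONTUNMAP), keeping memory accounting, locked_vm and
 * userfaultfd state consistent. Returns the new address on success or a
 * negative error code.
 */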
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	long to_account = new_len - old_len;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long account_start = 0;
	unsigned long account_end = 0;
	unsigned long hiwater_vm;
	int err = 0;
	bool need_rmap_locks;
	struct vma_iterator vmi;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(),
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (unlikely(flags & MREMAP_DONTUNMAP))
		to_account = new_len;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (vm_flags & VM_ACCOUNT) {
		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
			return -ENOMEM;
	}

	vma_start_write(vma);
	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (vm_flags & VM_ACCOUNT)
			vm_unacct_memory(to_account >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks, false);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since the page tables are still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true, false);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	if (is_vm_hugetlb_page(vma)) {
		clear_vma_resv_huge_pages(vma);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vm_flags_clear(vma, VM_ACCOUNT);
		if (vma->vm_start < old_addr)
			account_start = vma->vm_start;
		if (vma->vm_end > old_addr + old_len)
			account_end = vma->vm_end;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the pfn tracking code that the pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_clear(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vm_flags_clear(vma, VM_LOCKED_MASK);

		/*
		 * The anon_vma links of the old vma are no longer needed after
		 * its page tables have been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
		    vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	vma_iter_init(&vmi, mm, old_addr);
	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(old_len >> PAGE_SHIFT);
		account_start = account_end = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (account_start) {
		vma = vma_prev(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	if (account_end) {
		vma = vma_next(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	return new_addr;
}

/*
 * resize_is_valid() - Ensure the vma can be resized to the new length at the
 * given address.
 *
 * @vma: The vma to resize
 * @addr: The old address
 * @old_len: The current size
 * @new_len: The desired size
 * @flags: The vma flags
 *
 * Return 0 on success, error otherwise.
 */
static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long pgoff;

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return -EINVAL;
	}

	if ((flags & MREMAP_DONTUNMAP) &&
	    (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return -EINVAL;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return -EFAULT;

	if (new_len == old_len)
		return 0;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return -EINVAL;

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return -EFAULT;

	if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len))
		return -EAGAIN;

	if (!may_expand_vm(mm, vma->vm_flags,
			   (new_len - old_len) >> PAGE_SHIFT))
		return -ENOMEM;

	return 0;
}

/*
 * mremap_to() - remap a vma to a new location
 * @addr: The old address
 * @old_len: The old size
 * @new_addr: The target address
 * @new_len: The new size
 * @locked: If the returned vma is locked (VM_LOCKED)
 * @flags: the mremap flags
 * @uf: The mremap userfaultfd context
 * @uf_unmap_early: The userfaultfd unmap early context
 * @uf_unmap: The userfaultfd unmap context
 *
 * Returns: The new address of the vma or an error.
 */
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		return -EINVAL;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		return -EINVAL;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		return -EINVAL;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vma's after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute if the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst-case scenario is when both vma's (new_addr and old_addr)
	 * get split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether the current map count plus 2 still leaves us 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		/*
		 * In mremap_to() the VMA is moved to the dst address, so the
		 * dst range is unmapped first; do_munmap() will check whether
		 * dst is sealed.
		 */
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			return ret;
	}

	if (old_len > new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret)
			return ret;
		old_len = new_len;
	}

	vma = vma_lookup(mm, addr);
	if (!vma)
		return -EFAULT;

	ret = resize_is_valid(vma, addr, old_len, new_len, flags);
	if (ret)
		return ret;

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
	    !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		return -ENOMEM;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		return ret;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	return move_vma(vma, addr, old_len, new_len, new_addr, locked, flags,
			uf, uf_unmap);
}

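/*
 * Check whether the vma can simply be expanded in place by @delta bytes:
 * the extended range must not overflow, must not collide with another vma,
 * and must be acceptable to get_unmapped_area() at that fixed address.
 */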
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	bool locked = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
	 * information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
	    (!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	vma = vma_lookup(mm, addr);
	if (!vma) {
		ret = -EFAULT;
		goto out;
	}

	/* Don't allow remapping vmas when they have already been sealed */
	if (!can_modify_vma(vma)) {
		ret = -EPERM;
		goto out;
	}

	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h __maybe_unused = hstate_vma(vma);

		old_len = ALIGN(old_len, huge_page_size(h));
		new_len = ALIGN(new_len, huge_page_size(h));

		/* addrs must be huge page aligned */
		if (addr & ~huge_page_mask(h))
			goto out;
		if (new_addr & ~huge_page_mask(h))
			goto out;

		/*
		 * Don't allow remap expansion, because the underlying hugetlb
		 * reservation is not yet capable of handling split reservations.
		 */
		if (new_len > old_len)
			goto out;
	}

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_vmi_munmap does all the needed commit accounting, and
	 * unlocks the mmap_lock if so directed.
	 */
	if (old_len >= new_len) {
		VMA_ITERATOR(vmi, mm, addr + new_len);

		if (old_len == new_len) {
			ret = addr;
			goto out;
		}

		ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
				    &uf_unmap, true);
		if (ret)
			goto out;

		ret = addr;
		goto out_unlocked;
	}

	/*
	 * Ok, we need to grow..
	 */
	ret = resize_is_valid(vma, addr, old_len, new_len, flags);
	if (ret)
		goto out;

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		unsigned long delta = new_len - old_len;

		/* can we just expand the current mapping? */
		if (vma_expandable(vma, delta)) {
			long pages = delta >> PAGE_SHIFT;
			VMA_ITERATOR(vmi, mm, vma->vm_end);
			long charged = 0;

			if (vma->vm_flags & VM_ACCOUNT) {
				if (security_vm_enough_memory_mm(mm, pages)) {
					ret = -ENOMEM;
					goto out;
				}
				charged = pages;
			}

			/*
			 * vma_merge_extend() is called on the extension we
			 * are adding to the already existing vma; it will
			 * merge this extension with the existing vma (the
			 * expand operation itself) and possibly also with
			 * the next vma if it becomes adjacent to the
			 * expanded vma and is otherwise compatible.
			 */
			vma = vma_merge_extend(&vmi, vma, delta);
			if (!vma) {
				vm_unacct_memory(charged);
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret))
		locked = false;
	mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
out_unlocked:
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}