1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * mm/mremap.c
4 *
5 * (C) Copyright 1996 Linus Torvalds
6 *
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
8 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
9 */
10
11 #include <linux/mm.h>
12 #include <linux/mm_inline.h>
13 #include <linux/hugetlb.h>
14 #include <linux/shm.h>
15 #include <linux/ksm.h>
16 #include <linux/mman.h>
17 #include <linux/swap.h>
18 #include <linux/capability.h>
19 #include <linux/fs.h>
20 #include <linux/swapops.h>
21 #include <linux/highmem.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/mmu_notifier.h>
25 #include <linux/uaccess.h>
26 #include <linux/userfaultfd_k.h>
27 #include <linux/mempolicy.h>
28
29 #include <asm/cacheflush.h>
30 #include <asm/tlb.h>
31 #include <asm/pgalloc.h>
32
33 #include "internal.h"
34
35 /* Classify the kind of remap operation being performed. */
36 enum mremap_type {
37 MREMAP_INVALID, /* Initial state. */
38 MREMAP_NO_RESIZE, /* old_len == new_len, if not moved, do nothing. */
39 MREMAP_SHRINK, /* old_len > new_len. */
40 MREMAP_EXPAND, /* old_len < new_len. */
41 };
42
43 /*
44 * Describes a VMA mremap() operation and is threaded throughout it.
45 *
46 * Any of the fields may be mutated by the operation, however these values will
47 * always accurately reflect the remap (for instance, we may adjust lengths and
48 * delta to account for hugetlb alignment).
49 */
50 struct vma_remap_struct {
51 /* User-provided state. */
52 unsigned long addr; /* User-specified address from which we remap. */
53 unsigned long old_len; /* Length of range being remapped. */
54 unsigned long new_len; /* Desired new length of mapping. */
55 const unsigned long flags; /* user-specified MREMAP_* flags. */
56 unsigned long new_addr; /* Optionally, desired new address. */
57
58 /* uffd state. */
59 struct vm_userfaultfd_ctx *uf;
60 struct list_head *uf_unmap_early;
61 struct list_head *uf_unmap;
62
63 /* VMA state, determined in do_mremap(). */
64 struct vm_area_struct *vma;
65
66 /* Internal state, determined in do_mremap(). */
67 unsigned long delta; /* Absolute delta of old_len,new_len. */
68 bool populate_expand; /* mlock()'d expanded, must populate. */
69 enum mremap_type remap_type; /* expand, shrink, etc. */
70 bool mmap_locked; /* Is mm currently write-locked? */
71 unsigned long charged; /* If VM_ACCOUNT, # pages to account. */
72 bool vmi_needs_invalidate; /* Is the VMA iterator invalidated? */
73 };
74
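/*
 * Walk the existing page tables and return the PUD entry mapping @addr, or
 * NULL if no entry is present at any level.
 */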
75 static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
76 {
77 pgd_t *pgd;
78 p4d_t *p4d;
79 pud_t *pud;
80
81 pgd = pgd_offset(mm, addr);
82 if (pgd_none_or_clear_bad(pgd))
83 return NULL;
84
85 p4d = p4d_offset(pgd, addr);
86 if (p4d_none_or_clear_bad(p4d))
87 return NULL;
88
89 pud = pud_offset(p4d, addr);
90 if (pud_none_or_clear_bad(pud))
91 return NULL;
92
93 return pud;
94 }
95
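/*
 * As get_old_pud(), but descend one level further and return the PMD entry
 * mapping @addr, or NULL if none is present.
 */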
96 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
97 {
98 pud_t *pud;
99 pmd_t *pmd;
100
101 pud = get_old_pud(mm, addr);
102 if (!pud)
103 return NULL;
104
105 pmd = pmd_offset(pud, addr);
106 if (pmd_none(*pmd))
107 return NULL;
108
109 return pmd;
110 }
111
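/*
 * Allocate, if necessary, the page table levels down to and including the PUD
 * for @addr in the destination page tables. Returns NULL on allocation failure.
 */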
112 static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr)
113 {
114 pgd_t *pgd;
115 p4d_t *p4d;
116
117 pgd = pgd_offset(mm, addr);
118 p4d = p4d_alloc(mm, pgd, addr);
119 if (!p4d)
120 return NULL;
121
122 return pud_alloc(mm, p4d, addr);
123 }
124
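/*
 * As alloc_new_pud(), but additionally allocate the PMD level for @addr.
 * Returns NULL on allocation failure.
 */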
125 static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
126 {
127 pud_t *pud;
128 pmd_t *pmd;
129
130 pud = alloc_new_pud(mm, addr);
131 if (!pud)
132 return NULL;
133
134 pmd = pmd_alloc(mm, pud, addr);
135 if (!pmd)
136 return NULL;
137
138 VM_BUG_ON(pmd_trans_huge(*pmd));
139
140 return pmd;
141 }
142
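/*
 * Take the rmap locks guarding @vma - the file i_mmap lock and/or the
 * anon_vma lock - so rmap walkers see a consistent view while entries move.
 */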
143 static void take_rmap_locks(struct vm_area_struct *vma)
144 {
145 if (vma->vm_file)
146 i_mmap_lock_write(vma->vm_file->f_mapping);
147 if (vma->anon_vma)
148 anon_vma_lock_write(vma->anon_vma);
149 }
150
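/* Drop the locks taken by take_rmap_locks(), in reverse order. */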
151 static void drop_rmap_locks(struct vm_area_struct *vma)
152 {
153 if (vma->anon_vma)
154 anon_vma_unlock_write(vma->anon_vma);
155 if (vma->vm_file)
156 i_mmap_unlock_write(vma->vm_file->f_mapping);
157 }
158
159 static pte_t move_soft_dirty_pte(pte_t pte)
160 {
161 /*
162 * Set soft dirty bit so we can notice
163 * in userspace the ptes were moved.
164 */
165 #ifdef CONFIG_MEM_SOFT_DIRTY
166 if (pte_present(pte))
167 pte = pte_mksoft_dirty(pte);
168 else if (is_swap_pte(pte))
169 pte = pte_swp_mksoft_dirty(pte);
170 #endif
171 return pte;
172 }
173
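/*
 * Determine how many PTEs, starting at @ptep, map consecutive pages of the
 * same large folio and can therefore be moved as a batch. Returns at least 1
 * and at most @max_nr.
 */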
174 static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
175 pte_t *ptep, pte_t pte, int max_nr)
176 {
177 struct folio *folio;
178
179 if (max_nr == 1)
180 return 1;
181
182 folio = vm_normal_folio(vma, addr, pte);
183 if (!folio || !folio_test_large(folio))
184 return 1;
185
186 return folio_pte_batch(folio, ptep, pte, max_nr);
187 }
188
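/*
 * Move the PTEs covering [pmc->old_addr, pmc->old_addr + extent) from under
 * @old_pmd into place under @new_pmd, taking rmap locks if the PMC requires
 * it. Returns 0 on success, or -EAGAIN if a page table could not be mapped.
 */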
189 static int move_ptes(struct pagetable_move_control *pmc,
190 unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
191 {
192 struct vm_area_struct *vma = pmc->old;
193 bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
194 struct mm_struct *mm = vma->vm_mm;
195 pte_t *old_ptep, *new_ptep;
196 pte_t old_pte, pte;
197 pmd_t dummy_pmdval;
198 spinlock_t *old_ptl, *new_ptl;
199 bool force_flush = false;
200 unsigned long old_addr = pmc->old_addr;
201 unsigned long new_addr = pmc->new_addr;
202 unsigned long old_end = old_addr + extent;
203 unsigned long len = old_end - old_addr;
204 int max_nr_ptes;
205 int nr_ptes;
206 int err = 0;
207
208 /*
209 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
210 * locks to ensure that rmap will always observe either the old or the
211 * new ptes. This is the easiest way to avoid races with
212 * truncate_pagecache(), page migration, etc...
213 *
214 * When need_rmap_locks is false, we use other ways to avoid
215 * such races:
216 *
217 * - During exec() shift_arg_pages(), we use a specially tagged vma
218 * which rmap call sites look for using vma_is_temporary_stack().
219 *
220 * - During mremap(), new_vma is often known to be placed after vma
221 * in rmap traversal order. This ensures rmap will always observe
222 * either the old pte, or the new pte, or both (the page table locks
223 * serialize access to individual ptes, but only rmap traversal
224 * order guarantees that we won't miss both the old and new ptes).
225 */
226 if (pmc->need_rmap_locks)
227 take_rmap_locks(vma);
228
229 /*
230 * We don't have to worry about the ordering of src and dst
231 * pte locks because exclusive mmap_lock prevents deadlock.
232 */
233 old_ptep = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
234 if (!old_ptep) {
235 err = -EAGAIN;
236 goto out;
237 }
238 /*
239 * Now new_pte is none, so hpage_collapse_scan_file() path can not find
240 * this by traversing file->f_mapping, so there is no concurrency with
241 * retract_page_tables(). In addition, we already hold the exclusive
242 * mmap_lock, so this new_pte page is stable, so there is no need to get
243 * pmdval and do pmd_same() check.
244 */
245 new_ptep = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
246 &new_ptl);
247 if (!new_ptep) {
248 pte_unmap_unlock(old_ptep, old_ptl);
249 err = -EAGAIN;
250 goto out;
251 }
252 if (new_ptl != old_ptl)
253 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
254 flush_tlb_batched_pending(vma->vm_mm);
255 arch_enter_lazy_mmu_mode();
256
257 for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
258 new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
259 VM_WARN_ON_ONCE(!pte_none(*new_ptep));
260
261 nr_ptes = 1;
262 max_nr_ptes = (old_end - old_addr) >> PAGE_SHIFT;
263 old_pte = ptep_get(old_ptep);
264 if (pte_none(old_pte))
265 continue;
266
267 /*
268 * If we are remapping a valid PTE, make sure
269 * to flush TLB before we drop the PTL for the
270 * PTE.
271 *
272 * NOTE! Both old and new PTL matter: the old one
273 * for racing with folio_mkclean(), the new one to
274 * make sure the physical page stays valid until
275 * the TLB entry for the old mapping has been
276 * flushed.
277 */
278 if (pte_present(old_pte)) {
279 nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
280 old_pte, max_nr_ptes);
281 force_flush = true;
282 }
283 pte = get_and_clear_full_ptes(mm, old_addr, old_ptep, nr_ptes, 0);
284 pte = move_pte(pte, old_addr, new_addr);
285 pte = move_soft_dirty_pte(pte);
286
287 if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
288 pte_clear(mm, new_addr, new_ptep);
289 else {
290 if (need_clear_uffd_wp) {
291 if (pte_present(pte))
292 pte = pte_clear_uffd_wp(pte);
293 else if (is_swap_pte(pte))
294 pte = pte_swp_clear_uffd_wp(pte);
295 }
296 set_ptes(mm, new_addr, new_ptep, pte, nr_ptes);
297 }
298 }
299
300 arch_leave_lazy_mmu_mode();
301 if (force_flush)
302 flush_tlb_range(vma, old_end - len, old_end);
303 if (new_ptl != old_ptl)
304 spin_unlock(new_ptl);
305 pte_unmap(new_ptep - 1);
306 pte_unmap_unlock(old_ptep - 1, old_ptl);
307 out:
308 if (pmc->need_rmap_locks)
309 drop_rmap_locks(vma);
310 return err;
311 }
312
313 #ifndef arch_supports_page_table_move
314 #define arch_supports_page_table_move arch_supports_page_table_move
315 static inline bool arch_supports_page_table_move(void)
316 {
317 return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
318 IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
319 }
320 #endif
321
322 #ifdef CONFIG_HAVE_MOVE_PMD
323 static bool move_normal_pmd(struct pagetable_move_control *pmc,
324 pmd_t *old_pmd, pmd_t *new_pmd)
325 {
326 spinlock_t *old_ptl, *new_ptl;
327 struct vm_area_struct *vma = pmc->old;
328 struct mm_struct *mm = vma->vm_mm;
329 bool res = false;
330 pmd_t pmd;
331
332 if (!arch_supports_page_table_move())
333 return false;
334 /*
335 * The destination pmd shouldn't be established, free_pgtables()
336 * should have released it.
337 *
338 * However, there's a case during execve() where we use mremap
339 * to move the initial stack, and in that case the target area
340 * may overlap the source area (always moving down).
341 *
342 * If everything is PMD-aligned, that works fine, as moving
343 * each pmd down will clear the source pmd. But if we first
344 * have a few 4kB-only pages that get moved down, and then
345 * hit the "now the rest is PMD-aligned, let's do everything
346 * one pmd at a time", we will still have the old (now empty
347 * of any 4kB pages, but still there) PMD in the page table
348 * tree.
349 *
350 * Warn on it once - because we really should try to figure
351 * out how to do this better - but then say "I won't move
352 * this pmd".
353 *
354 * One alternative might be to just unmap the target pmd at
355 * this point, and verify that it really is empty. We'll see.
356 */
357 if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
358 return false;
359
360 /* If this pmd belongs to a uffd vma with remap events disabled, we need
361 * to ensure that the uffd-wp state is cleared from all pgtables. This
362 * means recursing into lower page tables in move_page_tables(), and we
363 * can reuse the existing code if we simply treat the entry as "not
364 * moved".
365 */
366 if (vma_has_uffd_without_event_remap(vma))
367 return false;
368
369 /*
370 * We don't have to worry about the ordering of src and dst
371 * ptlocks because exclusive mmap_lock prevents deadlock.
372 */
373 old_ptl = pmd_lock(mm, old_pmd);
374 new_ptl = pmd_lockptr(mm, new_pmd);
375 if (new_ptl != old_ptl)
376 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
377
378 pmd = *old_pmd;
379
380 /* Racing with collapse? */
381 if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
382 goto out_unlock;
383 /* Clear the pmd */
384 pmd_clear(old_pmd);
385 res = true;
386
387 VM_BUG_ON(!pmd_none(*new_pmd));
388
389 pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
390 flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
391 out_unlock:
392 if (new_ptl != old_ptl)
393 spin_unlock(new_ptl);
394 spin_unlock(old_ptl);
395
396 return res;
397 }
398 #else
399 static inline bool move_normal_pmd(struct pagetable_move_control *pmc,
400 pmd_t *old_pmd, pmd_t *new_pmd)
401 {
402 return false;
403 }
404 #endif
405
406 #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
407 static bool move_normal_pud(struct pagetable_move_control *pmc,
408 pud_t *old_pud, pud_t *new_pud)
409 {
410 spinlock_t *old_ptl, *new_ptl;
411 struct vm_area_struct *vma = pmc->old;
412 struct mm_struct *mm = vma->vm_mm;
413 pud_t pud;
414
415 if (!arch_supports_page_table_move())
416 return false;
417 /*
418 * The destination pud shouldn't be established, free_pgtables()
419 * should have released it.
420 */
421 if (WARN_ON_ONCE(!pud_none(*new_pud)))
422 return false;
423
424 /* If this pud belongs to a uffd vma with remap events disabled, we need
425 * to ensure that the uffd-wp state is cleared from all pgtables. This
426 * means recursing into lower page tables in move_page_tables(), and we
427 * can reuse the existing code if we simply treat the entry as "not
428 * moved".
429 */
430 if (vma_has_uffd_without_event_remap(vma))
431 return false;
432
433 /*
434 * We don't have to worry about the ordering of src and dst
435 * ptlocks because exclusive mmap_lock prevents deadlock.
436 */
437 old_ptl = pud_lock(mm, old_pud);
438 new_ptl = pud_lockptr(mm, new_pud);
439 if (new_ptl != old_ptl)
440 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
441
442 /* Clear the pud */
443 pud = *old_pud;
444 pud_clear(old_pud);
445
446 VM_BUG_ON(!pud_none(*new_pud));
447
448 pud_populate(mm, new_pud, pud_pgtable(pud));
449 flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
450 if (new_ptl != old_ptl)
451 spin_unlock(new_ptl);
452 spin_unlock(old_ptl);
453
454 return true;
455 }
456 #else
457 static inline bool move_normal_pud(struct pagetable_move_control *pmc,
458 pud_t *old_pud, pud_t *new_pud)
459 {
460 return false;
461 }
462 #endif
463
464 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
465 static bool move_huge_pud(struct pagetable_move_control *pmc,
466 pud_t *old_pud, pud_t *new_pud)
467 {
468 spinlock_t *old_ptl, *new_ptl;
469 struct vm_area_struct *vma = pmc->old;
470 struct mm_struct *mm = vma->vm_mm;
471 pud_t pud;
472
473 /*
474 * The destination pud shouldn't be established, free_pgtables()
475 * should have released it.
476 */
477 if (WARN_ON_ONCE(!pud_none(*new_pud)))
478 return false;
479
480 /*
481 * We don't have to worry about the ordering of src and dst
482 * ptlocks because exclusive mmap_lock prevents deadlock.
483 */
484 old_ptl = pud_lock(mm, old_pud);
485 new_ptl = pud_lockptr(mm, new_pud);
486 if (new_ptl != old_ptl)
487 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
488
489 /* Clear the pud */
490 pud = *old_pud;
491 pud_clear(old_pud);
492
493 VM_BUG_ON(!pud_none(*new_pud));
494
495 /* Set the new pud */
496 /* mark soft_dirty when we add pud level soft dirty support */
497 set_pud_at(mm, pmc->new_addr, new_pud, pud);
498 flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);
499 if (new_ptl != old_ptl)
500 spin_unlock(new_ptl);
501 spin_unlock(old_ptl);
502
503 return true;
504 }
505 #else
506 static bool move_huge_pud(struct pagetable_move_control *pmc,
507 pud_t *old_pud, pud_t *new_pud)
508
509 {
510 WARN_ON_ONCE(1);
511 return false;
512
513 }
514 #endif
515
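/* The page table level at which a move of entries may be attempted. */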
516 enum pgt_entry {
517 NORMAL_PMD,
518 HPAGE_PMD,
519 NORMAL_PUD,
520 HPAGE_PUD,
521 };
522
523 /*
524 * Returns an extent of the corresponding size for the pgt_entry specified if
525 * valid. Else returns a smaller extent bounded by the end of the source and
526 * destination pgt_entry.
527 */
528 static __always_inline unsigned long get_extent(enum pgt_entry entry,
529 struct pagetable_move_control *pmc)
530 {
531 unsigned long next, extent, mask, size;
532 unsigned long old_addr = pmc->old_addr;
533 unsigned long old_end = pmc->old_end;
534 unsigned long new_addr = pmc->new_addr;
535
536 switch (entry) {
537 case HPAGE_PMD:
538 case NORMAL_PMD:
539 mask = PMD_MASK;
540 size = PMD_SIZE;
541 break;
542 case HPAGE_PUD:
543 case NORMAL_PUD:
544 mask = PUD_MASK;
545 size = PUD_SIZE;
546 break;
547 default:
548 BUILD_BUG();
549 break;
550 }
551
552 next = (old_addr + size) & mask;
553 /* even if next overflowed, extent below will be ok */
554 extent = next - old_addr;
555 if (extent > old_end - old_addr)
556 extent = old_end - old_addr;
557 next = (new_addr + size) & mask;
558 if (extent > next - new_addr)
559 extent = next - new_addr;
560 return extent;
561 }
562
563 /*
564 * Should move_pgt_entry() acquire the rmap locks? This is either expressed in
565 * the PMC, or overridden in the case of normal, larger page tables.
566 */
567 static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
568 enum pgt_entry entry)
569 {
570 switch (entry) {
571 case NORMAL_PMD:
572 case NORMAL_PUD:
573 return true;
574 default:
575 return pmc->need_rmap_locks;
576 }
577 }
578
579 /*
580 * Attempts to speedup the move by moving entry at the level corresponding to
581 * pgt_entry. Returns true if the move was successful, else false.
582 */
583 static bool move_pgt_entry(struct pagetable_move_control *pmc,
584 enum pgt_entry entry, void *old_entry, void *new_entry)
585 {
586 bool moved = false;
587 bool need_rmap_locks = should_take_rmap_locks(pmc, entry);
588
589 /* See comment in move_ptes() */
590 if (need_rmap_locks)
591 take_rmap_locks(pmc->old);
592
593 switch (entry) {
594 case NORMAL_PMD:
595 moved = move_normal_pmd(pmc, old_entry, new_entry);
596 break;
597 case NORMAL_PUD:
598 moved = move_normal_pud(pmc, old_entry, new_entry);
599 break;
600 case HPAGE_PMD:
601 moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
602 move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry,
603 new_entry);
604 break;
605 case HPAGE_PUD:
606 moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
607 move_huge_pud(pmc, old_entry, new_entry);
608 break;
609
610 default:
611 WARN_ON_ONCE(1);
612 break;
613 }
614
615 if (need_rmap_locks)
616 drop_rmap_locks(pmc->old);
617
618 return moved;
619 }
620
621 /*
622 * A helper to check if aligning down is OK. The aligned address should fall
623 * on *no mapping*. For the stack moving down, that's a special move within
624 * the VMA that is created to span the source and destination of the move,
625 * so we make an exception for it.
626 */
627 static bool can_align_down(struct pagetable_move_control *pmc,
628 struct vm_area_struct *vma, unsigned long addr_to_align,
629 unsigned long mask)
630 {
631 unsigned long addr_masked = addr_to_align & mask;
632
633 /*
634 * If @addr_to_align of either source or destination is not the beginning
635 * of the corresponding VMA, we can't align down or we will destroy part
636 * of the current mapping.
637 */
638 if (!pmc->for_stack && vma->vm_start != addr_to_align)
639 return false;
640
641 /* In the stack case we explicitly permit in-VMA alignment. */
642 if (pmc->for_stack && addr_masked >= vma->vm_start)
643 return true;
644
645 /*
646 * Make sure the realignment doesn't cause the address to fall on an
647 * existing mapping.
648 */
649 return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
650 }
651
652 /*
653 * Determine if we are in fact able to realign for efficiency to a higher page
654 * table boundary.
655 */
656 static bool can_realign_addr(struct pagetable_move_control *pmc,
657 unsigned long pagetable_mask)
658 {
659 unsigned long align_mask = ~pagetable_mask;
660 unsigned long old_align = pmc->old_addr & align_mask;
661 unsigned long new_align = pmc->new_addr & align_mask;
662 unsigned long pagetable_size = align_mask + 1;
663 unsigned long old_align_next = pagetable_size - old_align;
664
665 /*
666 * We don't want to have to go hunting for VMAs from the end of the old
667 * VMA to the next page table boundary; also, we want to make sure the
668 * operation is worthwhile.
669 *
670 * So ensure that we only perform this realignment if the end of the
671 * range being copied reaches or crosses the page table boundary.
672 *
673 * boundary boundary
674 * .<- old_align -> .
675 * . |----------------.-----------|
676 * . | vma . |
677 * . |----------------.-----------|
678 * . <----------------.----------->
679 * . len_in
680 * <------------------------------->
681 * . pagetable_size .
682 * . <---------------->
683 * . old_align_next .
684 */
685 if (pmc->len_in < old_align_next)
686 return false;
687
688 /* Skip if the addresses are already aligned. */
689 if (old_align == 0)
690 return false;
691
692 /* Only realign if the new and old addresses are mutually aligned. */
693 if (old_align != new_align)
694 return false;
695
696 /* Ensure realignment doesn't cause overlap with existing mappings. */
697 if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) ||
698 !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask))
699 return false;
700
701 return true;
702 }
703
704 /*
705 * Opportunistically realign to specified boundary for faster copy.
706 *
707 * Consider an mremap() of a VMA with page table boundaries as below, and no
708 * preceding VMAs from the lower page table boundary to the start of the VMA,
709 * with the end of the range reaching or crossing the page table boundary.
710 *
711 * boundary boundary
712 * . |----------------.-----------|
713 * . | vma . |
714 * . |----------------.-----------|
715 * . pmc->old_addr . pmc->old_end
716 * . <---------------------------->
717 * . move these page tables
718 *
719 * If we proceed with moving page tables in this scenario, we will have a lot of
720 * work to do traversing old page tables and establishing new ones in the
721 * destination across multiple lower level page tables.
722 *
723 * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the
724 * page table boundary, so we can simply copy a single page table entry for the
725 * aligned portion of the VMA instead:
726 *
727 * boundary boundary
728 * . |----------------.-----------|
729 * . | vma . |
730 * . |----------------.-----------|
731 * pmc->old_addr . pmc->old_end
732 * <------------------------------------------->
733 * . move these page tables
734 */
735 static void try_realign_addr(struct pagetable_move_control *pmc,
736 unsigned long pagetable_mask)
737 {
738
739 if (!can_realign_addr(pmc, pagetable_mask))
740 return;
741
742 /*
743 * Simply align to page table boundaries. Note that we do NOT update the
744 * pmc->old_end value, and since the move_page_tables() operation spans
745 * from [old_addr, old_end) (offsetting new_addr as it is performed),
746 * this simply changes the start of the copy, not the end.
747 */
748 pmc->old_addr &= pagetable_mask;
749 pmc->new_addr &= pagetable_mask;
750 }
751
752 /* Is the page table move operation done? */
753 static bool pmc_done(struct pagetable_move_control *pmc)
754 {
755 return pmc->old_addr >= pmc->old_end;
756 }
757
758 /* Advance to the next page table, offset by extent bytes. */
759 static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent)
760 {
761 pmc->old_addr += extent;
762 pmc->new_addr += extent;
763 }
764
765 /*
766 * Determine how many bytes in the specified input range have had their page
767 * tables moved so far.
768 */
769 static unsigned long pmc_progress(struct pagetable_move_control *pmc)
770 {
771 unsigned long orig_old_addr = pmc->old_end - pmc->len_in;
772 unsigned long old_addr = pmc->old_addr;
773
774 /*
775 * Prevent negative return values when {old,new}_addr was realigned but
776 * we broke out of the loop in move_page_tables() for the first PMD
777 * itself.
778 */
779 return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;
780 }
781
782 unsigned long move_page_tables(struct pagetable_move_control *pmc)
783 {
784 unsigned long extent;
785 struct mmu_notifier_range range;
786 pmd_t *old_pmd, *new_pmd;
787 pud_t *old_pud, *new_pud;
788 struct mm_struct *mm = pmc->old->vm_mm;
789
790 if (!pmc->len_in)
791 return 0;
792
793 if (is_vm_hugetlb_page(pmc->old))
794 return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr,
795 pmc->new_addr, pmc->len_in);
796
797 /*
798 * If possible, realign addresses to PMD boundary for faster copy.
799 * Only realign if the mremap copying hits a PMD boundary.
800 */
801 try_realign_addr(pmc, PMD_MASK);
802
803 flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end);
804 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm,
805 pmc->old_addr, pmc->old_end);
806 mmu_notifier_invalidate_range_start(&range);
807
808 for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
809 cond_resched();
810 /*
811 * If extent is PUD-sized try to speed up the move by moving at the
812 * PUD level if possible.
813 */
814 extent = get_extent(NORMAL_PUD, pmc);
815
816 old_pud = get_old_pud(mm, pmc->old_addr);
817 if (!old_pud)
818 continue;
819 new_pud = alloc_new_pud(mm, pmc->new_addr);
820 if (!new_pud)
821 break;
822 if (pud_trans_huge(*old_pud)) {
823 if (extent == HPAGE_PUD_SIZE) {
824 move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
825 /* We ignore and continue on error? */
826 continue;
827 }
828 } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
829 if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud))
830 continue;
831 }
832
833 extent = get_extent(NORMAL_PMD, pmc);
834 old_pmd = get_old_pmd(mm, pmc->old_addr);
835 if (!old_pmd)
836 continue;
837 new_pmd = alloc_new_pmd(mm, pmc->new_addr);
838 if (!new_pmd)
839 break;
840 again:
841 if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
842 if (extent == HPAGE_PMD_SIZE &&
843 move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
844 continue;
845 split_huge_pmd(pmc->old, old_pmd, pmc->old_addr);
846 } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
847 extent == PMD_SIZE) {
848 /*
849 * If the extent is PMD-sized, try to speed the move by
850 * moving at the PMD level if possible.
851 */
852 if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd))
853 continue;
854 }
855 if (pmd_none(*old_pmd))
856 continue;
857 if (pte_alloc(pmc->new->vm_mm, new_pmd))
858 break;
859 if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0)
860 goto again;
861 }
862
863 mmu_notifier_invalidate_range_end(&range);
864
865 return pmc_progress(pmc);
866 }
867
868 /* Set vrm->delta to the difference in VMA size specified by user. */
869 static void vrm_set_delta(struct vma_remap_struct *vrm)
870 {
871 vrm->delta = abs_diff(vrm->old_len, vrm->new_len);
872 }
873
874 /* Determine what kind of remap this is - shrink, expand or no resize at all. */
875 static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm)
876 {
877 if (vrm->delta == 0)
878 return MREMAP_NO_RESIZE;
879
880 if (vrm->old_len > vrm->new_len)
881 return MREMAP_SHRINK;
882
883 return MREMAP_EXPAND;
884 }
885
886 /*
887 * When moving a VMA to vrm->new_addr, does this result in the new and old VMAs
888 * overlapping?
889 */
890 static bool vrm_overlaps(struct vma_remap_struct *vrm)
891 {
892 unsigned long start_old = vrm->addr;
893 unsigned long start_new = vrm->new_addr;
894 unsigned long end_old = vrm->addr + vrm->old_len;
895 unsigned long end_new = vrm->new_addr + vrm->new_len;
896
897 /*
898 * start_old end_old
899 * |-----------|
900 * | |
901 * |-----------|
902 * |-------------|
903 * | |
904 * |-------------|
905 * start_new end_new
906 */
907 if (end_old > start_new && end_new > start_old)
908 return true;
909
910 return false;
911 }
912
913 /*
914 * Will a new address definitely be assigned? This is the case either if the
915 * user specifies it via MREMAP_FIXED, or if MREMAP_DONTUNMAP is used,
916 * indicating we will always determine a target address.
917 */
918 static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
919 {
920 return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP);
921 }
922
923 /*
924 * Find an unmapped area for the requested vrm->new_addr.
925 *
926 * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only
927 * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to
928 * mmap(), otherwise this is equivalent to mmap() specifying a NULL address.
929 *
930 * Returns 0 on success (with vrm->new_addr updated), or an error code upon
931 * failure.
932 */
933 static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm)
934 {
935 struct vm_area_struct *vma = vrm->vma;
936 unsigned long map_flags = 0;
937 /* Page Offset _into_ the VMA. */
938 pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT;
939 pgoff_t pgoff = vma->vm_pgoff + internal_pgoff;
940 unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0;
941 unsigned long res;
942
943 if (vrm->flags & MREMAP_FIXED)
944 map_flags |= MAP_FIXED;
945 if (vma->vm_flags & VM_MAYSHARE)
946 map_flags |= MAP_SHARED;
947
948 res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff,
949 map_flags);
950 if (IS_ERR_VALUE(res))
951 return res;
952
953 vrm->new_addr = res;
954 return 0;
955 }
956
957 /*
958 * Keep track of pages which have been added to the memory mapping. If the VMA
959 * is accounted, also check to see if there is sufficient memory.
960 *
961 * Returns true on success, false if insufficient memory to charge.
962 */
963 static bool vrm_calc_charge(struct vma_remap_struct *vrm)
964 {
965 unsigned long charged;
966
967 if (!(vrm->vma->vm_flags & VM_ACCOUNT))
968 return true;
969
970 /*
971 * If we don't unmap the old mapping, then we account the entirety of
972 * the length of the new one. Otherwise it's just the delta in size.
973 */
974 if (vrm->flags & MREMAP_DONTUNMAP)
975 charged = vrm->new_len >> PAGE_SHIFT;
976 else
977 charged = vrm->delta >> PAGE_SHIFT;
978
979
980 /* This accounts 'charged' pages of memory. */
981 if (security_vm_enough_memory_mm(current->mm, charged))
982 return false;
983
984 vrm->charged = charged;
985 return true;
986 }
987
988 /*
989 * An error has occurred, so we will not be using vrm->charged memory. Unaccount
990 * this memory if the VMA is accounted.
991 */
992 static void vrm_uncharge(struct vma_remap_struct *vrm)
993 {
994 if (!(vrm->vma->vm_flags & VM_ACCOUNT))
995 return;
996
997 vm_unacct_memory(vrm->charged);
998 vrm->charged = 0;
999 }
1000
1001 /*
1002 * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to
1003 * account for 'bytes' memory used, and if locked, indicate this in the VRM so
1004 * we can handle this correctly later.
1005 */
1006 static void vrm_stat_account(struct vma_remap_struct *vrm,
1007 unsigned long bytes)
1008 {
1009 unsigned long pages = bytes >> PAGE_SHIFT;
1010 struct mm_struct *mm = current->mm;
1011 struct vm_area_struct *vma = vrm->vma;
1012
1013 vm_stat_account(mm, vma->vm_flags, pages);
1014 if (vma->vm_flags & VM_LOCKED)
1015 mm->locked_vm += pages;
1016 }
1017
1018 /*
1019 * Perform checks on a VMA, and prepare it, prior to it being
1020 * moved.
1021 */
1022 static unsigned long prep_move_vma(struct vma_remap_struct *vrm)
1023 {
1024 unsigned long err = 0;
1025 struct vm_area_struct *vma = vrm->vma;
1026 unsigned long old_addr = vrm->addr;
1027 unsigned long old_len = vrm->old_len;
1028 vm_flags_t dummy = vma->vm_flags;
1029
1030 /*
1031 * We'd prefer to avoid failure later on in do_munmap:
1032 * which may split one vma into three before unmapping.
1033 */
1034 if (current->mm->map_count >= sysctl_max_map_count - 3)
1035 return -ENOMEM;
1036
1037 if (vma->vm_ops && vma->vm_ops->may_split) {
1038 if (vma->vm_start != old_addr)
1039 err = vma->vm_ops->may_split(vma, old_addr);
1040 if (!err && vma->vm_end != old_addr + old_len)
1041 err = vma->vm_ops->may_split(vma, old_addr + old_len);
1042 if (err)
1043 return err;
1044 }
1045
1046 /*
1047 * Advise KSM to break any KSM pages in the area to be moved:
1048 * it would be confusing if they were to turn up at the new
1049 * location, where they happen to coincide with different KSM
1050 * pages recently unmapped. But leave vma->vm_flags as it was,
1051 * so KSM can come around to merge on vma and new_vma afterwards.
1052 */
1053 err = ksm_madvise(vma, old_addr, old_addr + old_len,
1054 MADV_UNMERGEABLE, &dummy);
1055 if (err)
1056 return err;
1057
1058 return 0;
1059 }
1060
1061 /*
1062 * Unmap source VMA for VMA move, turning it from a copy to a move, being
1063 * careful to ensure we do not underflow the memory account while doing so if
1064 * this is an accountable move.
1065 *
1066 * This is best effort, if we fail to unmap then we simply try to correct
1067 * accounting and exit.
1068 */
1069 static void unmap_source_vma(struct vma_remap_struct *vrm)
1070 {
1071 struct mm_struct *mm = current->mm;
1072 unsigned long addr = vrm->addr;
1073 unsigned long len = vrm->old_len;
1074 struct vm_area_struct *vma = vrm->vma;
1075 VMA_ITERATOR(vmi, mm, addr);
1076 int err;
1077 unsigned long vm_start;
1078 unsigned long vm_end;
1079 /*
1080 * It might seem odd that we check for MREMAP_DONTUNMAP here, given this
1081 * function implies that we unmap the original VMA, which seems
1082 * contradictory.
1083 *
1084 * However, this occurs when this operation was attempted and an error
1085 * arose, in which case we _do_ wish to unmap the _new_ VMA, which means
1086 * we actually _do_ want it to be unaccounted.
1087 */
1088 bool accountable_move = (vma->vm_flags & VM_ACCOUNT) &&
1089 !(vrm->flags & MREMAP_DONTUNMAP);
1090
1091 /*
1092 * So we perform a trick here to prevent incorrect accounting. Any merge
1093 * or new VMA allocation performed in copy_vma() does not adjust
1094 * accounting, it is expected that callers handle this.
1095 *
1096 * And indeed we have already done so, accounting appropriately in both
1097 * cases in vrm_calc_charge().
1098 *
1099 * However, when we unmap the existing VMA (to effect the move), this
1100 * code will, if the VMA has VM_ACCOUNT set, attempt to unaccount
1101 * removed pages.
1102 *
1103 * To avoid this we temporarily clear this flag, reinstating on any
1104 * portions of the original VMA that remain.
1105 */
1106 if (accountable_move) {
1107 vm_flags_clear(vma, VM_ACCOUNT);
1108 /* We are about to split vma, so store the start/end. */
1109 vm_start = vma->vm_start;
1110 vm_end = vma->vm_end;
1111 }
1112
1113 err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false);
1114 vrm->vma = NULL; /* Invalidated. */
1115 vrm->vmi_needs_invalidate = true;
1116 if (err) {
1117 /* OOM: unable to split vma, just get accounts right */
1118 vm_acct_memory(len >> PAGE_SHIFT);
1119 return;
1120 }
1121
1122 /*
1123 * If we mremap() from a VMA like this:
1124 *
1125 * addr end
1126 * | |
1127 * v v
1128 * |-------------|
1129 * | |
1130 * |-------------|
1131 *
1132 * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above
1133 * we'll end up with:
1134 *
1135 * addr end
1136 * | |
1137 * v v
1138 * |---| |---|
1139 * | A | | B |
1140 * |---| |---|
1141 *
1142 * The VMI is still pointing at addr, so vma_prev() will give us A, and
1143 * a subsequent or lone vma_next() will give us B.
1144 *
1145 * do_vmi_munmap() will have restored the VMI back to addr.
1146 */
1147 if (accountable_move) {
1148 unsigned long end = addr + len;
1149
1150 if (vm_start < addr) {
1151 struct vm_area_struct *prev = vma_prev(&vmi);
1152
1153 vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */
1154 }
1155
1156 if (vm_end > end) {
1157 struct vm_area_struct *next = vma_next(&vmi);
1158
1159 vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. */
1160 }
1161 }
1162 }
1163
1164 /*
1165 * Copy vrm->vma over to vrm->new_addr possibly adjusting size as part of the
1166 * process. Additionally handle an error occurring on moving of page tables,
1167 * where we reset vrm state to cause unmapping of the new VMA.
1168 *
1169 * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an
1170 * error code.
1171 */
1172 static int copy_vma_and_data(struct vma_remap_struct *vrm,
1173 struct vm_area_struct **new_vma_ptr)
1174 {
1175 unsigned long internal_offset = vrm->addr - vrm->vma->vm_start;
1176 unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT;
1177 unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff;
1178 unsigned long moved_len;
1179 struct vm_area_struct *vma = vrm->vma;
1180 struct vm_area_struct *new_vma;
1181 int err = 0;
1182 PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len);
1183
1184 new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff,
1185 &pmc.need_rmap_locks);
1186 if (!new_vma) {
1187 vrm_uncharge(vrm);
1188 *new_vma_ptr = NULL;
1189 return -ENOMEM;
1190 }
1191 /* By merging, we may have invalidated any iterator in use. */
1192 if (vma != vrm->vma)
1193 vrm->vmi_needs_invalidate = true;
1194
1195 vrm->vma = vma;
1196 pmc.old = vma;
1197 pmc.new = new_vma;
1198
1199 moved_len = move_page_tables(&pmc);
1200 if (moved_len < vrm->old_len)
1201 err = -ENOMEM;
1202 else if (vma->vm_ops && vma->vm_ops->mremap)
1203 err = vma->vm_ops->mremap(new_vma);
1204
1205 if (unlikely(err)) {
1206 PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr,
1207 vrm->addr, moved_len);
1208
1209 /*
1210 * On error, move entries back from new area to old,
1211 * which will succeed since page tables still there,
1212 * and then proceed to unmap new area instead of old.
1213 */
1214 pmc_revert.need_rmap_locks = true;
1215 move_page_tables(&pmc_revert);
1216
1217 vrm->vma = new_vma;
1218 vrm->old_len = vrm->new_len;
1219 vrm->addr = vrm->new_addr;
1220 } else {
1221 mremap_userfaultfd_prep(new_vma, vrm->uf);
1222 }
1223
1224 fixup_hugetlb_reservations(vma);
1225
1226 *new_vma_ptr = new_vma;
1227 return err;
1228 }
1229
1230 /*
1231 * Perform final tasks for MREMAP_DONTUNMAP operation, clearing mlock() and
1232 * account flags on remaining VMA by convention (it cannot be mlock()'d any
1233 * longer, as pages in range are no longer mapped), and removing anon_vma_chain
1234 * links from it (if the entire VMA was copied over).
1235 */
1236 static void dontunmap_complete(struct vma_remap_struct *vrm,
1237 struct vm_area_struct *new_vma)
1238 {
1239 unsigned long start = vrm->addr;
1240 unsigned long end = vrm->addr + vrm->old_len;
1241 unsigned long old_start = vrm->vma->vm_start;
1242 unsigned long old_end = vrm->vma->vm_end;
1243
1244 /*
1245 * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old
1246 * vma.
1247 */
1248 vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT);
1249
1250 /*
1251 * anon_vma links of the old vma are no longer needed after its page
1252 * table has been moved.
1253 */
1254 if (new_vma != vrm->vma && start == old_start && end == old_end)
1255 unlink_anon_vmas(vrm->vma);
1256
1257 /* Because we won't unmap we don't need to touch locked_vm. */
1258 }
1259
1260 static unsigned long move_vma(struct vma_remap_struct *vrm)
1261 {
1262 struct mm_struct *mm = current->mm;
1263 struct vm_area_struct *new_vma;
1264 unsigned long hiwater_vm;
1265 int err;
1266
1267 err = prep_move_vma(vrm);
1268 if (err)
1269 return err;
1270
1271 /*
1272 * If accounted, determine the number of bytes the operation will
1273 * charge.
1274 */
1275 if (!vrm_calc_charge(vrm))
1276 return -ENOMEM;
1277
1278 /* We don't want racing faults. */
1279 vma_start_write(vrm->vma);
1280
1281 /* Perform copy step. */
1282 err = copy_vma_and_data(vrm, &new_vma);
1283 /*
1284 * If we established the copied-to VMA, we attempt to recover from the
1285 * error by setting the destination VMA to the source VMA and unmapping
1286 * it below.
1287 */
1288 if (err && !new_vma)
1289 return err;
1290
1291 /*
1292 * If we failed to move page tables we still do total_vm increment
1293 * since do_munmap() will decrement it by old_len == new_len.
1294 *
1295 * Since total_vm is about to be raised artificially high for a
1296 * moment, we need to restore high watermark afterwards: if stats
1297 * are taken meanwhile, total_vm and hiwater_vm appear too high.
1298 * If this were a serious issue, we'd add a flag to do_munmap().
1299 */
1300 hiwater_vm = mm->hiwater_vm;
1301
1302 vrm_stat_account(vrm, vrm->new_len);
1303 if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP)))
1304 dontunmap_complete(vrm, new_vma);
1305 else
1306 unmap_source_vma(vrm);
1307
1308 mm->hiwater_vm = hiwater_vm;
1309
1310 return err ? (unsigned long)err : vrm->new_addr;
1311 }
1312
1313 /*
1314 * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so
1315 * execute this, optionally dropping the mmap lock when we do so.
1316 *
1317 * In both cases this invalidates the VMA, however if we don't drop the lock,
1318 * then load the correct VMA into vrm->vma afterwards.
1319 */
1320 static unsigned long shrink_vma(struct vma_remap_struct *vrm,
1321 bool drop_lock)
1322 {
1323 struct mm_struct *mm = current->mm;
1324 unsigned long unmap_start = vrm->addr + vrm->new_len;
1325 unsigned long unmap_bytes = vrm->delta;
1326 unsigned long res;
1327 VMA_ITERATOR(vmi, mm, unmap_start);
1328
1329 VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK);
1330
1331 res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes,
1332 vrm->uf_unmap, drop_lock);
1333 vrm->vma = NULL; /* Invalidated. */
1334 if (res)
1335 return res;
1336
1337 /*
1338 * If we've not dropped the lock, then we should reload the VMA to
1339 * replace the invalidated VMA with the one that may have now been
1340 * split.
1341 */
1342 if (drop_lock) {
1343 vrm->mmap_locked = false;
1344 } else {
1345 vrm->vma = vma_lookup(mm, vrm->addr);
1346 if (!vrm->vma)
1347 return -EFAULT;
1348 }
1349
1350 return 0;
1351 }
1352
1353 /*
1354 * mremap_to() - remap a vma to a new location.
1355 * Returns: The new address of the vma or an error.
1356 */
1357 static unsigned long mremap_to(struct vma_remap_struct *vrm)
1358 {
1359 struct mm_struct *mm = current->mm;
1360 unsigned long err;
1361
1362 if (vrm->flags & MREMAP_FIXED) {
1363 /*
1364 * In mremap_to() the VMA is moved to the dst address, so we
1365 * munmap dst first.
1366 * do_munmap() will check whether dst is sealed.
1367 */
1368 err = do_munmap(mm, vrm->new_addr, vrm->new_len,
1369 vrm->uf_unmap_early);
1370 vrm->vma = NULL; /* Invalidated. */
1371 vrm->vmi_needs_invalidate = true;
1372 if (err)
1373 return err;
1374
1375 /*
1376 * If we remap a portion of a VMA elsewhere in the same VMA,
1377 * this can invalidate the old VMA. Reset.
1378 */
1379 vrm->vma = vma_lookup(mm, vrm->addr);
1380 if (!vrm->vma)
1381 return -EFAULT;
1382 }
1383
1384 if (vrm->remap_type == MREMAP_SHRINK) {
1385 err = shrink_vma(vrm, /* drop_lock= */false);
1386 if (err)
1387 return err;
1388
1389 /* Set up for the move now shrink has been executed. */
1390 vrm->old_len = vrm->new_len;
1391 }
1392
1393 /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
1394 if (vrm->flags & MREMAP_DONTUNMAP) {
1395 vm_flags_t vm_flags = vrm->vma->vm_flags;
1396 unsigned long pages = vrm->old_len >> PAGE_SHIFT;
1397
1398 if (!may_expand_vm(mm, vm_flags, pages))
1399 return -ENOMEM;
1400 }
1401
1402 err = vrm_set_new_addr(vrm);
1403 if (err)
1404 return err;
1405
1406 return move_vma(vrm);
1407 }
1408
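/*
 * Can @vma be expanded in place by @delta bytes? The range being expanded
 * into must not intersect an existing mapping and must be acceptable to
 * get_unmapped_area().
 */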
1409 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
1410 {
1411 unsigned long end = vma->vm_end + delta;
1412
1413 if (end < vma->vm_end) /* overflow */
1414 return 0;
1415 if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
1416 return 0;
1417 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
1418 0, MAP_FIXED) & ~PAGE_MASK)
1419 return 0;
1420 return 1;
1421 }
1422
1423 /* Determine whether we are actually able to execute an in-place expansion. */
1424 static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm)
1425 {
1426 /* Number of bytes from vrm->addr to end of VMA. */
1427 unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr;
1428
1429 /* If end of range aligns to end of VMA, we can just expand in-place. */
1430 if (suffix_bytes != vrm->old_len)
1431 return false;
1432
1433 /* Check whether this is feasible. */
1434 if (!vma_expandable(vrm->vma, vrm->delta))
1435 return false;
1436
1437 return true;
1438 }
1439
1440 /*
1441 * We know we can expand the VMA in-place by delta pages, so do so.
1442 *
1443 * If we discover the VMA is locked, update mm_struct statistics accordingly and
1444 * indicate so to the caller.
1445 */
1446 static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
1447 {
1448 struct mm_struct *mm = current->mm;
1449 struct vm_area_struct *vma = vrm->vma;
1450 VMA_ITERATOR(vmi, mm, vma->vm_end);
1451
1452 if (!vrm_calc_charge(vrm))
1453 return -ENOMEM;
1454
1455 /*
1456 * Function vma_merge_extend() is called on the
1457 * extension we are adding to the already existing vma,
1458 * vma_merge_extend() will merge this extension with the
1459 * already existing vma (expand operation itself) and
1460 * possibly also with the next vma if it becomes
1461 * adjacent to the expanded vma and otherwise
1462 * compatible.
1463 */
1464 vma = vma_merge_extend(&vmi, vma, vrm->delta);
1465 if (!vma) {
1466 vrm_uncharge(vrm);
1467 return -ENOMEM;
1468 }
1469 vrm->vma = vma;
1470
1471 vrm_stat_account(vrm, vrm->delta);
1472
1473 return 0;
1474 }
1475
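/*
 * Hugetlb mappings can only be remapped in whole huge page units - round the
 * lengths up accordingly, and reject misaligned addresses or any attempt to
 * expand the mapping.
 */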
1476 static bool align_hugetlb(struct vma_remap_struct *vrm)
1477 {
1478 struct hstate *h __maybe_unused = hstate_vma(vrm->vma);
1479
1480 vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h));
1481 vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h));
1482
1483 /* addrs must be huge page aligned */
1484 if (vrm->addr & ~huge_page_mask(h))
1485 return false;
1486 if (vrm->new_addr & ~huge_page_mask(h))
1487 return false;
1488
1489 /*
1490 * Don't allow remap expansion, because the underlying hugetlb
1491 * reservation is not yet capable of handling split reservations.
1492 */
1493 if (vrm->new_len > vrm->old_len)
1494 return false;
1495
1496 return true;
1497 }
1498
1499 /*
1500 * We are mremap()'ing without specifying a fixed address to move to, but are
1501 * requesting that the VMA's size be increased.
1502 *
1503 * Try to do so in-place, if this fails, then move the VMA to a new location to
1504 * action the change.
1505 */
1506 static unsigned long expand_vma(struct vma_remap_struct *vrm)
1507 {
1508 unsigned long err;
1509
1510 /*
1511 * [addr, old_len) spans precisely to the end of the VMA, so try to
1512 * expand it in-place.
1513 */
1514 if (vrm_can_expand_in_place(vrm)) {
1515 err = expand_vma_in_place(vrm);
1516 if (err)
1517 return err;
1518
1519 /* OK we're done! */
1520 return vrm->addr;
1521 }
1522
1523 /*
1524 * We weren't able to just expand or shrink the area,
1525 * we need to create a new one and move it.
1526 */
1527
1528 /* We're not allowed to move the VMA, so error out. */
1529 if (!(vrm->flags & MREMAP_MAYMOVE))
1530 return -ENOMEM;
1531
1532 /* Find a new location to move the VMA to. */
1533 err = vrm_set_new_addr(vrm);
1534 if (err)
1535 return err;
1536
1537 return move_vma(vrm);
1538 }
1539
1540 /*
1541 * Attempt to resize the VMA in-place, if we cannot, then move the VMA to the
1542 * first available address to perform the operation.
1543 */
1544 static unsigned long mremap_at(struct vma_remap_struct *vrm)
1545 {
1546 unsigned long res;
1547
1548 switch (vrm->remap_type) {
1549 case MREMAP_INVALID:
1550 break;
1551 case MREMAP_NO_RESIZE:
1552 /* NO-OP CASE - resizing to the same size. */
1553 return vrm->addr;
1554 case MREMAP_SHRINK:
1555 /*
1556 * SHRINK CASE. Can always be done in-place.
1557 *
1558 * Simply unmap the shrunken portion of the VMA. This does all
1559 * the needed commit accounting, and we indicate that the mmap
1560 * lock should be dropped.
1561 */
1562 res = shrink_vma(vrm, /* drop_lock= */true);
1563 if (res)
1564 return res;
1565
1566 return vrm->addr;
1567 case MREMAP_EXPAND:
1568 return expand_vma(vrm);
1569 }
1570
1571 /* Should not be possible. */
1572 WARN_ON_ONCE(1);
1573 return -EINVAL;
1574 }
1575
1576 /*
1577 * Will this operation result in the VMA being expanded or moved and thus need
1578 * to map a new portion of virtual address space?
1579 */
1580 static bool vrm_will_map_new(struct vma_remap_struct *vrm)
1581 {
1582 if (vrm->remap_type == MREMAP_EXPAND)
1583 return true;
1584
1585 if (vrm_implies_new_addr(vrm))
1586 return true;
1587
1588 return false;
1589 }
1590
1591 /* Does this remap ONLY move mappings? */
1592 static bool vrm_move_only(struct vma_remap_struct *vrm)
1593 {
1594 if (!(vrm->flags & MREMAP_FIXED))
1595 return false;
1596
1597 if (vrm->old_len != vrm->new_len)
1598 return false;
1599
1600 return true;
1601 }
1602
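/*
 * Notify userfaultfd of any unmaps performed, and of the completion or
 * failure of the remap operation itself.
 */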
1603 static void notify_uffd(struct vma_remap_struct *vrm, bool failed)
1604 {
1605 struct mm_struct *mm = current->mm;
1606
1607 /* Regardless of success/failure, we always notify of any unmaps. */
1608 userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
1609 if (failed)
1610 mremap_userfaultfd_fail(vrm->uf);
1611 else
1612 mremap_userfaultfd_complete(vrm->uf, vrm->addr,
1613 vrm->new_addr, vrm->old_len);
1614 userfaultfd_unmap_complete(mm, vrm->uf_unmap);
1615 }
1616
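/* May @vma be moved as part of a multi-VMA (batched) move? */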
1617 static bool vma_multi_allowed(struct vm_area_struct *vma)
1618 {
1619 struct file *file;
1620
1621 /*
1622 * We can't support moving multiple uffd VMAs as notify requires
1623 * mmap lock to be dropped.
1624 */
1625 if (userfaultfd_armed(vma))
1626 return false;
1627
1628 /*
1629 * Custom get unmapped area might result in MREMAP_FIXED not
1630 * being obeyed.
1631 */
1632 file = vma->vm_file;
1633 if (file && !vma_is_shmem(vma) && !is_vm_hugetlb_page(vma)) {
1634 const struct file_operations *fop = file->f_op;
1635
1636 if (fop->get_unmapped_area)
1637 return false;
1638 }
1639
1640 return true;
1641 }
1642
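/*
 * Check that the remap described by @vrm can be applied to the looked-up VMA,
 * and complete the vrm state (delta, remap type, new_addr), enforcing limits
 * where the operation will map new address space.
 */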
1643 static int check_prep_vma(struct vma_remap_struct *vrm)
1644 {
1645 struct vm_area_struct *vma = vrm->vma;
1646 struct mm_struct *mm = current->mm;
1647 unsigned long addr = vrm->addr;
1648 unsigned long old_len, new_len, pgoff;
1649
1650 if (!vma)
1651 return -EFAULT;
1652
1653 /* If mseal()'d, mremap() is prohibited. */
1654 if (!can_modify_vma(vma))
1655 return -EPERM;
1656
1657 /* Align to hugetlb page size, if required. */
1658 if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm))
1659 return -EINVAL;
1660
1661 vrm_set_delta(vrm);
1662 vrm->remap_type = vrm_remap_type(vrm);
1663 /* For convenience, we set new_addr even if VMA won't move. */
1664 if (!vrm_implies_new_addr(vrm))
1665 vrm->new_addr = addr;
1666
1667 /* Below only meaningful if we expand or move a VMA. */
1668 if (!vrm_will_map_new(vrm))
1669 return 0;
1670
1671 old_len = vrm->old_len;
1672 new_len = vrm->new_len;
1673
1674 /*
1675 * !old_len is a special case where an attempt is made to 'duplicate'
1676 * a mapping. This makes no sense for private mappings as it will
1677 * instead create a fresh/new mapping unrelated to the original. This
1678 * is contrary to the basic idea of mremap which creates new mappings
1679 * based on the original. There are no known use cases for this
1680 * behavior. As a result, fail such attempts.
1681 */
1682 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
1683 pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n",
1684 current->comm, current->pid);
1685 return -EINVAL;
1686 }
1687
1688 if ((vrm->flags & MREMAP_DONTUNMAP) &&
1689 (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
1690 return -EINVAL;
1691
1692 /*
1693 * We permit crossing of boundaries for the range being unmapped due to
1694 * a shrink.
1695 */
1696 if (vrm->remap_type == MREMAP_SHRINK)
1697 old_len = new_len;
1698
1699 /*
1700 * We can't remap across the end of VMAs, as another VMA may be
1701 * adjacent:
1702 *
1703 * addr vma->vm_end
1704 * |-----.----------|
1705 * | . |
1706 * |-----.----------|
1707 * .<--------->xxx>
1708 * old_len
1709 *
1710 * We also require that vma->vm_start <= addr < vma->vm_end.
1711 */
1712 if (old_len > vma->vm_end - addr)
1713 return -EFAULT;
1714
1715 if (new_len == old_len)
1716 return 0;
1717
1718 /* We are expanding and the VMA is mlock()'d so we need to populate. */
1719 if (vma->vm_flags & VM_LOCKED)
1720 vrm->populate_expand = true;
1721
1722 /* Need to be careful about a growing mapping */
1723 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
1724 pgoff += vma->vm_pgoff;
1725 if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
1726 return -EINVAL;
1727
1728 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
1729 return -EFAULT;
1730
1731 if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta))
1732 return -EAGAIN;
1733
1734 if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
1735 return -ENOMEM;
1736
1737 return 0;
1738 }
1739
1740 /*
1741 * Are the parameters passed to mremap() valid? If so return 0, otherwise return
1742 * error.
1743 */
1744 static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
1745
1746 {
1747 unsigned long addr = vrm->addr;
1748 unsigned long flags = vrm->flags;
1749
1750 /* Ensure no unexpected flag values. */
1751 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
1752 return -EINVAL;
1753
1754 /* Start address must be page-aligned. */
1755 if (offset_in_page(addr))
1756 return -EINVAL;
1757
1758 /*
1759 * We allow a zero old-len as a special case
1760 * for DOS-emu "duplicate shm area" thing. But
1761 * a zero new-len is nonsensical.
1762 */
1763 if (!vrm->new_len)
1764 return -EINVAL;
1765
1766 /* Is the new length or address silly? */
1767 if (vrm->new_len > TASK_SIZE ||
1768 vrm->new_addr > TASK_SIZE - vrm->new_len)
1769 return -EINVAL;
1770
1771 /* Remainder of checks are for cases with specific new_addr. */
1772 if (!vrm_implies_new_addr(vrm))
1773 return 0;
1774
1775 /* The new address must be page-aligned. */
1776 if (offset_in_page(vrm->new_addr))
1777 return -EINVAL;
1778
1779 /* A fixed address implies a move. */
1780 if (!(flags & MREMAP_MAYMOVE))
1781 return -EINVAL;
1782
1783 /* MREMAP_DONTUNMAP does not allow resizing in the process. */
1784 if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len)
1785 return -EINVAL;
1786
1787 /* Target VMA must not overlap source VMA. */
1788 if (vrm_overlaps(vrm))
1789 return -EINVAL;
1790
1791 /*
1792 * move_vma() needs us to stay 4 maps below the threshold, otherwise
1793 * it will bail out at the very beginning.
1794 * That is a problem if we have already unmapped the regions here
1795 * (new_addr, and old_addr), because userspace will not know the
1796 * state of the vma's after it gets -ENOMEM.
1797 * So, to avoid such a scenario we can pre-compute if the whole
1798 * operation has a high chance of succeeding map-wise.
1799 * The worst-case scenario is when both vma's (new_addr and old_addr) get
1800 * split in 3 before unmapping it.
1801 * That means 2 more maps (1 for each) to the ones we already hold.
1802 * Check whether current map count plus 2 still leads us to 4 maps below
1803 * the threshold, otherwise return -ENOMEM here to be more safe.
1804 */
1805 if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3)
1806 return -ENOMEM;
1807
1808 return 0;
1809 }
1810
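/*
 * Handle the move-only case (MREMAP_FIXED with old_len == new_len), moving
 * every VMA within [addr, addr + old_len) to the new address and splitting
 * VMAs at the range boundaries as required.
 */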
1811 static unsigned long remap_move(struct vma_remap_struct *vrm)
1812 {
1813 struct vm_area_struct *vma;
1814 unsigned long start = vrm->addr;
1815 unsigned long end = vrm->addr + vrm->old_len;
1816 unsigned long new_addr = vrm->new_addr;
1817 bool allowed = true, seen_vma = false;
1818 unsigned long target_addr = new_addr;
1819 unsigned long res = -EFAULT;
1820 unsigned long last_end;
1821 VMA_ITERATOR(vmi, current->mm, start);
1822
1823 /*
1824 * When moving VMAs we allow for batched moves across multiple VMAs,
1825 * with all VMAs in the input range [addr, addr + old_len) being moved
1826 * (and split as necessary).
1827 */
1828 for_each_vma_range(vmi, vma, end) {
1829 /* Account for start, end not aligned with VMA start, end. */
1830 unsigned long addr = max(vma->vm_start, start);
1831 unsigned long len = min(end, vma->vm_end) - addr;
1832 unsigned long offset, res_vma;
1833
1834 if (!allowed)
1835 return -EFAULT;
1836
1837 /* No gap permitted at the start of the range. */
1838 if (!seen_vma && start < vma->vm_start)
1839 return -EFAULT;
1840
1841 /*
1842 * To sensibly move multiple VMAs, accounting for the fact that
1843 * get_unmapped_area() may align even MAP_FIXED moves, we simply
1844 * attempt to move such that the gaps between source VMAs remain
1845 * consistent in destination VMAs, e.g.:
1846 *
1847 * X Y X Y
1848 * <---> <-> <---> <->
1849 * |-------| |-----| |-----| |-------| |-----| |-----|
1850 * | A | | B | | C | ---> | A' | | B' | | C' |
1851 * |-------| |-----| |-----| |-------| |-----| |-----|
1852 * new_addr
1853 *
1854 * So we map B' at A'->vm_end + X, and C' at B'->vm_end + Y.
1855 */
1856 offset = seen_vma ? vma->vm_start - last_end : 0;
1857 last_end = vma->vm_end;
1858
1859 vrm->vma = vma;
1860 vrm->addr = addr;
1861 vrm->new_addr = target_addr + offset;
1862 vrm->old_len = vrm->new_len = len;
1863
1864 allowed = vma_multi_allowed(vma);
1865 if (seen_vma && !allowed)
1866 return -EFAULT;
1867
1868 res_vma = check_prep_vma(vrm);
1869 if (!res_vma)
1870 res_vma = mremap_to(vrm);
1871 if (IS_ERR_VALUE(res_vma))
1872 return res_vma;
1873
1874 if (!seen_vma) {
1875 VM_WARN_ON_ONCE(allowed && res_vma != new_addr);
1876 res = res_vma;
1877 }
1878
1879 /* mmap lock is only dropped on shrink. */
1880 VM_WARN_ON_ONCE(!vrm->mmap_locked);
1881 /* This is a move, no expand should occur. */
1882 VM_WARN_ON_ONCE(vrm->populate_expand);
1883
1884 if (vrm->vmi_needs_invalidate) {
1885 vma_iter_invalidate(&vmi);
1886 vrm->vmi_needs_invalidate = false;
1887 }
1888 seen_vma = true;
1889 target_addr = res_vma + vrm->new_len;
1890 }
1891
1892 return res;
1893 }
1894
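/*
 * Validate parameters, take the mmap write lock, and dispatch to the
 * appropriate move or resize operation, handling uffd notification and any
 * mlock() population required on completion.
 */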
1895 static unsigned long do_mremap(struct vma_remap_struct *vrm)
1896 {
1897 struct mm_struct *mm = current->mm;
1898 unsigned long res;
1899 bool failed;
1900
1901 vrm->old_len = PAGE_ALIGN(vrm->old_len);
1902 vrm->new_len = PAGE_ALIGN(vrm->new_len);
1903
1904 res = check_mremap_params(vrm);
1905 if (res)
1906 return res;
1907
1908 if (mmap_write_lock_killable(mm))
1909 return -EINTR;
1910 vrm->mmap_locked = true;
1911
1912 if (vrm_move_only(vrm)) {
1913 res = remap_move(vrm);
1914 } else {
1915 vrm->vma = vma_lookup(current->mm, vrm->addr);
1916 res = check_prep_vma(vrm);
1917 if (res)
1918 goto out;
1919
1920 /* Actually execute mremap. */
1921 res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
1922 }
1923
1924 out:
1925 failed = IS_ERR_VALUE(res);
1926
1927 if (vrm->mmap_locked)
1928 mmap_write_unlock(mm);
1929
1930 /* VMA mlock'd + was expanded, so populate the expanded region. */
1931 if (!failed && vrm->populate_expand)
1932 mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);
1933
1934 notify_uffd(vrm, failed);
1935 return res;
1936 }
1937
1938 /*
1939 * Expand (or shrink) an existing mapping, potentially moving it at the
1940 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1941 *
1942 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
1943 * This option implies MREMAP_MAYMOVE.
1944 */
1945 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1946 unsigned long, new_len, unsigned long, flags,
1947 unsigned long, new_addr)
1948 {
1949 struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
1950 LIST_HEAD(uf_unmap_early);
1951 LIST_HEAD(uf_unmap);
1952 /*
1953 * There is a deliberate asymmetry here: we strip the pointer tag
1954 * from the old address but leave the new address alone. This is
1955 * for consistency with mmap(), where we prevent the creation of
1956 * aliasing mappings in userspace by leaving the tag bits of the
1957 * mapping address intact. A non-zero tag will cause the subsequent
1958 * range checks to reject the address as invalid.
1959 *
1960 * See Documentation/arch/arm64/tagged-address-abi.rst for more
1961 * information.
1962 */
1963 struct vma_remap_struct vrm = {
1964 .addr = untagged_addr(addr),
1965 .old_len = old_len,
1966 .new_len = new_len,
1967 .flags = flags,
1968 .new_addr = new_addr,
1969
1970 .uf = &uf,
1971 .uf_unmap_early = &uf_unmap_early,
1972 .uf_unmap = &uf_unmap,
1973
1974 .remap_type = MREMAP_INVALID, /* We set later. */
1975 };
1976
1977 return do_mremap(&vrm);
1978 }
1979