xref: /linux/mm/mremap.c (revision 22c55fb9eb92395d999b8404d73e58540d11bdd8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *	mm/mremap.c
4  *
5  *	(C) Copyright 1996 Linus Torvalds
6  *
7  *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
8  *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/mm_inline.h>
13 #include <linux/hugetlb.h>
14 #include <linux/shm.h>
15 #include <linux/ksm.h>
16 #include <linux/mman.h>
17 #include <linux/swap.h>
18 #include <linux/capability.h>
19 #include <linux/fs.h>
20 #include <linux/swapops.h>
21 #include <linux/highmem.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/mmu_notifier.h>
25 #include <linux/uaccess.h>
26 #include <linux/userfaultfd_k.h>
27 #include <linux/mempolicy.h>
28 
29 #include <asm/cacheflush.h>
30 #include <asm/tlb.h>
31 #include <asm/pgalloc.h>
32 
33 #include "internal.h"
34 
35 /* Classify the kind of remap operation being performed. */
36 enum mremap_type {
37 	MREMAP_INVALID,		/* Initial state. */
38 	MREMAP_NO_RESIZE,	/* old_len == new_len, if not moved, do nothing. */
39 	MREMAP_SHRINK,		/* old_len > new_len. */
40 	MREMAP_EXPAND,		/* old_len < new_len. */
41 };
42 
43 /*
44  * Describes a VMA mremap() operation and is threaded throughout it.
45  *
46  * Any of the fields may be mutated by the operation; however, these values
47  * will always accurately reflect the remap (for instance, we may adjust
48  * lengths and delta to account for hugetlb alignment).
49  */
50 struct vma_remap_struct {
51 	/* User-provided state. */
52 	unsigned long addr;	/* User-specified address from which we remap. */
53 	unsigned long old_len;	/* Length of range being remapped. */
54 	unsigned long new_len;	/* Desired new length of mapping. */
55 	const unsigned long flags; /* User-specified MREMAP_* flags. */
56 	unsigned long new_addr;	/* Optionally, desired new address. */
57 
58 	/* uffd state. */
59 	struct vm_userfaultfd_ctx *uf;
60 	struct list_head *uf_unmap_early;
61 	struct list_head *uf_unmap;
62 
63 	/* VMA state, determined in do_mremap(). */
64 	struct vm_area_struct *vma;
65 
66 	/* Internal state, determined in do_mremap(). */
67 	unsigned long delta;		/* Absolute delta of old_len and new_len. */
68 	bool populate_expand;		/* mlock()'d expanded, must populate. */
69 	enum mremap_type remap_type;	/* expand, shrink, etc. */
70 	bool mmap_locked;		/* Is mm currently write-locked? */
71 	unsigned long charged;		/* If VM_ACCOUNT, # pages to account. */
72 	bool vmi_needs_invalidate;	/* Does the VMA iterator need invalidating? */
73 };
74 
75 static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
76 {
77 	pgd_t *pgd;
78 	p4d_t *p4d;
79 	pud_t *pud;
80 
81 	pgd = pgd_offset(mm, addr);
82 	if (pgd_none_or_clear_bad(pgd))
83 		return NULL;
84 
85 	p4d = p4d_offset(pgd, addr);
86 	if (p4d_none_or_clear_bad(p4d))
87 		return NULL;
88 
89 	pud = pud_offset(p4d, addr);
90 	if (pud_none_or_clear_bad(pud))
91 		return NULL;
92 
93 	return pud;
94 }
95 
96 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
97 {
98 	pud_t *pud;
99 	pmd_t *pmd;
100 
101 	pud = get_old_pud(mm, addr);
102 	if (!pud)
103 		return NULL;
104 
105 	pmd = pmd_offset(pud, addr);
106 	if (pmd_none(*pmd))
107 		return NULL;
108 
109 	return pmd;
110 }
111 
112 static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr)
113 {
114 	pgd_t *pgd;
115 	p4d_t *p4d;
116 
117 	pgd = pgd_offset(mm, addr);
118 	p4d = p4d_alloc(mm, pgd, addr);
119 	if (!p4d)
120 		return NULL;
121 
122 	return pud_alloc(mm, p4d, addr);
123 }
124 
125 static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
126 {
127 	pud_t *pud;
128 	pmd_t *pmd;
129 
130 	pud = alloc_new_pud(mm, addr);
131 	if (!pud)
132 		return NULL;
133 
134 	pmd = pmd_alloc(mm, pud, addr);
135 	if (!pmd)
136 		return NULL;
137 
138 	VM_BUG_ON(pmd_trans_huge(*pmd));
139 
140 	return pmd;
141 }
142 
143 static void take_rmap_locks(struct vm_area_struct *vma)
144 {
145 	if (vma->vm_file)
146 		i_mmap_lock_write(vma->vm_file->f_mapping);
147 	if (vma->anon_vma)
148 		anon_vma_lock_write(vma->anon_vma);
149 }
150 
151 static void drop_rmap_locks(struct vm_area_struct *vma)
152 {
153 	if (vma->anon_vma)
154 		anon_vma_unlock_write(vma->anon_vma);
155 	if (vma->vm_file)
156 		i_mmap_unlock_write(vma->vm_file->f_mapping);
157 }
158 
159 static pte_t move_soft_dirty_pte(pte_t pte)
160 {
161 	/*
162 	 * Set the soft dirty bit so userspace
163 	 * can notice that the ptes were moved.
164 	 */
165 #ifdef CONFIG_MEM_SOFT_DIRTY
166 	if (pte_present(pte))
167 		pte = pte_mksoft_dirty(pte);
168 	else if (is_swap_pte(pte))
169 		pte = pte_swp_mksoft_dirty(pte);
170 #endif
171 	return pte;
172 }
173 
174 static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
175 		pte_t *ptep, pte_t pte, int max_nr)
176 {
177 	struct folio *folio;
178 
179 	if (max_nr == 1)
180 		return 1;
181 
182 	/* Avoid expensive folio lookup if we stand no chance of benefit. */
183 	if (pte_batch_hint(ptep, pte) == 1)
184 		return 1;
185 
186 	folio = vm_normal_folio(vma, addr, pte);
187 	if (!folio || !folio_test_large(folio))
188 		return 1;
189 
190 	return folio_pte_batch(folio, ptep, pte, max_nr);
191 }
192 
193 static int move_ptes(struct pagetable_move_control *pmc,
194 		unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
195 {
196 	struct vm_area_struct *vma = pmc->old;
197 	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
198 	struct mm_struct *mm = vma->vm_mm;
199 	pte_t *old_ptep, *new_ptep;
200 	pte_t old_pte, pte;
201 	pmd_t dummy_pmdval;
202 	spinlock_t *old_ptl, *new_ptl;
203 	bool force_flush = false;
204 	unsigned long old_addr = pmc->old_addr;
205 	unsigned long new_addr = pmc->new_addr;
206 	unsigned long old_end = old_addr + extent;
207 	unsigned long len = old_end - old_addr;
208 	int max_nr_ptes;
209 	int nr_ptes;
210 	int err = 0;
211 
212 	/*
213 	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
214 	 * locks to ensure that rmap will always observe either the old or the
215 	 * new ptes. This is the easiest way to avoid races with
216 	 * truncate_pagecache(), page migration, etc...
217 	 *
218 	 * When need_rmap_locks is false, we use other ways to avoid
219 	 * such races:
220 	 *
221 	 * - During exec() shift_arg_pages(), we use a specially tagged vma
222 	 *   which rmap call sites look for using vma_is_temporary_stack().
223 	 *
224 	 * - During mremap(), new_vma is often known to be placed after vma
225 	 *   in rmap traversal order. This ensures rmap will always observe
226 	 *   either the old pte, or the new pte, or both (the page table locks
227 	 *   serialize access to individual ptes, but only rmap traversal
228 	 *   order guarantees that we won't miss both the old and new ptes).
229 	 */
230 	if (pmc->need_rmap_locks)
231 		take_rmap_locks(vma);
232 
233 	/*
234 	 * We don't have to worry about the ordering of src and dst
235 	 * pte locks because exclusive mmap_lock prevents deadlock.
236 	 */
237 	old_ptep = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
238 	if (!old_ptep) {
239 		err = -EAGAIN;
240 		goto out;
241 	}
242 	/*
243 	 * Now new_pte is none, so the hpage_collapse_scan_file() path cannot find
244 	 * this page table by traversing file->f_mapping, so there is no concurrency
245 	 * with retract_page_tables(). In addition, we already hold the exclusive
246 	 * mmap_lock, so this new_pte page is stable and there is no need to get
247 	 * pmdval and do a pmd_same() check.
248 	 */
249 	new_ptep = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
250 					   &new_ptl);
251 	if (!new_ptep) {
252 		pte_unmap_unlock(old_ptep, old_ptl);
253 		err = -EAGAIN;
254 		goto out;
255 	}
256 	if (new_ptl != old_ptl)
257 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
258 	flush_tlb_batched_pending(vma->vm_mm);
259 	arch_enter_lazy_mmu_mode();
260 
261 	for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
262 		new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
263 		VM_WARN_ON_ONCE(!pte_none(*new_ptep));
264 
265 		nr_ptes = 1;
266 		max_nr_ptes = (old_end - old_addr) >> PAGE_SHIFT;
267 		old_pte = ptep_get(old_ptep);
268 		if (pte_none(old_pte))
269 			continue;
270 
271 		/*
272 		 * If we are remapping a valid PTE, make sure
273 		 * to flush TLB before we drop the PTL for the
274 		 * PTE.
275 		 *
276 		 * NOTE! Both old and new PTL matter: the old one
277 		 * for racing with folio_mkclean(), the new one to
278 		 * make sure the physical page stays valid until
279 		 * the TLB entry for the old mapping has been
280 		 * flushed.
281 		 */
282 		if (pte_present(old_pte)) {
283 			nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
284 							 old_pte, max_nr_ptes);
285 			force_flush = true;
286 		}
287 		pte = get_and_clear_ptes(mm, old_addr, old_ptep, nr_ptes);
288 		pte = move_pte(pte, old_addr, new_addr);
289 		pte = move_soft_dirty_pte(pte);
290 
291 		if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
292 			pte_clear(mm, new_addr, new_ptep);
293 		else {
294 			if (need_clear_uffd_wp) {
295 				if (pte_present(pte))
296 					pte = pte_clear_uffd_wp(pte);
297 				else if (is_swap_pte(pte))
298 					pte = pte_swp_clear_uffd_wp(pte);
299 			}
300 			set_ptes(mm, new_addr, new_ptep, pte, nr_ptes);
301 		}
302 	}
303 
304 	arch_leave_lazy_mmu_mode();
305 	if (force_flush)
306 		flush_tlb_range(vma, old_end - len, old_end);
307 	if (new_ptl != old_ptl)
308 		spin_unlock(new_ptl);
309 	pte_unmap(new_ptep - 1);
310 	pte_unmap_unlock(old_ptep - 1, old_ptl);
311 out:
312 	if (pmc->need_rmap_locks)
313 		drop_rmap_locks(vma);
314 	return err;
315 }
316 
317 #ifndef arch_supports_page_table_move
318 #define arch_supports_page_table_move arch_supports_page_table_move
319 static inline bool arch_supports_page_table_move(void)
320 {
321 	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
322 		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
323 }
324 #endif
325 
326 #ifdef CONFIG_HAVE_MOVE_PMD
327 static bool move_normal_pmd(struct pagetable_move_control *pmc,
328 			pmd_t *old_pmd, pmd_t *new_pmd)
329 {
330 	spinlock_t *old_ptl, *new_ptl;
331 	struct vm_area_struct *vma = pmc->old;
332 	struct mm_struct *mm = vma->vm_mm;
333 	bool res = false;
334 	pmd_t pmd;
335 
336 	if (!arch_supports_page_table_move())
337 		return false;
338 	/*
339 	 * The destination pmd shouldn't be established, free_pgtables()
340 	 * should have released it.
341 	 *
342 	 * However, there's a case during execve() where we use mremap
343 	 * to move the initial stack, and in that case the target area
344 	 * may overlap the source area (always moving down).
345 	 *
346 	 * If everything is PMD-aligned, that works fine, as moving
347 	 * each pmd down will clear the source pmd. But if we first
348 	 * have a few 4kB-only pages that get moved down, and then
349 	 * hit the "now the rest is PMD-aligned, let's do everything
350 	 * one pmd at a time", we will still have the old (now empty
351 	 * of any 4kB pages, but still there) PMD in the page table
352 	 * tree.
353 	 *
354 	 * Warn on it once - because we really should try to figure
355 	 * out how to do this better - but then say "I won't move
356 	 * this pmd".
357 	 *
358 	 * One alternative might be to just unmap the target pmd at
359 	 * this point, and verify that it really is empty. We'll see.
360 	 */
361 	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
362 		return false;
363 
364 	/*
365 	 * If this pmd belongs to a uffd vma with remap events disabled, we need to
366 	 * ensure that the uffd-wp state is cleared from all pgtables. This means
367 	 * recursing into lower page tables in move_page_tables(), and we can reuse
368 	 * the existing code if we simply treat the entry as "not moved".
369 	 */
370 	if (vma_has_uffd_without_event_remap(vma))
371 		return false;
372 
373 	/*
374 	 * We don't have to worry about the ordering of src and dst
375 	 * ptlocks because exclusive mmap_lock prevents deadlock.
376 	 */
377 	old_ptl = pmd_lock(mm, old_pmd);
378 	new_ptl = pmd_lockptr(mm, new_pmd);
379 	if (new_ptl != old_ptl)
380 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
381 
382 	pmd = *old_pmd;
383 
384 	/* Racing with collapse? */
385 	if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
386 		goto out_unlock;
387 	/* Clear the pmd */
388 	pmd_clear(old_pmd);
389 	res = true;
390 
391 	VM_BUG_ON(!pmd_none(*new_pmd));
392 
393 	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
394 	flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
395 out_unlock:
396 	if (new_ptl != old_ptl)
397 		spin_unlock(new_ptl);
398 	spin_unlock(old_ptl);
399 
400 	return res;
401 }
402 #else
403 static inline bool move_normal_pmd(struct pagetable_move_control *pmc,
404 		pmd_t *old_pmd, pmd_t *new_pmd)
405 {
406 	return false;
407 }
408 #endif
409 
410 #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
411 static bool move_normal_pud(struct pagetable_move_control *pmc,
412 		pud_t *old_pud, pud_t *new_pud)
413 {
414 	spinlock_t *old_ptl, *new_ptl;
415 	struct vm_area_struct *vma = pmc->old;
416 	struct mm_struct *mm = vma->vm_mm;
417 	pud_t pud;
418 
419 	if (!arch_supports_page_table_move())
420 		return false;
421 	/*
422 	 * The destination pud shouldn't be established, free_pgtables()
423 	 * should have released it.
424 	 */
425 	if (WARN_ON_ONCE(!pud_none(*new_pud)))
426 		return false;
427 
428 	/*
429 	 * If this pud belongs to a uffd vma with remap events disabled, we need to
430 	 * ensure that the uffd-wp state is cleared from all pgtables. This means
431 	 * recursing into lower page tables in move_page_tables(), and we can reuse
432 	 * the existing code if we simply treat the entry as "not moved".
433 	 */
434 	if (vma_has_uffd_without_event_remap(vma))
435 		return false;
436 
437 	/*
438 	 * We don't have to worry about the ordering of src and dst
439 	 * ptlocks because exclusive mmap_lock prevents deadlock.
440 	 */
441 	old_ptl = pud_lock(mm, old_pud);
442 	new_ptl = pud_lockptr(mm, new_pud);
443 	if (new_ptl != old_ptl)
444 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
445 
446 	/* Clear the pud */
447 	pud = *old_pud;
448 	pud_clear(old_pud);
449 
450 	VM_BUG_ON(!pud_none(*new_pud));
451 
452 	pud_populate(mm, new_pud, pud_pgtable(pud));
453 	flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
454 	if (new_ptl != old_ptl)
455 		spin_unlock(new_ptl);
456 	spin_unlock(old_ptl);
457 
458 	return true;
459 }
460 #else
461 static inline bool move_normal_pud(struct pagetable_move_control *pmc,
462 		pud_t *old_pud, pud_t *new_pud)
463 {
464 	return false;
465 }
466 #endif
467 
468 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
469 static bool move_huge_pud(struct pagetable_move_control *pmc,
470 		pud_t *old_pud, pud_t *new_pud)
471 {
472 	spinlock_t *old_ptl, *new_ptl;
473 	struct vm_area_struct *vma = pmc->old;
474 	struct mm_struct *mm = vma->vm_mm;
475 	pud_t pud;
476 
477 	/*
478 	 * The destination pud shouldn't be established, free_pgtables()
479 	 * should have released it.
480 	 */
481 	if (WARN_ON_ONCE(!pud_none(*new_pud)))
482 		return false;
483 
484 	/*
485 	 * We don't have to worry about the ordering of src and dst
486 	 * ptlocks because exclusive mmap_lock prevents deadlock.
487 	 */
488 	old_ptl = pud_lock(mm, old_pud);
489 	new_ptl = pud_lockptr(mm, new_pud);
490 	if (new_ptl != old_ptl)
491 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
492 
493 	/* Clear the pud */
494 	pud = *old_pud;
495 	pud_clear(old_pud);
496 
497 	VM_BUG_ON(!pud_none(*new_pud));
498 
499 	/* Set the new pud */
500 	/* Mark soft_dirty when we add PUD-level soft dirty support. */
501 	set_pud_at(mm, pmc->new_addr, new_pud, pud);
502 	flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);
503 	if (new_ptl != old_ptl)
504 		spin_unlock(new_ptl);
505 	spin_unlock(old_ptl);
506 
507 	return true;
508 }
509 #else
510 static bool move_huge_pud(struct pagetable_move_control *pmc,
511 		pud_t *old_pud, pud_t *new_pud)
512 
513 {
514 	WARN_ON_ONCE(1);
515 	return false;
516 
517 }
518 #endif
519 
520 enum pgt_entry {
521 	NORMAL_PMD,
522 	HPAGE_PMD,
523 	NORMAL_PUD,
524 	HPAGE_PUD,
525 };
526 
527 /*
528  * Returns an extent of the corresponding size for the pgt_entry specified if
529  * valid. Else returns a smaller extent bounded by the end of the source and
530  * destination pgt_entry.
531  */
532 static __always_inline unsigned long get_extent(enum pgt_entry entry,
533 						struct pagetable_move_control *pmc)
534 {
535 	unsigned long next, extent, mask, size;
536 	unsigned long old_addr = pmc->old_addr;
537 	unsigned long old_end = pmc->old_end;
538 	unsigned long new_addr = pmc->new_addr;
539 
540 	switch (entry) {
541 	case HPAGE_PMD:
542 	case NORMAL_PMD:
543 		mask = PMD_MASK;
544 		size = PMD_SIZE;
545 		break;
546 	case HPAGE_PUD:
547 	case NORMAL_PUD:
548 		mask = PUD_MASK;
549 		size = PUD_SIZE;
550 		break;
551 	default:
552 		BUILD_BUG();
553 		break;
554 	}
555 
556 	next = (old_addr + size) & mask;
557 	/* even if next overflowed, extent below will be ok */
558 	extent = next - old_addr;
559 	if (extent > old_end - old_addr)
560 		extent = old_end - old_addr;
561 	next = (new_addr + size) & mask;
562 	if (extent > next - new_addr)
563 		extent = next - new_addr;
564 	return extent;
565 }
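/*
 * A worked example of the clamping above, assuming PMD_SIZE == 2 MiB (as on
 * x86-64 with 4 KiB pages); the addresses are illustrative only:
 *
 *   get_extent(NORMAL_PMD, pmc) with
 *     pmc->old_addr == 0x4ff000, pmc->old_end == 0x900000,
 *     pmc->new_addr == 0x7f0000003000
 *
 *   next   = (0x4ff000 + 0x200000) & PMD_MASK  = 0x600000
 *   extent = 0x600000 - 0x4ff000               = 0x101000
 *   old_end - old_addr == 0x401000              -> no clamp
 *   next (new side) == 0x7f0000200000,
 *   next - new_addr == 0x1fd000                 -> no clamp
 *
 * So the caller moves 0x101000 bytes this iteration, i.e. exactly up to the
 * next PMD boundary of the source.
 */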
566 
567 /*
568  * Should move_pgt_entry() acquire the rmap locks? This is either expressed in
569  * the PMC, or overridden in the case of normal, larger page tables.
570  */
571 static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
572 				   enum pgt_entry entry)
573 {
574 	switch (entry) {
575 	case NORMAL_PMD:
576 	case NORMAL_PUD:
577 		return true;
578 	default:
579 		return pmc->need_rmap_locks;
580 	}
581 }
582 
583 /*
584  * Attempts to speed up the move by moving the entry at the level corresponding
585  * to pgt_entry. Returns true if the move was successful, else false.
586  */
587 static bool move_pgt_entry(struct pagetable_move_control *pmc,
588 			   enum pgt_entry entry, void *old_entry, void *new_entry)
589 {
590 	bool moved = false;
591 	bool need_rmap_locks = should_take_rmap_locks(pmc, entry);
592 
593 	/* See comment in move_ptes() */
594 	if (need_rmap_locks)
595 		take_rmap_locks(pmc->old);
596 
597 	switch (entry) {
598 	case NORMAL_PMD:
599 		moved = move_normal_pmd(pmc, old_entry, new_entry);
600 		break;
601 	case NORMAL_PUD:
602 		moved = move_normal_pud(pmc, old_entry, new_entry);
603 		break;
604 	case HPAGE_PMD:
605 		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
606 			move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry,
607 				      new_entry);
608 		break;
609 	case HPAGE_PUD:
610 		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
611 			move_huge_pud(pmc, old_entry, new_entry);
612 		break;
613 
614 	default:
615 		WARN_ON_ONCE(1);
616 		break;
617 	}
618 
619 	if (need_rmap_locks)
620 		drop_rmap_locks(pmc->old);
621 
622 	return moved;
623 }
624 
625 /*
626  * A helper to check if aligning down is OK. The aligned address should fall
627  * on *no mapping*. For the stack moving down, that's a special move within
628  * the VMA that is created to span the source and destination of the move,
629  * so we make an exception for it.
630  */
631 static bool can_align_down(struct pagetable_move_control *pmc,
632 			   struct vm_area_struct *vma, unsigned long addr_to_align,
633 			   unsigned long mask)
634 {
635 	unsigned long addr_masked = addr_to_align & mask;
636 
637 	/*
638 	 * If @addr_to_align of either source or destination is not the beginning
639 	 * of the corresponding VMA, we can't align down or we will destroy part
640 	 * of the current mapping.
641 	 */
642 	if (!pmc->for_stack && vma->vm_start != addr_to_align)
643 		return false;
644 
645 	/* In the stack case we explicitly permit in-VMA alignment. */
646 	if (pmc->for_stack && addr_masked >= vma->vm_start)
647 		return true;
648 
649 	/*
650 	 * Make sure the realignment doesn't cause the address to fall on an
651 	 * existing mapping.
652 	 */
653 	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
654 }
655 
656 /*
657  * Determine if we are in fact able to realign for efficiency to a higher page
658  * table boundary.
659  */
660 static bool can_realign_addr(struct pagetable_move_control *pmc,
661 			     unsigned long pagetable_mask)
662 {
663 	unsigned long align_mask = ~pagetable_mask;
664 	unsigned long old_align = pmc->old_addr & align_mask;
665 	unsigned long new_align = pmc->new_addr & align_mask;
666 	unsigned long pagetable_size = align_mask + 1;
667 	unsigned long old_align_next = pagetable_size - old_align;
668 
669 	/*
670 	 * We don't want to have to go hunting for VMAs from the end of the old
671 	 * VMA to the next page table boundary, and we also want to make sure the
672 	 * operation is worthwhile.
673 	 *
674 	 * So ensure that we only perform this realignment if the end of the
675 	 * range being copied reaches or crosses the page table boundary.
676 	 *
677 	 * boundary                        boundary
678 	 *    .<- old_align ->                .
679 	 *    .              |----------------.-----------|
680 	 *    .              |          vma   .           |
681 	 *    .              |----------------.-----------|
682 	 *    .              <----------------.----------->
683 	 *    .                          len_in
684 	 *    <------------------------------->
685 	 *    .         pagetable_size        .
686 	 *    .              <---------------->
687 	 *    .                old_align_next .
688 	 */
689 	if (pmc->len_in < old_align_next)
690 		return false;
691 
692 	/* Skip if the addresses are already aligned. */
693 	if (old_align == 0)
694 		return false;
695 
696 	/* Only realign if the new and old addresses are mutually aligned. */
697 	if (old_align != new_align)
698 		return false;
699 
700 	/* Ensure realignment doesn't cause overlap with existing mappings. */
701 	if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) ||
702 	    !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask))
703 		return false;
704 
705 	return true;
706 }
707 
708 /*
709  * Opportunistically realign to specified boundary for faster copy.
710  *
711  * Consider an mremap() of a VMA with page table boundaries as below, and no
712  * preceding VMAs from the lower page table boundary to the start of the VMA,
713  * with the end of the range reaching or crossing the page table boundary.
714  *
715  *   boundary                        boundary
716  *      .              |----------------.-----------|
717  *      .              |          vma   .           |
718  *      .              |----------------.-----------|
719  *      .         pmc->old_addr         .      pmc->old_end
720  *      .              <---------------------------->
721  *      .                  move these page tables
722  *
723  * If we proceed with moving page tables in this scenario, we will have a lot of
724  * work to do traversing old page tables and establishing new ones in the
725  * destination across multiple lower level page tables.
726  *
727  * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the
728  * page table boundary, so we can simply copy a single page table entry for the
729  * aligned portion of the VMA instead:
730  *
731  *   boundary                        boundary
732  *      .              |----------------.-----------|
733  *      .              |          vma   .           |
734  *      .              |----------------.-----------|
735  * pmc->old_addr                        .      pmc->old_end
736  *      <------------------------------------------->
737  *      .           move these page tables
738  */
739 static void try_realign_addr(struct pagetable_move_control *pmc,
740 			     unsigned long pagetable_mask)
741 {
742 
743 	if (!can_realign_addr(pmc, pagetable_mask))
744 		return;
745 
746 	/*
747 	 * Simply align to page table boundaries. Note that we do NOT update the
748 	 * pmc->old_end value, and since the move_page_tables() operation spans
749 	 * from [old_addr, old_end) (offsetting new_addr as it is performed),
750 	 * this simply changes the start of the copy, not the end.
751 	 */
752 	pmc->old_addr &= pagetable_mask;
753 	pmc->new_addr &= pagetable_mask;
754 }
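/*
 * A concrete, illustrative instance of the realignment above, again assuming
 * PMD_SIZE == 2 MiB:
 *
 *   pmc->old_addr == 0x2a01000, pmc->new_addr == 0x7f0000401000,
 *   pmc->len_in   == 0x400000 (4 MiB)
 *
 * Both addresses share the same offset (0x1000) within their PMD and the copy
 * length crosses the next PMD boundary (0x1ff000 bytes away), so, provided
 * can_align_down() permits it on both sides (no existing mapping may fall in
 * [0x2a00000, 0x2a01000) or [0x7f0000400000, 0x7f0000401000)), the addresses
 * are masked down to 0x2a00000 and 0x7f0000400000. The first PMD can then be
 * moved wholesale by move_normal_pmd() instead of copying 511 individual PTEs.
 */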
755 
756 /* Is the page table move operation done? */
757 static bool pmc_done(struct pagetable_move_control *pmc)
758 {
759 	return pmc->old_addr >= pmc->old_end;
760 }
761 
762 /* Advance to the next page table, offset by extent bytes. */
763 static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent)
764 {
765 	pmc->old_addr += extent;
766 	pmc->new_addr += extent;
767 }
768 
769 /*
770  * Determine how many bytes in the specified input range have had their page
771  * tables moved so far.
772  */
773 static unsigned long pmc_progress(struct pagetable_move_control *pmc)
774 {
775 	unsigned long orig_old_addr = pmc->old_end - pmc->len_in;
776 	unsigned long old_addr = pmc->old_addr;
777 
778 	/*
779 	 * Prevent negative return values when {old,new}_addr was realigned but
780 	 * we broke out of the loop in move_page_tables() for the first PMD
781 	 * itself.
782 	 */
783 	return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;
784 }
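/*
 * Example of the clamp above: if try_realign_addr() moved old_addr from
 * 0x2a01000 down to 0x2a00000 and move_page_tables() then bailed out on the
 * very first PMD (say pte_alloc() failed), old_addr is still below
 * orig_old_addr (old_end - len_in == 0x2a01000), so we report 0 bytes of
 * progress rather than a huge unsigned value.
 */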
785 
786 unsigned long move_page_tables(struct pagetable_move_control *pmc)
787 {
788 	unsigned long extent;
789 	struct mmu_notifier_range range;
790 	pmd_t *old_pmd, *new_pmd;
791 	pud_t *old_pud, *new_pud;
792 	struct mm_struct *mm = pmc->old->vm_mm;
793 
794 	if (!pmc->len_in)
795 		return 0;
796 
797 	if (is_vm_hugetlb_page(pmc->old))
798 		return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr,
799 						pmc->new_addr, pmc->len_in);
800 
801 	/*
802 	 * If possible, realign addresses to PMD boundary for faster copy.
803 	 * Only realign if the mremap copying hits a PMD boundary.
804 	 */
805 	try_realign_addr(pmc, PMD_MASK);
806 
807 	flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end);
808 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm,
809 				pmc->old_addr, pmc->old_end);
810 	mmu_notifier_invalidate_range_start(&range);
811 
812 	for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
813 		cond_resched();
814 		/*
815 		 * If extent is PUD-sized try to speed up the move by moving at the
816 		 * PUD level if possible.
817 		 */
818 		extent = get_extent(NORMAL_PUD, pmc);
819 
820 		old_pud = get_old_pud(mm, pmc->old_addr);
821 		if (!old_pud)
822 			continue;
823 		new_pud = alloc_new_pud(mm, pmc->new_addr);
824 		if (!new_pud)
825 			break;
826 		if (pud_trans_huge(*old_pud)) {
827 			if (extent == HPAGE_PUD_SIZE) {
828 				move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
829 				/* We ignore and continue on error? */
830 				continue;
831 			}
832 		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
833 			if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud))
834 				continue;
835 		}
836 
837 		extent = get_extent(NORMAL_PMD, pmc);
838 		old_pmd = get_old_pmd(mm, pmc->old_addr);
839 		if (!old_pmd)
840 			continue;
841 		new_pmd = alloc_new_pmd(mm, pmc->new_addr);
842 		if (!new_pmd)
843 			break;
844 again:
845 		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
846 			if (extent == HPAGE_PMD_SIZE &&
847 			    move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
848 				continue;
849 			split_huge_pmd(pmc->old, old_pmd, pmc->old_addr);
850 		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
851 			   extent == PMD_SIZE) {
852 			/*
853 			 * If the extent is PMD-sized, try to speed the move by
854 			 * moving at the PMD level if possible.
855 			 */
856 			if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd))
857 				continue;
858 		}
859 		if (pmd_none(*old_pmd))
860 			continue;
861 		if (pte_alloc(pmc->new->vm_mm, new_pmd))
862 			break;
863 		if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0)
864 			goto again;
865 	}
866 
867 	mmu_notifier_invalidate_range_end(&range);
868 
869 	return pmc_progress(pmc);
870 }
871 
872 /* Set vrm->delta to the difference in VMA size specified by user. */
873 static void vrm_set_delta(struct vma_remap_struct *vrm)
874 {
875 	vrm->delta = abs_diff(vrm->old_len, vrm->new_len);
876 }
877 
878 /* Determine what kind of remap this is - shrink, expand or no resize at all. */
879 static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm)
880 {
881 	if (vrm->delta == 0)
882 		return MREMAP_NO_RESIZE;
883 
884 	if (vrm->old_len > vrm->new_len)
885 		return MREMAP_SHRINK;
886 
887 	return MREMAP_EXPAND;
888 }
889 
890 /*
891  * When moving a VMA to vrm->new_addr, does this result in the new and old VMAs
892  * overlapping?
893  */
894 static bool vrm_overlaps(struct vma_remap_struct *vrm)
895 {
896 	unsigned long start_old = vrm->addr;
897 	unsigned long start_new = vrm->new_addr;
898 	unsigned long end_old = vrm->addr + vrm->old_len;
899 	unsigned long end_new = vrm->new_addr + vrm->new_len;
900 
901 	/*
902 	 * start_old    end_old
903 	 *     |-----------|
904 	 *     |           |
905 	 *     |-----------|
906 	 *             |-------------|
907 	 *             |             |
908 	 *             |-------------|
909 	 *         start_new      end_new
910 	 */
911 	if (end_old > start_new && end_new > start_old)
912 		return true;
913 
914 	return false;
915 }
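/*
 * For example (half-open intervals): old range [0x10000, 0x15000) and new
 * range [0x14000, 0x17000) overlap, since 0x15000 > 0x14000 and
 * 0x17000 > 0x10000, so an MREMAP_FIXED request like this is rejected by
 * check_mremap_params(). With new_addr == 0x15000 the ranges are merely
 * adjacent: end_old > start_new is false and no overlap is reported.
 */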
916 
917 /*
918  * Will a new address definitely be assigned? This is the case either if the
919  * user specifies it via MREMAP_FIXED, or if MREMAP_DONTUNMAP is used,
920  * indicating we will always determine a target address.
921  */
922 static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
923 {
924 	return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP);
925 }
926 
927 /*
928  * Find an unmapped area for the requested vrm->new_addr.
929  *
930  * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only
931  * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to
932  * mmap(), otherwise this is equivalent to mmap() specifying a NULL address.
933  *
934  * Returns 0 on success (with vrm->new_addr updated), or an error code upon
935  * failure.
936  */
937 static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm)
938 {
939 	struct vm_area_struct *vma = vrm->vma;
940 	unsigned long map_flags = 0;
941 	/* Page Offset _into_ the VMA. */
942 	pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT;
943 	pgoff_t pgoff = vma->vm_pgoff + internal_pgoff;
944 	unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0;
945 	unsigned long res;
946 
947 	if (vrm->flags & MREMAP_FIXED)
948 		map_flags |= MAP_FIXED;
949 	if (vma->vm_flags & VM_MAYSHARE)
950 		map_flags |= MAP_SHARED;
951 
952 	res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff,
953 				map_flags);
954 	if (IS_ERR_VALUE(res))
955 		return res;
956 
957 	vrm->new_addr = res;
958 	return 0;
959 }
960 
961 /*
962  * Keep track of pages which have been added to the memory mapping. If the VMA
963  * is accounted, also check to see if there is sufficient memory.
964  *
965  * Returns true on success, false if insufficient memory to charge.
966  */
967 static bool vrm_calc_charge(struct vma_remap_struct *vrm)
968 {
969 	unsigned long charged;
970 
971 	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
972 		return true;
973 
974 	/*
975 	 * If we don't unmap the old mapping, then we account the entirety of
976 	 * the length of the new one. Otherwise it's just the delta in size.
977 	 */
978 	if (vrm->flags & MREMAP_DONTUNMAP)
979 		charged = vrm->new_len >> PAGE_SHIFT;
980 	else
981 		charged = vrm->delta >> PAGE_SHIFT;
982 
983 
984 	/* This accounts 'charged' pages of memory. */
985 	if (security_vm_enough_memory_mm(current->mm, charged))
986 		return false;
987 
988 	vrm->charged = charged;
989 	return true;
990 }
991 
992 /*
993  * An error has occurred, so we will not be using vrm->charged memory. Unaccount
994  * this memory if the VMA is accounted.
995  */
996 static void vrm_uncharge(struct vma_remap_struct *vrm)
997 {
998 	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
999 		return;
1000 
1001 	vm_unacct_memory(vrm->charged);
1002 	vrm->charged = 0;
1003 }
1004 
1005 /*
1006  * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to
1007  * account for 'bytes' memory used, and if locked, indicate this in the VRM so
1008  * we can handle this correctly later.
1009  */
1010 static void vrm_stat_account(struct vma_remap_struct *vrm,
1011 			     unsigned long bytes)
1012 {
1013 	unsigned long pages = bytes >> PAGE_SHIFT;
1014 	struct mm_struct *mm = current->mm;
1015 	struct vm_area_struct *vma = vrm->vma;
1016 
1017 	vm_stat_account(mm, vma->vm_flags, pages);
1018 	if (vma->vm_flags & VM_LOCKED)
1019 		mm->locked_vm += pages;
1020 }
1021 
1022 /*
1023  * Perform checks before attempting to write a VMA prior to it being
1024  * moved.
1025  */
1026 static unsigned long prep_move_vma(struct vma_remap_struct *vrm)
1027 {
1028 	unsigned long err = 0;
1029 	struct vm_area_struct *vma = vrm->vma;
1030 	unsigned long old_addr = vrm->addr;
1031 	unsigned long old_len = vrm->old_len;
1032 	vm_flags_t dummy = vma->vm_flags;
1033 
1034 	/*
1035 	 * We'd prefer to avoid failure later on in do_munmap(),
1036 	 * which may split one vma into three before unmapping.
1037 	 */
1038 	if (current->mm->map_count >= sysctl_max_map_count - 3)
1039 		return -ENOMEM;
1040 
1041 	if (vma->vm_ops && vma->vm_ops->may_split) {
1042 		if (vma->vm_start != old_addr)
1043 			err = vma->vm_ops->may_split(vma, old_addr);
1044 		if (!err && vma->vm_end != old_addr + old_len)
1045 			err = vma->vm_ops->may_split(vma, old_addr + old_len);
1046 		if (err)
1047 			return err;
1048 	}
1049 
1050 	/*
1051 	 * Advise KSM to break any KSM pages in the area to be moved:
1052 	 * it would be confusing if they were to turn up at the new
1053 	 * location, where they happen to coincide with different KSM
1054 	 * pages recently unmapped.  But leave vma->vm_flags as it was,
1055 	 * so KSM can come around to merge on vma and new_vma afterwards.
1056 	 */
1057 	err = ksm_madvise(vma, old_addr, old_addr + old_len,
1058 			  MADV_UNMERGEABLE, &dummy);
1059 	if (err)
1060 		return err;
1061 
1062 	return 0;
1063 }
1064 
1065 /*
1066  * Unmap the source VMA for a VMA move, turning it from a copy to a move, being
1067  * careful to ensure we do not underflow the memory account while doing so if
1068  * this is an accountable move.
1069  *
1070  * This is best effort, if we fail to unmap then we simply try to correct
1071  * accounting and exit.
1072  */
1073 static void unmap_source_vma(struct vma_remap_struct *vrm)
1074 {
1075 	struct mm_struct *mm = current->mm;
1076 	unsigned long addr = vrm->addr;
1077 	unsigned long len = vrm->old_len;
1078 	struct vm_area_struct *vma = vrm->vma;
1079 	VMA_ITERATOR(vmi, mm, addr);
1080 	int err;
1081 	unsigned long vm_start;
1082 	unsigned long vm_end;
1083 	/*
1084 	 * It might seem odd that we check for MREMAP_DONTUNMAP here, given this
1085 	 * function implies that we unmap the original VMA, which seems
1086 	 * contradictory.
1087 	 *
1088 	 * However, this occurs when this operation was attempted and an error
1089 	 * arose, in which case we _do_ wish to unmap the _new_ VMA, which means
1090 	 * we actually _do_ want it to be unaccounted.
1091 	 */
1092 	bool accountable_move = (vma->vm_flags & VM_ACCOUNT) &&
1093 		!(vrm->flags & MREMAP_DONTUNMAP);
1094 
1095 	/*
1096 	 * So we perform a trick here to prevent incorrect accounting. Any merge
1097 	 * or new VMA allocation performed in copy_vma() does not adjust
1098 	 * accounting, it is expected that callers handle this.
1099 	 *
1100 	 * And indeed we already have done so, accounting appropriately in both
1101 	 * cases in vrm_calc_charge().
1102 	 *
1103 	 * However, when we unmap the existing VMA (to effect the move), this
1104 	 * code will, if the VMA has VM_ACCOUNT set, attempt to unaccount
1105 	 * removed pages.
1106 	 *
1107 	 * To avoid this we temporarily clear this flag, reinstating on any
1108 	 * portions of the original VMA that remain.
1109 	 */
1110 	if (accountable_move) {
1111 		vm_flags_clear(vma, VM_ACCOUNT);
1112 		/* We are about to split vma, so store the start/end. */
1113 		vm_start = vma->vm_start;
1114 		vm_end = vma->vm_end;
1115 	}
1116 
1117 	err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false);
1118 	vrm->vma = NULL; /* Invalidated. */
1119 	vrm->vmi_needs_invalidate = true;
1120 	if (err) {
1121 		/* OOM: unable to split vma, just get accounts right */
1122 		vm_acct_memory(len >> PAGE_SHIFT);
1123 		return;
1124 	}
1125 
1126 	/*
1127 	 * If we mremap() from a VMA like this:
1128 	 *
1129 	 *    addr  end
1130 	 *     |     |
1131 	 *     v     v
1132 	 * |-------------|
1133 	 * |             |
1134 	 * |-------------|
1135 	 *
1136 	 * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above
1137 	 * we'll end up with:
1138 	 *
1139 	 *    addr  end
1140 	 *     |     |
1141 	 *     v     v
1142 	 * |---|     |---|
1143 	 * | A |     | B |
1144 	 * |---|     |---|
1145 	 *
1146 	 * The VMI is still pointing at addr, so vma_prev() will give us A, and
1147 	 * a subsequent or lone vma_next() will give us B.
1148 	 *
1149 	 * do_vmi_munmap() will have restored the VMI back to addr.
1150 	 */
1151 	if (accountable_move) {
1152 		unsigned long end = addr + len;
1153 
1154 		if (vm_start < addr) {
1155 			struct vm_area_struct *prev = vma_prev(&vmi);
1156 
1157 			vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */
1158 		}
1159 
1160 		if (vm_end > end) {
1161 			struct vm_area_struct *next = vma_next(&vmi);
1162 
1163 			vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. */
1164 		}
1165 	}
1166 }
1167 
1168 /*
1169  * Copy vrm->vma over to vrm->new_addr possibly adjusting size as part of the
1170  * process. Additionally handle an error occurring on moving of page tables,
1171  * where we reset vrm state to cause unmapping of the new VMA.
1172  *
1173  * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an
1174  * error code.
1175  */
1176 static int copy_vma_and_data(struct vma_remap_struct *vrm,
1177 			     struct vm_area_struct **new_vma_ptr)
1178 {
1179 	unsigned long internal_offset = vrm->addr - vrm->vma->vm_start;
1180 	unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT;
1181 	unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff;
1182 	unsigned long moved_len;
1183 	struct vm_area_struct *vma = vrm->vma;
1184 	struct vm_area_struct *new_vma;
1185 	int err = 0;
1186 	PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len);
1187 
1188 	new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff,
1189 			   &pmc.need_rmap_locks);
1190 	if (!new_vma) {
1191 		vrm_uncharge(vrm);
1192 		*new_vma_ptr = NULL;
1193 		return -ENOMEM;
1194 	}
1195 	/* By merging, we may have invalidated any iterator in use. */
1196 	if (vma != vrm->vma)
1197 		vrm->vmi_needs_invalidate = true;
1198 
1199 	vrm->vma = vma;
1200 	pmc.old = vma;
1201 	pmc.new = new_vma;
1202 
1203 	moved_len = move_page_tables(&pmc);
1204 	if (moved_len < vrm->old_len)
1205 		err = -ENOMEM;
1206 	else if (vma->vm_ops && vma->vm_ops->mremap)
1207 		err = vma->vm_ops->mremap(new_vma);
1208 
1209 	if (unlikely(err)) {
1210 		PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr,
1211 			       vrm->addr, moved_len);
1212 
1213 		/*
1214 		 * On error, move entries back from new area to old,
1215 		 * which will succeed since page tables still there,
1216 		 * and then proceed to unmap new area instead of old.
1217 		 */
1218 		pmc_revert.need_rmap_locks = true;
1219 		move_page_tables(&pmc_revert);
1220 
1221 		vrm->vma = new_vma;
1222 		vrm->old_len = vrm->new_len;
1223 		vrm->addr = vrm->new_addr;
1224 	} else {
1225 		mremap_userfaultfd_prep(new_vma, vrm->uf);
1226 	}
1227 
1228 	fixup_hugetlb_reservations(vma);
1229 
1230 	*new_vma_ptr = new_vma;
1231 	return err;
1232 }
1233 
1234 /*
1235  * Perform final tasks for the MREMAP_DONTUNMAP operation, clearing mlock() and
1236  * account flags on remaining VMA by convention (it cannot be mlock()'d any
1237  * longer, as pages in range are no longer mapped), and removing anon_vma_chain
1238  * links from it (if the entire VMA was copied over).
1239  */
1240 static void dontunmap_complete(struct vma_remap_struct *vrm,
1241 			       struct vm_area_struct *new_vma)
1242 {
1243 	unsigned long start = vrm->addr;
1244 	unsigned long end = vrm->addr + vrm->old_len;
1245 	unsigned long old_start = vrm->vma->vm_start;
1246 	unsigned long old_end = vrm->vma->vm_end;
1247 
1248 	/*
1249 	 * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old
1250 	 * vma.
1251 	 */
1252 	vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT);
1253 
1254 	/*
1255 	 * The anon_vma links of the old vma are no longer needed after its page
1256 	 * tables have been moved.
1257 	 */
1258 	if (new_vma != vrm->vma && start == old_start && end == old_end)
1259 		unlink_anon_vmas(vrm->vma);
1260 
1261 	/* Because we won't unmap we don't need to touch locked_vm. */
1262 }
1263 
1264 static unsigned long move_vma(struct vma_remap_struct *vrm)
1265 {
1266 	struct mm_struct *mm = current->mm;
1267 	struct vm_area_struct *new_vma;
1268 	unsigned long hiwater_vm;
1269 	int err;
1270 
1271 	err = prep_move_vma(vrm);
1272 	if (err)
1273 		return err;
1274 
1275 	/*
1276 	 * If accounted, determine the number of bytes the operation will
1277 	 * charge.
1278 	 */
1279 	if (!vrm_calc_charge(vrm))
1280 		return -ENOMEM;
1281 
1282 	/* We don't want racing faults. */
1283 	vma_start_write(vrm->vma);
1284 
1285 	/* Perform copy step. */
1286 	err = copy_vma_and_data(vrm, &new_vma);
1287 	/*
1288 	 * If we established the copied-to VMA, we attempt to recover from the
1289 	 * error by setting the destination VMA to the source VMA and unmapping
1290 	 * it below.
1291 	 */
1292 	if (err && !new_vma)
1293 		return err;
1294 
1295 	/*
1296 	 * If we failed to move page tables we still do total_vm increment
1297 	 * since do_munmap() will decrement it by old_len == new_len.
1298 	 *
1299 	 * Since total_vm is about to be raised artificially high for a
1300 	 * moment, we need to restore high watermark afterwards: if stats
1301 	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
1302 	 * If this were a serious issue, we'd add a flag to do_munmap().
1303 	 */
1304 	hiwater_vm = mm->hiwater_vm;
1305 
1306 	vrm_stat_account(vrm, vrm->new_len);
1307 	if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP)))
1308 		dontunmap_complete(vrm, new_vma);
1309 	else
1310 		unmap_source_vma(vrm);
1311 
1312 	mm->hiwater_vm = hiwater_vm;
1313 
1314 	return err ? (unsigned long)err : vrm->new_addr;
1315 }
1316 
1317 /*
1318  * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so
1319  * execute this, optionally dropping the mmap lock when we do so.
1320  *
1321  * In both cases this invalidates the VMA, however if we don't drop the lock,
1322  * then load the correct VMA into vrm->vma afterwards.
1323  */
1324 static unsigned long shrink_vma(struct vma_remap_struct *vrm,
1325 				bool drop_lock)
1326 {
1327 	struct mm_struct *mm = current->mm;
1328 	unsigned long unmap_start = vrm->addr + vrm->new_len;
1329 	unsigned long unmap_bytes = vrm->delta;
1330 	unsigned long res;
1331 	VMA_ITERATOR(vmi, mm, unmap_start);
1332 
1333 	VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK);
1334 
1335 	res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes,
1336 			    vrm->uf_unmap, drop_lock);
1337 	vrm->vma = NULL; /* Invalidated. */
1338 	if (res)
1339 		return res;
1340 
1341 	/*
1342 	 * If we've not dropped the lock, then we should reload the VMA to
1343 	 * replace the invalidated VMA with the one that may have now been
1344 	 * split.
1345 	 */
1346 	if (drop_lock) {
1347 		vrm->mmap_locked = false;
1348 	} else {
1349 		vrm->vma = vma_lookup(mm, vrm->addr);
1350 		if (!vrm->vma)
1351 			return -EFAULT;
1352 	}
1353 
1354 	return 0;
1355 }
1356 
1357 /*
1358  * mremap_to() - remap a vma to a new location.
1359  * Returns: The new address of the vma or an error.
1360  */
1361 static unsigned long mremap_to(struct vma_remap_struct *vrm)
1362 {
1363 	struct mm_struct *mm = current->mm;
1364 	unsigned long err;
1365 
1366 	if (vrm->flags & MREMAP_FIXED) {
1367 		/*
1368 		 * In mremap_to() the VMA is moved to the dst address,
1369 		 * so munmap dst first.
1370 		 * do_munmap() will check if dst is sealed.
1371 		 */
1372 		err = do_munmap(mm, vrm->new_addr, vrm->new_len,
1373 				vrm->uf_unmap_early);
1374 		vrm->vma = NULL; /* Invalidated. */
1375 		vrm->vmi_needs_invalidate = true;
1376 		if (err)
1377 			return err;
1378 
1379 		/*
1380 		 * If we remap a portion of a VMA elsewhere in the same VMA,
1381 		 * this can invalidate the old VMA. Reset.
1382 		 */
1383 		vrm->vma = vma_lookup(mm, vrm->addr);
1384 		if (!vrm->vma)
1385 			return -EFAULT;
1386 	}
1387 
1388 	if (vrm->remap_type == MREMAP_SHRINK) {
1389 		err = shrink_vma(vrm, /* drop_lock= */false);
1390 		if (err)
1391 			return err;
1392 
1393 		/* Set up for the move now shrink has been executed. */
1394 		vrm->old_len = vrm->new_len;
1395 	}
1396 
1397 	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
1398 	if (vrm->flags & MREMAP_DONTUNMAP) {
1399 		vm_flags_t vm_flags = vrm->vma->vm_flags;
1400 		unsigned long pages = vrm->old_len >> PAGE_SHIFT;
1401 
1402 		if (!may_expand_vm(mm, vm_flags, pages))
1403 			return -ENOMEM;
1404 	}
1405 
1406 	err = vrm_set_new_addr(vrm);
1407 	if (err)
1408 		return err;
1409 
1410 	return move_vma(vrm);
1411 }
1412 
1413 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
1414 {
1415 	unsigned long end = vma->vm_end + delta;
1416 
1417 	if (end < vma->vm_end) /* overflow */
1418 		return 0;
1419 	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
1420 		return 0;
1421 	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
1422 			      0, MAP_FIXED) & ~PAGE_MASK)
1423 		return 0;
1424 	return 1;
1425 }
1426 
1427 /* Determine whether we are actually able to execute an in-place expansion. */
1428 static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm)
1429 {
1430 	/* Number of bytes from vrm->addr to end of VMA. */
1431 	unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr;
1432 
1433 	/* If end of range aligns to end of VMA, we can just expand in-place. */
1434 	if (suffix_bytes != vrm->old_len)
1435 		return false;
1436 
1437 	/* Check whether this is feasible. */
1438 	if (!vma_expandable(vrm->vma, vrm->delta))
1439 		return false;
1440 
1441 	return true;
1442 }
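/*
 * For instance, with a VMA spanning [0x1000, 0x5000) and a request of
 * mremap(addr = 0x3000, old_len = 0x2000, new_len = 0x3000): suffix_bytes ==
 * 0x5000 - 0x3000 == old_len, so if the extra 0x1000 bytes beyond 0x5000 are
 * unmapped (checked via vma_expandable()) the expansion is done in place.
 * Had addr been 0x2000, suffix_bytes (0x3000) != old_len and we would have to
 * fall back to moving the VMA in expand_vma().
 */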
1443 
1444 /*
1445  * We know we can expand the VMA in-place by delta bytes, so do so.
1446  *
1447  * If we discover the VMA is locked, update mm_struct statistics accordingly and
1448  * indicate so to the caller.
1449  */
1450 static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
1451 {
1452 	struct mm_struct *mm = current->mm;
1453 	struct vm_area_struct *vma = vrm->vma;
1454 	VMA_ITERATOR(vmi, mm, vma->vm_end);
1455 
1456 	if (!vrm_calc_charge(vrm))
1457 		return -ENOMEM;
1458 
1459 	/*
1460 	 * Function vma_merge_extend() is called on the
1461 	 * extension we are adding to the already existing vma,
1462 	 * vma_merge_extend() will merge this extension with the
1463 	 * already existing vma (expand operation itself) and
1464 	 * possibly also with the next vma if it becomes
1465 	 * adjacent to the expanded vma and otherwise
1466 	 * compatible.
1467 	 */
1468 	vma = vma_merge_extend(&vmi, vma, vrm->delta);
1469 	if (!vma) {
1470 		vrm_uncharge(vrm);
1471 		return -ENOMEM;
1472 	}
1473 	vrm->vma = vma;
1474 
1475 	vrm_stat_account(vrm, vrm->delta);
1476 
1477 	return 0;
1478 }
1479 
1480 static bool align_hugetlb(struct vma_remap_struct *vrm)
1481 {
1482 	struct hstate *h __maybe_unused = hstate_vma(vrm->vma);
1483 
1484 	vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h));
1485 	vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h));
1486 
1487 	/* addrs must be huge page aligned */
1488 	if (vrm->addr & ~huge_page_mask(h))
1489 		return false;
1490 	if (vrm->new_addr & ~huge_page_mask(h))
1491 		return false;
1492 
1493 	/*
1494 	 * Don't allow remap expansion, because the underlying hugetlb
1495 	 * reservation is not yet capable of handling split reservations.
1496 	 */
1497 	if (vrm->new_len > vrm->old_len)
1498 		return false;
1499 
1500 	return true;
1501 }
1502 
1503 /*
1504  * We are mremap()'ing without specifying a fixed address to move to, but are
1505  * requesting that the VMA's size be increased.
1506  *
1507  * Try to do so in-place, if this fails, then move the VMA to a new location to
1508  * action the change.
1509  */
1510 static unsigned long expand_vma(struct vma_remap_struct *vrm)
1511 {
1512 	unsigned long err;
1513 
1514 	/*
1515 	 * [addr, old_len) spans precisely to the end of the VMA, so try to
1516 	 * expand it in-place.
1517 	 */
1518 	if (vrm_can_expand_in_place(vrm)) {
1519 		err = expand_vma_in_place(vrm);
1520 		if (err)
1521 			return err;
1522 
1523 		/* OK we're done! */
1524 		return vrm->addr;
1525 	}
1526 
1527 	/*
1528 	 * We weren't able to just expand or shrink the area,
1529 	 * we need to create a new one and move it.
1530 	 */
1531 
1532 	/* We're not allowed to move the VMA, so error out. */
1533 	if (!(vrm->flags & MREMAP_MAYMOVE))
1534 		return -ENOMEM;
1535 
1536 	/* Find a new location to move the VMA to. */
1537 	err = vrm_set_new_addr(vrm);
1538 	if (err)
1539 		return err;
1540 
1541 	return move_vma(vrm);
1542 }
1543 
1544 /*
1545  * Attempt to resize the VMA in-place, if we cannot, then move the VMA to the
1546  * first available address to perform the operation.
1547  */
1548 static unsigned long mremap_at(struct vma_remap_struct *vrm)
1549 {
1550 	unsigned long res;
1551 
1552 	switch (vrm->remap_type) {
1553 	case MREMAP_INVALID:
1554 		break;
1555 	case MREMAP_NO_RESIZE:
1556 		/* NO-OP CASE - resizing to the same size. */
1557 		return vrm->addr;
1558 	case MREMAP_SHRINK:
1559 		/*
1560 		 * SHRINK CASE. Can always be done in-place.
1561 		 *
1562 		 * Simply unmap the shrunken portion of the VMA. This does all
1563 		 * the needed commit accounting, and we indicate that the mmap
1564 		 * lock should be dropped.
1565 		 */
1566 		res = shrink_vma(vrm, /* drop_lock= */true);
1567 		if (res)
1568 			return res;
1569 
1570 		return vrm->addr;
1571 	case MREMAP_EXPAND:
1572 		return expand_vma(vrm);
1573 	}
1574 
1575 	/* Should not be possible. */
1576 	WARN_ON_ONCE(1);
1577 	return -EINVAL;
1578 }
1579 
1580 /*
1581  * Will this operation result in the VMA being expanded or moved and thus need
1582  * to map a new portion of virtual address space?
1583  */
1584 static bool vrm_will_map_new(struct vma_remap_struct *vrm)
1585 {
1586 	if (vrm->remap_type == MREMAP_EXPAND)
1587 		return true;
1588 
1589 	if (vrm_implies_new_addr(vrm))
1590 		return true;
1591 
1592 	return false;
1593 }
1594 
1595 /* Does this remap ONLY move mappings? */
1596 static bool vrm_move_only(struct vma_remap_struct *vrm)
1597 {
1598 	if (!(vrm->flags & MREMAP_FIXED))
1599 		return false;
1600 
1601 	if (vrm->old_len != vrm->new_len)
1602 		return false;
1603 
1604 	return true;
1605 }
1606 
1607 static void notify_uffd(struct vma_remap_struct *vrm, bool failed)
1608 {
1609 	struct mm_struct *mm = current->mm;
1610 
1611 	/* Regardless of success/failure, we always notify of any unmaps. */
1612 	userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
1613 	if (failed)
1614 		mremap_userfaultfd_fail(vrm->uf);
1615 	else
1616 		mremap_userfaultfd_complete(vrm->uf, vrm->addr,
1617 			vrm->new_addr, vrm->old_len);
1618 	userfaultfd_unmap_complete(mm, vrm->uf_unmap);
1619 }
1620 
1621 static bool vma_multi_allowed(struct vm_area_struct *vma)
1622 {
1623 	struct file *file;
1624 
1625 	/*
1626 	 * We can't support moving multiple uffd VMAs as notify requires
1627 	 * mmap lock to be dropped.
1628 	 */
1629 	if (userfaultfd_armed(vma))
1630 		return false;
1631 
1632 	/*
1633 	 * A custom get_unmapped_area() handler might result in MREMAP_FIXED not
1634 	 * being obeyed.
1635 	 */
1636 	file = vma->vm_file;
1637 	if (file && !vma_is_shmem(vma) && !is_vm_hugetlb_page(vma)) {
1638 		const struct file_operations *fop = file->f_op;
1639 
1640 		if (fop->get_unmapped_area)
1641 			return false;
1642 	}
1643 
1644 	return true;
1645 }
1646 
1647 static int check_prep_vma(struct vma_remap_struct *vrm)
1648 {
1649 	struct vm_area_struct *vma = vrm->vma;
1650 	struct mm_struct *mm = current->mm;
1651 	unsigned long addr = vrm->addr;
1652 	unsigned long old_len, new_len, pgoff;
1653 
1654 	if (!vma)
1655 		return -EFAULT;
1656 
1657 	/* If mseal()'d, mremap() is prohibited. */
1658 	if (vma_is_sealed(vma))
1659 		return -EPERM;
1660 
1661 	/* Align to hugetlb page size, if required. */
1662 	if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm))
1663 		return -EINVAL;
1664 
1665 	vrm_set_delta(vrm);
1666 	vrm->remap_type = vrm_remap_type(vrm);
1667 	/* For convenience, we set new_addr even if VMA won't move. */
1668 	if (!vrm_implies_new_addr(vrm))
1669 		vrm->new_addr = addr;
1670 
1671 	/* Below only meaningful if we expand or move a VMA. */
1672 	if (!vrm_will_map_new(vrm))
1673 		return 0;
1674 
1675 	old_len = vrm->old_len;
1676 	new_len = vrm->new_len;
1677 
1678 	/*
1679 	 * !old_len is a special case where an attempt is made to 'duplicate'
1680 	 * a mapping.  This makes no sense for private mappings as it will
1681 	 * instead create a fresh/new mapping unrelated to the original.  This
1682 	 * is contrary to the basic idea of mremap which creates new mappings
1683 	 * based on the original.  There are no known use cases for this
1684 	 * behavior.  As a result, fail such attempts.
1685 	 */
1686 	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
1687 		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n",
1688 			     current->comm, current->pid);
1689 		return -EINVAL;
1690 	}
1691 
1692 	if ((vrm->flags & MREMAP_DONTUNMAP) &&
1693 			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
1694 		return -EINVAL;
1695 
1696 	/*
1697 	 * We permit crossing of boundaries for the range being unmapped due to
1698 	 * a shrink.
1699 	 */
1700 	if (vrm->remap_type == MREMAP_SHRINK)
1701 		old_len = new_len;
1702 
1703 	/*
1704 	 * We can't remap across the end of VMAs, as another VMA may be
1705 	 * adjacent:
1706 	 *
1707 	 *       addr   vma->vm_end
1708 	 *  |-----.----------|
1709 	 *  |     .          |
1710 	 *  |-----.----------|
1711 	 *        .<--------->xxx>
1712 	 *            old_len
1713 	 *
1714 	 * We also require that vma->vm_start <= addr < vma->vm_end.
1715 	 */
1716 	if (old_len > vma->vm_end - addr)
1717 		return -EFAULT;
1718 
1719 	if (new_len == old_len)
1720 		return 0;
1721 
1722 	/* We are expanding and the VMA is mlock()'d so we need to populate. */
1723 	if (vma->vm_flags & VM_LOCKED)
1724 		vrm->populate_expand = true;
1725 
1726 	/* Need to be careful about a growing mapping */
1727 	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
1728 	pgoff += vma->vm_pgoff;
1729 	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
1730 		return -EINVAL;
1731 
1732 	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
1733 		return -EFAULT;
1734 
1735 	if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta))
1736 		return -EAGAIN;
1737 
1738 	if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
1739 		return -ENOMEM;
1740 
1741 	return 0;
1742 }
1743 
1744 /*
1745  * Are the parameters passed to mremap() valid? If so return 0, otherwise return
1746  * error.
1747  */
1748 static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
1749 
1750 {
1751 	unsigned long addr = vrm->addr;
1752 	unsigned long flags = vrm->flags;
1753 
1754 	/* Ensure no unexpected flag values. */
1755 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
1756 		return -EINVAL;
1757 
1758 	/* Start address must be page-aligned. */
1759 	if (offset_in_page(addr))
1760 		return -EINVAL;
1761 
1762 	/*
1763 	 * We allow a zero old-len as a special case
1764 	 * for DOS-emu "duplicate shm area" thing. But
1765 	 * a zero new-len is nonsensical.
1766 	 */
1767 	if (!vrm->new_len)
1768 		return -EINVAL;
1769 
1770 	/* Is the new length or address silly? */
1771 	if (vrm->new_len > TASK_SIZE ||
1772 	    vrm->new_addr > TASK_SIZE - vrm->new_len)
1773 		return -EINVAL;
1774 
1775 	/* Remainder of checks are for cases with specific new_addr. */
1776 	if (!vrm_implies_new_addr(vrm))
1777 		return 0;
1778 
1779 	/* The new address must be page-aligned. */
1780 	if (offset_in_page(vrm->new_addr))
1781 		return -EINVAL;
1782 
1783 	/* A fixed address implies a move. */
1784 	if (!(flags & MREMAP_MAYMOVE))
1785 		return -EINVAL;
1786 
1787 	/* MREMAP_DONTUNMAP does not allow resizing in the process. */
1788 	if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len)
1789 		return -EINVAL;
1790 
1791 	/* Target VMA must not overlap source VMA. */
1792 	if (vrm_overlaps(vrm))
1793 		return -EINVAL;
1794 
1795 	/*
1796 	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
1797 	 * it will bail out at the very beginning.
1798 	 * That is a problem if we have already unmapped the regions here
1799 	 * (new_addr and old_addr), because userspace will not know the
1800 	 * state of the VMAs after it gets -ENOMEM.
1801 	 * So, to avoid such a scenario, we can pre-compute whether the whole
1802 	 * operation has a high chance of succeeding map-wise.
1803 	 * The worst-case scenario is when both VMAs (new_addr and old_addr) get
1804 	 * split in 3 before unmapping them.
1805 	 * That means 2 more maps (1 for each) on top of the ones we already hold.
1806 	 * Check whether the current map count plus 2 still leaves us 4 maps below
1807 	 * the threshold, otherwise return -ENOMEM here to be more safe.
1808 	 */
1809 	if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3)
1810 		return -ENOMEM;
1811 
1812 	return 0;
1813 }
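/*
 * With the default sysctl_max_map_count of 65530, the check above refuses the
 * request once map_count + 2 >= 65527, i.e. once the task already holds 65525
 * or more mappings. Failing here with -ENOMEM is preferable to failing inside
 * move_vma() after the destination (and possibly source) ranges have already
 * been unmapped.
 */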
1814 
1815 static unsigned long remap_move(struct vma_remap_struct *vrm)
1816 {
1817 	struct vm_area_struct *vma;
1818 	unsigned long start = vrm->addr;
1819 	unsigned long end = vrm->addr + vrm->old_len;
1820 	unsigned long new_addr = vrm->new_addr;
1821 	bool allowed = true, seen_vma = false;
1822 	unsigned long target_addr = new_addr;
1823 	unsigned long res = -EFAULT;
1824 	unsigned long last_end;
1825 	VMA_ITERATOR(vmi, current->mm, start);
1826 
1827 	/*
1828 	 * When moving VMAs we allow for batched moves across multiple VMAs,
1829 	 * with all VMAs in the input range [addr, addr + old_len) being moved
1830 	 * (and split as necessary).
1831 	 */
1832 	for_each_vma_range(vmi, vma, end) {
1833 		/* Account for start, end not aligned with VMA start, end. */
1834 		unsigned long addr = max(vma->vm_start, start);
1835 		unsigned long len = min(end, vma->vm_end) - addr;
1836 		unsigned long offset, res_vma;
1837 
1838 		if (!allowed)
1839 			return -EFAULT;
1840 
1841 		/* No gap permitted at the start of the range. */
1842 		if (!seen_vma && start < vma->vm_start)
1843 			return -EFAULT;
1844 
1845 		/*
1846 		 * To sensibly move multiple VMAs, accounting for the fact that
1847 		 * get_unmapped_area() may align even MAP_FIXED moves, we simply
1848 		 * attempt to move such that the gaps between source VMAs remain
1849 		 * consistent in destination VMAs, e.g.:
1850 		 *
1851 		 *           X        Y                       X        Y
1852 		 *         <--->     <->                    <--->     <->
1853 		 * |-------|   |-----| |-----|      |-------|   |-----| |-----|
1854 		 * |   A   |   |  B  | |  C  | ---> |   A'  |   |  B' | |  C' |
1855 		 * |-------|   |-----| |-----|      |-------|   |-----| |-----|
1856 		 *                               new_addr
1857 		 *
1858 		 * So we map B' at A'->vm_end + X, and C' at B'->vm_end + Y.
1859 		 */
1860 		offset = seen_vma ? vma->vm_start - last_end : 0;
1861 		last_end = vma->vm_end;
1862 
1863 		vrm->vma = vma;
1864 		vrm->addr = addr;
1865 		vrm->new_addr = target_addr + offset;
1866 		vrm->old_len = vrm->new_len = len;
1867 
1868 		allowed = vma_multi_allowed(vma);
1869 		if (seen_vma && !allowed)
1870 			return -EFAULT;
1871 
1872 		res_vma = check_prep_vma(vrm);
1873 		if (!res_vma)
1874 			res_vma = mremap_to(vrm);
1875 		if (IS_ERR_VALUE(res_vma))
1876 			return res_vma;
1877 
1878 		if (!seen_vma) {
1879 			VM_WARN_ON_ONCE(allowed && res_vma != new_addr);
1880 			res = res_vma;
1881 		}
1882 
1883 		/* mmap lock is only dropped on shrink. */
1884 		VM_WARN_ON_ONCE(!vrm->mmap_locked);
1885 		/* This is a move, no expand should occur. */
1886 		VM_WARN_ON_ONCE(vrm->populate_expand);
1887 
1888 		if (vrm->vmi_needs_invalidate) {
1889 			vma_iter_invalidate(&vmi);
1890 			vrm->vmi_needs_invalidate = false;
1891 		}
1892 		seen_vma = true;
1893 		target_addr = res_vma + vrm->new_len;
1894 	}
1895 
1896 	return res;
1897 }
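/*
 * To illustrate the gap handling above with made-up addresses: moving
 * [0x10000, 0x16000) to new_addr == 0x40000, where the range covers
 * A == [0x10000, 0x13000) and B == [0x14000, 0x16000):
 *
 *   1st iteration: A is moved to 0x40000 (len 0x3000),
 *                  target_addr becomes 0x43000, last_end == 0x13000.
 *   2nd iteration: offset == 0x14000 - 0x13000 == 0x1000, so B is requested
 *                  at 0x43000 + 0x1000 == 0x44000.
 *
 * The 0x1000 hole between A and B is therefore reproduced between A' and B'.
 */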
1898 
1899 static unsigned long do_mremap(struct vma_remap_struct *vrm)
1900 {
1901 	struct mm_struct *mm = current->mm;
1902 	unsigned long res;
1903 	bool failed;
1904 
1905 	vrm->old_len = PAGE_ALIGN(vrm->old_len);
1906 	vrm->new_len = PAGE_ALIGN(vrm->new_len);
1907 
1908 	res = check_mremap_params(vrm);
1909 	if (res)
1910 		return res;
1911 
1912 	if (mmap_write_lock_killable(mm))
1913 		return -EINTR;
1914 	vrm->mmap_locked = true;
1915 
1916 	if (vrm_move_only(vrm)) {
1917 		res = remap_move(vrm);
1918 	} else {
1919 		vrm->vma = vma_lookup(current->mm, vrm->addr);
1920 		res = check_prep_vma(vrm);
1921 		if (res)
1922 			goto out;
1923 
1924 		/* Actually execute mremap. */
1925 		res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
1926 	}
1927 
1928 out:
1929 	failed = IS_ERR_VALUE(res);
1930 
1931 	if (vrm->mmap_locked)
1932 		mmap_write_unlock(mm);
1933 
1934 	/* VMA mlock'd + was expanded, so populate the expanded region. */
1935 	if (!failed && vrm->populate_expand)
1936 		mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);
1937 
1938 	notify_uffd(vrm, failed);
1939 	return res;
1940 }
1941 
1942 /*
1943  * Expand (or shrink) an existing mapping, potentially moving it at the
1944  * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1945  *
1946  * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
1947  * This option implies MREMAP_MAYMOVE.
1948  */
1949 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1950 		unsigned long, new_len, unsigned long, flags,
1951 		unsigned long, new_addr)
1952 {
1953 	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
1954 	LIST_HEAD(uf_unmap_early);
1955 	LIST_HEAD(uf_unmap);
1956 	/*
1957 	 * There is a deliberate asymmetry here: we strip the pointer tag
1958 	 * from the old address but leave the new address alone. This is
1959 	 * for consistency with mmap(), where we prevent the creation of
1960 	 * aliasing mappings in userspace by leaving the tag bits of the
1961 	 * mapping address intact. A non-zero tag will cause the subsequent
1962 	 * range checks to reject the address as invalid.
1963 	 *
1964 	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
1965 	 * information.
1966 	 */
1967 	struct vma_remap_struct vrm = {
1968 		.addr = untagged_addr(addr),
1969 		.old_len = old_len,
1970 		.new_len = new_len,
1971 		.flags = flags,
1972 		.new_addr = new_addr,
1973 
1974 		.uf = &uf,
1975 		.uf_unmap_early = &uf_unmap_early,
1976 		.uf_unmap = &uf_unmap,
1977 
1978 		.remap_type = MREMAP_INVALID, /* We set later. */
1979 	};
1980 
1981 	return do_mremap(&vrm);
1982 }
1983
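/*
 * A minimal userspace sketch of the call serviced above (illustrative, error
 * handling trimmed): grow an anonymous mapping, allowing the kernel to move
 * it if it cannot be expanded in place.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *
 *		if (q == MAP_FAILED)
 *			perror("mremap");
 *		return 0;
 *	}
 *
 * With MREMAP_MAYMOVE and no new_addr this takes the mremap_at() path above:
 * expand_vma() tries an in-place extension first and falls back to move_vma()
 * if the pages following the mapping are occupied.
 */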