xref: /linux/mm/mremap.c (revision 10accd2e6890b57db8e717e9aee91b791f90fe14)
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

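/*
 * Walk the existing page tables for a source address.  Returns the pmd
 * entry covering @addr, or NULL if any level is not present (in which case
 * the caller has nothing to move for this range).
 */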
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

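/*
 * Allocate, if necessary, the page-table levels needed to hold the pmd
 * covering the destination address.  Returns NULL on allocation failure.
 * Allocation of the pte level is left to the caller.
 */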
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

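/*
 * Take/drop the rmap locks that keep reverse-map walkers away while ptes
 * are being moved: the file's i_mmap_rwsem first, then the anon_vma lock,
 * released in the opposite order.
 */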
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

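/*
 * Move the ptes in [old_addr, old_end) under old_pmd over to new_pmd,
 * clearing the source entries as we go.  The caller guarantees that the
 * range fits within a single pmd on both the old and the new side.
 */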
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
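	/*
	 * Both pte pointers were post-incremented one past the last entry
	 * handled by the loop above, so step back before unmapping.
	 */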
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

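/*
 * Cap on how much is moved per move_ptes() call, so that the page-table
 * locks are dropped and cond_resched() can run at least every 64 pages.
 */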
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

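/*
 * Move the page-table entries for [old_addr, old_addr + len) in vma over to
 * new_addr in new_vma, one pmd-sized chunk at a time.  Returns how many
 * bytes of the source range were handled; this can be less than len if a
 * destination page-table allocation fails part-way through.
 */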
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end   = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
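		/*
		 * If the source pmd maps a transparent huge page, try to move
		 * the whole pmd when a full pmd-sized extent is being
		 * relocated; otherwise split it and fall back to moving the
		 * ptes individually below.
		 */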
		if (pmd_trans_huge(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE) {
				bool moved;
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					take_rmap_locks(vma);
				moved = move_huge_pmd(vma, old_addr, new_addr,
						    old_end, old_pmd, new_pmd);
				if (need_rmap_locks)
					drop_rmap_locks(vma);
				if (moved) {
					need_flush = true;
					continue;
				}
			}
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		}
		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}

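/*
 * Move the mapping described by vma at (old_addr, old_len) to new_addr with
 * the new length: copy the vma, move its page tables and unmap the old
 * range.  Returns the new address on success or a negative error code.
 */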
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(), which
	 * may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the arch that the pfnmap has moved away from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

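/*
 * Look up and sanity-check the vma covering addr for a resize from old_len
 * to new_len.  Returns the vma, or an ERR_PTR() value if the range is
 * invalid, crosses a vma boundary, or would exceed mlock/address-space
 * limits.  For VM_ACCOUNT mappings, *p is set to the number of pages newly
 * charged for the growth.
 */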
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

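/*
 * Handle MREMAP_FIXED: check that the new range is valid and does not
 * overlap the old one, unmap whatever currently occupies it, shrink the old
 * mapping if the new length is smaller, and then move the vma into place.
 */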
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (offset_in_page(ret))
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!(offset_in_page(ret)))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

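/*
 * Can this vma grow in place by delta bytes?  Fails on address overflow,
 * if the next vma starts inside the would-be range, or if the architecture
 * rejects a MAP_FIXED placement of the enlarged mapping.
 */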
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
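/*
 * For illustration only (not part of the kernel sources): a minimal sketch
 * of a userspace caller reaching this syscall through the glibc mremap(2)
 * wrapper, growing an anonymous mapping and letting the kernel move it if
 * it cannot be expanded in place.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <stddef.h>
 *
 *	static void *grow_mapping(void *old, size_t old_size, size_t new_size)
 *	{
 *		void *p = mremap(old, old_size, new_size, MREMAP_MAYMOVE);
 *		return (p == MAP_FAILED) ? NULL : p;
 *	}
 */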
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (offset_in_page(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
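	/*
	 * A page-aligned value in ret is a successfully remapped address;
	 * anything else is a negative errno, in which case drop any memory
	 * accounting charge taken earlier and skip mm_populate().
	 */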
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = 0;
	}
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}