1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * mm/mmap.c
4 *
5 * Written by obz.
6 *
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
8 */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
16 #include <linux/mm_inline.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/swap.h>
21 #include <linux/syscalls.h>
22 #include <linux/capability.h>
23 #include <linux/init.h>
24 #include <linux/file.h>
25 #include <linux/fs.h>
26 #include <linux/personality.h>
27 #include <linux/security.h>
28 #include <linux/hugetlb.h>
29 #include <linux/shmem_fs.h>
30 #include <linux/profile.h>
31 #include <linux/export.h>
32 #include <linux/mount.h>
33 #include <linux/mempolicy.h>
34 #include <linux/rmap.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/mmdebug.h>
37 #include <linux/perf_event.h>
38 #include <linux/audit.h>
39 #include <linux/khugepaged.h>
40 #include <linux/uprobes.h>
41 #include <linux/notifier.h>
42 #include <linux/memory.h>
43 #include <linux/printk.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/moduleparam.h>
46 #include <linux/pkeys.h>
47 #include <linux/oom.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ksm.h>
50 #include <linux/memfd.h>
51
52 #include <linux/uaccess.h>
53 #include <asm/cacheflush.h>
54 #include <asm/tlb.h>
55 #include <asm/mmu_context.h>
56
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/mmap.h>
59
60 #include "internal.h"
61
62 #ifndef arch_mmap_check
63 #define arch_mmap_check(addr, len, flags) (0)
64 #endif
65
66 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
67 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
68 int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
69 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
70 #endif
71 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
72 const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
73 const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
74 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
75 #endif
76
77 static bool ignore_rlimit_data;
78 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
79
80 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
81 void vma_set_page_prot(struct vm_area_struct *vma)
82 {
83 vm_flags_t vm_flags = vma->vm_flags;
84 pgprot_t vm_page_prot;
85
86 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
87 if (vma_wants_writenotify(vma, vm_page_prot)) {
88 vm_flags &= ~VM_SHARED;
89 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
90 }
91 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
92 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
93 }
94
95 /*
96 * check_brk_limits() - Use platform specific check of range & verify mlock
97 * limits.
98 * @addr: The address to check
99 * @len: The size of increase.
100 *
101 * Return: 0 on success.
102 */
103 static int check_brk_limits(unsigned long addr, unsigned long len)
104 {
105 unsigned long mapped_addr;
106
107 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
108 if (IS_ERR_VALUE(mapped_addr))
109 return mapped_addr;
110
111 return mlock_future_ok(current->mm,
112 current->mm->def_flags & VM_LOCKED, len)
113 ? 0 : -EAGAIN;
114 }
115
116 SYSCALL_DEFINE1(brk, unsigned long, brk)
117 {
118 unsigned long newbrk, oldbrk, origbrk;
119 struct mm_struct *mm = current->mm;
120 struct vm_area_struct *brkvma, *next = NULL;
121 unsigned long min_brk;
122 bool populate = false;
123 LIST_HEAD(uf);
124 struct vma_iterator vmi;
125
126 if (mmap_write_lock_killable(mm))
127 return -EINTR;
128
129 origbrk = mm->brk;
130
131 min_brk = mm->start_brk;
132 #ifdef CONFIG_COMPAT_BRK
133 /*
134 * CONFIG_COMPAT_BRK can still be overridden by setting
135 * randomize_va_space to 2, which will still cause mm->start_brk
136 * to be arbitrarily shifted
137 */
138 if (!current->brk_randomized)
139 min_brk = mm->end_data;
140 #endif
141 if (brk < min_brk)
142 goto out;
143
144 /*
145 * Check against rlimit here. If this check is done later after the test
146 * of oldbrk with newbrk then it can escape the test and let the data
147 * segment grow beyond its set limit in the case where the limit is
148 * not page aligned -Ram Gupta
149 */
150 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
151 mm->end_data, mm->start_data))
152 goto out;
153
154 newbrk = PAGE_ALIGN(brk);
155 oldbrk = PAGE_ALIGN(mm->brk);
156 if (oldbrk == newbrk) {
157 mm->brk = brk;
158 goto success;
159 }
160
161 /* Always allow shrinking brk. */
162 if (brk <= mm->brk) {
163 /* Search one past newbrk */
164 vma_iter_init(&vmi, mm, newbrk);
165 brkvma = vma_find(&vmi, oldbrk);
166 if (!brkvma || brkvma->vm_start >= oldbrk)
167 goto out; /* mapping intersects with an existing non-brk vma. */
168 /*
169 * mm->brk must be protected by write mmap_lock.
170 * do_vmi_align_munmap() will drop the lock on success, so
171 * update it before calling do_vmi_align_munmap().
172 */
173 mm->brk = brk;
174 if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
175 /* unlock = */ true))
176 goto out;
177
178 goto success_unlocked;
179 }
180
181 if (check_brk_limits(oldbrk, newbrk - oldbrk))
182 goto out;
183
184 /*
185 * Only check if the next VMA is within the stack_guard_gap of the
186 * expansion area
187 */
188 vma_iter_init(&vmi, mm, oldbrk);
189 next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
190 if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
191 goto out;
192
193 brkvma = vma_prev_limit(&vmi, mm->start_brk);
194 /* Ok, looks good - let it rip. */
195 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
196 goto out;
197
198 mm->brk = brk;
199 if (mm->def_flags & VM_LOCKED)
200 populate = true;
201
202 success:
203 mmap_write_unlock(mm);
204 success_unlocked:
205 userfaultfd_unmap_complete(mm, &uf);
206 if (populate)
207 mm_populate(oldbrk, newbrk - oldbrk);
208 return brk;
209
210 out:
211 mm->brk = origbrk;
212 mmap_write_unlock(mm);
213 return origbrk;
214 }
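/*
 * Illustrative userspace-side sketch (not part of this file; names and sizes
 * are examples only): the grow path above ends in do_brk_flags(), the shrink
 * path in do_vmi_align_munmap(). The raw syscall returns the new break on
 * success and the old break on failure; the libc wrappers translate that into
 * the usual 0/-1 convention.
 *
 *	#include <unistd.h>
 *
 *	int heap_grow_shrink_example(void)
 *	{
 *		void *old = sbrk(0);		// current program break
 *
 *		if (sbrk(4096) == (void *)-1)	// grow by one page
 *			return -1;
 *		// ... use [old, old + 4096) ...
 *		return brk(old);		// shrink back to the old break
 *	}
 */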
215
216 /*
217 * If a hint addr is less than mmap_min_addr change hint to be as
218 * low as possible but still greater than mmap_min_addr
219 */
220 static inline unsigned long round_hint_to_min(unsigned long hint)
221 {
222 hint &= PAGE_MASK;
223 if (((void *)hint != NULL) &&
224 (hint < mmap_min_addr))
225 return PAGE_ALIGN(mmap_min_addr);
226 return hint;
227 }
228
229 bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
230 unsigned long bytes)
231 {
232 unsigned long locked_pages, limit_pages;
233
234 if (!is_vma_locked || capable(CAP_IPC_LOCK))
235 return true;
236
237 locked_pages = bytes >> PAGE_SHIFT;
238 locked_pages += mm->locked_vm;
239
240 limit_pages = rlimit(RLIMIT_MEMLOCK);
241 limit_pages >>= PAGE_SHIFT;
242
243 return locked_pages <= limit_pages;
244 }
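/*
 * Worked example for the check above (illustrative numbers, assuming 4 KiB
 * pages): with RLIMIT_MEMLOCK = 64 KiB, limit_pages = 65536 >> 12 = 16. If
 * mm->locked_vm is already 10 pages and the caller asks for bytes = 32 KiB
 * (8 pages), locked_pages = 10 + 8 = 18 > 16 and the request is refused,
 * unless the VMA is not VM_LOCKED or the task has CAP_IPC_LOCK.
 */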
245
246 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
247 {
248 if (S_ISREG(inode->i_mode))
249 return MAX_LFS_FILESIZE;
250
251 if (S_ISBLK(inode->i_mode))
252 return MAX_LFS_FILESIZE;
253
254 if (S_ISSOCK(inode->i_mode))
255 return MAX_LFS_FILESIZE;
256
257 /* Special "we do even unsigned file positions" case */
258 if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
259 return 0;
260
261 /* Yes, random drivers might want more. But I'm tired of buggy drivers */
262 return ULONG_MAX;
263 }
264
265 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
266 unsigned long pgoff, unsigned long len)
267 {
268 u64 maxsize = file_mmap_size_max(file, inode);
269
270 if (maxsize && len > maxsize)
271 return false;
272 maxsize -= len;
273 if (pgoff > maxsize >> PAGE_SHIFT)
274 return false;
275 return true;
276 }
277
278 /**
279 * do_mmap() - Perform a userland memory mapping into the current process
280 * address space of length @len with protection bits @prot, mmap flags @flags
281 * (from which VMA flags will be inferred), and any additional VMA flags to
282 * apply @vm_flags. If this is a file-backed mapping then the file is specified
283 * in @file and page offset into the file via @pgoff.
284 *
285 * This function does not perform security checks on the file and assumes, if
286 * @uf is non-NULL, the caller has provided a list head to track unmap events
287 * for userfaultfd @uf.
288 *
289 * It also simply indicates whether memory population is required by setting
290 * @populate, which must be non-NULL, expecting the caller to actually perform
291 * this task itself if appropriate.
292 *
293 * This function will invoke architecture-specific (and if provided and
294 * relevant, file system-specific) logic to determine the most appropriate
295 * unmapped area in which to place the mapping if not MAP_FIXED.
296 *
297 * Callers which require userland mmap() behaviour should invoke vm_mmap(),
298 * which is also exported for module use.
299 *
300 * Callers which require this behaviour but without the security checks,
301 * userfaultfd and populate behaviour, and who handle the mmap write lock
302 * themselves, should call this function.
303 *
304 * Note that the returned address may reside within a merged VMA if an
305 * appropriate merge were to take place, so it doesn't necessarily specify the
306 * start of a VMA, rather only the start of a valid mapped range of length
307 * @len bytes, rounded down to the nearest page size.
308 *
309 * The caller must write-lock current->mm->mmap_lock.
310 *
311 * @file: An optional struct file pointer describing the file which is to be
312 * mapped, if a file-backed mapping.
313 * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the
314 * address at which to perform this mapping. See mmap (2) for details. Must be
315 * page-aligned.
316 * @len: The length of the mapping. Will be page-aligned and must be at least 1
317 * page in size.
318 * @prot: Protection bits describing access required to the mapping. See mmap
319 * (2) for details.
320 * @flags: Flags specifying how the mapping should be performed, see mmap (2)
321 * for details.
322 * @vm_flags: VMA flags which should be set by default, or 0 otherwise.
323 * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise.
324 * @populate: A pointer to a value which will be set to 0 if no population of
325 * the range is required, or the number of bytes to populate if it is. Must be
326 * non-NULL. See mmap (2) for details as to under what circumstances population
327 * of the range occurs.
328 * @uf: An optional pointer to a list head to track userfaultfd unmap events
329 * should unmapping events arise. If provided, it is up to the caller to manage
330 * this.
331 *
332 * Returns: Either an error, or the address at which the requested mapping has
333 * been performed.
334 */
335 unsigned long do_mmap(struct file *file, unsigned long addr,
336 unsigned long len, unsigned long prot,
337 unsigned long flags, vm_flags_t vm_flags,
338 unsigned long pgoff, unsigned long *populate,
339 struct list_head *uf)
340 {
341 struct mm_struct *mm = current->mm;
342 int pkey = 0;
343
344 *populate = 0;
345
346 mmap_assert_write_locked(mm);
347
348 if (!len)
349 return -EINVAL;
350
351 /*
352 * Does the application expect PROT_READ to imply PROT_EXEC?
353 *
354 * (the exception is when the underlying filesystem is noexec
355 * mounted, in which case we don't add PROT_EXEC.)
356 */
357 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
358 if (!(file && path_noexec(&file->f_path)))
359 prot |= PROT_EXEC;
360
361 /* force arch specific MAP_FIXED handling in get_unmapped_area */
362 if (flags & MAP_FIXED_NOREPLACE)
363 flags |= MAP_FIXED;
364
365 if (!(flags & MAP_FIXED))
366 addr = round_hint_to_min(addr);
367
368 /* Careful about overflows.. */
369 len = PAGE_ALIGN(len);
370 if (!len)
371 return -ENOMEM;
372
373 /* offset overflow? */
374 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
375 return -EOVERFLOW;
376
377 /* Too many mappings? */
378 if (mm->map_count > sysctl_max_map_count)
379 return -ENOMEM;
380
381 /*
382 * addr is returned from get_unmapped_area,
383 * There are two cases:
384 * 1> MAP_FIXED == false
385 * unallocated memory, no need to check sealing.
386 * 2> MAP_FIXED == true
387 * sealing is checked inside mmap_region when
388 * do_vmi_munmap is called.
389 */
390
391 if (prot == PROT_EXEC) {
392 pkey = execute_only_pkey(mm);
393 if (pkey < 0)
394 pkey = 0;
395 }
396
397 /* Do simple checking here so the lower-level routines won't have
398 * to. We assume access permissions have been handled by the open
399 * of the memory object, so we don't do any here.
400 */
401 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
402 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
403
404 /* Obtain the address to map to. We verify (or select) it and ensure
405 * that it represents a valid section of the address space.
406 */
407 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
408 if (IS_ERR_VALUE(addr))
409 return addr;
410
411 if (flags & MAP_FIXED_NOREPLACE) {
412 if (find_vma_intersection(mm, addr, addr + len))
413 return -EEXIST;
414 }
415
416 if (flags & MAP_LOCKED)
417 if (!can_do_mlock())
418 return -EPERM;
419
420 if (!mlock_future_ok(mm, vm_flags & VM_LOCKED, len))
421 return -EAGAIN;
422
423 if (file) {
424 struct inode *inode = file_inode(file);
425 unsigned long flags_mask;
426 int err;
427
428 if (!file_mmap_ok(file, inode, pgoff, len))
429 return -EOVERFLOW;
430
431 flags_mask = LEGACY_MAP_MASK;
432 if (file->f_op->fop_flags & FOP_MMAP_SYNC)
433 flags_mask |= MAP_SYNC;
434
435 switch (flags & MAP_TYPE) {
436 case MAP_SHARED:
437 /*
438 * Force use of MAP_SHARED_VALIDATE with non-legacy
439 * flags. E.g. MAP_SYNC is dangerous to use with
440 * MAP_SHARED as you don't know which consistency model
441 * you will get. We silently ignore unsupported flags
442 * with MAP_SHARED to preserve backward compatibility.
443 */
444 flags &= LEGACY_MAP_MASK;
445 fallthrough;
446 case MAP_SHARED_VALIDATE:
447 if (flags & ~flags_mask)
448 return -EOPNOTSUPP;
449 if (prot & PROT_WRITE) {
450 if (!(file->f_mode & FMODE_WRITE))
451 return -EACCES;
452 if (IS_SWAPFILE(file->f_mapping->host))
453 return -ETXTBSY;
454 }
455
456 /*
457 * Make sure we don't allow writing to an append-only
458 * file..
459 */
460 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
461 return -EACCES;
462
463 vm_flags |= VM_SHARED | VM_MAYSHARE;
464 if (!(file->f_mode & FMODE_WRITE))
465 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
466 fallthrough;
467 case MAP_PRIVATE:
468 if (!(file->f_mode & FMODE_READ))
469 return -EACCES;
470 if (path_noexec(&file->f_path)) {
471 if (vm_flags & VM_EXEC)
472 return -EPERM;
473 vm_flags &= ~VM_MAYEXEC;
474 }
475
476 if (!can_mmap_file(file))
477 return -ENODEV;
478 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
479 return -EINVAL;
480 break;
481
482 default:
483 return -EINVAL;
484 }
485
486 /*
487 * Check to see if we are violating any seals and update VMA
488 * flags if necessary to avoid future seal violations.
489 */
490 err = memfd_check_seals_mmap(file, &vm_flags);
491 if (err)
492 return (unsigned long)err;
493 } else {
494 switch (flags & MAP_TYPE) {
495 case MAP_SHARED:
496 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
497 return -EINVAL;
498 /*
499 * Ignore pgoff.
500 */
501 pgoff = 0;
502 vm_flags |= VM_SHARED | VM_MAYSHARE;
503 break;
504 case MAP_DROPPABLE:
505 if (VM_DROPPABLE == VM_NONE)
506 return -ENOTSUPP;
507 /*
508 * It makes no sense for a locked or stack area to be droppable.
509 *
510 * Also, since droppable pages can just go away at any time
511 * it makes no sense to copy them on fork or dump them.
512 *
513 * And don't attempt to combine with hugetlb for now.
514 */
515 if (flags & (MAP_LOCKED | MAP_HUGETLB))
516 return -EINVAL;
517 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
518 return -EINVAL;
519
520 vm_flags |= VM_DROPPABLE;
521
522 /*
523 * If the pages can be dropped, then it doesn't make
524 * sense to reserve them.
525 */
526 vm_flags |= VM_NORESERVE;
527
528 /*
529 * Likewise, they're volatile enough that they
530 * shouldn't survive forks or coredumps.
531 */
532 vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
533 fallthrough;
534 case MAP_PRIVATE:
535 /*
536 * Set pgoff according to addr for anon_vma.
537 */
538 pgoff = addr >> PAGE_SHIFT;
539 break;
540 default:
541 return -EINVAL;
542 }
543 }
544
545 /*
546 * Set 'VM_NORESERVE' if we should not account for the
547 * memory use of this mapping.
548 */
549 if (flags & MAP_NORESERVE) {
550 /* We honor MAP_NORESERVE if allowed to overcommit */
551 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
552 vm_flags |= VM_NORESERVE;
553
554 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
555 if (file && is_file_hugepages(file))
556 vm_flags |= VM_NORESERVE;
557 }
558
559 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
560 if (!IS_ERR_VALUE(addr) &&
561 ((vm_flags & VM_LOCKED) ||
562 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
563 *populate = len;
564 return addr;
565 }
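/*
 * Minimal sketch of the exported wrapper mentioned in the comment above
 * (hedged: the fragment below is illustrative, not an existing caller).
 * Kernel code that wants ordinary mmap() semantics, including locking,
 * security checks and population, calls vm_mmap() rather than do_mmap():
 *
 *	unsigned long addr;
 *
 *	// offset is in bytes and must be page-aligned for vm_mmap()
 *	addr = vm_mmap(file, 0, len, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;		// negative errno encoded in the value
 */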
566
567 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
568 unsigned long prot, unsigned long flags,
569 unsigned long fd, unsigned long pgoff)
570 {
571 struct file *file = NULL;
572 unsigned long retval;
573
574 if (!(flags & MAP_ANONYMOUS)) {
575 audit_mmap_fd(fd, flags);
576 file = fget(fd);
577 if (!file)
578 return -EBADF;
579 if (is_file_hugepages(file)) {
580 len = ALIGN(len, huge_page_size(hstate_file(file)));
581 } else if (unlikely(flags & MAP_HUGETLB)) {
582 retval = -EINVAL;
583 goto out_fput;
584 }
585 } else if (flags & MAP_HUGETLB) {
586 struct hstate *hs;
587
588 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
589 if (!hs)
590 return -EINVAL;
591
592 len = ALIGN(len, huge_page_size(hs));
593 /*
594 * VM_NORESERVE is used because the reservations will be
595 * taken when vm_ops->mmap() is called
596 */
597 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
598 mk_vma_flags(VMA_NORESERVE_BIT),
599 HUGETLB_ANONHUGE_INODE,
600 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
601 if (IS_ERR(file))
602 return PTR_ERR(file);
603 }
604
605 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
606 out_fput:
607 if (file)
608 fput(file);
609 return retval;
610 }
611
612 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
613 unsigned long, prot, unsigned long, flags,
614 unsigned long, fd, unsigned long, pgoff)
615 {
616 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
617 }
618
619 #ifdef __ARCH_WANT_SYS_OLD_MMAP
620 struct mmap_arg_struct {
621 unsigned long addr;
622 unsigned long len;
623 unsigned long prot;
624 unsigned long flags;
625 unsigned long fd;
626 unsigned long offset;
627 };
628
629 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
630 {
631 struct mmap_arg_struct a;
632
633 if (copy_from_user(&a, arg, sizeof(a)))
634 return -EFAULT;
635 if (offset_in_page(a.offset))
636 return -EINVAL;
637
638 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
639 a.offset >> PAGE_SHIFT);
640 }
641 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
642
643 /*
644 * Determine if the allocation needs to ensure that there is no
645 * existing mapping within its guard gaps, for use as start_gap.
646 */
647 static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
648 {
649 if (vm_flags & VM_SHADOW_STACK)
650 return PAGE_SIZE;
651
652 return 0;
653 }
654
655 /*
656 * Search for an unmapped address range.
657 *
658 * We are looking for a range that:
659 * - does not intersect with any VMA;
660 * - is contained within the [low_limit, high_limit) interval;
661 * - is at least the desired size.
662 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
663 */
664 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
665 {
666 unsigned long addr;
667
668 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
669 addr = unmapped_area_topdown(info);
670 else
671 addr = unmapped_area(info);
672
673 trace_vm_unmapped_area(addr, info);
674 return addr;
675 }
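/*
 * Illustrative sketch of the search criteria above (the limits and alignment
 * below are example values, not a fixed policy): a bottom-up caller wanting a
 * PMD-aligned gap would fill the request roughly like this.
 *
 *	struct vm_unmapped_area_info info = {};
 *	unsigned long addr;
 *
 *	info.length = len;			// at least the desired size
 *	info.low_limit = current->mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = PMD_SIZE - 1;		// (addr & mask) == (offset & mask)
 *	info.align_offset = 0;
 *	addr = vm_unmapped_area(&info);
 *	if (offset_in_page(addr))		// error values have low bits set
 *		return addr;
 */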
676
677 /* Get an address range which is currently unmapped.
678 * For shmat() with addr=0.
679 *
680 * Ugly calling convention alert:
681 * Return value with the low bits set means error value,
682 * i.e.
683 * if (ret & ~PAGE_MASK)
684 * error = ret;
685 *
686 * This function "knows" that -ENOMEM has the bits set.
687 */
688 unsigned long
689 generic_get_unmapped_area(struct file *filp, unsigned long addr,
690 unsigned long len, unsigned long pgoff,
691 unsigned long flags, vm_flags_t vm_flags)
692 {
693 struct mm_struct *mm = current->mm;
694 struct vm_area_struct *vma, *prev;
695 struct vm_unmapped_area_info info = {};
696 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
697
698 if (len > mmap_end - mmap_min_addr)
699 return -ENOMEM;
700
701 if (flags & MAP_FIXED)
702 return addr;
703
704 if (addr) {
705 addr = PAGE_ALIGN(addr);
706 vma = find_vma_prev(mm, addr, &prev);
707 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
708 (!vma || addr + len <= vm_start_gap(vma)) &&
709 (!prev || addr >= vm_end_gap(prev)))
710 return addr;
711 }
712
713 info.length = len;
714 info.low_limit = mm->mmap_base;
715 info.high_limit = mmap_end;
716 info.start_gap = stack_guard_placement(vm_flags);
717 if (filp && is_file_hugepages(filp))
718 info.align_mask = huge_page_mask_align(filp);
719 return vm_unmapped_area(&info);
720 }
721
722 #ifndef HAVE_ARCH_UNMAPPED_AREA
723 unsigned long
724 arch_get_unmapped_area(struct file *filp, unsigned long addr,
725 unsigned long len, unsigned long pgoff,
726 unsigned long flags, vm_flags_t vm_flags)
727 {
728 return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
729 vm_flags);
730 }
731 #endif
732
733 /*
734 * This mmap-allocator allocates new areas top-down from below the
735 * stack's low limit (the base):
736 */
737 unsigned long
738 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
739 unsigned long len, unsigned long pgoff,
740 unsigned long flags, vm_flags_t vm_flags)
741 {
742 struct vm_area_struct *vma, *prev;
743 struct mm_struct *mm = current->mm;
744 struct vm_unmapped_area_info info = {};
745 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
746
747 /* requested length too big for entire address space */
748 if (len > mmap_end - mmap_min_addr)
749 return -ENOMEM;
750
751 if (flags & MAP_FIXED)
752 return addr;
753
754 /* requesting a specific address */
755 if (addr) {
756 addr = PAGE_ALIGN(addr);
757 vma = find_vma_prev(mm, addr, &prev);
758 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
759 (!vma || addr + len <= vm_start_gap(vma)) &&
760 (!prev || addr >= vm_end_gap(prev)))
761 return addr;
762 }
763
764 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
765 info.length = len;
766 info.low_limit = PAGE_SIZE;
767 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
768 info.start_gap = stack_guard_placement(vm_flags);
769 if (filp && is_file_hugepages(filp))
770 info.align_mask = huge_page_mask_align(filp);
771 addr = vm_unmapped_area(&info);
772
773 /*
774 * A failed mmap() very likely causes application failure,
775 * so fall back to the bottom-up function here. This scenario
776 * can happen with large stack limits and large mmap()
777 * allocations.
778 */
779 if (offset_in_page(addr)) {
780 VM_BUG_ON(addr != -ENOMEM);
781 info.flags = 0;
782 info.low_limit = TASK_UNMAPPED_BASE;
783 info.high_limit = mmap_end;
784 addr = vm_unmapped_area(&info);
785 }
786
787 return addr;
788 }
789
790 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
791 unsigned long
792 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
793 unsigned long len, unsigned long pgoff,
794 unsigned long flags, vm_flags_t vm_flags)
795 {
796 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
797 vm_flags);
798 }
799 #endif
800
801 unsigned long mm_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
802 unsigned long len, unsigned long pgoff,
803 unsigned long flags, vm_flags_t vm_flags)
804 {
805 if (mm_flags_test(MMF_TOPDOWN, current->mm))
806 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
807 flags, vm_flags);
808 return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
809 }
810
811 unsigned long
812 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
813 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
814 {
815 unsigned long (*get_area)(struct file *, unsigned long,
816 unsigned long, unsigned long, unsigned long)
817 = NULL;
818
819 unsigned long error = arch_mmap_check(addr, len, flags);
820 if (error)
821 return error;
822
823 /* Careful about overflows.. */
824 if (len > TASK_SIZE)
825 return -ENOMEM;
826
827 if (file) {
828 if (file->f_op->get_unmapped_area)
829 get_area = file->f_op->get_unmapped_area;
830 } else if (flags & MAP_SHARED) {
831 /*
832 * mmap_region() will call shmem_zero_setup() to create a file,
833 * so use shmem's get_unmapped_area in case it can be huge.
834 */
835 get_area = shmem_get_unmapped_area;
836 }
837
838 /* Always treat pgoff as zero for anonymous memory. */
839 if (!file)
840 pgoff = 0;
841
842 if (get_area) {
843 addr = get_area(file, addr, len, pgoff, flags);
844 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file
845 && !addr /* no hint */
846 && IS_ALIGNED(len, PMD_SIZE)) {
847 /* Ensures that larger anonymous mappings are THP aligned. */
848 addr = thp_get_unmapped_area_vmflags(file, addr, len,
849 pgoff, flags, vm_flags);
850 } else {
851 addr = mm_get_unmapped_area_vmflags(file, addr, len,
852 pgoff, flags, vm_flags);
853 }
854 if (IS_ERR_VALUE(addr))
855 return addr;
856
857 if (addr > TASK_SIZE - len)
858 return -ENOMEM;
859 if (offset_in_page(addr))
860 return -EINVAL;
861
862 error = security_mmap_addr(addr);
863 return error ? error : addr;
864 }
865
866 unsigned long
867 mm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
868 unsigned long pgoff, unsigned long flags)
869 {
870 return mm_get_unmapped_area_vmflags(file, addr, len, pgoff, flags, 0);
871 }
872 EXPORT_SYMBOL(mm_get_unmapped_area);
873
874 /**
875 * find_vma_intersection() - Look up the first VMA which intersects the interval
876 * @mm: The process address space.
877 * @start_addr: The inclusive start user address.
878 * @end_addr: The exclusive end user address.
879 *
880 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
881 * start_addr < end_addr.
882 */
883 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
884 unsigned long start_addr,
885 unsigned long end_addr)
886 {
887 unsigned long index = start_addr;
888
889 mmap_assert_locked(mm);
890 return mt_find(&mm->mm_mt, &index, end_addr - 1);
891 }
892 EXPORT_SYMBOL(find_vma_intersection);
893
894 /**
895 * find_vma() - Find the VMA for a given address, or the next VMA.
896 * @mm: The mm_struct to check
897 * @addr: The address
898 *
899 * Returns: The VMA associated with addr, or the next VMA.
900 * May return %NULL in the case of no VMA at addr or above.
901 */
902 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
903 {
904 unsigned long index = addr;
905
906 mmap_assert_locked(mm);
907 return mt_find(&mm->mm_mt, &index, ULONG_MAX);
908 }
909 EXPORT_SYMBOL(find_vma);
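/*
 * Usage note: find_vma() returns the first VMA that ends above @addr, which is
 * not necessarily one that contains it. The canonical caller-side idiom is:
 *
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr) {
 *		// addr lies within vma
 *	}
 *
 * vma_lookup() can be used instead when only an exactly-containing VMA is of
 * interest.
 */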
910
911 /**
912 * find_vma_prev() - Find the VMA for a given address, or the next vma and
913 * set %pprev to the previous VMA, if any.
914 * @mm: The mm_struct to check
915 * @addr: The address
916 * @pprev: The pointer to set to the previous VMA
917 *
918 * Note that no RCU lock is taken here since the external mmap_lock is held
919 * instead.
920 *
921 * Returns: The VMA associated with @addr, or the next vma.
922 * May return %NULL in the case of no vma at addr or above.
923 */
924 struct vm_area_struct *
925 find_vma_prev(struct mm_struct *mm, unsigned long addr,
926 struct vm_area_struct **pprev)
927 {
928 struct vm_area_struct *vma;
929 VMA_ITERATOR(vmi, mm, addr);
930
931 vma = vma_iter_load(&vmi);
932 *pprev = vma_prev(&vmi);
933 if (!vma)
934 vma = vma_next(&vmi);
935 return vma;
936 }
937
938 /* enforced gap between the expanding stack and other mappings. */
939 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
940
941 static int __init cmdline_parse_stack_guard_gap(char *p)
942 {
943 unsigned long val;
944 char *endptr;
945
946 val = simple_strtoul(p, &endptr, 10);
947 if (!*endptr)
948 stack_guard_gap = val << PAGE_SHIFT;
949
950 return 1;
951 }
952 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
953
954 #ifdef CONFIG_STACK_GROWSUP
955 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
956 {
957 return expand_upwards(vma, address);
958 }
959
960 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
961 {
962 struct vm_area_struct *vma, *prev;
963
964 addr &= PAGE_MASK;
965 vma = find_vma_prev(mm, addr, &prev);
966 if (vma && (vma->vm_start <= addr))
967 return vma;
968 if (!prev)
969 return NULL;
970 if (expand_stack_locked(prev, addr))
971 return NULL;
972 if (prev->vm_flags & VM_LOCKED)
973 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
974 return prev;
975 }
976 #else
977 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
978 {
979 return expand_downwards(vma, address);
980 }
981
982 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
983 {
984 struct vm_area_struct *vma;
985 unsigned long start;
986
987 addr &= PAGE_MASK;
988 vma = find_vma(mm, addr);
989 if (!vma)
990 return NULL;
991 if (vma->vm_start <= addr)
992 return vma;
993 start = vma->vm_start;
994 if (expand_stack_locked(vma, addr))
995 return NULL;
996 if (vma->vm_flags & VM_LOCKED)
997 populate_vma_page_range(vma, addr, start, NULL);
998 return vma;
999 }
1000 #endif
1001
1002 #if defined(CONFIG_STACK_GROWSUP)
1003
1004 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
1005 #define vma_expand_down(vma, addr) (-EFAULT)
1006
1007 #else
1008
1009 #define vma_expand_up(vma,addr) (-EFAULT)
1010 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
1011
1012 #endif
1013
1014 /*
1015 * expand_stack(): legacy interface for page faulting. Don't use unless
1016 * you have to.
1017 *
1018 * This is called with the mm locked for reading, drops the lock, takes
1019 * the lock for writing, tries to look up a vma again, expands it if
1020 * necessary, and downgrades the lock to reading again.
1021 *
1022 * If no vma is found or it can't be expanded, it returns NULL and has
1023 * dropped the lock.
1024 */
1025 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
1026 {
1027 struct vm_area_struct *vma, *prev;
1028
1029 mmap_read_unlock(mm);
1030 if (mmap_write_lock_killable(mm))
1031 return NULL;
1032
1033 vma = find_vma_prev(mm, addr, &prev);
1034 if (vma && vma->vm_start <= addr)
1035 goto success;
1036
1037 if (prev && !vma_expand_up(prev, addr)) {
1038 vma = prev;
1039 goto success;
1040 }
1041
1042 if (vma && !vma_expand_down(vma, addr))
1043 goto success;
1044
1045 mmap_write_unlock(mm);
1046 return NULL;
1047
1048 success:
1049 mmap_write_downgrade(mm);
1050 return vma;
1051 }
1052
1053 /* do_munmap() - Wrapper around do_vmi_munmap() for callers that are not maple tree aware.
1054 * @mm: The mm_struct
1055 * @start: The start address to munmap
1056 * @len: The length to be munmapped.
1057 * @uf: The userfaultfd list_head
1058 *
1059 * Return: 0 on success, error otherwise.
1060 */
1061 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
1062 struct list_head *uf)
1063 {
1064 VMA_ITERATOR(vmi, mm, start);
1065
1066 return do_vmi_munmap(&vmi, mm, start, len, uf, false);
1067 }
1068
1069 int vm_munmap(unsigned long start, size_t len)
1070 {
1071 return __vm_munmap(start, len, false);
1072 }
1073 EXPORT_SYMBOL(vm_munmap);
1074
1075 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1076 {
1077 addr = untagged_addr(addr);
1078 return __vm_munmap(addr, len, true);
1079 }
1080
1081
1082 /*
1083 * Emulation of deprecated remap_file_pages() syscall.
1084 */
1085 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
1086 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
1087 {
1088
1089 struct mm_struct *mm = current->mm;
1090 struct vm_area_struct *vma;
1091 unsigned long populate = 0;
1092 unsigned long ret = -EINVAL;
1093 struct file *file;
1094 vm_flags_t vm_flags;
1095
1096 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
1097 current->comm, current->pid);
1098
1099 if (prot)
1100 return ret;
1101 start = start & PAGE_MASK;
1102 size = size & PAGE_MASK;
1103
1104 if (start + size <= start)
1105 return ret;
1106
1107 /* Does pgoff wrap? */
1108 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
1109 return ret;
1110
1111 if (mmap_read_lock_killable(mm))
1112 return -EINTR;
1113
1114 /*
1115 * Look up VMA under read lock first so we can perform the security
1116 * check without holding locks (which can be problematic). We reacquire a
1117 * write lock later and check nothing changed underneath us.
1118 */
1119 vma = vma_lookup(mm, start);
1120
1121 if (!vma || !(vma->vm_flags & VM_SHARED)) {
1122 mmap_read_unlock(mm);
1123 return -EINVAL;
1124 }
1125
1126 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
1127 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
1128 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
1129
1130 flags &= MAP_NONBLOCK;
1131 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
1132 if (vma->vm_flags & VM_LOCKED)
1133 flags |= MAP_LOCKED;
1134
1135 /* Save vm_flags used to calculate prot and flags, and recheck later. */
1136 vm_flags = vma->vm_flags;
1137 file = get_file(vma->vm_file);
1138
1139 mmap_read_unlock(mm);
1140
1141 /* Call outside mmap_lock to be consistent with other callers. */
1142 ret = security_mmap_file(file, prot, flags);
1143 if (ret) {
1144 fput(file);
1145 return ret;
1146 }
1147
1148 ret = -EINVAL;
1149
1150 /* OK security check passed, take write lock + let it rip. */
1151 if (mmap_write_lock_killable(mm)) {
1152 fput(file);
1153 return -EINTR;
1154 }
1155
1156 vma = vma_lookup(mm, start);
1157
1158 if (!vma)
1159 goto out;
1160
1161 /* Make sure things didn't change under us. */
1162 if (vma->vm_flags != vm_flags)
1163 goto out;
1164 if (vma->vm_file != file)
1165 goto out;
1166
1167 if (start + size > vma->vm_end) {
1168 VMA_ITERATOR(vmi, mm, vma->vm_end);
1169 struct vm_area_struct *next, *prev = vma;
1170
1171 for_each_vma_range(vmi, next, start + size) {
1172 /* hole between vmas ? */
1173 if (next->vm_start != prev->vm_end)
1174 goto out;
1175
1176 if (next->vm_file != vma->vm_file)
1177 goto out;
1178
1179 if (next->vm_flags != vma->vm_flags)
1180 goto out;
1181
1182 if (start + size <= next->vm_end)
1183 break;
1184
1185 prev = next;
1186 }
1187
1188 if (!next)
1189 goto out;
1190 }
1191
1192 ret = do_mmap(vma->vm_file, start, size,
1193 prot, flags, 0, pgoff, &populate, NULL);
1194 out:
1195 mmap_write_unlock(mm);
1196 fput(file);
1197 if (populate)
1198 mm_populate(ret, populate);
1199 if (!IS_ERR_VALUE(ret))
1200 ret = 0;
1201 return ret;
1202 }
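/*
 * Userspace-side sketch of what the emulation above services (illustrative;
 * fd is assumed to be an open file descriptor and 4 KiB pages are assumed):
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	// map two file pages, then make mapping page 0 show file page 1
 *	char *p = mmap(NULL, 2 * 4096, PROT_READ, MAP_SHARED, fd, 0);
 *	remap_file_pages(p, 4096, 0, 1, 0);
 *
 * The legacy call is serviced as a fresh do_mmap() of the same file with
 * MAP_FIXED | MAP_SHARED | MAP_POPULATE over the affected range.
 */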
1203
1204 int vm_brk_flags(unsigned long addr, unsigned long request, vm_flags_t vm_flags)
1205 {
1206 struct mm_struct *mm = current->mm;
1207 struct vm_area_struct *vma = NULL;
1208 unsigned long len;
1209 int ret;
1210 bool populate;
1211 LIST_HEAD(uf);
1212 VMA_ITERATOR(vmi, mm, addr);
1213
1214 len = PAGE_ALIGN(request);
1215 if (len < request)
1216 return -ENOMEM;
1217 if (!len)
1218 return 0;
1219
1220 /* Until we need other flags, refuse anything except VM_EXEC. */
1221 if ((vm_flags & (~VM_EXEC)) != 0)
1222 return -EINVAL;
1223
1224 if (mmap_write_lock_killable(mm))
1225 return -EINTR;
1226
1227 ret = check_brk_limits(addr, len);
1228 if (ret)
1229 goto limits_failed;
1230
1231 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
1232 if (ret)
1233 goto munmap_failed;
1234
1235 vma = vma_prev(&vmi);
1236 ret = do_brk_flags(&vmi, vma, addr, len, vm_flags);
1237 populate = ((mm->def_flags & VM_LOCKED) != 0);
1238 mmap_write_unlock(mm);
1239 userfaultfd_unmap_complete(mm, &uf);
1240 if (populate && !ret)
1241 mm_populate(addr, len);
1242 return ret;
1243
1244 munmap_failed:
1245 limits_failed:
1246 mmap_write_unlock(mm);
1247 return ret;
1248 }
1249 EXPORT_SYMBOL(vm_brk_flags);
1250
1251 static
1252 unsigned long tear_down_vmas(struct mm_struct *mm, struct vma_iterator *vmi,
1253 struct vm_area_struct *vma, unsigned long end)
1254 {
1255 unsigned long nr_accounted = 0;
1256 int count = 0;
1257
1258 mmap_assert_write_locked(mm);
1259 vma_iter_set(vmi, vma->vm_end);
1260 do {
1261 if (vma->vm_flags & VM_ACCOUNT)
1262 nr_accounted += vma_pages(vma);
1263 vma_mark_detached(vma);
1264 remove_vma(vma);
1265 count++;
1266 cond_resched();
1267 vma = vma_next(vmi);
1268 } while (vma && vma->vm_end <= end);
1269
1270 VM_WARN_ON_ONCE(count != mm->map_count);
1271 return nr_accounted;
1272 }
1273
1274 /* Release all mmaps. */
1275 void exit_mmap(struct mm_struct *mm)
1276 {
1277 struct mmu_gather tlb;
1278 struct vm_area_struct *vma;
1279 unsigned long nr_accounted = 0;
1280 VMA_ITERATOR(vmi, mm, 0);
1281 struct unmap_desc unmap;
1282
1283 /* mm's last user has gone, and it's about to be pulled down */
1284 mmu_notifier_release(mm);
1285
1286 mmap_read_lock(mm);
1287 arch_exit_mmap(mm);
1288
1289 vma = vma_next(&vmi);
1290 if (!vma) {
1291 /* Can happen if dup_mmap() received an OOM */
1292 mmap_read_unlock(mm);
1293 mmap_write_lock(mm);
1294 goto destroy;
1295 }
1296
1297 unmap_all_init(&unmap, &vmi, vma);
1298 flush_cache_mm(mm);
1299 tlb_gather_mmu_fullmm(&tlb, mm);
1300 /* update_hiwater_rss(mm) here? but nobody should be looking */
1301 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
1302 unmap_vmas(&tlb, &unmap);
1303 mmap_read_unlock(mm);
1304
1305 /*
1306 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
1307 * because the memory has already been freed.
1308 */
1309 mm_flags_set(MMF_OOM_SKIP, mm);
1310 mmap_write_lock(mm);
1311 unmap.mm_wr_locked = true;
1312 mt_clear_in_rcu(&mm->mm_mt);
1313 unmap_pgtable_init(&unmap, &vmi);
1314 free_pgtables(&tlb, &unmap);
1315 tlb_finish_mmu(&tlb);
1316
1317 /*
1318 * Walk the list again, actually closing and freeing it, with preemption
1319 * enabled, without holding any MM locks besides the unreachable
1320 * mmap_write_lock.
1321 */
1322 nr_accounted = tear_down_vmas(mm, &vmi, vma, ULONG_MAX);
1323
1324 destroy:
1325 __mt_destroy(&mm->mm_mt);
1326 trace_exit_mmap(mm);
1327 mmap_write_unlock(mm);
1328 vm_unacct_memory(nr_accounted);
1329 }
1330
1331 /*
1332 * Return true if the calling process may expand its vm space by the passed
1333 * number of pages
1334 */
1335 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
1336 {
1337 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
1338 return false;
1339
1340 if (is_data_mapping(flags) &&
1341 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
1342 /* Workaround for Valgrind */
1343 if (rlimit(RLIMIT_DATA) == 0 &&
1344 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
1345 return true;
1346
1347 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
1348 current->comm, current->pid,
1349 (mm->data_vm + npages) << PAGE_SHIFT,
1350 rlimit(RLIMIT_DATA),
1351 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
1352
1353 if (!ignore_rlimit_data)
1354 return false;
1355 }
1356
1357 return true;
1358 }
1359
1360 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
1361 {
1362 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
1363
1364 if (is_exec_mapping(flags))
1365 mm->exec_vm += npages;
1366 else if (is_stack_mapping(flags))
1367 mm->stack_vm += npages;
1368 else if (is_data_mapping(flags))
1369 mm->data_vm += npages;
1370 }
1371
1372 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
1373
1374 /*
1375 * Close hook, called for unmap() and on the old vma for mremap().
1376 *
1377 * Having a close hook prevents vma merging regardless of flags.
1378 */
1379 static void special_mapping_close(struct vm_area_struct *vma)
1380 {
1381 const struct vm_special_mapping *sm = vma->vm_private_data;
1382
1383 if (sm->close)
1384 sm->close(sm, vma);
1385 }
1386
1387 static const char *special_mapping_name(struct vm_area_struct *vma)
1388 {
1389 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
1390 }
1391
1392 static int special_mapping_mremap(struct vm_area_struct *new_vma)
1393 {
1394 struct vm_special_mapping *sm = new_vma->vm_private_data;
1395
1396 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
1397 return -EFAULT;
1398
1399 if (sm->mremap)
1400 return sm->mremap(sm, new_vma);
1401
1402 return 0;
1403 }
1404
1405 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
1406 {
1407 /*
1408 * Forbid splitting special mappings - kernel has expectations over
1409 * the number of pages in mapping. Together with VM_DONTEXPAND
1410 * the size of vma should stay the same over the special mapping's
1411 * lifetime.
1412 */
1413 return -EINVAL;
1414 }
1415
1416 static const struct vm_operations_struct special_mapping_vmops = {
1417 .close = special_mapping_close,
1418 .fault = special_mapping_fault,
1419 .mremap = special_mapping_mremap,
1420 .name = special_mapping_name,
1421 /* vDSO code relies on the fact that VVAR can't be accessed remotely */
1422 .access = NULL,
1423 .may_split = special_mapping_split,
1424 };
1425
1426 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
1427 {
1428 struct vm_area_struct *vma = vmf->vma;
1429 pgoff_t pgoff;
1430 struct page **pages;
1431 struct vm_special_mapping *sm = vma->vm_private_data;
1432
1433 if (sm->fault)
1434 return sm->fault(sm, vmf->vma, vmf);
1435
1436 pages = sm->pages;
1437
1438 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
1439 pgoff--;
1440
1441 if (*pages) {
1442 struct page *page = *pages;
1443 get_page(page);
1444 vmf->page = page;
1445 return 0;
1446 }
1447
1448 return VM_FAULT_SIGBUS;
1449 }
1450
1451 static struct vm_area_struct *__install_special_mapping(
1452 struct mm_struct *mm,
1453 unsigned long addr, unsigned long len,
1454 vm_flags_t vm_flags, void *priv,
1455 const struct vm_operations_struct *ops)
1456 {
1457 int ret;
1458 struct vm_area_struct *vma;
1459
1460 vma = vm_area_alloc(mm);
1461 if (unlikely(vma == NULL))
1462 return ERR_PTR(-ENOMEM);
1463
1464 vma_set_range(vma, addr, addr + len, 0);
1465 vm_flags |= mm->def_flags | VM_DONTEXPAND;
1466 if (pgtable_supports_soft_dirty())
1467 vm_flags |= VM_SOFTDIRTY;
1468 vm_flags_init(vma, vm_flags & ~VM_LOCKED_MASK);
1469 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1470
1471 vma->vm_ops = ops;
1472 vma->vm_private_data = priv;
1473
1474 ret = insert_vm_struct(mm, vma);
1475 if (ret)
1476 goto out;
1477
1478 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
1479
1480 perf_event_mmap(vma);
1481
1482 return vma;
1483
1484 out:
1485 vm_area_free(vma);
1486 return ERR_PTR(ret);
1487 }
1488
1489 bool vma_is_special_mapping(const struct vm_area_struct *vma,
1490 const struct vm_special_mapping *sm)
1491 {
1492 return vma->vm_private_data == sm &&
1493 vma->vm_ops == &special_mapping_vmops;
1494 }
1495
1496 /*
1497 * Called with mm->mmap_lock held for writing.
1498 * Insert a new vma covering the given region, with the given flags.
1499 * Its pages are supplied by the given array of struct page *.
1500 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
1501 * The region past the last page supplied will always produce SIGBUS.
1502 * The array pointer and the pages it points to are assumed to stay alive
1503 * for as long as this mapping might exist.
1504 */
1505 struct vm_area_struct *_install_special_mapping(
1506 struct mm_struct *mm,
1507 unsigned long addr, unsigned long len,
1508 vm_flags_t vm_flags, const struct vm_special_mapping *spec)
1509 {
1510 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
1511 &special_mapping_vmops);
1512 }
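/*
 * Minimal caller sketch (hedged: the descriptor and flags below are
 * illustrative, not an existing user). Architecture code typically installs
 * vDSO-like areas this way, using a static descriptor that outlives the
 * mapping and a NULL-terminated page array filled in at init time:
 *
 *	static struct page *example_pages[2];	// [0] set during init, [1] = NULL
 *	static const struct vm_special_mapping example_mapping = {
 *		.name  = "[example]",
 *		.pages = example_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD, &example_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */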
1513
1514 #ifdef CONFIG_SYSCTL
1515 #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
1516 defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
1517 int sysctl_legacy_va_layout;
1518 #endif
1519
1520 static const struct ctl_table mmap_table[] = {
1521 {
1522 .procname = "max_map_count",
1523 .data = &sysctl_max_map_count,
1524 .maxlen = sizeof(sysctl_max_map_count),
1525 .mode = 0644,
1526 .proc_handler = proc_dointvec_minmax,
1527 .extra1 = SYSCTL_ZERO,
1528 },
1529 #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
1530 defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
1531 {
1532 .procname = "legacy_va_layout",
1533 .data = &sysctl_legacy_va_layout,
1534 .maxlen = sizeof(sysctl_legacy_va_layout),
1535 .mode = 0644,
1536 .proc_handler = proc_dointvec_minmax,
1537 .extra1 = SYSCTL_ZERO,
1538 },
1539 #endif
1540 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
1541 {
1542 .procname = "mmap_rnd_bits",
1543 .data = &mmap_rnd_bits,
1544 .maxlen = sizeof(mmap_rnd_bits),
1545 .mode = 0600,
1546 .proc_handler = proc_dointvec_minmax,
1547 .extra1 = (void *)&mmap_rnd_bits_min,
1548 .extra2 = (void *)&mmap_rnd_bits_max,
1549 },
1550 #endif
1551 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
1552 {
1553 .procname = "mmap_rnd_compat_bits",
1554 .data = &mmap_rnd_compat_bits,
1555 .maxlen = sizeof(mmap_rnd_compat_bits),
1556 .mode = 0600,
1557 .proc_handler = proc_dointvec_minmax,
1558 .extra1 = (void *)&mmap_rnd_compat_bits_min,
1559 .extra2 = (void *)&mmap_rnd_compat_bits_max,
1560 },
1561 #endif
1562 };
1563 #endif /* CONFIG_SYSCTL */
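/*
 * Illustrative usage of the knobs registered above (values are examples):
 *
 *	# raise the per-process mapping limit, e.g. for mmap-heavy workloads
 *	sysctl -w vm.max_map_count=262144
 *	# widen mmap ASLR within the architecture's [min, max] range
 *	sysctl -w vm.mmap_rnd_bits=32
 */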
1564
1565 /*
1566 * Initialise the percpu counter for VM and initialise VMA state.
1567 */
1568 void __init mmap_init(void)
1569 {
1570 int ret;
1571
1572 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
1573 VM_BUG_ON(ret);
1574 #ifdef CONFIG_SYSCTL
1575 register_sysctl_init("vm", mmap_table);
1576 #endif
1577 vma_state_init();
1578 }
1579
1580 /*
1581 * Initialise sysctl_user_reserve_kbytes.
1582 *
1583 * This is intended to prevent a user from starting a single memory hogging
1584 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1585 * mode.
1586 *
1587 * The default value is min(3% of free memory, 128MB).
1588 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1589 */
1590 static int init_user_reserve(void)
1591 {
1592 unsigned long free_kbytes;
1593
1594 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1595
1596 sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
1597 return 0;
1598 }
1599 subsys_initcall(init_user_reserve);
1600
1601 /*
1602 * Initialise sysctl_admin_reserve_kbytes.
1603 *
1604 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1605 * to log in and kill a memory hogging process.
1606 *
1607 * Systems with more than 256MB will reserve 8MB, enough to recover
1608 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1609 * only reserve 3% of free pages by default.
1610 */
1611 static int init_admin_reserve(void)
1612 {
1613 unsigned long free_kbytes;
1614
1615 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1616
1617 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
1618 return 0;
1619 }
1620 subsys_initcall(init_admin_reserve);
1621
1622 /*
1623 * Reinitialise user and admin reserves if memory is added or removed.
1624 *
1625 * The default user reserve max is 128MB, and the default max for the
1626 * admin reserve is 8MB. These are usually, but not always, enough to
1627 * enable recovery from a memory hogging process using login/sshd, a shell,
1628 * and tools like top. It may make sense to increase or even disable the
1629 * reserve depending on the existence of swap or variations in the recovery
1630 * tools. So, the admin may have changed them.
1631 *
1632 * If memory is added and the reserves have been eliminated or increased above
1633 * the default max, then we'll trust the admin.
1634 *
1635 * If memory is removed and there isn't enough free memory, then we
1636 * need to reset the reserves.
1637 *
1638 * Otherwise keep the reserve set by the admin.
1639 */
1640 static int reserve_mem_notifier(struct notifier_block *nb,
1641 unsigned long action, void *data)
1642 {
1643 unsigned long tmp, free_kbytes;
1644
1645 switch (action) {
1646 case MEM_ONLINE:
1647 /* Default max is 128MB. Leave alone if modified by operator. */
1648 tmp = sysctl_user_reserve_kbytes;
1649 if (tmp > 0 && tmp < SZ_128K)
1650 init_user_reserve();
1651
1652 /* Default max is 8MB. Leave alone if modified by operator. */
1653 tmp = sysctl_admin_reserve_kbytes;
1654 if (tmp > 0 && tmp < SZ_8K)
1655 init_admin_reserve();
1656
1657 break;
1658 case MEM_OFFLINE:
1659 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1660
1661 if (sysctl_user_reserve_kbytes > free_kbytes) {
1662 init_user_reserve();
1663 pr_info("vm.user_reserve_kbytes reset to %lu\n",
1664 sysctl_user_reserve_kbytes);
1665 }
1666
1667 if (sysctl_admin_reserve_kbytes > free_kbytes) {
1668 init_admin_reserve();
1669 pr_info("vm.admin_reserve_kbytes reset to %lu\n",
1670 sysctl_admin_reserve_kbytes);
1671 }
1672 break;
1673 default:
1674 break;
1675 }
1676 return NOTIFY_OK;
1677 }
1678
1679 static int __meminit init_reserve_notifier(void)
1680 {
1681 if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
1682 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
1683
1684 return 0;
1685 }
1686 subsys_initcall(init_reserve_notifier);
1687
1688 /*
1689 * Obtain a read lock on mm->mmap_lock. If the specified address is below the
1690 * start of the VMA, the intent is to perform a write, and it is a
1691 * downward-growing stack, then attempt to expand the stack to contain it.
1692 *
1693 * This function is intended only for obtaining an argument page from an ELF
1694 * image, and is almost certainly NOT what you want to use for any other
1695 * purpose.
1696 *
1697 * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the
1698 * VMA referenced must not be linked in any user-visible tree, i.e. it must be a
1699 * new VMA being mapped.
1700 *
1701 * The function assumes that addr is either contained within the VMA or below
1702 * it, and makes no attempt to validate this value beyond that.
1703 *
1704 * Returns true if the read lock was obtained and a stack was perhaps expanded,
1705 * false if the stack expansion failed.
1706 *
1707 * On stack expansion the function temporarily acquires an mmap write lock
1708 * before downgrading it.
1709 */
1710 bool mmap_read_lock_maybe_expand(struct mm_struct *mm,
1711 struct vm_area_struct *new_vma,
1712 unsigned long addr, bool write)
1713 {
1714 if (!write || addr >= new_vma->vm_start) {
1715 mmap_read_lock(mm);
1716 return true;
1717 }
1718
1719 if (!(new_vma->vm_flags & VM_GROWSDOWN))
1720 return false;
1721
1722 mmap_write_lock(mm);
1723 if (expand_downwards(new_vma, addr)) {
1724 mmap_write_unlock(mm);
1725 return false;
1726 }
1727
1728 mmap_write_downgrade(mm);
1729 return true;
1730 }
1731
1732 __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
1733 {
1734 struct vm_area_struct *mpnt, *tmp;
1735 int retval;
1736 unsigned long charge = 0;
1737 LIST_HEAD(uf);
1738 VMA_ITERATOR(vmi, mm, 0);
1739
1740 if (mmap_write_lock_killable(oldmm))
1741 return -EINTR;
1742 flush_cache_dup_mm(oldmm);
1743 uprobe_dup_mmap(oldmm, mm);
1744 /*
1745 * Not linked in yet - no deadlock potential:
1746 */
1747 mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
1748
1749 /* No ordering required: file already has been exposed. */
1750 dup_mm_exe_file(mm, oldmm);
1751
1752 mm->total_vm = oldmm->total_vm;
1753 mm->data_vm = oldmm->data_vm;
1754 mm->exec_vm = oldmm->exec_vm;
1755 mm->stack_vm = oldmm->stack_vm;
1756
1757 /* Use __mt_dup() to efficiently build an identical maple tree. */
1758 retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
1759 if (unlikely(retval))
1760 goto out;
1761
1762 mt_clear_in_rcu(vmi.mas.tree);
1763 for_each_vma(vmi, mpnt) {
1764 struct file *file;
1765
1766 retval = vma_start_write_killable(mpnt);
1767 if (retval < 0)
1768 goto loop_out;
1769 if (mpnt->vm_flags & VM_DONTCOPY) {
1770 retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,
1771 mpnt->vm_end, GFP_KERNEL);
1772 if (retval)
1773 goto loop_out;
1774
1775 vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
1776 continue;
1777 }
1778 charge = 0;
1779 if (mpnt->vm_flags & VM_ACCOUNT) {
1780 unsigned long len = vma_pages(mpnt);
1781
1782 if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
1783 goto fail_nomem;
1784 charge = len;
1785 }
1786
1787 tmp = vm_area_dup(mpnt);
1788 if (!tmp)
1789 goto fail_nomem;
1790 retval = vma_dup_policy(mpnt, tmp);
1791 if (retval)
1792 goto fail_nomem_policy;
1793 tmp->vm_mm = mm;
1794 retval = dup_userfaultfd(tmp, &uf);
1795 if (retval)
1796 goto fail_nomem_anon_vma_fork;
1797 if (tmp->vm_flags & VM_WIPEONFORK) {
1798 /*
1799 * VM_WIPEONFORK gets a clean slate in the child.
1800 * Don't prepare anon_vma until fault since we don't
1801 * copy page for current vma.
1802 */
1803 tmp->anon_vma = NULL;
1804 } else if (anon_vma_fork(tmp, mpnt))
1805 goto fail_nomem_anon_vma_fork;
1806 vm_flags_clear(tmp, VM_LOCKED_MASK);
1807 /*
1808 * Copy/update hugetlb private vma information.
1809 */
1810 if (is_vm_hugetlb_page(tmp))
1811 hugetlb_dup_vma_private(tmp);
1812
1813 /*
1814 * Link the vma into the MT. After using __mt_dup(), memory
1815 * allocation is not necessary here, so it cannot fail.
1816 */
1817 vma_iter_bulk_store(&vmi, tmp);
1818
1819 mm->map_count++;
1820
1821 if (tmp->vm_ops && tmp->vm_ops->open)
1822 tmp->vm_ops->open(tmp);
1823
1824 file = tmp->vm_file;
1825 if (file) {
1826 struct address_space *mapping = file->f_mapping;
1827
1828 get_file(file);
1829 i_mmap_lock_write(mapping);
1830 if (vma_is_shared_maywrite(tmp))
1831 mapping_allow_writable(mapping);
1832 flush_dcache_mmap_lock(mapping);
1833 /* insert tmp into the share list, just after mpnt */
1834 vma_interval_tree_insert_after(tmp, mpnt,
1835 &mapping->i_mmap);
1836 flush_dcache_mmap_unlock(mapping);
1837 i_mmap_unlock_write(mapping);
1838 }
1839
1840 if (!(tmp->vm_flags & VM_WIPEONFORK))
1841 retval = copy_page_range(tmp, mpnt);
1842
1843 if (retval) {
1844 mpnt = vma_next(&vmi);
1845 goto loop_out;
1846 }
1847 }
1848 /* a new mm has just been created */
1849 retval = arch_dup_mmap(oldmm, mm);
1850 loop_out:
1851 vma_iter_free(&vmi);
1852 if (!retval) {
1853 mt_set_in_rcu(vmi.mas.tree);
1854 ksm_fork(mm, oldmm);
1855 khugepaged_fork(mm, oldmm);
1856 } else {
1857 unsigned long end;
1858
1859 /*
1860 * The entire maple tree has already been duplicated, but
1861 * replacing the vmas failed at mpnt (which could be NULL if
1862 * all were allocated but the last vma was not fully set up).
1863 * Use the start address of the failure point to clean up the
1864 * partially initialized tree.
1865 */
1866 if (!mm->map_count) {
1867 /* zero vmas were written to the new tree. */
1868 end = 0;
1869 } else if (mpnt) {
1870 /* partial tree failure */
1871 end = mpnt->vm_start;
1872 } else {
1873 /* All vmas were written to the new tree */
1874 end = ULONG_MAX;
1875 }
1876
1877 /* Hide mm from oom killer because the memory is being freed */
1878 mm_flags_set(MMF_OOM_SKIP, mm);
1879 if (end) {
1880 vma_iter_set(&vmi, 0);
1881 tmp = vma_next(&vmi);
1882 UNMAP_STATE(unmap, &vmi, /* first = */ tmp,
1883 /* vma_start = */ 0, /* vma_end = */ end,
1884 /* prev = */ NULL, /* next = */ NULL);
1885
1886 /*
1887 * Don't iterate over vmas beyond the failure point for
1888 * both unmap_vma() and free_pgtables().
1889 */
1890 unmap.tree_end = end;
1891 flush_cache_mm(mm);
1892 unmap_region(&unmap);
1893 charge = tear_down_vmas(mm, &vmi, tmp, end);
1894 vm_unacct_memory(charge);
1895 }
1896 __mt_destroy(&mm->mm_mt);
1897 /*
1898 * The mm_struct is going to exit, but the locks will be dropped
1899 * first. Marking the mm_struct as unstable is advisable as it is
1900 * not fully initialised.
1901 */
1902 mm_flags_set(MMF_UNSTABLE, mm);
1903 }
1904 out:
1905 mmap_write_unlock(mm);
1906 flush_tlb_mm(oldmm);
1907 mmap_write_unlock(oldmm);
1908 if (!retval)
1909 dup_userfaultfd_complete(&uf);
1910 else
1911 dup_userfaultfd_fail(&uf);
1912 return retval;
1913
1914 fail_nomem_anon_vma_fork:
1915 mpol_put(vma_policy(tmp));
1916 fail_nomem_policy:
1917 vm_area_free(tmp);
1918 fail_nomem:
1919 retval = -ENOMEM;
1920 vm_unacct_memory(charge);
1921 goto loop_out;
1922 }
1923