xref: /linux/mm/mmap.c (revision d744f4acb81ae2f2c33bce71da1f65be32ed1d65)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/mmap.c
4  *
5  * Written by obz.
6  *
7  * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
16 #include <linux/mm_inline.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/swap.h>
21 #include <linux/syscalls.h>
22 #include <linux/capability.h>
23 #include <linux/init.h>
24 #include <linux/file.h>
25 #include <linux/fs.h>
26 #include <linux/personality.h>
27 #include <linux/security.h>
28 #include <linux/hugetlb.h>
29 #include <linux/shmem_fs.h>
30 #include <linux/profile.h>
31 #include <linux/export.h>
32 #include <linux/mount.h>
33 #include <linux/mempolicy.h>
34 #include <linux/rmap.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/mmdebug.h>
37 #include <linux/perf_event.h>
38 #include <linux/audit.h>
39 #include <linux/khugepaged.h>
40 #include <linux/uprobes.h>
41 #include <linux/notifier.h>
42 #include <linux/memory.h>
43 #include <linux/printk.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/moduleparam.h>
46 #include <linux/pkeys.h>
47 #include <linux/oom.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ksm.h>
50 
51 #include <linux/uaccess.h>
52 #include <asm/cacheflush.h>
53 #include <asm/tlb.h>
54 #include <asm/mmu_context.h>
55 
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/mmap.h>
58 
59 #include "internal.h"
60 
61 #ifndef arch_mmap_check
62 #define arch_mmap_check(addr, len, flags)	(0)
63 #endif
64 
65 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
66 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
67 int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
68 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
69 #endif
70 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
71 const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
72 const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
73 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
74 #endif
75 
76 static bool ignore_rlimit_data;
77 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
78 
79 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
80 void vma_set_page_prot(struct vm_area_struct *vma)
81 {
82 	unsigned long vm_flags = vma->vm_flags;
83 	pgprot_t vm_page_prot;
84 
85 	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
86 	if (vma_wants_writenotify(vma, vm_page_prot)) {
87 		vm_flags &= ~VM_SHARED;
88 		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
89 	}
90 	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
91 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
92 }
93 
94 /*
95  * check_brk_limits() - Use the platform-specific range check and verify the
96  * mlock limits.
97  * @addr: The address to check
98  * @len: The size of the increase.
99  *
100  * Return: 0 on success.
101  */
102 static int check_brk_limits(unsigned long addr, unsigned long len)
103 {
104 	unsigned long mapped_addr;
105 
106 	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
107 	if (IS_ERR_VALUE(mapped_addr))
108 		return mapped_addr;
109 
110 	return mlock_future_ok(current->mm, current->mm->def_flags, len)
111 		? 0 : -EAGAIN;
112 }
113 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
114 		unsigned long addr, unsigned long request, unsigned long flags);
115 SYSCALL_DEFINE1(brk, unsigned long, brk)
116 {
117 	unsigned long newbrk, oldbrk, origbrk;
118 	struct mm_struct *mm = current->mm;
119 	struct vm_area_struct *brkvma, *next = NULL;
120 	unsigned long min_brk;
121 	bool populate = false;
122 	LIST_HEAD(uf);
123 	struct vma_iterator vmi;
124 
125 	if (mmap_write_lock_killable(mm))
126 		return -EINTR;
127 
128 	origbrk = mm->brk;
129 
130 #ifdef CONFIG_COMPAT_BRK
131 	/*
132 	 * CONFIG_COMPAT_BRK can still be overridden by setting
133 	 * randomize_va_space to 2, which will still cause mm->start_brk
134 	 * to be arbitrarily shifted
135 	 */
136 	if (current->brk_randomized)
137 		min_brk = mm->start_brk;
138 	else
139 		min_brk = mm->end_data;
140 #else
141 	min_brk = mm->start_brk;
142 #endif
143 	if (brk < min_brk)
144 		goto out;
145 
146 	/*
147 	 * Check against rlimit here. If this check is done later after the test
148 	 * of oldbrk with newbrk then it can escape the test and let the data
149 	 * segment grow beyond its set limit in the case where the limit is
150 	 * not page aligned. -Ram Gupta
151 	 */
152 	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
153 			      mm->end_data, mm->start_data))
154 		goto out;
155 
156 	newbrk = PAGE_ALIGN(brk);
157 	oldbrk = PAGE_ALIGN(mm->brk);
158 	if (oldbrk == newbrk) {
159 		mm->brk = brk;
160 		goto success;
161 	}
162 
163 	/* Always allow shrinking brk. */
164 	if (brk <= mm->brk) {
165 		/* Search one past newbrk */
166 		vma_iter_init(&vmi, mm, newbrk);
167 		brkvma = vma_find(&vmi, oldbrk);
168 		if (!brkvma || brkvma->vm_start >= oldbrk)
169 			goto out; /* mapping intersects with an existing non-brk vma. */
170 		/*
171 		 * mm->brk must be protected by write mmap_lock.
172 		 * do_vma_munmap() will drop the lock on success,  so update it
173 		 * before calling do_vma_munmap().
174 		 */
175 		mm->brk = brk;
176 		if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
177 			goto out;
178 
179 		goto success_unlocked;
180 	}
181 
182 	if (check_brk_limits(oldbrk, newbrk - oldbrk))
183 		goto out;
184 
185 	/*
186 	 * Only check if the next VMA is within the stack_guard_gap of the
187 	 * expansion area
188 	 */
189 	vma_iter_init(&vmi, mm, oldbrk);
190 	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
191 	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
192 		goto out;
193 
194 	brkvma = vma_prev_limit(&vmi, mm->start_brk);
195 	/* Ok, looks good - let it rip. */
196 	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
197 		goto out;
198 
199 	mm->brk = brk;
200 	if (mm->def_flags & VM_LOCKED)
201 		populate = true;
202 
203 success:
204 	mmap_write_unlock(mm);
205 success_unlocked:
206 	userfaultfd_unmap_complete(mm, &uf);
207 	if (populate)
208 		mm_populate(oldbrk, newbrk - oldbrk);
209 	return brk;
210 
211 out:
212 	mm->brk = origbrk;
213 	mmap_write_unlock(mm);
214 	return origbrk;
215 }
216 
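/*
 * Illustrative sketch (not part of the kernel source): how userspace
 * typically drives the brk() path above.  Assumes a hosted Linux C
 * environment; the brk()/sbrk() wrappers come from <unistd.h>.
 *
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		void *old = sbrk(0);		// current program break
 *		if (brk((char *)old + 4096))	// grow the heap by one page
 *			return 1;		// refused (rlimit, overlap, ...)
 *		printf("break: %p -> %p\n", old, sbrk(0));
 *		return brk(old);		// shrinking brk is always allowed
 *	}
 */
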
217 /*
218  * If a hint addr is less than mmap_min_addr, change the hint to be as
219  * low as possible but still no lower than mmap_min_addr.
220  */
221 static inline unsigned long round_hint_to_min(unsigned long hint)
222 {
223 	hint &= PAGE_MASK;
224 	if (((void *)hint != NULL) &&
225 	    (hint < mmap_min_addr))
226 		return PAGE_ALIGN(mmap_min_addr);
227 	return hint;
228 }
229 
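/*
 * Worked example (numbers only): with mmap_min_addr == 0x10000, a non-NULL
 * hint of 0x4321 is masked down to 0x4000 and, being below the minimum,
 * replaced by PAGE_ALIGN(0x10000) == 0x10000.  A hint of 0 is returned
 * unchanged so that "no preference" still means "let the kernel choose".
 */
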
230 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
231 			unsigned long bytes)
232 {
233 	unsigned long locked_pages, limit_pages;
234 
235 	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
236 		return true;
237 
238 	locked_pages = bytes >> PAGE_SHIFT;
239 	locked_pages += mm->locked_vm;
240 
241 	limit_pages = rlimit(RLIMIT_MEMLOCK);
242 	limit_pages >>= PAGE_SHIFT;
243 
244 	return locked_pages <= limit_pages;
245 }
246 
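/*
 * Worked example of the check above (numbers only): with 4KB pages and
 * RLIMIT_MEMLOCK = 64KB, limit_pages is 16.  A task that already has
 * mm->locked_vm == 10 pages may still create a 24KB (6 page) VM_LOCKED
 * mapping, but a 28KB (7 page) one makes the callers fail with -EAGAIN.
 */
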
247 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
248 {
249 	if (S_ISREG(inode->i_mode))
250 		return MAX_LFS_FILESIZE;
251 
252 	if (S_ISBLK(inode->i_mode))
253 		return MAX_LFS_FILESIZE;
254 
255 	if (S_ISSOCK(inode->i_mode))
256 		return MAX_LFS_FILESIZE;
257 
258 	/* Special "we do even unsigned file positions" case */
259 	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
260 		return 0;
261 
262 	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
263 	return ULONG_MAX;
264 }
265 
266 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
267 				unsigned long pgoff, unsigned long len)
268 {
269 	u64 maxsize = file_mmap_size_max(file, inode);
270 
271 	if (maxsize && len > maxsize)
272 		return false;
273 	maxsize -= len;
274 	if (pgoff > maxsize >> PAGE_SHIFT)
275 		return false;
276 	return true;
277 }
278 
279 /*
280  * The caller must write-lock current->mm->mmap_lock.
281  */
282 unsigned long do_mmap(struct file *file, unsigned long addr,
283 			unsigned long len, unsigned long prot,
284 			unsigned long flags, vm_flags_t vm_flags,
285 			unsigned long pgoff, unsigned long *populate,
286 			struct list_head *uf)
287 {
288 	struct mm_struct *mm = current->mm;
289 	int pkey = 0;
290 
291 	*populate = 0;
292 
293 	if (!len)
294 		return -EINVAL;
295 
296 	/*
297 	 * Does the application expect PROT_READ to imply PROT_EXEC?
298 	 *
299 	 * (the exception is when the underlying filesystem is noexec
300 	 *  mounted, in which case we don't add PROT_EXEC.)
301 	 */
302 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
303 		if (!(file && path_noexec(&file->f_path)))
304 			prot |= PROT_EXEC;
305 
306 	/* force arch specific MAP_FIXED handling in get_unmapped_area */
307 	if (flags & MAP_FIXED_NOREPLACE)
308 		flags |= MAP_FIXED;
309 
310 	if (!(flags & MAP_FIXED))
311 		addr = round_hint_to_min(addr);
312 
313 	/* Careful about overflows.. */
314 	len = PAGE_ALIGN(len);
315 	if (!len)
316 		return -ENOMEM;
317 
318 	/* offset overflow? */
319 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
320 		return -EOVERFLOW;
321 
322 	/* Too many mappings? */
323 	if (mm->map_count > sysctl_max_map_count)
324 		return -ENOMEM;
325 
326 	/*
327 	 * addr is returned from get_unmapped_area.
328 	 * There are two cases:
329 	 * 1> MAP_FIXED == false
330 	 *	unallocated memory, no need to check sealing.
331 	 * 2> MAP_FIXED == true
332 	 *	sealing is checked inside mmap_region when
333 	 *	do_vmi_munmap is called.
334 	 */
335 
336 	if (prot == PROT_EXEC) {
337 		pkey = execute_only_pkey(mm);
338 		if (pkey < 0)
339 			pkey = 0;
340 	}
341 
342 	/* Do simple checking here so the lower-level routines won't have
343 	 * to. We assume access permissions have been handled by the open
344 	 * of the memory object, so we don't do any here.
345 	 */
346 	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
347 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
348 
349 	/* Obtain the address to map to. We verify (or select) it and ensure
350 	 * that it represents a valid section of the address space.
351 	 */
352 	addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
353 	if (IS_ERR_VALUE(addr))
354 		return addr;
355 
356 	if (flags & MAP_FIXED_NOREPLACE) {
357 		if (find_vma_intersection(mm, addr, addr + len))
358 			return -EEXIST;
359 	}
360 
361 	if (flags & MAP_LOCKED)
362 		if (!can_do_mlock())
363 			return -EPERM;
364 
365 	if (!mlock_future_ok(mm, vm_flags, len))
366 		return -EAGAIN;
367 
368 	if (file) {
369 		struct inode *inode = file_inode(file);
370 		unsigned long flags_mask;
371 
372 		if (!file_mmap_ok(file, inode, pgoff, len))
373 			return -EOVERFLOW;
374 
375 		flags_mask = LEGACY_MAP_MASK;
376 		if (file->f_op->fop_flags & FOP_MMAP_SYNC)
377 			flags_mask |= MAP_SYNC;
378 
379 		switch (flags & MAP_TYPE) {
380 		case MAP_SHARED:
381 			/*
382 			 * Force use of MAP_SHARED_VALIDATE with non-legacy
383 			 * flags. E.g. MAP_SYNC is dangerous to use with
384 			 * MAP_SHARED as you don't know which consistency model
385 			 * you will get. We silently ignore unsupported flags
386 			 * with MAP_SHARED to preserve backward compatibility.
387 			 */
388 			flags &= LEGACY_MAP_MASK;
389 			fallthrough;
390 		case MAP_SHARED_VALIDATE:
391 			if (flags & ~flags_mask)
392 				return -EOPNOTSUPP;
393 			if (prot & PROT_WRITE) {
394 				if (!(file->f_mode & FMODE_WRITE))
395 					return -EACCES;
396 				if (IS_SWAPFILE(file->f_mapping->host))
397 					return -ETXTBSY;
398 			}
399 
400 			/*
401 			 * Make sure we don't allow writing to an append-only
402 			 * file..
403 			 */
404 			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
405 				return -EACCES;
406 
407 			vm_flags |= VM_SHARED | VM_MAYSHARE;
408 			if (!(file->f_mode & FMODE_WRITE))
409 				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
410 			fallthrough;
411 		case MAP_PRIVATE:
412 			if (!(file->f_mode & FMODE_READ))
413 				return -EACCES;
414 			if (path_noexec(&file->f_path)) {
415 				if (vm_flags & VM_EXEC)
416 					return -EPERM;
417 				vm_flags &= ~VM_MAYEXEC;
418 			}
419 
420 			if (!file->f_op->mmap)
421 				return -ENODEV;
422 			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
423 				return -EINVAL;
424 			break;
425 
426 		default:
427 			return -EINVAL;
428 		}
429 	} else {
430 		switch (flags & MAP_TYPE) {
431 		case MAP_SHARED:
432 			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
433 				return -EINVAL;
434 			/*
435 			 * Ignore pgoff.
436 			 */
437 			pgoff = 0;
438 			vm_flags |= VM_SHARED | VM_MAYSHARE;
439 			break;
440 		case MAP_DROPPABLE:
441 			if (VM_DROPPABLE == VM_NONE)
442 				return -ENOTSUPP;
443 			/*
444 			 * A locked or stack area makes no sense to be droppable.
445 			 *
446 			 * Also, since droppable pages can just go away at any time
447 			 * it makes no sense to copy them on fork or dump them.
448 			 *
449 			 * And don't attempt to combine with hugetlb for now.
450 			 */
451 			if (flags & (MAP_LOCKED | MAP_HUGETLB))
452 				return -EINVAL;
453 			if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
454 				return -EINVAL;
455 
456 			vm_flags |= VM_DROPPABLE;
457 
458 			/*
459 			 * If the pages can be dropped, then it doesn't make
460 			 * sense to reserve them.
461 			 */
462 			vm_flags |= VM_NORESERVE;
463 
464 			/*
465 			 * Likewise, they're volatile enough that they
466 			 * shouldn't survive forks or coredumps.
467 			 */
468 			vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
469 			fallthrough;
470 		case MAP_PRIVATE:
471 			/*
472 			 * Set pgoff according to addr for anon_vma.
473 			 */
474 			pgoff = addr >> PAGE_SHIFT;
475 			break;
476 		default:
477 			return -EINVAL;
478 		}
479 	}
480 
481 	/*
482 	 * Set 'VM_NORESERVE' if we should not account for the
483 	 * memory use of this mapping.
484 	 */
485 	if (flags & MAP_NORESERVE) {
486 		/* We honor MAP_NORESERVE if allowed to overcommit */
487 		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
488 			vm_flags |= VM_NORESERVE;
489 
490 		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
491 		if (file && is_file_hugepages(file))
492 			vm_flags |= VM_NORESERVE;
493 	}
494 
495 	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
496 	if (!IS_ERR_VALUE(addr) &&
497 	    ((vm_flags & VM_LOCKED) ||
498 	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
499 		*populate = len;
500 	return addr;
501 }
502 
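/*
 * Illustrative sketch (not part of the kernel source): the userspace view
 * of two behaviours handled in do_mmap() above.  MAP_FIXED_NOREPLACE fails
 * with EEXIST instead of clobbering an existing mapping, and MAP_POPULATE
 * requests the prefault reported back through *populate.  Assumes a hosted
 * Linux environment with <sys/mman.h>.
 *
 *	#include <sys/mman.h>
 *	#include <errno.h>
 *	#include <stddef.h>
 *
 *	static void *map_scratch(void *hint, size_t len)
 *	{
 *		void *p = mmap(hint, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS |
 *			       MAP_FIXED_NOREPLACE | MAP_POPULATE, -1, 0);
 *
 *		if (p == MAP_FAILED && errno == EEXIST)
 *			// The hint is already mapped; retry at any address.
 *			p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		return p;
 *	}
 */
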
503 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
504 			      unsigned long prot, unsigned long flags,
505 			      unsigned long fd, unsigned long pgoff)
506 {
507 	struct file *file = NULL;
508 	unsigned long retval;
509 
510 	if (!(flags & MAP_ANONYMOUS)) {
511 		audit_mmap_fd(fd, flags);
512 		file = fget(fd);
513 		if (!file)
514 			return -EBADF;
515 		if (is_file_hugepages(file)) {
516 			len = ALIGN(len, huge_page_size(hstate_file(file)));
517 		} else if (unlikely(flags & MAP_HUGETLB)) {
518 			retval = -EINVAL;
519 			goto out_fput;
520 		}
521 	} else if (flags & MAP_HUGETLB) {
522 		struct hstate *hs;
523 
524 		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
525 		if (!hs)
526 			return -EINVAL;
527 
528 		len = ALIGN(len, huge_page_size(hs));
529 		/*
530 		 * VM_NORESERVE is used because the reservations will be
531 		 * taken when vm_ops->mmap() is called
532 		 */
533 		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
534 				VM_NORESERVE,
535 				HUGETLB_ANONHUGE_INODE,
536 				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
537 		if (IS_ERR(file))
538 			return PTR_ERR(file);
539 	}
540 
541 	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
542 out_fput:
543 	if (file)
544 		fput(file);
545 	return retval;
546 }
547 
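/*
 * Illustrative sketch (not part of the kernel source): requesting an
 * explicit hugetlb page size through the MAP_HUGE_SHIFT encoding that
 * ksys_mmap_pgoff() decodes above.  MAP_HUGE_2MB is simply the page-size
 * log2 (21) shifted by MAP_HUGE_SHIFT; the call succeeds only if 2MB
 * hugetlb pages have been made available on the system.
 *
 *	#include <sys/mman.h>
 *
 *	#ifndef MAP_HUGE_SHIFT
 *	#define MAP_HUGE_SHIFT 26	// matches the uapi definition
 *	#endif
 *
 *	static void *map_huge_2mb(size_t len)
 *	{
 *		return mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
 *			    (21UL << MAP_HUGE_SHIFT),	// i.e. MAP_HUGE_2MB
 *			    -1, 0);
 *	}
 */
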
548 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
549 		unsigned long, prot, unsigned long, flags,
550 		unsigned long, fd, unsigned long, pgoff)
551 {
552 	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
553 }
554 
555 #ifdef __ARCH_WANT_SYS_OLD_MMAP
556 struct mmap_arg_struct {
557 	unsigned long addr;
558 	unsigned long len;
559 	unsigned long prot;
560 	unsigned long flags;
561 	unsigned long fd;
562 	unsigned long offset;
563 };
564 
565 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
566 {
567 	struct mmap_arg_struct a;
568 
569 	if (copy_from_user(&a, arg, sizeof(a)))
570 		return -EFAULT;
571 	if (offset_in_page(a.offset))
572 		return -EINVAL;
573 
574 	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
575 			       a.offset >> PAGE_SHIFT);
576 }
577 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
578 
579 /*
580  * We account for memory if it's a private writeable mapping,
581  * not hugepages and VM_NORESERVE wasn't set.
582  */
583 static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
584 {
585 	/*
586 	 * hugetlb has its own accounting separate from the core VM.
587 	 * VM_HUGETLB may not be set yet, so we cannot check for that flag.
588 	 */
589 	if (file && is_file_hugepages(file))
590 		return false;
591 
592 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
593 }
594 
595 /**
596  * unmapped_area() - Find an area between the low_limit and the high_limit with
597  * the correct alignment and offset, all from @info. Note: current->mm is used
598  * for the search.
599  *
600  * @info: The unmapped area information including the range [low_limit,
601  * high_limit), the alignment offset and mask.
602  *
603  * Return: A memory address or -ENOMEM.
604  */
605 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
606 {
607 	unsigned long length, gap;
608 	unsigned long low_limit, high_limit;
609 	struct vm_area_struct *tmp;
610 	VMA_ITERATOR(vmi, current->mm, 0);
611 
612 	/* Adjust search length to account for worst case alignment overhead */
613 	length = info->length + info->align_mask + info->start_gap;
614 	if (length < info->length)
615 		return -ENOMEM;
616 
617 	low_limit = info->low_limit;
618 	if (low_limit < mmap_min_addr)
619 		low_limit = mmap_min_addr;
620 	high_limit = info->high_limit;
621 retry:
622 	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
623 		return -ENOMEM;
624 
625 	/*
626 	 * Adjust for the gap first so it doesn't interfere with the
627 	 * later alignment. The first step is the minimum needed to
628 	 * fulfill the start gap, the next step is the minimum to align
629 	 * that. It is the minimum needed to fulfill both.
630 	 */
631 	gap = vma_iter_addr(&vmi) + info->start_gap;
632 	gap += (info->align_offset - gap) & info->align_mask;
633 	tmp = vma_next(&vmi);
634 	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
635 		if (vm_start_gap(tmp) < gap + length - 1) {
636 			low_limit = tmp->vm_end;
637 			vma_iter_reset(&vmi);
638 			goto retry;
639 		}
640 	} else {
641 		tmp = vma_prev(&vmi);
642 		if (tmp && vm_end_gap(tmp) > gap) {
643 			low_limit = vm_end_gap(tmp);
644 			vma_iter_reset(&vmi);
645 			goto retry;
646 		}
647 	}
648 
649 	return gap;
650 }
651 
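/*
 * Worked example of the gap/alignment arithmetic above (numbers only, no
 * additional logic): with start_gap == 0, align_offset == 0 and
 * align_mask == 0x1fffff (2MB - 1), a lowest fit reported at
 * 0x7f1234567000 becomes
 *
 *	gap  = 0x7f1234567000;
 *	gap += (0 - gap) & 0x1fffff;	// gap is now 0x7f1234600000
 *
 * i.e. the fit rounded up to the requested alignment, which is why
 * `length' was padded with align_mask before the search.
 */
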
652 /**
653  * unmapped_area_topdown() - Find an area between the low_limit and the
654  * high_limit with the correct alignment and offset at the highest available
655  * address, all from @info. Note: current->mm is used for the search.
656  *
657  * @info: The unmapped area information including the range [low_limit,
658  * high_limit), the alignment offset and mask.
659  *
660  * Return: A memory address or -ENOMEM.
661  */
662 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
663 {
664 	unsigned long length, gap, gap_end;
665 	unsigned long low_limit, high_limit;
666 	struct vm_area_struct *tmp;
667 	VMA_ITERATOR(vmi, current->mm, 0);
668 
669 	/* Adjust search length to account for worst case alignment overhead */
670 	length = info->length + info->align_mask + info->start_gap;
671 	if (length < info->length)
672 		return -ENOMEM;
673 
674 	low_limit = info->low_limit;
675 	if (low_limit < mmap_min_addr)
676 		low_limit = mmap_min_addr;
677 	high_limit = info->high_limit;
678 retry:
679 	if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
680 		return -ENOMEM;
681 
682 	gap = vma_iter_end(&vmi) - info->length;
683 	gap -= (gap - info->align_offset) & info->align_mask;
684 	gap_end = vma_iter_end(&vmi);
685 	tmp = vma_next(&vmi);
686 	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
687 		if (vm_start_gap(tmp) < gap_end) {
688 			high_limit = vm_start_gap(tmp);
689 			vma_iter_reset(&vmi);
690 			goto retry;
691 		}
692 	} else {
693 		tmp = vma_prev(&vmi);
694 		if (tmp && vm_end_gap(tmp) > gap) {
695 			high_limit = tmp->vm_start;
696 			vma_iter_reset(&vmi);
697 			goto retry;
698 		}
699 	}
700 
701 	return gap;
702 }
703 
704 /*
705  * Search for an unmapped address range.
706  *
707  * We are looking for a range that:
708  * - does not intersect with any VMA;
709  * - is contained within the [low_limit, high_limit) interval;
710  * - is at least the desired size;
711  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
712  */
713 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
714 {
715 	unsigned long addr;
716 
717 	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
718 		addr = unmapped_area_topdown(info);
719 	else
720 		addr = unmapped_area(info);
721 
722 	trace_vm_unmapped_area(addr, info);
723 	return addr;
724 }
725 
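/*
 * Minimal caller sketch (hypothetical, not taken from this file): a
 * driver's ->get_unmapped_area() that wants a 2MB-aligned hole simply
 * fills in vm_unmapped_area_info and lets the search above do the work.
 *
 *	static unsigned long example_get_unmapped_area(struct file *filp,
 *			unsigned long addr, unsigned long len,
 *			unsigned long pgoff, unsigned long flags)
 *	{
 *		struct vm_unmapped_area_info info = {};
 *
 *		info.length	= len;
 *		info.low_limit	= current->mm->mmap_base;
 *		info.high_limit	= TASK_SIZE;
 *		info.align_mask	= SZ_2M - 1;	// 2MB alignment
 *		return vm_unmapped_area(&info);
 *	}
 */
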
726 /* Get an address range which is currently unmapped.
727  * For shmat() with addr=0.
728  *
729  * Ugly calling convention alert:
730  * Return value with the low bits set means error value,
731  * ie
732  *	if (ret & ~PAGE_MASK)
733  *		error = ret;
734  *
735  * This function "knows" that -ENOMEM has the bits set.
736  */
737 unsigned long
738 generic_get_unmapped_area(struct file *filp, unsigned long addr,
739 			  unsigned long len, unsigned long pgoff,
740 			  unsigned long flags)
741 {
742 	struct mm_struct *mm = current->mm;
743 	struct vm_area_struct *vma, *prev;
744 	struct vm_unmapped_area_info info = {};
745 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
746 
747 	if (len > mmap_end - mmap_min_addr)
748 		return -ENOMEM;
749 
750 	if (flags & MAP_FIXED)
751 		return addr;
752 
753 	if (addr) {
754 		addr = PAGE_ALIGN(addr);
755 		vma = find_vma_prev(mm, addr, &prev);
756 		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
757 		    (!vma || addr + len <= vm_start_gap(vma)) &&
758 		    (!prev || addr >= vm_end_gap(prev)))
759 			return addr;
760 	}
761 
762 	info.length = len;
763 	info.low_limit = mm->mmap_base;
764 	info.high_limit = mmap_end;
765 	return vm_unmapped_area(&info);
766 }
767 
768 #ifndef HAVE_ARCH_UNMAPPED_AREA
769 unsigned long
770 arch_get_unmapped_area(struct file *filp, unsigned long addr,
771 		       unsigned long len, unsigned long pgoff,
772 		       unsigned long flags)
773 {
774 	return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
775 }
776 #endif
777 
778 /*
779  * This mmap-allocator allocates new areas top-down from below the
780  * stack's low limit (the base):
781  */
782 unsigned long
783 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
784 				  unsigned long len, unsigned long pgoff,
785 				  unsigned long flags)
786 {
787 	struct vm_area_struct *vma, *prev;
788 	struct mm_struct *mm = current->mm;
789 	struct vm_unmapped_area_info info = {};
790 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
791 
792 	/* requested length too big for entire address space */
793 	if (len > mmap_end - mmap_min_addr)
794 		return -ENOMEM;
795 
796 	if (flags & MAP_FIXED)
797 		return addr;
798 
799 	/* requesting a specific address */
800 	if (addr) {
801 		addr = PAGE_ALIGN(addr);
802 		vma = find_vma_prev(mm, addr, &prev);
803 		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
804 				(!vma || addr + len <= vm_start_gap(vma)) &&
805 				(!prev || addr >= vm_end_gap(prev)))
806 			return addr;
807 	}
808 
809 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
810 	info.length = len;
811 	info.low_limit = PAGE_SIZE;
812 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
813 	addr = vm_unmapped_area(&info);
814 
815 	/*
816 	 * A failed mmap() very likely causes application failure,
817 	 * so fall back to the bottom-up function here. This scenario
818 	 * can happen with large stack limits and large mmap()
819 	 * allocations.
820 	 */
821 	if (offset_in_page(addr)) {
822 		VM_BUG_ON(addr != -ENOMEM);
823 		info.flags = 0;
824 		info.low_limit = TASK_UNMAPPED_BASE;
825 		info.high_limit = mmap_end;
826 		addr = vm_unmapped_area(&info);
827 	}
828 
829 	return addr;
830 }
831 
832 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
833 unsigned long
834 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
835 			       unsigned long len, unsigned long pgoff,
836 			       unsigned long flags)
837 {
838 	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
839 }
840 #endif
841 
842 #ifndef HAVE_ARCH_UNMAPPED_AREA_VMFLAGS
843 unsigned long
844 arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len,
845 			       unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
846 {
847 	return arch_get_unmapped_area(filp, addr, len, pgoff, flags);
848 }
849 
850 unsigned long
851 arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
852 				       unsigned long len, unsigned long pgoff,
853 				       unsigned long flags, vm_flags_t vm_flags)
854 {
855 	return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
856 }
857 #endif
858 
859 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
860 					   unsigned long addr, unsigned long len,
861 					   unsigned long pgoff, unsigned long flags,
862 					   vm_flags_t vm_flags)
863 {
864 	if (test_bit(MMF_TOPDOWN, &mm->flags))
865 		return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff,
866 							      flags, vm_flags);
867 	return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, vm_flags);
868 }
869 
870 unsigned long
871 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
872 		unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
873 {
874 	unsigned long (*get_area)(struct file *, unsigned long,
875 				  unsigned long, unsigned long, unsigned long)
876 				  = NULL;
877 
878 	unsigned long error = arch_mmap_check(addr, len, flags);
879 	if (error)
880 		return error;
881 
882 	/* Careful about overflows.. */
883 	if (len > TASK_SIZE)
884 		return -ENOMEM;
885 
886 	if (file) {
887 		if (file->f_op->get_unmapped_area)
888 			get_area = file->f_op->get_unmapped_area;
889 	} else if (flags & MAP_SHARED) {
890 		/*
891 		 * mmap_region() will call shmem_zero_setup() to create a file,
892 		 * so use shmem's get_unmapped_area in case it can be huge.
893 		 */
894 		get_area = shmem_get_unmapped_area;
895 	}
896 
897 	/* Always treat pgoff as zero for anonymous memory. */
898 	if (!file)
899 		pgoff = 0;
900 
901 	if (get_area) {
902 		addr = get_area(file, addr, len, pgoff, flags);
903 	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
904 		/* Ensures that larger anonymous mappings are THP aligned. */
905 		addr = thp_get_unmapped_area_vmflags(file, addr, len,
906 						     pgoff, flags, vm_flags);
907 	} else {
908 		addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
909 						    pgoff, flags, vm_flags);
910 	}
911 	if (IS_ERR_VALUE(addr))
912 		return addr;
913 
914 	if (addr > TASK_SIZE - len)
915 		return -ENOMEM;
916 	if (offset_in_page(addr))
917 		return -EINVAL;
918 
919 	error = security_mmap_addr(addr);
920 	return error ? error : addr;
921 }
922 
923 unsigned long
924 mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
925 		     unsigned long addr, unsigned long len,
926 		     unsigned long pgoff, unsigned long flags)
927 {
928 	if (test_bit(MMF_TOPDOWN, &mm->flags))
929 		return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags);
930 	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
931 }
932 EXPORT_SYMBOL(mm_get_unmapped_area);
933 
934 /**
935  * find_vma_intersection() - Look up the first VMA which intersects the interval
936  * @mm: The process address space.
937  * @start_addr: The inclusive start user address.
938  * @end_addr: The exclusive end user address.
939  *
940  * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
941  * start_addr < end_addr.
942  */
943 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
944 					     unsigned long start_addr,
945 					     unsigned long end_addr)
946 {
947 	unsigned long index = start_addr;
948 
949 	mmap_assert_locked(mm);
950 	return mt_find(&mm->mm_mt, &index, end_addr - 1);
951 }
952 EXPORT_SYMBOL(find_vma_intersection);
953 
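/*
 * Minimal usage sketch (hypothetical helper, not taken from this file):
 * the lookup must run under the mmap lock, which mmap_assert_locked()
 * above enforces.
 *
 *	static bool range_is_mapped(struct mm_struct *mm,
 *				    unsigned long start, unsigned long end)
 *	{
 *		bool mapped;
 *
 *		mmap_read_lock(mm);
 *		mapped = find_vma_intersection(mm, start, end) != NULL;
 *		mmap_read_unlock(mm);
 *		return mapped;
 *	}
 */
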
954 /**
955  * find_vma() - Find the VMA for a given address, or the next VMA.
956  * @mm: The mm_struct to check
957  * @addr: The address
958  *
959  * Returns: The VMA associated with addr, or the next VMA.
960  * May return %NULL in the case of no VMA at addr or above.
961  */
962 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
963 {
964 	unsigned long index = addr;
965 
966 	mmap_assert_locked(mm);
967 	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
968 }
969 EXPORT_SYMBOL(find_vma);
970 
971 /**
972  * find_vma_prev() - Find the VMA for a given address, or the next vma and
973  * set %pprev to the previous VMA, if any.
974  * @mm: The mm_struct to check
975  * @addr: The address
976  * @pprev: The pointer to set to the previous VMA
977  *
978  * Note that RCU lock is missing here since the external mmap_lock() is used
979  * instead.
980  *
981  * Returns: The VMA associated with @addr, or the next vma.
982  * May return %NULL in the case of no vma at addr or above.
983  */
984 struct vm_area_struct *
985 find_vma_prev(struct mm_struct *mm, unsigned long addr,
986 			struct vm_area_struct **pprev)
987 {
988 	struct vm_area_struct *vma;
989 	VMA_ITERATOR(vmi, mm, addr);
990 
991 	vma = vma_iter_load(&vmi);
992 	*pprev = vma_prev(&vmi);
993 	if (!vma)
994 		vma = vma_next(&vmi);
995 	return vma;
996 }
997 
998 /*
999  * Verify that the stack growth is acceptable and
1000  * update accounting. This is shared with both the
1001  * grow-up and grow-down cases.
1002  */
1003 static int acct_stack_growth(struct vm_area_struct *vma,
1004 			     unsigned long size, unsigned long grow)
1005 {
1006 	struct mm_struct *mm = vma->vm_mm;
1007 	unsigned long new_start;
1008 
1009 	/* address space limit tests */
1010 	if (!may_expand_vm(mm, vma->vm_flags, grow))
1011 		return -ENOMEM;
1012 
1013 	/* Stack limit test */
1014 	if (size > rlimit(RLIMIT_STACK))
1015 		return -ENOMEM;
1016 
1017 	/* mlock limit tests */
1018 	if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1019 		return -ENOMEM;
1020 
1021 	/* Check to ensure the stack will not grow into a hugetlb-only region */
1022 	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1023 			vma->vm_end - size;
1024 	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1025 		return -EFAULT;
1026 
1027 	/*
1028 	 * Overcommit..  This must be the final test, as it will
1029 	 * update security statistics.
1030 	 */
1031 	if (security_vm_enough_memory_mm(mm, grow))
1032 		return -ENOMEM;
1033 
1034 	return 0;
1035 }
1036 
1037 #if defined(CONFIG_STACK_GROWSUP)
1038 /*
1039  * PA-RISC uses this for its stack.
1040  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1041  */
1042 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1043 {
1044 	struct mm_struct *mm = vma->vm_mm;
1045 	struct vm_area_struct *next;
1046 	unsigned long gap_addr;
1047 	int error = 0;
1048 	VMA_ITERATOR(vmi, mm, vma->vm_start);
1049 
1050 	if (!(vma->vm_flags & VM_GROWSUP))
1051 		return -EFAULT;
1052 
1053 	/* Guard against exceeding limits of the address space. */
1054 	address &= PAGE_MASK;
1055 	if (address >= (TASK_SIZE & PAGE_MASK))
1056 		return -ENOMEM;
1057 	address += PAGE_SIZE;
1058 
1059 	/* Enforce stack_guard_gap */
1060 	gap_addr = address + stack_guard_gap;
1061 
1062 	/* Guard against overflow */
1063 	if (gap_addr < address || gap_addr > TASK_SIZE)
1064 		gap_addr = TASK_SIZE;
1065 
1066 	next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1067 	if (next && vma_is_accessible(next)) {
1068 		if (!(next->vm_flags & VM_GROWSUP))
1069 			return -ENOMEM;
1070 		/* Check that both stack segments have the same anon_vma? */
1071 	}
1072 
1073 	if (next)
1074 		vma_iter_prev_range_limit(&vmi, address);
1075 
1076 	vma_iter_config(&vmi, vma->vm_start, address);
1077 	if (vma_iter_prealloc(&vmi, vma))
1078 		return -ENOMEM;
1079 
1080 	/* We must make sure the anon_vma is allocated. */
1081 	if (unlikely(anon_vma_prepare(vma))) {
1082 		vma_iter_free(&vmi);
1083 		return -ENOMEM;
1084 	}
1085 
1086 	/* Lock the VMA before expanding to prevent concurrent page faults */
1087 	vma_start_write(vma);
1088 	/*
1089 	 * vma->vm_start/vm_end cannot change under us because the caller
1090 	 * is required to hold the mmap_lock in read mode.  We need the
1091 	 * anon_vma lock to serialize against concurrent expand_stacks.
1092 	 */
1093 	anon_vma_lock_write(vma->anon_vma);
1094 
1095 	/* Somebody else might have raced and expanded it already */
1096 	if (address > vma->vm_end) {
1097 		unsigned long size, grow;
1098 
1099 		size = address - vma->vm_start;
1100 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
1101 
1102 		error = -ENOMEM;
1103 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1104 			error = acct_stack_growth(vma, size, grow);
1105 			if (!error) {
1106 				/*
1107 				 * We only hold a shared mmap_lock here, so
1108 				 * we need to protect against concurrent vma
1109 				 * expansions.  anon_vma_lock_write() doesn't
1110 				 * help here, as we don't guarantee that all
1111 				 * growable vmas in a mm share the same root
1112 				 * anon vma.  So, we reuse mm->page_table_lock
1113 				 * to guard against concurrent vma expansions.
1114 				 */
1115 				spin_lock(&mm->page_table_lock);
1116 				if (vma->vm_flags & VM_LOCKED)
1117 					mm->locked_vm += grow;
1118 				vm_stat_account(mm, vma->vm_flags, grow);
1119 				anon_vma_interval_tree_pre_update_vma(vma);
1120 				vma->vm_end = address;
1121 				/* Overwrite old entry in mtree. */
1122 				vma_iter_store(&vmi, vma);
1123 				anon_vma_interval_tree_post_update_vma(vma);
1124 				spin_unlock(&mm->page_table_lock);
1125 
1126 				perf_event_mmap(vma);
1127 			}
1128 		}
1129 	}
1130 	anon_vma_unlock_write(vma->anon_vma);
1131 	vma_iter_free(&vmi);
1132 	validate_mm(mm);
1133 	return error;
1134 }
1135 #endif /* CONFIG_STACK_GROWSUP */
1136 
1137 /*
1138  * vma is the first one with address < vma->vm_start.  Have to extend vma.
1139  * mmap_lock held for writing.
1140  */
1141 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
1142 {
1143 	struct mm_struct *mm = vma->vm_mm;
1144 	struct vm_area_struct *prev;
1145 	int error = 0;
1146 	VMA_ITERATOR(vmi, mm, vma->vm_start);
1147 
1148 	if (!(vma->vm_flags & VM_GROWSDOWN))
1149 		return -EFAULT;
1150 
1151 	address &= PAGE_MASK;
1152 	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
1153 		return -EPERM;
1154 
1155 	/* Enforce stack_guard_gap */
1156 	prev = vma_prev(&vmi);
1157 	/* Check that both stack segments have the same anon_vma? */
1158 	if (prev) {
1159 		if (!(prev->vm_flags & VM_GROWSDOWN) &&
1160 		    vma_is_accessible(prev) &&
1161 		    (address - prev->vm_end < stack_guard_gap))
1162 			return -ENOMEM;
1163 	}
1164 
1165 	if (prev)
1166 		vma_iter_next_range_limit(&vmi, vma->vm_start);
1167 
1168 	vma_iter_config(&vmi, address, vma->vm_end);
1169 	if (vma_iter_prealloc(&vmi, vma))
1170 		return -ENOMEM;
1171 
1172 	/* We must make sure the anon_vma is allocated. */
1173 	if (unlikely(anon_vma_prepare(vma))) {
1174 		vma_iter_free(&vmi);
1175 		return -ENOMEM;
1176 	}
1177 
1178 	/* Lock the VMA before expanding to prevent concurrent page faults */
1179 	vma_start_write(vma);
1180 	/*
1181 	 * vma->vm_start/vm_end cannot change under us because the caller
1182 	 * is required to hold the mmap_lock in read mode.  We need the
1183 	 * anon_vma lock to serialize against concurrent expand_stacks.
1184 	 */
1185 	anon_vma_lock_write(vma->anon_vma);
1186 
1187 	/* Somebody else might have raced and expanded it already */
1188 	if (address < vma->vm_start) {
1189 		unsigned long size, grow;
1190 
1191 		size = vma->vm_end - address;
1192 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
1193 
1194 		error = -ENOMEM;
1195 		if (grow <= vma->vm_pgoff) {
1196 			error = acct_stack_growth(vma, size, grow);
1197 			if (!error) {
1198 				/*
1199 				 * We only hold a shared mmap_lock here, so
1200 				 * we need to protect against concurrent vma
1201 				 * expansions.  anon_vma_lock_write() doesn't
1202 				 * help here, as we don't guarantee that all
1203 				 * growable vmas in a mm share the same root
1204 				 * anon vma.  So, we reuse mm->page_table_lock
1205 				 * to guard against concurrent vma expansions.
1206 				 */
1207 				spin_lock(&mm->page_table_lock);
1208 				if (vma->vm_flags & VM_LOCKED)
1209 					mm->locked_vm += grow;
1210 				vm_stat_account(mm, vma->vm_flags, grow);
1211 				anon_vma_interval_tree_pre_update_vma(vma);
1212 				vma->vm_start = address;
1213 				vma->vm_pgoff -= grow;
1214 				/* Overwrite old entry in mtree. */
1215 				vma_iter_store(&vmi, vma);
1216 				anon_vma_interval_tree_post_update_vma(vma);
1217 				spin_unlock(&mm->page_table_lock);
1218 
1219 				perf_event_mmap(vma);
1220 			}
1221 		}
1222 	}
1223 	anon_vma_unlock_write(vma->anon_vma);
1224 	vma_iter_free(&vmi);
1225 	validate_mm(mm);
1226 	return error;
1227 }
1228 
1229 /* enforced gap between the expanding stack and other mappings. */
1230 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
1231 
1232 static int __init cmdline_parse_stack_guard_gap(char *p)
1233 {
1234 	unsigned long val;
1235 	char *endptr;
1236 
1237 	val = simple_strtoul(p, &endptr, 10);
1238 	if (!*endptr)
1239 		stack_guard_gap = val << PAGE_SHIFT;
1240 
1241 	return 1;
1242 }
1243 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
1244 
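/*
 * Usage note: stack_guard_gap is a documented boot parameter and takes a
 * page count, so booting with "stack_guard_gap=1024" widens the gap to
 * 1024 pages (4MB with 4KB pages); the default above is 256 pages (1MB).
 */
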
1245 #ifdef CONFIG_STACK_GROWSUP
1246 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1247 {
1248 	return expand_upwards(vma, address);
1249 }
1250 
1251 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1252 {
1253 	struct vm_area_struct *vma, *prev;
1254 
1255 	addr &= PAGE_MASK;
1256 	vma = find_vma_prev(mm, addr, &prev);
1257 	if (vma && (vma->vm_start <= addr))
1258 		return vma;
1259 	if (!prev)
1260 		return NULL;
1261 	if (expand_stack_locked(prev, addr))
1262 		return NULL;
1263 	if (prev->vm_flags & VM_LOCKED)
1264 		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
1265 	return prev;
1266 }
1267 #else
1268 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1269 {
1270 	return expand_downwards(vma, address);
1271 }
1272 
1273 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1274 {
1275 	struct vm_area_struct *vma;
1276 	unsigned long start;
1277 
1278 	addr &= PAGE_MASK;
1279 	vma = find_vma(mm, addr);
1280 	if (!vma)
1281 		return NULL;
1282 	if (vma->vm_start <= addr)
1283 		return vma;
1284 	start = vma->vm_start;
1285 	if (expand_stack_locked(vma, addr))
1286 		return NULL;
1287 	if (vma->vm_flags & VM_LOCKED)
1288 		populate_vma_page_range(vma, addr, start, NULL);
1289 	return vma;
1290 }
1291 #endif
1292 
1293 #if defined(CONFIG_STACK_GROWSUP)
1294 
1295 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
1296 #define vma_expand_down(vma, addr) (-EFAULT)
1297 
1298 #else
1299 
1300 #define vma_expand_up(vma,addr) (-EFAULT)
1301 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
1302 
1303 #endif
1304 
1305 /*
1306  * expand_stack(): legacy interface for page faulting. Don't use unless
1307  * you have to.
1308  *
1309  * This is called with the mm locked for reading, drops the lock, takes
1310  * the lock for writing, tries to look up a vma again, expands it if
1311  * necessary, and downgrades the lock to reading again.
1312  *
1313  * If no vma is found or it can't be expanded, it returns NULL and has
1314  * dropped the lock.
1315  */
1316 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
1317 {
1318 	struct vm_area_struct *vma, *prev;
1319 
1320 	mmap_read_unlock(mm);
1321 	if (mmap_write_lock_killable(mm))
1322 		return NULL;
1323 
1324 	vma = find_vma_prev(mm, addr, &prev);
1325 	if (vma && vma->vm_start <= addr)
1326 		goto success;
1327 
1328 	if (prev && !vma_expand_up(prev, addr)) {
1329 		vma = prev;
1330 		goto success;
1331 	}
1332 
1333 	if (vma && !vma_expand_down(vma, addr))
1334 		goto success;
1335 
1336 	mmap_write_unlock(mm);
1337 	return NULL;
1338 
1339 success:
1340 	mmap_write_downgrade(mm);
1341 	return vma;
1342 }
1343 
1344 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
1345  * @mm: The mm_struct
1346  * @start: The start address to munmap
1347  * @len: The length to be munmapped.
1348  * @uf: The userfaultfd list_head
1349  *
1350  * Return: 0 on success, error otherwise.
1351  */
1352 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
1353 	      struct list_head *uf)
1354 {
1355 	VMA_ITERATOR(vmi, mm, start);
1356 
1357 	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
1358 }
1359 
1360 unsigned long mmap_region(struct file *file, unsigned long addr,
1361 		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
1362 		struct list_head *uf)
1363 {
1364 	struct mm_struct *mm = current->mm;
1365 	struct vm_area_struct *vma = NULL;
1366 	struct vm_area_struct *next, *prev, *merge;
1367 	pgoff_t pglen = len >> PAGE_SHIFT;
1368 	unsigned long charged = 0;
1369 	struct vma_munmap_struct vms;
1370 	struct ma_state mas_detach;
1371 	struct maple_tree mt_detach;
1372 	unsigned long end = addr + len;
1373 	unsigned long merge_start = addr, merge_end = end;
1374 	bool writable_file_mapping = false;
1375 	pgoff_t vm_pgoff;
1376 	int error;
1377 	VMA_ITERATOR(vmi, mm, addr);
1378 
1379 	/* Check against address space limit. */
1380 	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
1381 		unsigned long nr_pages;
1382 
1383 		/*
1384 		 * MAP_FIXED may remove pages of mappings that intersect with the
1385 		 * requested mapping. Account for the pages it would unmap.
1386 		 */
1387 		nr_pages = count_vma_pages_range(mm, addr, end);
1388 
1389 		if (!may_expand_vm(mm, vm_flags,
1390 					(len >> PAGE_SHIFT) - nr_pages))
1391 			return -ENOMEM;
1392 	}
1393 
1394 	/* Find the first overlapping VMA */
1395 	vma = vma_find(&vmi, end);
1396 	init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
1397 	if (vma) {
1398 		mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1399 		mt_on_stack(mt_detach);
1400 		mas_init(&mas_detach, &mt_detach, /* addr = */ 0);
1401 		/* Prepare to unmap any existing mapping in the area */
1402 		error = vms_gather_munmap_vmas(&vms, &mas_detach);
1403 		if (error)
1404 			goto gather_failed;
1405 
1406 		/* Remove any existing mappings from the vma tree */
1407 		error = vma_iter_clear_gfp(&vmi, addr, end, GFP_KERNEL);
1408 		if (error)
1409 			goto clear_tree_failed;
1410 
1411 		/* Unmap any existing mapping in the area */
1412 		vms_complete_munmap_vmas(&vms, &mas_detach);
1413 		next = vms.next;
1414 		prev = vms.prev;
1415 		vma = NULL;
1416 	} else {
1417 		next = vma_next(&vmi);
1418 		prev = vma_prev(&vmi);
1419 		if (prev)
1420 			vma_iter_next_range(&vmi);
1421 	}
1422 
1423 	/*
1424 	 * Private writable mapping: check memory availability
1425 	 */
1426 	if (accountable_mapping(file, vm_flags)) {
1427 		charged = len >> PAGE_SHIFT;
1428 		if (security_vm_enough_memory_mm(mm, charged))
1429 			return -ENOMEM;
1430 		vm_flags |= VM_ACCOUNT;
1431 	}
1432 
1433 	if (vm_flags & VM_SPECIAL)
1434 		goto cannot_expand;
1435 
1436 	/* Attempt to expand an old mapping */
1437 	/* Check next */
1438 	if (next && next->vm_start == end && !vma_policy(next) &&
1439 	    can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
1440 				 NULL_VM_UFFD_CTX, NULL)) {
1441 		merge_end = next->vm_end;
1442 		vma = next;
1443 		vm_pgoff = next->vm_pgoff - pglen;
1444 	}
1445 
1446 	/* Check prev */
1447 	if (prev && prev->vm_end == addr && !vma_policy(prev) &&
1448 	    (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
1449 				       pgoff, vma->vm_userfaultfd_ctx, NULL) :
1450 		   can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
1451 				       NULL_VM_UFFD_CTX, NULL))) {
1452 		merge_start = prev->vm_start;
1453 		vma = prev;
1454 		vm_pgoff = prev->vm_pgoff;
1455 		vma_prev(&vmi); /* Equivalent to going to the previous range */
1456 	}
1457 
1458 	if (vma) {
1459 		/* Actually expand, if possible */
1460 		if (!vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
1461 			khugepaged_enter_vma(vma, vm_flags);
1462 			goto expanded;
1463 		}
1464 
1465 		/* If the expand fails, then reposition the vma iterator */
1466 		if (unlikely(vma == prev))
1467 			vma_iter_set(&vmi, addr);
1468 	}
1469 
1470 cannot_expand:
1471 
1472 	/*
1473 	 * Determine the object being mapped and call the appropriate
1474 	 * specific mapper. The address has already been validated but
1475 	 * not unmapped; however, the old maps have been removed from the list.
1476 	 */
1477 	vma = vm_area_alloc(mm);
1478 	if (!vma) {
1479 		error = -ENOMEM;
1480 		goto unacct_error;
1481 	}
1482 
1483 	vma_iter_config(&vmi, addr, end);
1484 	vma_set_range(vma, addr, end, pgoff);
1485 	vm_flags_init(vma, vm_flags);
1486 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
1487 
1488 	if (file) {
1489 		vma->vm_file = get_file(file);
1490 		error = call_mmap(file, vma);
1491 		if (error)
1492 			goto unmap_and_free_vma;
1493 
1494 		if (vma_is_shared_maywrite(vma)) {
1495 			error = mapping_map_writable(file->f_mapping);
1496 			if (error)
1497 				goto close_and_free_vma;
1498 
1499 			writable_file_mapping = true;
1500 		}
1501 
1502 		/*
1503 		 * Expansion is handled above, merging is handled below.
1504 		 * Drivers should not alter the address of the VMA.
1505 		 */
1506 		error = -EINVAL;
1507 		if (WARN_ON((addr != vma->vm_start)))
1508 			goto close_and_free_vma;
1509 
1510 		vma_iter_config(&vmi, addr, end);
1511 		/*
1512 		 * If vm_flags changed after call_mmap(), we should try merge
1513 		 * vma again as we may succeed this time.
1514 		 */
1515 		if (unlikely(vm_flags != vma->vm_flags && prev)) {
1516 			merge = vma_merge_new_vma(&vmi, prev, vma,
1517 						  vma->vm_start, vma->vm_end,
1518 						  vma->vm_pgoff);
1519 			if (merge) {
1520 				/*
1521 				 * ->mmap() can change vma->vm_file and fput
1522 				 * the original file. So fput the vma->vm_file
1523 				 * here or we would add an extra fput for file
1524 				 * and cause general protection fault
1525 				 * ultimately.
1526 				 */
1527 				fput(vma->vm_file);
1528 				vm_area_free(vma);
1529 				vma = merge;
1530 				/* Update vm_flags to pick up the change. */
1531 				vm_flags = vma->vm_flags;
1532 				goto unmap_writable;
1533 			}
1534 		}
1535 
1536 		vm_flags = vma->vm_flags;
1537 	} else if (vm_flags & VM_SHARED) {
1538 		error = shmem_zero_setup(vma);
1539 		if (error)
1540 			goto free_vma;
1541 	} else {
1542 		vma_set_anonymous(vma);
1543 	}
1544 
1545 	if (map_deny_write_exec(vma, vma->vm_flags)) {
1546 		error = -EACCES;
1547 		goto close_and_free_vma;
1548 	}
1549 
1550 	/* Allow architectures to sanity-check the vm_flags */
1551 	error = -EINVAL;
1552 	if (!arch_validate_flags(vma->vm_flags))
1553 		goto close_and_free_vma;
1554 
1555 	error = -ENOMEM;
1556 	if (vma_iter_prealloc(&vmi, vma))
1557 		goto close_and_free_vma;
1558 
1559 	/* Lock the VMA since it is modified after insertion into VMA tree */
1560 	vma_start_write(vma);
1561 	vma_iter_store(&vmi, vma);
1562 	mm->map_count++;
1563 	vma_link_file(vma);
1564 
1565 	/*
1566 	 * vma_merge() calls khugepaged_enter_vma() as well; the call below
1567 	 * covers the non-merge case.
1568 	 */
1569 	khugepaged_enter_vma(vma, vma->vm_flags);
1570 
1571 	/* Once vma denies write, undo our temporary denial count */
1572 unmap_writable:
1573 	if (writable_file_mapping)
1574 		mapping_unmap_writable(file->f_mapping);
1575 	file = vma->vm_file;
1576 	ksm_add_vma(vma);
1577 expanded:
1578 	perf_event_mmap(vma);
1579 
1580 	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
1581 	if (vm_flags & VM_LOCKED) {
1582 		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
1583 					is_vm_hugetlb_page(vma) ||
1584 					vma == get_gate_vma(current->mm))
1585 			vm_flags_clear(vma, VM_LOCKED_MASK);
1586 		else
1587 			mm->locked_vm += (len >> PAGE_SHIFT);
1588 	}
1589 
1590 	if (file)
1591 		uprobe_mmap(vma);
1592 
1593 	/*
1594 	 * A new (or expanded) vma always gets soft-dirty status.
1595 	 * Otherwise the user-space soft-dirty page tracker won't
1596 	 * be able to distinguish the situation where a vma area is unmapped
1597 	 * and then a new one is mapped in place (which must be treated as
1598 	 * a completely new data area).
1599 	 */
1600 	vm_flags_set(vma, VM_SOFTDIRTY);
1601 
1602 	vma_set_page_prot(vma);
1603 
1604 	validate_mm(mm);
1605 	return addr;
1606 
1607 close_and_free_vma:
1608 	if (file && vma->vm_ops && vma->vm_ops->close)
1609 		vma->vm_ops->close(vma);
1610 
1611 	if (file || vma->vm_file) {
1612 unmap_and_free_vma:
1613 		fput(vma->vm_file);
1614 		vma->vm_file = NULL;
1615 
1616 		vma_iter_set(&vmi, vma->vm_end);
1617 		/* Undo any partial mapping done by a device driver. */
1618 		unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
1619 			     vma->vm_end, vma->vm_end, true);
1620 	}
1621 	if (writable_file_mapping)
1622 		mapping_unmap_writable(file->f_mapping);
1623 free_vma:
1624 	vm_area_free(vma);
1625 unacct_error:
1626 	if (charged)
1627 		vm_unacct_memory(charged);
1628 
1629 clear_tree_failed:
1630 	if (vms.vma_count)
1631 		abort_munmap_vmas(&mas_detach);
1632 gather_failed:
1633 	validate_mm(mm);
1634 	return error;
1635 }
1636 
1637 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
1638 {
1639 	int ret;
1640 	struct mm_struct *mm = current->mm;
1641 	LIST_HEAD(uf);
1642 	VMA_ITERATOR(vmi, mm, start);
1643 
1644 	if (mmap_write_lock_killable(mm))
1645 		return -EINTR;
1646 
1647 	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
1648 	if (ret || !unlock)
1649 		mmap_write_unlock(mm);
1650 
1651 	userfaultfd_unmap_complete(mm, &uf);
1652 	return ret;
1653 }
1654 
1655 int vm_munmap(unsigned long start, size_t len)
1656 {
1657 	return __vm_munmap(start, len, false);
1658 }
1659 EXPORT_SYMBOL(vm_munmap);
1660 
1661 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1662 {
1663 	addr = untagged_addr(addr);
1664 	return __vm_munmap(addr, len, true);
1665 }
1666 
1667 
1668 /*
1669  * Emulation of deprecated remap_file_pages() syscall.
1670  */
1671 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
1672 		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
1673 {
1674 
1675 	struct mm_struct *mm = current->mm;
1676 	struct vm_area_struct *vma;
1677 	unsigned long populate = 0;
1678 	unsigned long ret = -EINVAL;
1679 	struct file *file;
1680 
1681 	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
1682 		     current->comm, current->pid);
1683 
1684 	if (prot)
1685 		return ret;
1686 	start = start & PAGE_MASK;
1687 	size = size & PAGE_MASK;
1688 
1689 	if (start + size <= start)
1690 		return ret;
1691 
1692 	/* Does pgoff wrap? */
1693 	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
1694 		return ret;
1695 
1696 	if (mmap_write_lock_killable(mm))
1697 		return -EINTR;
1698 
1699 	vma = vma_lookup(mm, start);
1700 
1701 	if (!vma || !(vma->vm_flags & VM_SHARED))
1702 		goto out;
1703 
1704 	if (start + size > vma->vm_end) {
1705 		VMA_ITERATOR(vmi, mm, vma->vm_end);
1706 		struct vm_area_struct *next, *prev = vma;
1707 
1708 		for_each_vma_range(vmi, next, start + size) {
1709 			/* hole between vmas ? */
1710 			if (next->vm_start != prev->vm_end)
1711 				goto out;
1712 
1713 			if (next->vm_file != vma->vm_file)
1714 				goto out;
1715 
1716 			if (next->vm_flags != vma->vm_flags)
1717 				goto out;
1718 
1719 			if (start + size <= next->vm_end)
1720 				break;
1721 
1722 			prev = next;
1723 		}
1724 
1725 		if (!next)
1726 			goto out;
1727 	}
1728 
1729 	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
1730 	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
1731 	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
1732 
1733 	flags &= MAP_NONBLOCK;
1734 	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
1735 	if (vma->vm_flags & VM_LOCKED)
1736 		flags |= MAP_LOCKED;
1737 
1738 	file = get_file(vma->vm_file);
1739 	ret = do_mmap(vma->vm_file, start, size,
1740 			prot, flags, 0, pgoff, &populate, NULL);
1741 	fput(file);
1742 out:
1743 	mmap_write_unlock(mm);
1744 	if (populate)
1745 		mm_populate(ret, populate);
1746 	if (!IS_ERR_VALUE(ret))
1747 		ret = 0;
1748 	return ret;
1749 }
1750 
1751 /*
1752  * do_vma_munmap() - Unmap a full or partial vma.
1753  * @vmi: The vma iterator pointing at the vma
1754  * @vma: The first vma to be munmapped
1755  * @start: the start of the address to unmap
1756  * @end: The end of the address to unmap
1757  * @uf: The userfaultfd list_head
1758  * @unlock: Drop the lock on success
1759  *
1760  * Unmaps a VMA mapping when the vma iterator is already in position.
1761  * Does not handle alignment.
1762  *
1763  * Return: 0 on success and drops the lock if so directed; on failure, returns an
1764  * error and still holds the lock.
1765  */
1766 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1767 		unsigned long start, unsigned long end, struct list_head *uf,
1768 		bool unlock)
1769 {
1770 	return do_vmi_align_munmap(vmi, vma, vma->vm_mm, start, end, uf, unlock);
1771 }
1772 
1773 /*
1774  * do_brk_flags() - Increase the brk vma if the flags match.
1775  * @vmi: The vma iterator
1776  * @addr: The start address
1777  * @len: The length of the increase
1778  * @vma: The vma
1779  * @flags: The VMA Flags
1780  *
1781  * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
1782  * do not match then create a new anonymous VMA.  Eventually we may be able to
1783  * do some brk-specific accounting here.
1784  */
1785 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
1786 		unsigned long addr, unsigned long len, unsigned long flags)
1787 {
1788 	struct mm_struct *mm = current->mm;
1789 	struct vma_prepare vp;
1790 
1791 	/*
1792 	 * Check against address space limits by the changed size.
1793 	 * Note: This happens *after* clearing old mappings in some code paths.
1794 	 */
1795 	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1796 	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
1797 		return -ENOMEM;
1798 
1799 	if (mm->map_count > sysctl_max_map_count)
1800 		return -ENOMEM;
1801 
1802 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
1803 		return -ENOMEM;
1804 
1805 	/*
1806 	 * Expand the existing vma if possible; Note that singular lists do not
1807 	 * occur after forking, so the expand will only happen on new VMAs.
1808 	 */
1809 	if (vma && vma->vm_end == addr && !vma_policy(vma) &&
1810 	    can_vma_merge_after(vma, flags, NULL, NULL,
1811 				addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
1812 		vma_iter_config(vmi, vma->vm_start, addr + len);
1813 		if (vma_iter_prealloc(vmi, vma))
1814 			goto unacct_fail;
1815 
1816 		vma_start_write(vma);
1817 
1818 		init_vma_prep(&vp, vma);
1819 		vma_prepare(&vp);
1820 		vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
1821 		vma->vm_end = addr + len;
1822 		vm_flags_set(vma, VM_SOFTDIRTY);
1823 		vma_iter_store(vmi, vma);
1824 
1825 		vma_complete(&vp, vmi, mm);
1826 		validate_mm(mm);
1827 		khugepaged_enter_vma(vma, flags);
1828 		goto out;
1829 	}
1830 
1831 	if (vma)
1832 		vma_iter_next_range(vmi);
1833 	/* create a vma struct for an anonymous mapping */
1834 	vma = vm_area_alloc(mm);
1835 	if (!vma)
1836 		goto unacct_fail;
1837 
1838 	vma_set_anonymous(vma);
1839 	vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
1840 	vm_flags_init(vma, flags);
1841 	vma->vm_page_prot = vm_get_page_prot(flags);
1842 	vma_start_write(vma);
1843 	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
1844 		goto mas_store_fail;
1845 
1846 	mm->map_count++;
1847 	validate_mm(mm);
1848 	ksm_add_vma(vma);
1849 out:
1850 	perf_event_mmap(vma);
1851 	mm->total_vm += len >> PAGE_SHIFT;
1852 	mm->data_vm += len >> PAGE_SHIFT;
1853 	if (flags & VM_LOCKED)
1854 		mm->locked_vm += (len >> PAGE_SHIFT);
1855 	vm_flags_set(vma, VM_SOFTDIRTY);
1856 	return 0;
1857 
1858 mas_store_fail:
1859 	vm_area_free(vma);
1860 unacct_fail:
1861 	vm_unacct_memory(len >> PAGE_SHIFT);
1862 	return -ENOMEM;
1863 }
1864 
1865 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
1866 {
1867 	struct mm_struct *mm = current->mm;
1868 	struct vm_area_struct *vma = NULL;
1869 	unsigned long len;
1870 	int ret;
1871 	bool populate;
1872 	LIST_HEAD(uf);
1873 	VMA_ITERATOR(vmi, mm, addr);
1874 
1875 	len = PAGE_ALIGN(request);
1876 	if (len < request)
1877 		return -ENOMEM;
1878 	if (!len)
1879 		return 0;
1880 
1881 	/* Until we need other flags, refuse anything except VM_EXEC. */
1882 	if ((flags & (~VM_EXEC)) != 0)
1883 		return -EINVAL;
1884 
1885 	if (mmap_write_lock_killable(mm))
1886 		return -EINTR;
1887 
1888 	ret = check_brk_limits(addr, len);
1889 	if (ret)
1890 		goto limits_failed;
1891 
1892 	ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
1893 	if (ret)
1894 		goto munmap_failed;
1895 
1896 	vma = vma_prev(&vmi);
1897 	ret = do_brk_flags(&vmi, vma, addr, len, flags);
1898 	populate = ((mm->def_flags & VM_LOCKED) != 0);
1899 	mmap_write_unlock(mm);
1900 	userfaultfd_unmap_complete(mm, &uf);
1901 	if (populate && !ret)
1902 		mm_populate(addr, len);
1903 	return ret;
1904 
1905 munmap_failed:
1906 limits_failed:
1907 	mmap_write_unlock(mm);
1908 	return ret;
1909 }
1910 EXPORT_SYMBOL(vm_brk_flags);
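
/*
 * Illustrative usage sketch (assumption, not taken from this file): a loader
 * style caller mapping a zero-filled, executable region at a page-aligned
 * address, much as ELF loading populates a bss-like area. The helper name is
 * hypothetical; only VM_EXEC may be passed beyond the defaults.
 */
static int example_map_anon_exec(unsigned long addr, unsigned long len)
{
	int err = vm_brk_flags(addr, len, VM_EXEC);

	if (err)
		pr_warn("example_map_anon_exec: vm_brk_flags() failed: %d\n", err);
	return err;
}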
1911 
1912 /* Release all mmaps. */
1913 void exit_mmap(struct mm_struct *mm)
1914 {
1915 	struct mmu_gather tlb;
1916 	struct vm_area_struct *vma;
1917 	unsigned long nr_accounted = 0;
1918 	VMA_ITERATOR(vmi, mm, 0);
1919 	int count = 0;
1920 
1921 	/* mm's last user has gone, and it's about to be pulled down */
1922 	mmu_notifier_release(mm);
1923 
1924 	mmap_read_lock(mm);
1925 	arch_exit_mmap(mm);
1926 
1927 	vma = vma_next(&vmi);
1928 	if (!vma || unlikely(xa_is_zero(vma))) {
1929 		/* Can happen if dup_mmap() received an OOM */
1930 		mmap_read_unlock(mm);
1931 		mmap_write_lock(mm);
1932 		goto destroy;
1933 	}
1934 
1935 	lru_add_drain();
1936 	flush_cache_mm(mm);
1937 	tlb_gather_mmu_fullmm(&tlb, mm);
1938 	/* update_hiwater_rss(mm) here? but nobody should be looking */
1939 	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
1940 	unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
1941 	mmap_read_unlock(mm);
1942 
1943 	/*
1944 	 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
1945 	 * because the memory has been already freed.
1946 	 */
1947 	set_bit(MMF_OOM_SKIP, &mm->flags);
1948 	mmap_write_lock(mm);
1949 	mt_clear_in_rcu(&mm->mm_mt);
1950 	vma_iter_set(&vmi, vma->vm_end);
1951 	free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
1952 		      USER_PGTABLES_CEILING, true);
1953 	tlb_finish_mmu(&tlb);
1954 
1955 	/*
1956 	 * Walk the list again, actually closing and freeing it, with preemption
1957 	 * enabled, without holding any MM locks besides the unreachable
1958 	 * mmap_write_lock.
1959 	 */
1960 	vma_iter_set(&vmi, vma->vm_end);
1961 	do {
1962 		if (vma->vm_flags & VM_ACCOUNT)
1963 			nr_accounted += vma_pages(vma);
1964 		remove_vma(vma, true);
1965 		count++;
1966 		cond_resched();
1967 		vma = vma_next(&vmi);
1968 	} while (vma && likely(!xa_is_zero(vma)));
1969 
1970 	BUG_ON(count != mm->map_count);
1971 
1972 	trace_exit_mmap(mm);
1973 destroy:
1974 	__mt_destroy(&mm->mm_mt);
1975 	mmap_write_unlock(mm);
1976 	vm_unacct_memory(nr_accounted);
1977 }
1978 
1979 /* Insert vm structure into process list sorted by address
1980  * and into the inode's i_mmap tree.  If vm_file is non-NULL
1981  * then i_mmap_rwsem is taken here.
1982  */
1983 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
1984 {
1985 	unsigned long charged = vma_pages(vma);
1986
1988 	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
1989 		return -ENOMEM;
1990 
1991 	if ((vma->vm_flags & VM_ACCOUNT) &&
1992 	     security_vm_enough_memory_mm(mm, charged))
1993 		return -ENOMEM;
1994 
1995 	/*
1996 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
1997 	 * until its first write fault, when page's anon_vma and index
1998 	 * are set.  But now set the vm_pgoff it will almost certainly
1999 	 * end up with (unless mremap moves it elsewhere before that
2000  * first write fault), so /proc/pid/maps tells a consistent story.
2001 	 *
2002 	 * By setting it to reflect the virtual start address of the
2003 	 * vma, merges and splits can happen in a seamless way, just
2004 	 * using the existing file pgoff checks and manipulations.
2005 	 * Similarly in do_mmap and in do_brk_flags.
2006 	 */
2007 	if (vma_is_anonymous(vma)) {
2008 		BUG_ON(vma->anon_vma);
2009 		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2010 	}
2011 
2012 	if (vma_link(mm, vma)) {
2013 		if (vma->vm_flags & VM_ACCOUNT)
2014 			vm_unacct_memory(charged);
2015 		return -ENOMEM;
2016 	}
2017 
2018 	return 0;
2019 }
2020 
2021 /*
2022  * Return true if the calling process may expand its vm space by the passed
2023  * number of pages
2024  */
2025 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
2026 {
2027 	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
2028 		return false;
2029 
2030 	if (is_data_mapping(flags) &&
2031 	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
2032 		/* Workaround for Valgrind */
2033 		if (rlimit(RLIMIT_DATA) == 0 &&
2034 		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
2035 			return true;
2036 
2037 		pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
2038 			     current->comm, current->pid,
2039 			     (mm->data_vm + npages) << PAGE_SHIFT,
2040 			     rlimit(RLIMIT_DATA),
2041 			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
2042 
2043 		if (!ignore_rlimit_data)
2044 			return false;
2045 	}
2046 
2047 	return true;
2048 }
2049 
2050 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
2051 {
2052 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
2053 
2054 	if (is_exec_mapping(flags))
2055 		mm->exec_vm += npages;
2056 	else if (is_stack_mapping(flags))
2057 		mm->stack_vm += npages;
2058 	else if (is_data_mapping(flags))
2059 		mm->data_vm += npages;
2060 }
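
/*
 * Illustrative sketch (assumption): mapping paths pair may_expand_vm() with
 * vm_stat_account() - the limit is checked before the mapping is created and
 * the counters are bumped once it exists. The helper below is hypothetical.
 */
static int example_check_and_account(struct mm_struct *mm, vm_flags_t flags,
				     unsigned long npages)
{
	if (!may_expand_vm(mm, flags, npages))
		return -ENOMEM;

	/* ... the mapping would be created here ... */

	vm_stat_account(mm, flags, npages);
	return 0;
}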
2061 
2062 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
2063 
2064 /*
2065  * Close hook, called for unmap() and on the old vma for mremap().
2066  *
2067  * Having a close hook prevents vma merging regardless of flags.
2068  */
2069 static void special_mapping_close(struct vm_area_struct *vma)
2070 {
2071 	const struct vm_special_mapping *sm = vma->vm_private_data;
2072 
2073 	if (sm->close)
2074 		sm->close(sm, vma);
2075 }
2076 
2077 static const char *special_mapping_name(struct vm_area_struct *vma)
2078 {
2079 	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
2080 }
2081 
2082 static int special_mapping_mremap(struct vm_area_struct *new_vma)
2083 {
2084 	struct vm_special_mapping *sm = new_vma->vm_private_data;
2085 
2086 	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
2087 		return -EFAULT;
2088 
2089 	if (sm->mremap)
2090 		return sm->mremap(sm, new_vma);
2091 
2092 	return 0;
2093 }
2094 
2095 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
2096 {
2097 	/*
2098 	 * Forbid splitting special mappings - the kernel has expectations about
2099 	 * the number of pages in the mapping. Together with VM_DONTEXPAND
2100 	 * the size of vma should stay the same over the special mapping's
2101 	 * lifetime.
2102 	 */
2103 	return -EINVAL;
2104 }
2105 
2106 static const struct vm_operations_struct special_mapping_vmops = {
2107 	.close = special_mapping_close,
2108 	.fault = special_mapping_fault,
2109 	.mremap = special_mapping_mremap,
2110 	.name = special_mapping_name,
2111 	/* vDSO code relies on VVAR not being accessible remotely */
2112 	.access = NULL,
2113 	.may_split = special_mapping_split,
2114 };
2115 
2116 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
2117 {
2118 	struct vm_area_struct *vma = vmf->vma;
2119 	pgoff_t pgoff;
2120 	struct page **pages;
2121 	struct vm_special_mapping *sm = vma->vm_private_data;
2122 
2123 	if (sm->fault)
2124 		return sm->fault(sm, vmf->vma, vmf);
2125 
2126 	pages = sm->pages;
2127 
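	/* Walk the NULL-terminated page array to the entry for vmf->pgoff. */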
2128 	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
2129 		pgoff--;
2130 
2131 	if (*pages) {
2132 		struct page *page = *pages;
2133 		get_page(page);
2134 		vmf->page = page;
2135 		return 0;
2136 	}
2137 
2138 	return VM_FAULT_SIGBUS;
2139 }
2140 
2141 static struct vm_area_struct *__install_special_mapping(
2142 	struct mm_struct *mm,
2143 	unsigned long addr, unsigned long len,
2144 	unsigned long vm_flags, void *priv,
2145 	const struct vm_operations_struct *ops)
2146 {
2147 	int ret;
2148 	struct vm_area_struct *vma;
2149 
2150 	vma = vm_area_alloc(mm);
2151 	if (unlikely(vma == NULL))
2152 		return ERR_PTR(-ENOMEM);
2153 
2154 	vma_set_range(vma, addr, addr + len, 0);
2155 	vm_flags_init(vma, (vm_flags | mm->def_flags |
2156 		      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
2157 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2158 
2159 	vma->vm_ops = ops;
2160 	vma->vm_private_data = priv;
2161 
2162 	ret = insert_vm_struct(mm, vma);
2163 	if (ret)
2164 		goto out;
2165 
2166 	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
2167 
2168 	perf_event_mmap(vma);
2169 
2170 	return vma;
2171 
2172 out:
2173 	vm_area_free(vma);
2174 	return ERR_PTR(ret);
2175 }
2176 
2177 bool vma_is_special_mapping(const struct vm_area_struct *vma,
2178 	const struct vm_special_mapping *sm)
2179 {
2180 	return vma->vm_private_data == sm &&
2181 		vma->vm_ops == &special_mapping_vmops;
2182 }
2183 
2184 /*
2185  * Called with mm->mmap_lock held for writing.
2186  * Insert a new vma covering the given region, with the given flags.
2187  * Its pages are supplied by the given array of struct page *.
2188  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2189  * The region past the last page supplied will always produce SIGBUS.
2190  * The array pointer and the pages it points to are assumed to stay alive
2191  * for as long as this mapping might exist.
2192  */
2193 struct vm_area_struct *_install_special_mapping(
2194 	struct mm_struct *mm,
2195 	unsigned long addr, unsigned long len,
2196 	unsigned long vm_flags, const struct vm_special_mapping *spec)
2197 {
2198 	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
2199 					&special_mapping_vmops);
2200 }
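
/*
 * Illustrative usage sketch (assumption, not taken from this file): how arch
 * code typically installs a vDSO-style special mapping backed by a
 * NULL-terminated page array. example_pages, example_spec and
 * example_install() are hypothetical names.
 */
static struct page *example_pages[2];	/* [1] stays NULL as the terminator */

static const struct vm_special_mapping example_spec = {
	.name	= "[example]",
	.pages	= example_pages,
};

static int example_install(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* Caller holds mmap_lock for writing, as required above. */
	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD | VM_DONTDUMP,
				       &example_spec);

	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}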
2201 
2202 /*
2203  * initialise the percpu counter for VM
2204  */
2205 void __init mmap_init(void)
2206 {
2207 	int ret;
2208 
2209 	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
2210 	VM_BUG_ON(ret);
2211 }
2212 
2213 /*
2214  * Initialise sysctl_user_reserve_kbytes.
2215  *
2216  * This is intended to prevent a user from starting a single memory hogging
2217  * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
2218  * mode.
2219  *
2220  * The default value is min(3% of free memory, 128MB)
2221  * 128MB is enough to recover with sshd/login, bash, and top/kill.
2222  */
2223 static int init_user_reserve(void)
2224 {
2225 	unsigned long free_kbytes;
2226 
2227 	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
2228 
2229 	sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
2230 	return 0;
2231 }
2232 subsys_initcall(init_user_reserve);
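
/*
 * Worked example of the clamp above (values are illustrative): with 16 GiB
 * free, free_kbytes = 16777216, so free_kbytes / 32 = 524288 KiB (512 MiB),
 * which is clamped to SZ_128K KiB = 128 MiB. With 2 GiB free, free_kbytes /
 * 32 = 65536 KiB (64 MiB), which is below the cap and used as-is.
 */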
2233 
2234 /*
2235  * Initialise sysctl_admin_reserve_kbytes.
2236  *
2237  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
2238  * to log in and kill a memory hogging process.
2239  *
2240  * Systems with more than 256MB will reserve 8MB, enough to recover
2241  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
2242  * only reserve 3% of free pages by default.
2243  */
2244 static int init_admin_reserve(void)
2245 {
2246 	unsigned long free_kbytes;
2247 
2248 	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
2249 
2250 	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
2251 	return 0;
2252 }
2253 subsys_initcall(init_admin_reserve);
2254 
2255 /*
2256  * Reinititalise user and admin reserves if memory is added or removed.
2257  *
2258  * The default user reserve max is 128MB, and the default max for the
2259  * admin reserve is 8MB. These are usually, but not always, enough to
2260  * enable recovery from a memory hogging process using login/sshd, a shell,
2261  * and tools like top. It may make sense to increase or even disable the
2262  * reserve depending on the existence of swap or variations in the recovery
2263  * tools. So, the admin may have changed them.
2264  *
2265  * If memory is added and the reserves have been eliminated or increased above
2266  * the default max, then we'll trust the admin.
2267  *
2268  * If memory is removed and there isn't enough free memory, then we
2269  * need to reset the reserves.
2270  *
2271  * Otherwise keep the reserve set by the admin.
2272  */
2273 static int reserve_mem_notifier(struct notifier_block *nb,
2274 			     unsigned long action, void *data)
2275 {
2276 	unsigned long tmp, free_kbytes;
2277 
2278 	switch (action) {
2279 	case MEM_ONLINE:
2280 		/* Default max is 128MB. Leave alone if modified by operator. */
2281 		tmp = sysctl_user_reserve_kbytes;
2282 		if (tmp > 0 && tmp < SZ_128K)
2283 			init_user_reserve();
2284 
2285 		/* Default max is 8MB.  Leave alone if modified by operator. */
2286 		tmp = sysctl_admin_reserve_kbytes;
2287 		if (tmp > 0 && tmp < SZ_8K)
2288 			init_admin_reserve();
2289 
2290 		break;
2291 	case MEM_OFFLINE:
2292 		free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
2293 
2294 		if (sysctl_user_reserve_kbytes > free_kbytes) {
2295 			init_user_reserve();
2296 			pr_info("vm.user_reserve_kbytes reset to %lu\n",
2297 				sysctl_user_reserve_kbytes);
2298 		}
2299 
2300 		if (sysctl_admin_reserve_kbytes > free_kbytes) {
2301 			init_admin_reserve();
2302 			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
2303 				sysctl_admin_reserve_kbytes);
2304 		}
2305 		break;
2306 	default:
2307 		break;
2308 	}
2309 	return NOTIFY_OK;
2310 }
2311 
2312 static int __meminit init_reserve_notifier(void)
2313 {
2314 	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
2315 		pr_err("Failed registering memory add/remove notifier for admin reserve\n");
2316 
2317 	return 0;
2318 }
2319 subsys_initcall(init_reserve_notifier);
2320 
2321 /*
2322  * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between
2323  * this VMA and its relocated range, which will now reside at [vma->vm_start -
2324  * shift, vma->vm_end - shift).
2325  *
2326  * This function is almost certainly NOT what you want for anything other than
2327  * early executable temporary stack relocation.
2328  */
2329 int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
2330 {
2331 	/*
2332 	 * The process proceeds as follows:
2333 	 *
2334 	 * 1) Use shift to calculate the new vma endpoints.
2335 	 * 2) Extend vma to cover both the old and new ranges.  This ensures the
2336 	 *    arguments passed to subsequent functions are consistent.
2337 	 * 3) Move vma's page tables to the new range.
2338 	 * 4) Free up any cleared pgd range.
2339 	 * 5) Shrink the vma to cover only the new range.
2340 	 */
2341 
2342 	struct mm_struct *mm = vma->vm_mm;
2343 	unsigned long old_start = vma->vm_start;
2344 	unsigned long old_end = vma->vm_end;
2345 	unsigned long length = old_end - old_start;
2346 	unsigned long new_start = old_start - shift;
2347 	unsigned long new_end = old_end - shift;
2348 	VMA_ITERATOR(vmi, mm, new_start);
2349 	struct vm_area_struct *next;
2350 	struct mmu_gather tlb;
2351 
2352 	BUG_ON(new_start > new_end);
2353 
2354 	/*
2355 	 * ensure there are no vmas between where we want to go
2356 	 * and where we are
2357 	 */
2358 	if (vma != vma_next(&vmi))
2359 		return -EFAULT;
2360 
2361 	vma_iter_prev_range(&vmi);
2362 	/*
2363 	 * cover the whole range: [new_start, old_end)
2364 	 */
2365 	if (vma_expand(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
2366 		return -ENOMEM;
2367 
2368 	/*
2369 	 * move the page tables downwards; on failure we rely on
2370 	 * process cleanup to remove whatever mess we made.
2371 	 */
2372 	if (length != move_page_tables(vma, old_start,
2373 				       vma, new_start, length, false, true))
2374 		return -ENOMEM;
2375 
2376 	lru_add_drain();
2377 	tlb_gather_mmu(&tlb, mm);
2378 	next = vma_next(&vmi);
2379 	if (new_end > old_start) {
2380 		/*
2381 		 * when the old and new regions overlap, clear from new_end.
2382 		 */
2383 		free_pgd_range(&tlb, new_end, old_end, new_end,
2384 			next ? next->vm_start : USER_PGTABLES_CEILING);
2385 	} else {
2386 		/*
2387 		 * otherwise, clean from old_start; this is done to not touch
2388 		 * the address space in [new_end, old_start), since some architectures
2389 		 * have constraints on va-space that make this illegal (IA64) -
2390 		 * for the others it's just a little faster.
2391 		 */
2392 		free_pgd_range(&tlb, old_start, old_end, new_end,
2393 			next ? next->vm_start : USER_PGTABLES_CEILING);
2394 	}
2395 	tlb_finish_mmu(&tlb);
2396 
2397 	vma_prev(&vmi);
2398 	/* Shrink the vma to just the new range */
2399 	return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
2400 }
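
/*
 * Illustrative usage sketch (assumption): an exec-time caller computes the
 * downward shift of a temporary stack VMA and then relocates it before
 * growing the stack to its final size. Hypothetical names; mmap_lock must be
 * held for writing and no VMA may sit in the destination gap.
 */
static int example_relocate_stack(struct vm_area_struct *stack_vma,
				  unsigned long stack_top)
{
	unsigned long shift = stack_vma->vm_end - stack_top;

	if (!shift)
		return 0;

	return relocate_vma_down(stack_vma, shift);
}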
2401