1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/mmap.c
4  *
5  * Written by obz.
6  *
7  * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
16 #include <linux/mm_inline.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/swap.h>
21 #include <linux/syscalls.h>
22 #include <linux/capability.h>
23 #include <linux/init.h>
24 #include <linux/file.h>
25 #include <linux/fs.h>
26 #include <linux/personality.h>
27 #include <linux/security.h>
28 #include <linux/hugetlb.h>
29 #include <linux/shmem_fs.h>
30 #include <linux/profile.h>
31 #include <linux/export.h>
32 #include <linux/mount.h>
33 #include <linux/mempolicy.h>
34 #include <linux/rmap.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/mmdebug.h>
37 #include <linux/perf_event.h>
38 #include <linux/audit.h>
39 #include <linux/khugepaged.h>
40 #include <linux/uprobes.h>
41 #include <linux/notifier.h>
42 #include <linux/memory.h>
43 #include <linux/printk.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/moduleparam.h>
46 #include <linux/pkeys.h>
47 #include <linux/oom.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ksm.h>
50 #include <linux/memfd.h>
51 
52 #include <linux/uaccess.h>
53 #include <asm/cacheflush.h>
54 #include <asm/tlb.h>
55 #include <asm/mmu_context.h>
56 
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/mmap.h>
59 
60 #include "internal.h"
61 
62 #ifndef arch_mmap_check
63 #define arch_mmap_check(addr, len, flags)	(0)
64 #endif
65 
66 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
67 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
68 int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
69 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
70 #endif
71 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
72 const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
73 const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
74 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
75 #endif
76 
77 static bool ignore_rlimit_data;
78 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
79 
80 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
81 void vma_set_page_prot(struct vm_area_struct *vma)
82 {
83 	unsigned long vm_flags = vma->vm_flags;
84 	pgprot_t vm_page_prot;
85 
86 	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
87 	if (vma_wants_writenotify(vma, vm_page_prot)) {
88 		vm_flags &= ~VM_SHARED;
89 		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
90 	}
91 	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
92 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
93 }
94 
95 /*
96  * check_brk_limits() - Use the platform-specific check of the range & verify
97  * the mlock limits.
98  * @addr: The address to check
99  * @len: The size of the increase.
100  *
101  * Return: 0 on success.
102  */
103 static int check_brk_limits(unsigned long addr, unsigned long len)
104 {
105 	unsigned long mapped_addr;
106 
107 	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
108 	if (IS_ERR_VALUE(mapped_addr))
109 		return mapped_addr;
110 
111 	return mlock_future_ok(current->mm, current->mm->def_flags, len)
112 		? 0 : -EAGAIN;
113 }
114 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
115 		unsigned long addr, unsigned long request, unsigned long flags);
116 SYSCALL_DEFINE1(brk, unsigned long, brk)
117 {
118 	unsigned long newbrk, oldbrk, origbrk;
119 	struct mm_struct *mm = current->mm;
120 	struct vm_area_struct *brkvma, *next = NULL;
121 	unsigned long min_brk;
122 	bool populate = false;
123 	LIST_HEAD(uf);
124 	struct vma_iterator vmi;
125 
126 	if (mmap_write_lock_killable(mm))
127 		return -EINTR;
128 
129 	origbrk = mm->brk;
130 
131 #ifdef CONFIG_COMPAT_BRK
132 	/*
133 	 * CONFIG_COMPAT_BRK can still be overridden by setting
134 	 * randomize_va_space to 2, which will still cause mm->start_brk
135 	 * to be arbitrarily shifted
136 	 */
137 	if (current->brk_randomized)
138 		min_brk = mm->start_brk;
139 	else
140 		min_brk = mm->end_data;
141 #else
142 	min_brk = mm->start_brk;
143 #endif
144 	if (brk < min_brk)
145 		goto out;
146 
147 	/*
148 	 * Check against rlimit here. If this check is done later after the test
149 	 * of oldbrk with newbrk then it can escape the test and let the data
150 	 * segment grow beyond its set limit in the case where the limit is
151 	 * not page aligned -Ram Gupta
152 	 */
153 	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
154 			      mm->end_data, mm->start_data))
155 		goto out;
156 
157 	newbrk = PAGE_ALIGN(brk);
158 	oldbrk = PAGE_ALIGN(mm->brk);
159 	if (oldbrk == newbrk) {
160 		mm->brk = brk;
161 		goto success;
162 	}
163 
164 	/* Always allow shrinking brk. */
165 	if (brk <= mm->brk) {
166 		/* Search one past newbrk */
167 		vma_iter_init(&vmi, mm, newbrk);
168 		brkvma = vma_find(&vmi, oldbrk);
169 		if (!brkvma || brkvma->vm_start >= oldbrk)
170 			goto out; /* mapping intersects with an existing non-brk vma. */
171 		/*
172 		 * mm->brk must be protected by write mmap_lock.
173 	 * do_vmi_align_munmap() will drop the lock on success, so
174 	 * update it before calling do_vmi_align_munmap().
175 		 */
176 		mm->brk = brk;
177 		if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
178 					/* unlock = */ true))
179 			goto out;
180 
181 		goto success_unlocked;
182 	}
183 
184 	if (check_brk_limits(oldbrk, newbrk - oldbrk))
185 		goto out;
186 
187 	/*
188 	 * Only check if the next VMA is within the stack_guard_gap of the
189 	 * expansion area
190 	 */
191 	vma_iter_init(&vmi, mm, oldbrk);
192 	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
193 	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
194 		goto out;
195 
196 	brkvma = vma_prev_limit(&vmi, mm->start_brk);
197 	/* Ok, looks good - let it rip. */
198 	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
199 		goto out;
200 
201 	mm->brk = brk;
202 	if (mm->def_flags & VM_LOCKED)
203 		populate = true;
204 
205 success:
206 	mmap_write_unlock(mm);
207 success_unlocked:
208 	userfaultfd_unmap_complete(mm, &uf);
209 	if (populate)
210 		mm_populate(oldbrk, newbrk - oldbrk);
211 	return brk;
212 
213 out:
214 	mm->brk = origbrk;
215 	mmap_write_unlock(mm);
216 	return origbrk;
217 }
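/*
 * Illustrative only (not kernel code): the return convention above - the new
 * break on success, the unchanged old break on failure - is what lets a C
 * library build sbrk() on top of this syscall.  A minimal userspace sketch,
 * assuming a raw syscall(2) wrapper:
 *
 *	void *sbrk_sketch(intptr_t increment)
 *	{
 *		unsigned long cur = syscall(SYS_brk, 0);  // 0 < min_brk, so the
 *							   // current break comes back
 *		unsigned long want = cur + increment;
 *
 *		if (syscall(SYS_brk, want) != want)	   // unchanged => failure
 *			return (void *)-1;
 *		return (void *)cur;
 *	}
 */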
218 
219 /*
220  * If a hint addr is less than mmap_min_addr change hint to be as
221  * low as possible but still greater than mmap_min_addr
222  */
223 static inline unsigned long round_hint_to_min(unsigned long hint)
224 {
225 	hint &= PAGE_MASK;
226 	if (((void *)hint != NULL) &&
227 	    (hint < mmap_min_addr))
228 		return PAGE_ALIGN(mmap_min_addr);
229 	return hint;
230 }
231 
232 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
233 			unsigned long bytes)
234 {
235 	unsigned long locked_pages, limit_pages;
236 
237 	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
238 		return true;
239 
240 	locked_pages = bytes >> PAGE_SHIFT;
241 	locked_pages += mm->locked_vm;
242 
243 	limit_pages = rlimit(RLIMIT_MEMLOCK);
244 	limit_pages >>= PAGE_SHIFT;
245 
246 	return locked_pages <= limit_pages;
247 }
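/*
 * Worked example (illustrative, the values are assumptions): with 4 KiB pages
 * and the common RLIMIT_MEMLOCK default of 64 KiB, limit_pages is 16.  A
 * VM_LOCKED request of 32 KiB (8 pages) therefore only passes while
 * mm->locked_vm is at most 8 pages, unless the caller has CAP_IPC_LOCK, which
 * bypasses the limit entirely.
 */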
248 
249 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
250 {
251 	if (S_ISREG(inode->i_mode))
252 		return MAX_LFS_FILESIZE;
253 
254 	if (S_ISBLK(inode->i_mode))
255 		return MAX_LFS_FILESIZE;
256 
257 	if (S_ISSOCK(inode->i_mode))
258 		return MAX_LFS_FILESIZE;
259 
260 	/* Special "we do even unsigned file positions" case */
261 	if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
262 		return 0;
263 
264 	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
265 	return ULONG_MAX;
266 }
267 
268 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
269 				unsigned long pgoff, unsigned long len)
270 {
271 	u64 maxsize = file_mmap_size_max(file, inode);
272 
273 	if (maxsize && len > maxsize)
274 		return false;
275 	maxsize -= len;
276 	if (pgoff > maxsize >> PAGE_SHIFT)
277 		return false;
278 	return true;
279 }
280 
281 /*
282  * The caller must write-lock current->mm->mmap_lock.
283  */
284 unsigned long do_mmap(struct file *file, unsigned long addr,
285 			unsigned long len, unsigned long prot,
286 			unsigned long flags, vm_flags_t vm_flags,
287 			unsigned long pgoff, unsigned long *populate,
288 			struct list_head *uf)
289 {
290 	struct mm_struct *mm = current->mm;
291 	int pkey = 0;
292 
293 	*populate = 0;
294 
295 	if (!len)
296 		return -EINVAL;
297 
298 	/*
299 	 * Does the application expect PROT_READ to imply PROT_EXEC?
300 	 *
301 	 * (the exception is when the underlying filesystem is noexec
302 	 *  mounted, in which case we don't add PROT_EXEC.)
303 	 */
304 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
305 		if (!(file && path_noexec(&file->f_path)))
306 			prot |= PROT_EXEC;
307 
308 	/* force arch specific MAP_FIXED handling in get_unmapped_area */
309 	if (flags & MAP_FIXED_NOREPLACE)
310 		flags |= MAP_FIXED;
311 
312 	if (!(flags & MAP_FIXED))
313 		addr = round_hint_to_min(addr);
314 
315 	/* Careful about overflows.. */
316 	len = PAGE_ALIGN(len);
317 	if (!len)
318 		return -ENOMEM;
319 
320 	/* offset overflow? */
321 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
322 		return -EOVERFLOW;
323 
324 	/* Too many mappings? */
325 	if (mm->map_count > sysctl_max_map_count)
326 		return -ENOMEM;
327 
328 	/*
329 	 * addr is returned from get_unmapped_area.
330 	 * There are two cases:
331 	 * 1> MAP_FIXED == false
332 	 *	unallocated memory, no need to check sealing.
333 	 * 2> MAP_FIXED == true
334 	 *	sealing is checked inside mmap_region when
335 	 *	do_vmi_munmap is called.
336 	 */
337 
338 	if (prot == PROT_EXEC) {
339 		pkey = execute_only_pkey(mm);
340 		if (pkey < 0)
341 			pkey = 0;
342 	}
343 
344 	/* Do simple checking here so the lower-level routines won't have
345 	 * to. We assume access permissions have been handled by the open
346 	 * of the memory object, so we don't do any here.
347 	 */
348 	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
349 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
350 
351 	/* Obtain the address to map to. We verify (or select) it and ensure
352 	 * that it represents a valid section of the address space.
353 	 */
354 	addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
355 	if (IS_ERR_VALUE(addr))
356 		return addr;
357 
358 	if (flags & MAP_FIXED_NOREPLACE) {
359 		if (find_vma_intersection(mm, addr, addr + len))
360 			return -EEXIST;
361 	}
362 
363 	if (flags & MAP_LOCKED)
364 		if (!can_do_mlock())
365 			return -EPERM;
366 
367 	if (!mlock_future_ok(mm, vm_flags, len))
368 		return -EAGAIN;
369 
370 	if (file) {
371 		struct inode *inode = file_inode(file);
372 		unsigned int seals = memfd_file_seals(file);
373 		unsigned long flags_mask;
374 
375 		if (!file_mmap_ok(file, inode, pgoff, len))
376 			return -EOVERFLOW;
377 
378 		flags_mask = LEGACY_MAP_MASK;
379 		if (file->f_op->fop_flags & FOP_MMAP_SYNC)
380 			flags_mask |= MAP_SYNC;
381 
382 		switch (flags & MAP_TYPE) {
383 		case MAP_SHARED:
384 			/*
385 			 * Force use of MAP_SHARED_VALIDATE with non-legacy
386 			 * flags. E.g. MAP_SYNC is dangerous to use with
387 			 * MAP_SHARED as you don't know which consistency model
388 			 * you will get. We silently ignore unsupported flags
389 			 * with MAP_SHARED to preserve backward compatibility.
390 			 */
391 			flags &= LEGACY_MAP_MASK;
392 			fallthrough;
393 		case MAP_SHARED_VALIDATE:
394 			if (flags & ~flags_mask)
395 				return -EOPNOTSUPP;
396 			if (prot & PROT_WRITE) {
397 				if (!(file->f_mode & FMODE_WRITE))
398 					return -EACCES;
399 				if (IS_SWAPFILE(file->f_mapping->host))
400 					return -ETXTBSY;
401 			}
402 
403 			/*
404 			 * Make sure we don't allow writing to an append-only
405 			 * file..
406 			 */
407 			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
408 				return -EACCES;
409 
410 			vm_flags |= VM_SHARED | VM_MAYSHARE;
411 			if (!(file->f_mode & FMODE_WRITE))
412 				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
413 			else if (is_readonly_sealed(seals, vm_flags))
414 				vm_flags &= ~VM_MAYWRITE;
415 			fallthrough;
416 		case MAP_PRIVATE:
417 			if (!(file->f_mode & FMODE_READ))
418 				return -EACCES;
419 			if (path_noexec(&file->f_path)) {
420 				if (vm_flags & VM_EXEC)
421 					return -EPERM;
422 				vm_flags &= ~VM_MAYEXEC;
423 			}
424 
425 			if (!file->f_op->mmap)
426 				return -ENODEV;
427 			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
428 				return -EINVAL;
429 			break;
430 
431 		default:
432 			return -EINVAL;
433 		}
434 	} else {
435 		switch (flags & MAP_TYPE) {
436 		case MAP_SHARED:
437 			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
438 				return -EINVAL;
439 			/*
440 			 * Ignore pgoff.
441 			 */
442 			pgoff = 0;
443 			vm_flags |= VM_SHARED | VM_MAYSHARE;
444 			break;
445 		case MAP_DROPPABLE:
446 			if (VM_DROPPABLE == VM_NONE)
447 				return -ENOTSUPP;
448 			/*
449 			 * A locked or stack area makes no sense to be droppable.
450 			 *
451 			 * Also, since droppable pages can just go away at any time
452 			 * it makes no sense to copy them on fork or dump them.
453 			 *
454 			 * And don't attempt to combine with hugetlb for now.
455 			 */
456 			if (flags & (MAP_LOCKED | MAP_HUGETLB))
457 			        return -EINVAL;
458 			if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
459 			        return -EINVAL;
460 
461 			vm_flags |= VM_DROPPABLE;
462 
463 			/*
464 			 * If the pages can be dropped, then it doesn't make
465 			 * sense to reserve them.
466 			 */
467 			vm_flags |= VM_NORESERVE;
468 
469 			/*
470 			 * Likewise, they're volatile enough that they
471 			 * shouldn't survive forks or coredumps.
472 			 */
473 			vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
474 			fallthrough;
475 		case MAP_PRIVATE:
476 			/*
477 			 * Set pgoff according to addr for anon_vma.
478 			 */
479 			pgoff = addr >> PAGE_SHIFT;
480 			break;
481 		default:
482 			return -EINVAL;
483 		}
484 	}
485 
486 	/*
487 	 * Set 'VM_NORESERVE' if we should not account for the
488 	 * memory use of this mapping.
489 	 */
490 	if (flags & MAP_NORESERVE) {
491 		/* We honor MAP_NORESERVE if allowed to overcommit */
492 		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
493 			vm_flags |= VM_NORESERVE;
494 
495 		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
496 		if (file && is_file_hugepages(file))
497 			vm_flags |= VM_NORESERVE;
498 	}
499 
500 	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
501 	if (!IS_ERR_VALUE(addr) &&
502 	    ((vm_flags & VM_LOCKED) ||
503 	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
504 		*populate = len;
505 	return addr;
506 }
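/*
 * Illustrative sketch of a typical caller, roughly in the spirit of
 * vm_mmap_pgoff() in mm/util.c (simplified, not a verbatim copy): the caller
 * owns the lock, forwards the userfaultfd list, and consumes *populate once
 * the lock has been dropped:
 *
 *	unsigned long populate;
 *	LIST_HEAD(uf);
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
 *	mmap_write_unlock(mm);
 *	userfaultfd_unmap_complete(mm, &uf);
 *	if (populate && !IS_ERR_VALUE(ret))
 *		mm_populate(ret, populate);
 */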
507 
508 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
509 			      unsigned long prot, unsigned long flags,
510 			      unsigned long fd, unsigned long pgoff)
511 {
512 	struct file *file = NULL;
513 	unsigned long retval;
514 
515 	if (!(flags & MAP_ANONYMOUS)) {
516 		audit_mmap_fd(fd, flags);
517 		file = fget(fd);
518 		if (!file)
519 			return -EBADF;
520 		if (is_file_hugepages(file)) {
521 			len = ALIGN(len, huge_page_size(hstate_file(file)));
522 		} else if (unlikely(flags & MAP_HUGETLB)) {
523 			retval = -EINVAL;
524 			goto out_fput;
525 		}
526 	} else if (flags & MAP_HUGETLB) {
527 		struct hstate *hs;
528 
529 		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
530 		if (!hs)
531 			return -EINVAL;
532 
533 		len = ALIGN(len, huge_page_size(hs));
534 		/*
535 		 * VM_NORESERVE is used because the reservations will be
536 		 * taken when vm_ops->mmap() is called
537 		 */
538 		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
539 				VM_NORESERVE,
540 				HUGETLB_ANONHUGE_INODE,
541 				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
542 		if (IS_ERR(file))
543 			return PTR_ERR(file);
544 	}
545 
546 	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
547 out_fput:
548 	if (file)
549 		fput(file);
550 	return retval;
551 }
552 
553 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
554 		unsigned long, prot, unsigned long, flags,
555 		unsigned long, fd, unsigned long, pgoff)
556 {
557 	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
558 }
559 
560 #ifdef __ARCH_WANT_SYS_OLD_MMAP
561 struct mmap_arg_struct {
562 	unsigned long addr;
563 	unsigned long len;
564 	unsigned long prot;
565 	unsigned long flags;
566 	unsigned long fd;
567 	unsigned long offset;
568 };
569 
570 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
571 {
572 	struct mmap_arg_struct a;
573 
574 	if (copy_from_user(&a, arg, sizeof(a)))
575 		return -EFAULT;
576 	if (offset_in_page(a.offset))
577 		return -EINVAL;
578 
579 	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
580 			       a.offset >> PAGE_SHIFT);
581 }
582 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
583 
584 /**
585  * unmapped_area() - Find an area between the low_limit and the high_limit with
586  * the correct alignment and offset, all from @info. Note: current->mm is used
587  * for the search.
588  *
589  * @info: The unmapped area information including the range [low_limit,
590  * high_limit), the alignment offset and mask.
591  *
592  * Return: A memory address or -ENOMEM.
593  */
594 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
595 {
596 	unsigned long length, gap;
597 	unsigned long low_limit, high_limit;
598 	struct vm_area_struct *tmp;
599 	VMA_ITERATOR(vmi, current->mm, 0);
600 
601 	/* Adjust search length to account for worst case alignment overhead */
602 	length = info->length + info->align_mask + info->start_gap;
603 	if (length < info->length)
604 		return -ENOMEM;
605 
606 	low_limit = info->low_limit;
607 	if (low_limit < mmap_min_addr)
608 		low_limit = mmap_min_addr;
609 	high_limit = info->high_limit;
610 retry:
611 	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
612 		return -ENOMEM;
613 
614 	/*
615 	 * Adjust for the gap first so it doesn't interfere with the
616 	 * later alignment. The first step is the minimum needed to
617 	 * fulfill the start gap, the next step is the minimum to align
618 	 * that. It is the minimum needed to fulfill both.
619 	 */
620 	gap = vma_iter_addr(&vmi) + info->start_gap;
621 	gap += (info->align_offset - gap) & info->align_mask;
622 	tmp = vma_next(&vmi);
623 	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
624 		if (vm_start_gap(tmp) < gap + length - 1) {
625 			low_limit = tmp->vm_end;
626 			vma_iter_reset(&vmi);
627 			goto retry;
628 		}
629 	} else {
630 		tmp = vma_prev(&vmi);
631 		if (tmp && vm_end_gap(tmp) > gap) {
632 			low_limit = vm_end_gap(tmp);
633 			vma_iter_reset(&vmi);
634 			goto retry;
635 		}
636 	}
637 
638 	return gap;
639 }
640 
641 /**
642  * unmapped_area_topdown() - Find an area between the low_limit and the
643  * high_limit with the correct alignment and offset at the highest available
644  * address, all from @info. Note: current->mm is used for the search.
645  *
646  * @info: The unmapped area information including the range [low_limit,
647  * high_limit), the alignment offset and mask.
648  *
649  * Return: A memory address or -ENOMEM.
650  */
651 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
652 {
653 	unsigned long length, gap, gap_end;
654 	unsigned long low_limit, high_limit;
655 	struct vm_area_struct *tmp;
656 	VMA_ITERATOR(vmi, current->mm, 0);
657 
658 	/* Adjust search length to account for worst case alignment overhead */
659 	length = info->length + info->align_mask + info->start_gap;
660 	if (length < info->length)
661 		return -ENOMEM;
662 
663 	low_limit = info->low_limit;
664 	if (low_limit < mmap_min_addr)
665 		low_limit = mmap_min_addr;
666 	high_limit = info->high_limit;
667 retry:
668 	if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
669 		return -ENOMEM;
670 
671 	gap = vma_iter_end(&vmi) - info->length;
672 	gap -= (gap - info->align_offset) & info->align_mask;
673 	gap_end = vma_iter_end(&vmi);
674 	tmp = vma_next(&vmi);
675 	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
676 		if (vm_start_gap(tmp) < gap_end) {
677 			high_limit = vm_start_gap(tmp);
678 			vma_iter_reset(&vmi);
679 			goto retry;
680 		}
681 	} else {
682 		tmp = vma_prev(&vmi);
683 		if (tmp && vm_end_gap(tmp) > gap) {
684 			high_limit = tmp->vm_start;
685 			vma_iter_reset(&vmi);
686 			goto retry;
687 		}
688 	}
689 
690 	return gap;
691 }
692 
693 /*
694  * Determine if the allocation needs to ensure that there is no
695  * existing mapping within its guard gaps, for use as start_gap.
696  */
697 static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
698 {
699 	if (vm_flags & VM_SHADOW_STACK)
700 		return PAGE_SIZE;
701 
702 	return 0;
703 }
704 
705 /*
706  * Search for an unmapped address range.
707  *
708  * We are looking for a range that:
709  * - does not intersect with any VMA;
710  * - is contained within the [low_limit, high_limit) interval;
711  * - is at least the desired size;
712  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
713  */
714 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
715 {
716 	unsigned long addr;
717 
718 	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
719 		addr = unmapped_area_topdown(info);
720 	else
721 		addr = unmapped_area(info);
722 
723 	trace_vm_unmapped_area(addr, info);
724 	return addr;
725 }
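/*
 * Illustrative only: a hypothetical caller wanting SHMLBA cache colouring in
 * the usual bottom-up window (in the style of several architectures'
 * get_unmapped_area implementations) could fill the request like this; the
 * surrounding variables are assumptions, not code from this file:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = PAGE_MASK & (SHMLBA - 1);
 *	info.align_offset = pgoff << PAGE_SHIFT;
 *	addr = vm_unmapped_area(&info);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;		// only -ENOMEM is returned here
 */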
726 
727 /* Get an address range which is currently unmapped.
728  * For shmat() with addr=0.
729  *
730  * Ugly calling convention alert:
731  * Return value with the low bits set means error value,
732  * i.e.
733  *	if (ret & ~PAGE_MASK)
734  *		error = ret;
735  *
736  * This function "knows" that -ENOMEM has the bits set.
737  */
738 unsigned long
739 generic_get_unmapped_area(struct file *filp, unsigned long addr,
740 			  unsigned long len, unsigned long pgoff,
741 			  unsigned long flags, vm_flags_t vm_flags)
742 {
743 	struct mm_struct *mm = current->mm;
744 	struct vm_area_struct *vma, *prev;
745 	struct vm_unmapped_area_info info = {};
746 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
747 
748 	if (len > mmap_end - mmap_min_addr)
749 		return -ENOMEM;
750 
751 	if (flags & MAP_FIXED)
752 		return addr;
753 
754 	if (addr) {
755 		addr = PAGE_ALIGN(addr);
756 		vma = find_vma_prev(mm, addr, &prev);
757 		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
758 		    (!vma || addr + len <= vm_start_gap(vma)) &&
759 		    (!prev || addr >= vm_end_gap(prev)))
760 			return addr;
761 	}
762 
763 	info.length = len;
764 	info.low_limit = mm->mmap_base;
765 	info.high_limit = mmap_end;
766 	info.start_gap = stack_guard_placement(vm_flags);
767 	if (filp && is_file_hugepages(filp))
768 		info.align_mask = huge_page_mask_align(filp);
769 	return vm_unmapped_area(&info);
770 }
771 
772 #ifndef HAVE_ARCH_UNMAPPED_AREA
773 unsigned long
774 arch_get_unmapped_area(struct file *filp, unsigned long addr,
775 		       unsigned long len, unsigned long pgoff,
776 		       unsigned long flags, vm_flags_t vm_flags)
777 {
778 	return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
779 					 vm_flags);
780 }
781 #endif
782 
783 /*
784  * This mmap-allocator allocates new areas top-down from below the
785  * stack's low limit (the base):
786  */
787 unsigned long
788 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
789 				  unsigned long len, unsigned long pgoff,
790 				  unsigned long flags, vm_flags_t vm_flags)
791 {
792 	struct vm_area_struct *vma, *prev;
793 	struct mm_struct *mm = current->mm;
794 	struct vm_unmapped_area_info info = {};
795 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
796 
797 	/* requested length too big for entire address space */
798 	if (len > mmap_end - mmap_min_addr)
799 		return -ENOMEM;
800 
801 	if (flags & MAP_FIXED)
802 		return addr;
803 
804 	/* requesting a specific address */
805 	if (addr) {
806 		addr = PAGE_ALIGN(addr);
807 		vma = find_vma_prev(mm, addr, &prev);
808 		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
809 				(!vma || addr + len <= vm_start_gap(vma)) &&
810 				(!prev || addr >= vm_end_gap(prev)))
811 			return addr;
812 	}
813 
814 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
815 	info.length = len;
816 	info.low_limit = PAGE_SIZE;
817 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
818 	info.start_gap = stack_guard_placement(vm_flags);
819 	if (filp && is_file_hugepages(filp))
820 		info.align_mask = huge_page_mask_align(filp);
821 	addr = vm_unmapped_area(&info);
822 
823 	/*
824 	 * A failed mmap() very likely causes application failure,
825 	 * so fall back to the bottom-up function here. This scenario
826 	 * can happen with large stack limits and large mmap()
827 	 * allocations.
828 	 */
829 	if (offset_in_page(addr)) {
830 		VM_BUG_ON(addr != -ENOMEM);
831 		info.flags = 0;
832 		info.low_limit = TASK_UNMAPPED_BASE;
833 		info.high_limit = mmap_end;
834 		addr = vm_unmapped_area(&info);
835 	}
836 
837 	return addr;
838 }
839 
840 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
841 unsigned long
842 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
843 			       unsigned long len, unsigned long pgoff,
844 			       unsigned long flags, vm_flags_t vm_flags)
845 {
846 	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
847 						 vm_flags);
848 }
849 #endif
850 
851 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
852 					   unsigned long addr, unsigned long len,
853 					   unsigned long pgoff, unsigned long flags,
854 					   vm_flags_t vm_flags)
855 {
856 	if (test_bit(MMF_TOPDOWN, &mm->flags))
857 		return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
858 						      flags, vm_flags);
859 	return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
860 }
861 
862 unsigned long
863 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
864 		unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
865 {
866 	unsigned long (*get_area)(struct file *, unsigned long,
867 				  unsigned long, unsigned long, unsigned long)
868 				  = NULL;
869 
870 	unsigned long error = arch_mmap_check(addr, len, flags);
871 	if (error)
872 		return error;
873 
874 	/* Careful about overflows.. */
875 	if (len > TASK_SIZE)
876 		return -ENOMEM;
877 
878 	if (file) {
879 		if (file->f_op->get_unmapped_area)
880 			get_area = file->f_op->get_unmapped_area;
881 	} else if (flags & MAP_SHARED) {
882 		/*
883 		 * mmap_region() will call shmem_zero_setup() to create a file,
884 		 * so use shmem's get_unmapped_area in case it can be huge.
885 		 */
886 		get_area = shmem_get_unmapped_area;
887 	}
888 
889 	/* Always treat pgoff as zero for anonymous memory. */
890 	if (!file)
891 		pgoff = 0;
892 
893 	if (get_area) {
894 		addr = get_area(file, addr, len, pgoff, flags);
895 	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file
896 		   && !addr /* no hint */
897 		   && IS_ALIGNED(len, PMD_SIZE)) {
898 		/* Ensures that larger anonymous mappings are THP aligned. */
899 		addr = thp_get_unmapped_area_vmflags(file, addr, len,
900 						     pgoff, flags, vm_flags);
901 	} else {
902 		addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
903 						    pgoff, flags, vm_flags);
904 	}
905 	if (IS_ERR_VALUE(addr))
906 		return addr;
907 
908 	if (addr > TASK_SIZE - len)
909 		return -ENOMEM;
910 	if (offset_in_page(addr))
911 		return -EINVAL;
912 
913 	error = security_mmap_addr(addr);
914 	return error ? error : addr;
915 }
916 
917 unsigned long
918 mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
919 		     unsigned long addr, unsigned long len,
920 		     unsigned long pgoff, unsigned long flags)
921 {
922 	if (test_bit(MMF_TOPDOWN, &mm->flags))
923 		return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0);
924 	return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
925 }
926 EXPORT_SYMBOL(mm_get_unmapped_area);
927 
928 /**
929  * find_vma_intersection() - Look up the first VMA which intersects the interval
930  * @mm: The process address space.
931  * @start_addr: The inclusive start user address.
932  * @end_addr: The exclusive end user address.
933  *
934  * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
935  * start_addr < end_addr.
936  */
937 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
938 					     unsigned long start_addr,
939 					     unsigned long end_addr)
940 {
941 	unsigned long index = start_addr;
942 
943 	mmap_assert_locked(mm);
944 	return mt_find(&mm->mm_mt, &index, end_addr - 1);
945 }
946 EXPORT_SYMBOL(find_vma_intersection);
947 
948 /**
949  * find_vma() - Find the VMA for a given address, or the next VMA.
950  * @mm: The mm_struct to check
951  * @addr: The address
952  *
953  * Returns: The VMA associated with addr, or the next VMA.
954  * May return %NULL in the case of no VMA at addr or above.
955  */
956 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
957 {
958 	unsigned long index = addr;
959 
960 	mmap_assert_locked(mm);
961 	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
962 }
963 EXPORT_SYMBOL(find_vma);
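/*
 * Illustrative only: because find_vma() may return the next VMA above @addr,
 * a caller that needs the VMA actually containing the address must check
 * vm_start as well.  A minimal sketch, assuming the caller's own error path:
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	if (!vma || vma->vm_start > addr) {
 *		mmap_read_unlock(mm);
 *		return -EFAULT;		// addr is not mapped
 *	}
 *	... use vma ...
 *	mmap_read_unlock(mm);
 *
 * vma_lookup() wraps exactly this containment check.
 */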
964 
965 /**
966  * find_vma_prev() - Find the VMA for a given address, or the next vma and
967  * set %pprev to the previous VMA, if any.
968  * @mm: The mm_struct to check
969  * @addr: The address
970  * @pprev: The pointer to set to the previous VMA
971  *
972  * Note that the RCU lock is not taken here since the external mmap_lock is
973  * used instead.
974  *
975  * Returns: The VMA associated with @addr, or the next vma.
976  * May return %NULL in the case of no vma at addr or above.
977  */
978 struct vm_area_struct *
979 find_vma_prev(struct mm_struct *mm, unsigned long addr,
980 			struct vm_area_struct **pprev)
981 {
982 	struct vm_area_struct *vma;
983 	VMA_ITERATOR(vmi, mm, addr);
984 
985 	vma = vma_iter_load(&vmi);
986 	*pprev = vma_prev(&vmi);
987 	if (!vma)
988 		vma = vma_next(&vmi);
989 	return vma;
990 }
991 
992 /*
993  * Verify that the stack growth is acceptable and
994  * update accounting. This is shared with both the
995  * grow-up and grow-down cases.
996  */
997 static int acct_stack_growth(struct vm_area_struct *vma,
998 			     unsigned long size, unsigned long grow)
999 {
1000 	struct mm_struct *mm = vma->vm_mm;
1001 	unsigned long new_start;
1002 
1003 	/* address space limit tests */
1004 	if (!may_expand_vm(mm, vma->vm_flags, grow))
1005 		return -ENOMEM;
1006 
1007 	/* Stack limit test */
1008 	if (size > rlimit(RLIMIT_STACK))
1009 		return -ENOMEM;
1010 
1011 	/* mlock limit tests */
1012 	if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1013 		return -ENOMEM;
1014 
1015 	/* Check to ensure the stack will not grow into a hugetlb-only region */
1016 	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1017 			vma->vm_end - size;
1018 	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1019 		return -EFAULT;
1020 
1021 	/*
1022 	 * Overcommit..  This must be the final test, as it will
1023 	 * update security statistics.
1024 	 */
1025 	if (security_vm_enough_memory_mm(mm, grow))
1026 		return -ENOMEM;
1027 
1028 	return 0;
1029 }
1030 
1031 #if defined(CONFIG_STACK_GROWSUP)
1032 /*
1033  * PA-RISC uses this for its stack.
1034  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1035  */
1036 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1037 {
1038 	struct mm_struct *mm = vma->vm_mm;
1039 	struct vm_area_struct *next;
1040 	unsigned long gap_addr;
1041 	int error = 0;
1042 	VMA_ITERATOR(vmi, mm, vma->vm_start);
1043 
1044 	if (!(vma->vm_flags & VM_GROWSUP))
1045 		return -EFAULT;
1046 
1047 	mmap_assert_write_locked(mm);
1048 
1049 	/* Guard against exceeding limits of the address space. */
1050 	address &= PAGE_MASK;
1051 	if (address >= (TASK_SIZE & PAGE_MASK))
1052 		return -ENOMEM;
1053 	address += PAGE_SIZE;
1054 
1055 	/* Enforce stack_guard_gap */
1056 	gap_addr = address + stack_guard_gap;
1057 
1058 	/* Guard against overflow */
1059 	if (gap_addr < address || gap_addr > TASK_SIZE)
1060 		gap_addr = TASK_SIZE;
1061 
1062 	next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1063 	if (next && vma_is_accessible(next)) {
1064 		if (!(next->vm_flags & VM_GROWSUP))
1065 			return -ENOMEM;
1066 		/* Check that both stack segments have the same anon_vma? */
1067 	}
1068 
1069 	if (next)
1070 		vma_iter_prev_range_limit(&vmi, address);
1071 
1072 	vma_iter_config(&vmi, vma->vm_start, address);
1073 	if (vma_iter_prealloc(&vmi, vma))
1074 		return -ENOMEM;
1075 
1076 	/* We must make sure the anon_vma is allocated. */
1077 	if (unlikely(anon_vma_prepare(vma))) {
1078 		vma_iter_free(&vmi);
1079 		return -ENOMEM;
1080 	}
1081 
1082 	/* Lock the VMA before expanding to prevent concurrent page faults */
1083 	vma_start_write(vma);
1084 	/* We update the anon VMA tree. */
1085 	anon_vma_lock_write(vma->anon_vma);
1086 
1087 	/* Somebody else might have raced and expanded it already */
1088 	if (address > vma->vm_end) {
1089 		unsigned long size, grow;
1090 
1091 		size = address - vma->vm_start;
1092 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
1093 
1094 		error = -ENOMEM;
1095 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1096 			error = acct_stack_growth(vma, size, grow);
1097 			if (!error) {
1098 				if (vma->vm_flags & VM_LOCKED)
1099 					mm->locked_vm += grow;
1100 				vm_stat_account(mm, vma->vm_flags, grow);
1101 				anon_vma_interval_tree_pre_update_vma(vma);
1102 				vma->vm_end = address;
1103 				/* Overwrite old entry in mtree. */
1104 				vma_iter_store(&vmi, vma);
1105 				anon_vma_interval_tree_post_update_vma(vma);
1106 
1107 				perf_event_mmap(vma);
1108 			}
1109 		}
1110 	}
1111 	anon_vma_unlock_write(vma->anon_vma);
1112 	vma_iter_free(&vmi);
1113 	validate_mm(mm);
1114 	return error;
1115 }
1116 #endif /* CONFIG_STACK_GROWSUP */
1117 
1118 /*
1119  * vma is the first one with address < vma->vm_start.  Have to extend vma.
1120  * mmap_lock held for writing.
1121  */
1122 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
1123 {
1124 	struct mm_struct *mm = vma->vm_mm;
1125 	struct vm_area_struct *prev;
1126 	int error = 0;
1127 	VMA_ITERATOR(vmi, mm, vma->vm_start);
1128 
1129 	if (!(vma->vm_flags & VM_GROWSDOWN))
1130 		return -EFAULT;
1131 
1132 	mmap_assert_write_locked(mm);
1133 
1134 	address &= PAGE_MASK;
1135 	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
1136 		return -EPERM;
1137 
1138 	/* Enforce stack_guard_gap */
1139 	prev = vma_prev(&vmi);
1140 	/* Check that both stack segments have the same anon_vma? */
1141 	if (prev) {
1142 		if (!(prev->vm_flags & VM_GROWSDOWN) &&
1143 		    vma_is_accessible(prev) &&
1144 		    (address - prev->vm_end < stack_guard_gap))
1145 			return -ENOMEM;
1146 	}
1147 
1148 	if (prev)
1149 		vma_iter_next_range_limit(&vmi, vma->vm_start);
1150 
1151 	vma_iter_config(&vmi, address, vma->vm_end);
1152 	if (vma_iter_prealloc(&vmi, vma))
1153 		return -ENOMEM;
1154 
1155 	/* We must make sure the anon_vma is allocated. */
1156 	if (unlikely(anon_vma_prepare(vma))) {
1157 		vma_iter_free(&vmi);
1158 		return -ENOMEM;
1159 	}
1160 
1161 	/* Lock the VMA before expanding to prevent concurrent page faults */
1162 	vma_start_write(vma);
1163 	/* We update the anon VMA tree. */
1164 	anon_vma_lock_write(vma->anon_vma);
1165 
1166 	/* Somebody else might have raced and expanded it already */
1167 	if (address < vma->vm_start) {
1168 		unsigned long size, grow;
1169 
1170 		size = vma->vm_end - address;
1171 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
1172 
1173 		error = -ENOMEM;
1174 		if (grow <= vma->vm_pgoff) {
1175 			error = acct_stack_growth(vma, size, grow);
1176 			if (!error) {
1177 				if (vma->vm_flags & VM_LOCKED)
1178 					mm->locked_vm += grow;
1179 				vm_stat_account(mm, vma->vm_flags, grow);
1180 				anon_vma_interval_tree_pre_update_vma(vma);
1181 				vma->vm_start = address;
1182 				vma->vm_pgoff -= grow;
1183 				/* Overwrite old entry in mtree. */
1184 				vma_iter_store(&vmi, vma);
1185 				anon_vma_interval_tree_post_update_vma(vma);
1186 
1187 				perf_event_mmap(vma);
1188 			}
1189 		}
1190 	}
1191 	anon_vma_unlock_write(vma->anon_vma);
1192 	vma_iter_free(&vmi);
1193 	validate_mm(mm);
1194 	return error;
1195 }
1196 
1197 /* enforced gap between the expanding stack and other mappings. */
1198 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
1199 
1200 static int __init cmdline_parse_stack_guard_gap(char *p)
1201 {
1202 	unsigned long val;
1203 	char *endptr;
1204 
1205 	val = simple_strtoul(p, &endptr, 10);
1206 	if (!*endptr)
1207 		stack_guard_gap = val << PAGE_SHIFT;
1208 
1209 	return 1;
1210 }
1211 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
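/*
 * The value parsed above is in pages, not bytes: the default of 256 pages is
 * 1 MiB with 4 KiB pages, and booting with e.g. "stack_guard_gap=512" on such
 * a system widens the gap to 2 MiB (the example value is illustrative).
 */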
1212 
1213 #ifdef CONFIG_STACK_GROWSUP
1214 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1215 {
1216 	return expand_upwards(vma, address);
1217 }
1218 
1219 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1220 {
1221 	struct vm_area_struct *vma, *prev;
1222 
1223 	addr &= PAGE_MASK;
1224 	vma = find_vma_prev(mm, addr, &prev);
1225 	if (vma && (vma->vm_start <= addr))
1226 		return vma;
1227 	if (!prev)
1228 		return NULL;
1229 	if (expand_stack_locked(prev, addr))
1230 		return NULL;
1231 	if (prev->vm_flags & VM_LOCKED)
1232 		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
1233 	return prev;
1234 }
1235 #else
1236 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1237 {
1238 	return expand_downwards(vma, address);
1239 }
1240 
1241 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1242 {
1243 	struct vm_area_struct *vma;
1244 	unsigned long start;
1245 
1246 	addr &= PAGE_MASK;
1247 	vma = find_vma(mm, addr);
1248 	if (!vma)
1249 		return NULL;
1250 	if (vma->vm_start <= addr)
1251 		return vma;
1252 	start = vma->vm_start;
1253 	if (expand_stack_locked(vma, addr))
1254 		return NULL;
1255 	if (vma->vm_flags & VM_LOCKED)
1256 		populate_vma_page_range(vma, addr, start, NULL);
1257 	return vma;
1258 }
1259 #endif
1260 
1261 #if defined(CONFIG_STACK_GROWSUP)
1262 
1263 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
1264 #define vma_expand_down(vma, addr) (-EFAULT)
1265 
1266 #else
1267 
1268 #define vma_expand_up(vma,addr) (-EFAULT)
1269 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
1270 
1271 #endif
1272 
1273 /*
1274  * expand_stack(): legacy interface for page faulting. Don't use unless
1275  * you have to.
1276  *
1277  * This is called with the mm locked for reading, drops the lock, takes
1278  * the lock for writing, tries to look up a vma again, expands it if
1279  * necessary, and downgrades the lock to reading again.
1280  *
1281  * If no vma is found or it can't be expanded, it returns NULL and has
1282  * dropped the lock.
1283  */
1284 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
1285 {
1286 	struct vm_area_struct *vma, *prev;
1287 
1288 	mmap_read_unlock(mm);
1289 	if (mmap_write_lock_killable(mm))
1290 		return NULL;
1291 
1292 	vma = find_vma_prev(mm, addr, &prev);
1293 	if (vma && vma->vm_start <= addr)
1294 		goto success;
1295 
1296 	if (prev && !vma_expand_up(prev, addr)) {
1297 		vma = prev;
1298 		goto success;
1299 	}
1300 
1301 	if (vma && !vma_expand_down(vma, addr))
1302 		goto success;
1303 
1304 	mmap_write_unlock(mm);
1305 	return NULL;
1306 
1307 success:
1308 	mmap_write_downgrade(mm);
1309 	return vma;
1310 }
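/*
 * Illustrative only: a hypothetical fault-path caller of this legacy
 * interface, written to the contract documented above (the error handling is
 * an assumption, not code from this file):
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	if (!vma || vma->vm_start > addr) {
 *		vma = expand_stack(mm, addr);
 *		if (!vma)		// mmap_lock already dropped on failure
 *			return bad_area_nosemaphore();
 *	}
 *	... handle the fault, then mmap_read_unlock(mm) ...
 */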
1311 
1312 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
1313  * @mm: The mm_struct
1314  * @start: The start address to munmap
1315  * @len: The length to be munmapped.
1316  * @uf: The userfaultfd list_head
1317  *
1318  * Return: 0 on success, error otherwise.
1319  */
1320 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
1321 	      struct list_head *uf)
1322 {
1323 	VMA_ITERATOR(vmi, mm, start);
1324 
1325 	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
1326 }
1327 
1328 unsigned long mmap_region(struct file *file, unsigned long addr,
1329 			  unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
1330 			  struct list_head *uf)
1331 {
1332 	unsigned long ret;
1333 	bool writable_file_mapping = false;
1334 
1335 	/* Check to see if MDWE is applicable. */
1336 	if (map_deny_write_exec(vm_flags, vm_flags))
1337 		return -EACCES;
1338 
1339 	/* Allow architectures to sanity-check the vm_flags. */
1340 	if (!arch_validate_flags(vm_flags))
1341 		return -EINVAL;
1342 
1343 	/* Map writable and ensure this isn't a sealed memfd. */
1344 	if (file && is_shared_maywrite(vm_flags)) {
1345 		int error = mapping_map_writable(file->f_mapping);
1346 
1347 		if (error)
1348 			return error;
1349 		writable_file_mapping = true;
1350 	}
1351 
1352 	ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
1353 
1354 	/* Clear our write mapping regardless of error. */
1355 	if (writable_file_mapping)
1356 		mapping_unmap_writable(file->f_mapping);
1357 
1358 	validate_mm(current->mm);
1359 	return ret;
1360 }
1361 
1362 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
1363 {
1364 	int ret;
1365 	struct mm_struct *mm = current->mm;
1366 	LIST_HEAD(uf);
1367 	VMA_ITERATOR(vmi, mm, start);
1368 
1369 	if (mmap_write_lock_killable(mm))
1370 		return -EINTR;
1371 
1372 	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
1373 	if (ret || !unlock)
1374 		mmap_write_unlock(mm);
1375 
1376 	userfaultfd_unmap_complete(mm, &uf);
1377 	return ret;
1378 }
1379 
1380 int vm_munmap(unsigned long start, size_t len)
1381 {
1382 	return __vm_munmap(start, len, false);
1383 }
1384 EXPORT_SYMBOL(vm_munmap);
1385 
1386 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1387 {
1388 	addr = untagged_addr(addr);
1389 	return __vm_munmap(addr, len, true);
1390 }
1391 
1392 
1393 /*
1394  * Emulation of deprecated remap_file_pages() syscall.
1395  */
1396 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
1397 		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
1398 {
1399 
1400 	struct mm_struct *mm = current->mm;
1401 	struct vm_area_struct *vma;
1402 	unsigned long populate = 0;
1403 	unsigned long ret = -EINVAL;
1404 	struct file *file;
1405 	vm_flags_t vm_flags;
1406 
1407 	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
1408 		     current->comm, current->pid);
1409 
1410 	if (prot)
1411 		return ret;
1412 	start = start & PAGE_MASK;
1413 	size = size & PAGE_MASK;
1414 
1415 	if (start + size <= start)
1416 		return ret;
1417 
1418 	/* Does pgoff wrap? */
1419 	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
1420 		return ret;
1421 
1422 	if (mmap_read_lock_killable(mm))
1423 		return -EINTR;
1424 
1425 	/*
1426 	 * Look up VMA under read lock first so we can perform the security
1427 	 * check without holding locks (which can be problematic). We reacquire a
1428 	 * write lock later and check nothing changed underneath us.
1429 	 */
1430 	vma = vma_lookup(mm, start);
1431 
1432 	if (!vma || !(vma->vm_flags & VM_SHARED)) {
1433 		mmap_read_unlock(mm);
1434 		return -EINVAL;
1435 	}
1436 
1437 	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
1438 	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
1439 	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
1440 
1441 	flags &= MAP_NONBLOCK;
1442 	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
1443 	if (vma->vm_flags & VM_LOCKED)
1444 		flags |= MAP_LOCKED;
1445 
1446 	/* Save vm_flags used to calculate prot and flags, and recheck later. */
1447 	vm_flags = vma->vm_flags;
1448 	file = get_file(vma->vm_file);
1449 
1450 	mmap_read_unlock(mm);
1451 
1452 	/* Call outside mmap_lock to be consistent with other callers. */
1453 	ret = security_mmap_file(file, prot, flags);
1454 	if (ret) {
1455 		fput(file);
1456 		return ret;
1457 	}
1458 
1459 	ret = -EINVAL;
1460 
1461 	/* OK security check passed, take write lock + let it rip. */
1462 	if (mmap_write_lock_killable(mm)) {
1463 		fput(file);
1464 		return -EINTR;
1465 	}
1466 
1467 	vma = vma_lookup(mm, start);
1468 
1469 	if (!vma)
1470 		goto out;
1471 
1472 	/* Make sure things didn't change under us. */
1473 	if (vma->vm_flags != vm_flags)
1474 		goto out;
1475 	if (vma->vm_file != file)
1476 		goto out;
1477 
1478 	if (start + size > vma->vm_end) {
1479 		VMA_ITERATOR(vmi, mm, vma->vm_end);
1480 		struct vm_area_struct *next, *prev = vma;
1481 
1482 		for_each_vma_range(vmi, next, start + size) {
1483 			/* hole between vmas ? */
1484 			if (next->vm_start != prev->vm_end)
1485 				goto out;
1486 
1487 			if (next->vm_file != vma->vm_file)
1488 				goto out;
1489 
1490 			if (next->vm_flags != vma->vm_flags)
1491 				goto out;
1492 
1493 			if (start + size <= next->vm_end)
1494 				break;
1495 
1496 			prev = next;
1497 		}
1498 
1499 		if (!next)
1500 			goto out;
1501 	}
1502 
1503 	ret = do_mmap(vma->vm_file, start, size,
1504 			prot, flags, 0, pgoff, &populate, NULL);
1505 out:
1506 	mmap_write_unlock(mm);
1507 	fput(file);
1508 	if (populate)
1509 		mm_populate(ret, populate);
1510 	if (!IS_ERR_VALUE(ret))
1511 		ret = 0;
1512 	return ret;
1513 }
1514 
1515 /*
1516  * do_brk_flags() - Increase the brk vma if the flags match.
1517  * @vmi: The vma iterator
1518  * @vma: The brk vma to extend, or NULL
1519  * @addr: The start address
1520  * @len: The length of the increase
1521  * @flags: The VMA flags
1522  *
1523  * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
1524  * do not match then create a new anonymous VMA.  Eventually we may be able to
1525  * do some brk-specific accounting here.
1526  */
1527 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
1528 		unsigned long addr, unsigned long len, unsigned long flags)
1529 {
1530 	struct mm_struct *mm = current->mm;
1531 
1532 	/*
1533 	 * Check against address space limits by the changed size
1534 	 * Note: This happens *after* clearing old mappings in some code paths.
1535 	 */
1536 	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1537 	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
1538 		return -ENOMEM;
1539 
1540 	if (mm->map_count > sysctl_max_map_count)
1541 		return -ENOMEM;
1542 
1543 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
1544 		return -ENOMEM;
1545 
1546 	/*
1547 	 * Expand the existing vma if possible; Note that singular lists do not
1548 	 * occur after forking, so the expand will only happen on new VMAs.
1549 	 */
1550 	if (vma && vma->vm_end == addr) {
1551 		VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
1552 
1553 		vmg.prev = vma;
1554 		/* vmi is positioned at prev, which this mode expects. */
1555 		vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
1556 
1557 		if (vma_merge_new_range(&vmg))
1558 			goto out;
1559 		else if (vmg_nomem(&vmg))
1560 			goto unacct_fail;
1561 	}
1562 
1563 	if (vma)
1564 		vma_iter_next_range(vmi);
1565 	/* create a vma struct for an anonymous mapping */
1566 	vma = vm_area_alloc(mm);
1567 	if (!vma)
1568 		goto unacct_fail;
1569 
1570 	vma_set_anonymous(vma);
1571 	vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
1572 	vm_flags_init(vma, flags);
1573 	vma->vm_page_prot = vm_get_page_prot(flags);
1574 	vma_start_write(vma);
1575 	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
1576 		goto mas_store_fail;
1577 
1578 	mm->map_count++;
1579 	validate_mm(mm);
1580 	ksm_add_vma(vma);
1581 out:
1582 	perf_event_mmap(vma);
1583 	mm->total_vm += len >> PAGE_SHIFT;
1584 	mm->data_vm += len >> PAGE_SHIFT;
1585 	if (flags & VM_LOCKED)
1586 		mm->locked_vm += (len >> PAGE_SHIFT);
1587 	vm_flags_set(vma, VM_SOFTDIRTY);
1588 	return 0;
1589 
1590 mas_store_fail:
1591 	vm_area_free(vma);
1592 unacct_fail:
1593 	vm_unacct_memory(len >> PAGE_SHIFT);
1594 	return -ENOMEM;
1595 }
1596 
1597 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
1598 {
1599 	struct mm_struct *mm = current->mm;
1600 	struct vm_area_struct *vma = NULL;
1601 	unsigned long len;
1602 	int ret;
1603 	bool populate;
1604 	LIST_HEAD(uf);
1605 	VMA_ITERATOR(vmi, mm, addr);
1606 
1607 	len = PAGE_ALIGN(request);
1608 	if (len < request)
1609 		return -ENOMEM;
1610 	if (!len)
1611 		return 0;
1612 
1613 	/* Until we need other flags, refuse anything except VM_EXEC. */
1614 	if ((flags & (~VM_EXEC)) != 0)
1615 		return -EINVAL;
1616 
1617 	if (mmap_write_lock_killable(mm))
1618 		return -EINTR;
1619 
1620 	ret = check_brk_limits(addr, len);
1621 	if (ret)
1622 		goto limits_failed;
1623 
1624 	ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
1625 	if (ret)
1626 		goto munmap_failed;
1627 
1628 	vma = vma_prev(&vmi);
1629 	ret = do_brk_flags(&vmi, vma, addr, len, flags);
1630 	populate = ((mm->def_flags & VM_LOCKED) != 0);
1631 	mmap_write_unlock(mm);
1632 	userfaultfd_unmap_complete(mm, &uf);
1633 	if (populate && !ret)
1634 		mm_populate(addr, len);
1635 	return ret;
1636 
1637 munmap_failed:
1638 limits_failed:
1639 	mmap_write_unlock(mm);
1640 	return ret;
1641 }
1642 EXPORT_SYMBOL(vm_brk_flags);
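/*
 * Illustrative only: the classic in-kernel user of this interface is a
 * binfmt loader mapping a zero-filled bss-style region.  A sketch in that
 * spirit (the variables and the PROT_EXEC decision are assumptions):
 *
 *	start = PAGE_ALIGN(bss_start);
 *	end = PAGE_ALIGN(bss_end);
 *	if (end > start)
 *		error = vm_brk_flags(start, end - start,
 *				     executable ? VM_EXEC : 0);
 */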
1643 
1644 /* Release all mmaps. */
1645 void exit_mmap(struct mm_struct *mm)
1646 {
1647 	struct mmu_gather tlb;
1648 	struct vm_area_struct *vma;
1649 	unsigned long nr_accounted = 0;
1650 	VMA_ITERATOR(vmi, mm, 0);
1651 	int count = 0;
1652 
1653 	/* mm's last user has gone, and it's about to be pulled down */
1654 	mmu_notifier_release(mm);
1655 
1656 	mmap_read_lock(mm);
1657 	arch_exit_mmap(mm);
1658 
1659 	vma = vma_next(&vmi);
1660 	if (!vma || unlikely(xa_is_zero(vma))) {
1661 		/* Can happen if dup_mmap() received an OOM */
1662 		mmap_read_unlock(mm);
1663 		mmap_write_lock(mm);
1664 		goto destroy;
1665 	}
1666 
1667 	lru_add_drain();
1668 	flush_cache_mm(mm);
1669 	tlb_gather_mmu_fullmm(&tlb, mm);
1670 	/* update_hiwater_rss(mm) here? but nobody should be looking */
1671 	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
1672 	unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
1673 	mmap_read_unlock(mm);
1674 
1675 	/*
1676 	 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
1677 	 * because the memory has been already freed.
1678 	 */
1679 	set_bit(MMF_OOM_SKIP, &mm->flags);
1680 	mmap_write_lock(mm);
1681 	mt_clear_in_rcu(&mm->mm_mt);
1682 	vma_iter_set(&vmi, vma->vm_end);
1683 	free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
1684 		      USER_PGTABLES_CEILING, true);
1685 	tlb_finish_mmu(&tlb);
1686 
1687 	/*
1688 	 * Walk the list again, actually closing and freeing it, with preemption
1689 	 * enabled, without holding any MM locks besides the unreachable
1690 	 * mmap_write_lock.
1691 	 */
1692 	vma_iter_set(&vmi, vma->vm_end);
1693 	do {
1694 		if (vma->vm_flags & VM_ACCOUNT)
1695 			nr_accounted += vma_pages(vma);
1696 		remove_vma(vma, /* unreachable = */ true);
1697 		count++;
1698 		cond_resched();
1699 		vma = vma_next(&vmi);
1700 	} while (vma && likely(!xa_is_zero(vma)));
1701 
1702 	BUG_ON(count != mm->map_count);
1703 
1704 	trace_exit_mmap(mm);
1705 destroy:
1706 	__mt_destroy(&mm->mm_mt);
1707 	mmap_write_unlock(mm);
1708 	vm_unacct_memory(nr_accounted);
1709 }
1710 
1711 /* Insert vm structure into process list sorted by address
1712  * and into the inode's i_mmap tree.  If vm_file is non-NULL
1713  * then i_mmap_rwsem is taken here.
1714  */
1715 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
1716 {
1717 	unsigned long charged = vma_pages(vma);
1718 
1719 
1720 	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
1721 		return -ENOMEM;
1722 
1723 	if ((vma->vm_flags & VM_ACCOUNT) &&
1724 	     security_vm_enough_memory_mm(mm, charged))
1725 		return -ENOMEM;
1726 
1727 	/*
1728 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
1729 	 * until its first write fault, when page's anon_vma and index
1730 	 * are set.  But now set the vm_pgoff it will almost certainly
1731 	 * end up with (unless mremap moves it elsewhere before that
1732 	 * first write fault), so /proc/pid/maps tells a consistent story.
1733 	 *
1734 	 * By setting it to reflect the virtual start address of the
1735 	 * vma, merges and splits can happen in a seamless way, just
1736 	 * using the existing file pgoff checks and manipulations.
1737 	 * Similarly in do_mmap and in do_brk_flags.
1738 	 */
1739 	if (vma_is_anonymous(vma)) {
1740 		BUG_ON(vma->anon_vma);
1741 		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
1742 	}
1743 
1744 	if (vma_link(mm, vma)) {
1745 		if (vma->vm_flags & VM_ACCOUNT)
1746 			vm_unacct_memory(charged);
1747 		return -ENOMEM;
1748 	}
1749 
1750 	return 0;
1751 }
1752 
1753 /*
1754  * Return true if the calling process may expand its vm space by the passed
1755  * number of pages
1756  */
1757 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
1758 {
1759 	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
1760 		return false;
1761 
1762 	if (is_data_mapping(flags) &&
1763 	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
1764 		/* Workaround for Valgrind */
1765 		if (rlimit(RLIMIT_DATA) == 0 &&
1766 		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
1767 			return true;
1768 
1769 		pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n",
1770 			     current->comm, current->pid,
1771 			     (mm->data_vm + npages) << PAGE_SHIFT,
1772 			     rlimit(RLIMIT_DATA),
1773 			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
1774 
1775 		if (!ignore_rlimit_data)
1776 			return false;
1777 	}
1778 
1779 	return true;
1780 }
1781 
1782 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
1783 {
1784 	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
1785 
1786 	if (is_exec_mapping(flags))
1787 		mm->exec_vm += npages;
1788 	else if (is_stack_mapping(flags))
1789 		mm->stack_vm += npages;
1790 	else if (is_data_mapping(flags))
1791 		mm->data_vm += npages;
1792 }
1793 
1794 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
1795 
1796 /*
1797  * Close hook, called for unmap() and on the old vma for mremap().
1798  *
1799  * Having a close hook prevents vma merging regardless of flags.
1800  */
1801 static void special_mapping_close(struct vm_area_struct *vma)
1802 {
1803 	const struct vm_special_mapping *sm = vma->vm_private_data;
1804 
1805 	if (sm->close)
1806 		sm->close(sm, vma);
1807 }
1808 
1809 static const char *special_mapping_name(struct vm_area_struct *vma)
1810 {
1811 	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
1812 }
1813 
1814 static int special_mapping_mremap(struct vm_area_struct *new_vma)
1815 {
1816 	struct vm_special_mapping *sm = new_vma->vm_private_data;
1817 
1818 	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
1819 		return -EFAULT;
1820 
1821 	if (sm->mremap)
1822 		return sm->mremap(sm, new_vma);
1823 
1824 	return 0;
1825 }
1826 
1827 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
1828 {
1829 	/*
1830 	 * Forbid splitting special mappings - kernel has expectations over
1831 	 * the number of pages in mapping. Together with VM_DONTEXPAND
1832 	 * the size of vma should stay the same over the special mapping's
1833 	 * lifetime.
1834 	 */
1835 	return -EINVAL;
1836 }
1837 
1838 static const struct vm_operations_struct special_mapping_vmops = {
1839 	.close = special_mapping_close,
1840 	.fault = special_mapping_fault,
1841 	.mremap = special_mapping_mremap,
1842 	.name = special_mapping_name,
1843 	/* vDSO code relies on VVAR not being accessible remotely */
1844 	.access = NULL,
1845 	.may_split = special_mapping_split,
1846 };
1847 
1848 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
1849 {
1850 	struct vm_area_struct *vma = vmf->vma;
1851 	pgoff_t pgoff;
1852 	struct page **pages;
1853 	struct vm_special_mapping *sm = vma->vm_private_data;
1854 
1855 	if (sm->fault)
1856 		return sm->fault(sm, vmf->vma, vmf);
1857 
1858 	pages = sm->pages;
1859 
1860 	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
1861 		pgoff--;
1862 
1863 	if (*pages) {
1864 		struct page *page = *pages;
1865 		get_page(page);
1866 		vmf->page = page;
1867 		return 0;
1868 	}
1869 
1870 	return VM_FAULT_SIGBUS;
1871 }
1872 
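/*
 * Example of the lookup above (illustrative): with sm->pages set to the
 * NULL-terminated array { pg0, pg1, NULL }, a fault at vmf->pgoff == 1
 * walks one entry forward and hands back pg1 with an extra reference,
 * while a fault at vmf->pgoff == 2 runs into the NULL terminator and is
 * answered with VM_FAULT_SIGBUS.
 */
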
1873 static struct vm_area_struct *__install_special_mapping(
1874 	struct mm_struct *mm,
1875 	unsigned long addr, unsigned long len,
1876 	unsigned long vm_flags, void *priv,
1877 	const struct vm_operations_struct *ops)
1878 {
1879 	int ret;
1880 	struct vm_area_struct *vma;
1881 
1882 	vma = vm_area_alloc(mm);
1883 	if (unlikely(vma == NULL))
1884 		return ERR_PTR(-ENOMEM);
1885 
1886 	vma_set_range(vma, addr, addr + len, 0);
1887 	vm_flags_init(vma, (vm_flags | mm->def_flags |
1888 		      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
1889 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1890 
1891 	vma->vm_ops = ops;
1892 	vma->vm_private_data = priv;
1893 
1894 	ret = insert_vm_struct(mm, vma);
1895 	if (ret)
1896 		goto out;
1897 
1898 	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
1899 
1900 	perf_event_mmap(vma);
1901 
1902 	return vma;
1903 
1904 out:
1905 	vm_area_free(vma);
1906 	return ERR_PTR(ret);
1907 }
1908 
1909 bool vma_is_special_mapping(const struct vm_area_struct *vma,
1910 	const struct vm_special_mapping *sm)
1911 {
1912 	return vma->vm_private_data == sm &&
1913 		vma->vm_ops == &special_mapping_vmops;
1914 }
1915 
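/*
 * Example use (a sketch; "vdso_mapping" is an illustrative, arch-private
 * descriptor name): architecture code typically uses this helper to pick
 * out its own special mappings when walking an mm:
 *
 *	static const struct vm_special_mapping vdso_mapping = { ... };
 *	...
 *	for_each_vma(vmi, vma)
 *		if (vma_is_special_mapping(vma, &vdso_mapping))
 *			return vma;
 */
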
1916 /*
1917  * Called with mm->mmap_lock held for writing.
1918  * Insert a new vma covering the given region, with the given flags.
1919  * Its pages are supplied by the page array in the given vm_special_mapping.
1920  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
1921  * The region past the last page supplied will always produce SIGBUS.
1922  * The array pointer and the pages it points to are assumed to stay alive
1923  * for as long as this mapping might exist.
1924  */
1925 struct vm_area_struct *_install_special_mapping(
1926 	struct mm_struct *mm,
1927 	unsigned long addr, unsigned long len,
1928 	unsigned long vm_flags, const struct vm_special_mapping *spec)
1929 {
1930 	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
1931 					&special_mapping_vmops);
1932 }
1933 
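/*
 * Illustrative example (a sketch, not taken from an in-tree user; the
 * "foo" names and some_page are hypothetical): installing a single
 * read-only page as a named special mapping.  The descriptor and the
 * NULL-terminated page array must outlive the mapping, so they are
 * typically static, and mm->mmap_lock must be held for writing:
 *
 *	static struct page *foo_pages[2];	// foo_pages[1] stays NULL
 *	static const struct vm_special_mapping foo_mapping = {
 *		.name	= "[foo]",
 *		.pages	= foo_pages,
 *	};
 *
 *	foo_pages[0] = some_page;
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD | VM_DONTDUMP,
 *				       &foo_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */
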
1934 /*
1935  * initialise the percpu counter for VM
1936  */
1937 void __init mmap_init(void)
1938 {
1939 	int ret;
1940 
1941 	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
1942 	VM_BUG_ON(ret);
1943 }
1944 
1945 /*
1946  * Initialise sysctl_user_reserve_kbytes.
1947  *
1948  * This is intended to prevent a single memory-hogging process from putting
1949  * the user in a state where they cannot recover (kill the hog) in
1950  * OVERCOMMIT_NEVER mode.
1951  *
1952  * The default value is min(3% of free memory, 128MB).
1953  * 128MB is enough to recover with sshd/login, bash, and top/kill.
1954  */
1955 static int init_user_reserve(void)
1956 {
1957 	unsigned long free_kbytes;
1958 
1959 	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1960 
1961 	sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
1962 	return 0;
1963 }
1964 subsys_initcall(init_user_reserve);
1965 
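/*
 * Worked example (illustrative): free_kbytes / 32 is roughly 3% of free
 * memory.  With 2GiB free (2097152kB), the reserve comes out at 65536kB
 * (64MB), below the SZ_128K (131072kB, i.e. 128MB) cap.  With 8GiB free
 * the uncapped value would be 262144kB, so the cap applies and the
 * reserve is 131072kB.
 */
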
1966 /*
1967  * Initialise sysctl_admin_reserve_kbytes.
1968  *
1969  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1970  * to log in and kill a memory hogging process.
1971  *
1972  * Systems with more than 256MB will reserve 8MB, enough to recover
1973  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1974  * only reserve 3% of free pages by default.
1975  */
1976 static int init_admin_reserve(void)
1977 {
1978 	unsigned long free_kbytes;
1979 
1980 	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1981 
1982 	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
1983 	return 0;
1984 }
1985 subsys_initcall(init_admin_reserve);
1986 
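/*
 * Worked example (illustrative): the crossover mentioned above is 256MB
 * of free memory, since 262144kB / 32 == 8192kB == SZ_8K, i.e. the 8MB
 * cap.  Below that, the reserve is simply free_kbytes / 32 (about 3%).
 */
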
1987 /*
1988  * Reinitialise user and admin reserves if memory is added or removed.
1989  *
1990  * The default user reserve max is 128MB, and the default max for the
1991  * admin reserve is 8MB. These are usually, but not always, enough to
1992  * enable recovery from a memory hogging process using login/sshd, a shell,
1993  * and tools like top. It may make sense to increase or even disable the
1994  * reserve depending on the existence of swap or variations in the recovery
1995  * tools. So, the admin may have changed them.
1996  *
1997  * If memory is added and the reserves have been eliminated or increased above
1998  * the default max, then we'll trust the admin.
1999  *
2000  * If memory is removed and there isn't enough free memory, then we
2001  * need to reset the reserves.
2002  *
2003  * Otherwise keep the reserve set by the admin.
2004  */
2005 static int reserve_mem_notifier(struct notifier_block *nb,
2006 			     unsigned long action, void *data)
2007 {
2008 	unsigned long tmp, free_kbytes;
2009 
2010 	switch (action) {
2011 	case MEM_ONLINE:
2012 		/* Default max is 128MB. Leave alone if modified by operator. */
2013 		tmp = sysctl_user_reserve_kbytes;
2014 		if (tmp > 0 && tmp < SZ_128K)
2015 			init_user_reserve();
2016 
2017 		/* Default max is 8MB.  Leave alone if modified by operator. */
2018 		tmp = sysctl_admin_reserve_kbytes;
2019 		if (tmp > 0 && tmp < SZ_8K)
2020 			init_admin_reserve();
2021 
2022 		break;
2023 	case MEM_OFFLINE:
2024 		free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
2025 
2026 		if (sysctl_user_reserve_kbytes > free_kbytes) {
2027 			init_user_reserve();
2028 			pr_info("vm.user_reserve_kbytes reset to %lu\n",
2029 				sysctl_user_reserve_kbytes);
2030 		}
2031 
2032 		if (sysctl_admin_reserve_kbytes > free_kbytes) {
2033 			init_admin_reserve();
2034 			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
2035 				sysctl_admin_reserve_kbytes);
2036 		}
2037 		break;
2038 	default:
2039 		break;
2040 	}
2041 	return NOTIFY_OK;
2042 }
2043 
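/*
 * Illustrative example (not from this file): an administrator who does
 * not want hotplug events to recompute these values can pin them via the
 * existing sysctls, e.g.
 *
 *	# sysctl vm.user_reserve_kbytes=262144
 *	# sysctl vm.admin_reserve_kbytes=16384
 *
 * Values of zero, or at/above the default maxima, are left untouched on
 * MEM_ONLINE; MEM_OFFLINE still resets a reserve that no longer fits in
 * the remaining free memory, as described above.
 */
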
2044 static int __meminit init_reserve_notifier(void)
2045 {
2046 	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
2047 		pr_err("Failed registering memory add/remove notifier for admin reserve\n");
2048 
2049 	return 0;
2050 }
2051 subsys_initcall(init_reserve_notifier);
2052 
2053 /*
2054  * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between
2055  * this VMA and its relocated range, which will now reside at [vma->vm_start -
2056  * shift, vma->vm_end - shift).
2057  *
2058  * This function is almost certainly NOT what you want for anything other than
2059  * early executable temporary stack relocation.
2060  */
2061 int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
2062 {
2063 	/*
2064 	 * The process proceeds as follows:
2065 	 *
2066 	 * 1) Use shift to calculate the new vma endpoints.
2067 	 * 2) Extend vma to cover both the old and new ranges.  This ensures the
2068 	 *    arguments passed to subsequent functions are consistent.
2069 	 * 3) Move vma's page tables to the new range.
2070 	 * 4) Free up any cleared pgd range.
2071 	 * 5) Shrink the vma to cover only the new range.
2072 	 */
2073 
2074 	struct mm_struct *mm = vma->vm_mm;
2075 	unsigned long old_start = vma->vm_start;
2076 	unsigned long old_end = vma->vm_end;
2077 	unsigned long length = old_end - old_start;
2078 	unsigned long new_start = old_start - shift;
2079 	unsigned long new_end = old_end - shift;
2080 	VMA_ITERATOR(vmi, mm, new_start);
2081 	VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
2082 	struct vm_area_struct *next;
2083 	struct mmu_gather tlb;
2084 
2085 	BUG_ON(new_start > new_end);
2086 
2087 	/*
2088 	 * ensure there are no vmas between where we want to go
2089 	 * and where we are
2090 	 */
2091 	if (vma != vma_next(&vmi))
2092 		return -EFAULT;
2093 
2094 	vma_iter_prev_range(&vmi);
2095 	/*
2096 	 * cover the whole range: [new_start, old_end)
2097 	 */
2098 	vmg.vma = vma;
2099 	if (vma_expand(&vmg))
2100 		return -ENOMEM;
2101 
2102 	/*
2103 	 * move the page tables downwards; on failure we rely on
2104 	 * process cleanup to remove whatever mess we made.
2105 	 */
2106 	if (length != move_page_tables(vma, old_start,
2107 				       vma, new_start, length, false, true))
2108 		return -ENOMEM;
2109 
2110 	lru_add_drain();
2111 	tlb_gather_mmu(&tlb, mm);
2112 	next = vma_next(&vmi);
2113 	if (new_end > old_start) {
2114 		/*
2115 		 * when the old and new regions overlap, clear from new_end.
2116 		 */
2117 		free_pgd_range(&tlb, new_end, old_end, new_end,
2118 			next ? next->vm_start : USER_PGTABLES_CEILING);
2119 	} else {
2120 		/*
2121 		 * otherwise, clean from old_start; this is done to avoid touching
2122 		 * the address space in [new_end, old_start). Some architectures
2123 		 * have constraints on the VA space that make this illegal (IA64);
2124 		 * for the others it's just a little faster.
2125 		 */
2126 		free_pgd_range(&tlb, old_start, old_end, new_end,
2127 			next ? next->vm_start : USER_PGTABLES_CEILING);
2128 	}
2129 	tlb_finish_mmu(&tlb);
2130 
2131 	vma_prev(&vmi);
2132 	/* Shrink the vma to just the new range */
2133 	return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
2134 }
2135
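/*
 * Usage note (a simplified sketch, not copied from the caller): the
 * expected user of this helper is the execve() stack setup in fs/exec.c,
 * which builds the temporary argument stack at the top of the address
 * space and then shifts it down in place once the final stack location
 * is known:
 *
 *	if (stack_shift) {
 *		ret = relocate_vma_down(vma, stack_shift);
 *		if (ret)
 *			goto out_unlock;
 *	}
 */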