xref: /linux/mm/nommu.c (revision 7b667acd69e316c2ed1b47e5dcd9d093be4a843f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/nommu.c
4  *
5  *  Replacement code for mm functions to support CPUs that don't
6  *  have any form of memory management unit (thus no virtual memory).
7  *
8  *  See Documentation/admin-guide/mm/nommu-mmap.rst
9  *
10  *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
11  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
12  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
13  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
14  *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/export.h>
20 #include <linux/mm.h>
21 #include <linux/sched/mm.h>
22 #include <linux/mman.h>
23 #include <linux/swap.h>
24 #include <linux/file.h>
25 #include <linux/highmem.h>
26 #include <linux/pagemap.h>
27 #include <linux/slab.h>
28 #include <linux/vmalloc.h>
29 #include <linux/backing-dev.h>
30 #include <linux/compiler.h>
31 #include <linux/mount.h>
32 #include <linux/personality.h>
33 #include <linux/security.h>
34 #include <linux/syscalls.h>
35 #include <linux/audit.h>
36 #include <linux/printk.h>
37 
38 #include <linux/uaccess.h>
39 #include <linux/uio.h>
40 #include <asm/tlb.h>
41 #include <asm/tlbflush.h>
42 #include <asm/mmu_context.h>
43 #include "internal.h"
44 
45 void *high_memory;
46 EXPORT_SYMBOL(high_memory);
47 struct page *mem_map;
48 unsigned long max_mapnr;
49 EXPORT_SYMBOL(max_mapnr);
50 unsigned long highest_memmap_pfn;
51 int heap_stack_gap = 0;
52 
53 atomic_long_t mmap_pages_allocated;
54 
55 EXPORT_SYMBOL(mem_map);
56 
57 /* list of mapped, potentially shareable regions */
58 static struct kmem_cache *vm_region_jar;
59 struct rb_root nommu_region_tree = RB_ROOT;
60 DECLARE_RWSEM(nommu_region_sem);
61 
62 const struct vm_operations_struct generic_file_vm_ops = {
63 };
64 
65 /*
66  * Return the total memory allocated for this pointer, not
67  * just what the caller asked for.
68  *
69  * Doesn't have to be accurate, i.e. may have races.
70  */
71 unsigned int kobjsize(const void *objp)
72 {
73 	struct page *page;
74 
75 	/*
76 	 * If the object we have should not have ksize performed on it,
77 	 * return a size of 0
78 	 */
79 	if (!objp || !virt_addr_valid(objp))
80 		return 0;
81 
82 	page = virt_to_head_page(objp);
83 
84 	/*
85 	 * If the allocator sets PageSlab, we know the pointer came from
86 	 * kmalloc().
87 	 */
88 	if (PageSlab(page))
89 		return ksize(objp);
90 
91 	/*
92 	 * If it's not a compound page, see if we have a matching VMA
93 	 * region. This test is intentionally done in reverse order,
94 	 * so if there's no VMA, we still fall through and hand back
95 	 * PAGE_SIZE for 0-order pages.
96 	 */
97 	if (!PageCompound(page)) {
98 		struct vm_area_struct *vma;
99 
100 		vma = find_vma(current->mm, (unsigned long)objp);
101 		if (vma)
102 			return vma->vm_end - vma->vm_start;
103 	}
104 
105 	/*
106 	 * The ksize() function is only guaranteed to work for pointers
107 	 * returned by kmalloc(). So handle arbitrary pointers here.
108 	 */
109 	return page_size(page);
110 }
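
/*
 * Illustrative only: for a slab-backed pointer such as
 *
 *	void *p = kmalloc(30, GFP_KERNEL);
 *	size_t n = kobjsize(p);
 *
 * this behaves like ksize(p); a pointer into a nommu mmap() region instead
 * reports the size of the whole VMA, and anything else falls back to the
 * size of the underlying page(s).
 */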
111 
112 void vfree(const void *addr)
113 {
114 	kfree(addr);
115 }
116 EXPORT_SYMBOL(vfree);
117 
118 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
119 {
120 	/*
121 	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
122 	 * returns only a logical address.
123 	 */
124 	return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
125 }
126 EXPORT_SYMBOL(__vmalloc_noprof);
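
/*
 * Illustrative only: every vmalloc() variant below funnels into the kmalloc()
 * call above, so on !MMU something like
 *
 *	void *p = vmalloc(64 * 1024);
 *
 * must be satisfied by physically contiguous memory and may fail under
 * fragmentation where an MMU kernel would have succeeded.
 */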
127 
128 void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
129 {
130 	return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
131 }
132 
133 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
134 		unsigned long start, unsigned long end, gfp_t gfp_mask,
135 		pgprot_t prot, unsigned long vm_flags, int node,
136 		const void *caller)
137 {
138 	return __vmalloc_noprof(size, gfp_mask);
139 }
140 
141 void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
142 		int node, const void *caller)
143 {
144 	return __vmalloc_noprof(size, gfp_mask);
145 }
146 
147 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
148 {
149 	void *ret;
150 
151 	ret = __vmalloc(size, flags);
152 	if (ret) {
153 		struct vm_area_struct *vma;
154 
155 		mmap_write_lock(current->mm);
156 		vma = find_vma(current->mm, (unsigned long)ret);
157 		if (vma)
158 			vm_flags_set(vma, VM_USERMAP);
159 		mmap_write_unlock(current->mm);
160 	}
161 
162 	return ret;
163 }
164 
165 void *vmalloc_user_noprof(unsigned long size)
166 {
167 	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
168 }
169 EXPORT_SYMBOL(vmalloc_user_noprof);
170 
171 struct page *vmalloc_to_page(const void *addr)
172 {
173 	return virt_to_page(addr);
174 }
175 EXPORT_SYMBOL(vmalloc_to_page);
176 
177 unsigned long vmalloc_to_pfn(const void *addr)
178 {
179 	return page_to_pfn(virt_to_page(addr));
180 }
181 EXPORT_SYMBOL(vmalloc_to_pfn);
182 
183 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
184 {
185 	/* Don't allow overflow */
186 	if ((unsigned long) addr + count < count)
187 		count = -(unsigned long) addr;
188 
189 	return copy_to_iter(addr, count, iter);
190 }
191 
192 /*
193  *	vmalloc  -  allocate virtually contiguous memory
194  *
195  *	@size:		allocation size
196  *
197  *	Allocate enough pages to cover @size from the page level
198  *	allocator and map them into contiguous kernel virtual space.
199  *
200  *	For tight control over page level allocator and protection flags
201  *	use __vmalloc() instead.
202  */
203 void *vmalloc_noprof(unsigned long size)
204 {
205 	return __vmalloc_noprof(size, GFP_KERNEL);
206 }
207 EXPORT_SYMBOL(vmalloc_noprof);
208 
209 void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof);
210 
211 /*
212  *	vzalloc - allocate virtually contiguous memory with zero fill
213  *
214  *	@size:		allocation size
215  *
216  *	Allocate enough pages to cover @size from the page level
217  *	allocator and map them into contiguous kernel virtual space.
218  *	The memory allocated is set to zero.
219  *
220  *	For tight control over page level allocator and protection flags
221  *	use __vmalloc() instead.
222  */
223 void *vzalloc_noprof(unsigned long size)
224 {
225 	return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
226 }
227 EXPORT_SYMBOL(vzalloc_noprof);
228 
229 /**
230  * vmalloc_node - allocate memory on a specific node
231  * @size:	allocation size
232  * @node:	numa node
233  *
234  * Allocate enough pages to cover @size from the page level
235  * allocator and map them into contiguous kernel virtual space.
236  *
237  * For tight control over page level allocator and protection flags
238  * use __vmalloc() instead.
239  */
240 void *vmalloc_node_noprof(unsigned long size, int node)
241 {
242 	return vmalloc_noprof(size);
243 }
244 EXPORT_SYMBOL(vmalloc_node_noprof);
245 
246 /**
247  * vzalloc_node - allocate memory on a specific node with zero fill
248  * @size:	allocation size
249  * @node:	numa node
250  *
251  * Allocate enough pages to cover @size from the page level
252  * allocator and map them into contiguous kernel virtual space.
253  * The memory allocated is set to zero.
254  *
255  * For tight control over page level allocator and protection flags
256  * use __vmalloc() instead.
257  */
258 void *vzalloc_node_noprof(unsigned long size, int node)
259 {
260 	return vzalloc_noprof(size);
261 }
262 EXPORT_SYMBOL(vzalloc_node_noprof);
263 
264 /**
265  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
266  *	@size:		allocation size
267  *
268  *	Allocate enough 32bit PA addressable pages to cover @size from the
269  *	page level allocator and map them into contiguous kernel virtual space.
270  */
271 void *vmalloc_32_noprof(unsigned long size)
272 {
273 	return __vmalloc_noprof(size, GFP_KERNEL);
274 }
275 EXPORT_SYMBOL(vmalloc_32_noprof);
276 
277 /**
278  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
279  *	@size:		allocation size
280  *
281  * The resulting memory area is 32bit addressable and zeroed so it can be
282  * mapped to userspace without leaking data.
283  *
284  * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
285  * remap_vmalloc_range() are permissible.
286  */
287 void *vmalloc_32_user_noprof(unsigned long size)
288 {
289 	/*
290 	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
291 	 * but for now this can simply use vmalloc_user() directly.
292 	 */
293 	return vmalloc_user_noprof(size);
294 }
295 EXPORT_SYMBOL(vmalloc_32_user_noprof);
296 
297 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
298 {
299 	BUG();
300 	return NULL;
301 }
302 EXPORT_SYMBOL(vmap);
303 
304 void vunmap(const void *addr)
305 {
306 	BUG();
307 }
308 EXPORT_SYMBOL(vunmap);
309 
310 void *vm_map_ram(struct page **pages, unsigned int count, int node)
311 {
312 	BUG();
313 	return NULL;
314 }
315 EXPORT_SYMBOL(vm_map_ram);
316 
317 void vm_unmap_ram(const void *mem, unsigned int count)
318 {
319 	BUG();
320 }
321 EXPORT_SYMBOL(vm_unmap_ram);
322 
323 void vm_unmap_aliases(void)
324 {
325 }
326 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
327 
328 void free_vm_area(struct vm_struct *area)
329 {
330 	BUG();
331 }
332 EXPORT_SYMBOL_GPL(free_vm_area);
333 
334 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
335 		   struct page *page)
336 {
337 	return -EINVAL;
338 }
339 EXPORT_SYMBOL(vm_insert_page);
340 
341 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
342 			struct page **pages, unsigned long *num)
343 {
344 	return -EINVAL;
345 }
346 EXPORT_SYMBOL(vm_insert_pages);
347 
348 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
349 			unsigned long num)
350 {
351 	return -EINVAL;
352 }
353 EXPORT_SYMBOL(vm_map_pages);
354 
355 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
356 				unsigned long num)
357 {
358 	return -EINVAL;
359 }
360 EXPORT_SYMBOL(vm_map_pages_zero);
361 
362 /*
363  *  sys_brk() for the most part doesn't need the global kernel
364  *  lock, except when an application is doing something nasty
365  *  like trying to un-brk an area that has already been mapped
366  *  to a regular file.  In this case, the unmapping will need
367  *  to invoke file system routines that need the global lock.
368  */
369 SYSCALL_DEFINE1(brk, unsigned long, brk)
370 {
371 	struct mm_struct *mm = current->mm;
372 
373 	if (brk < mm->start_brk || brk > mm->context.end_brk)
374 		return mm->brk;
375 
376 	if (mm->brk == brk)
377 		return mm->brk;
378 
379 	/*
380 	 * Always allow shrinking brk
381 	 */
382 	if (brk <= mm->brk) {
383 		mm->brk = brk;
384 		return brk;
385 	}
386 
387 	/*
388 	 * Ok, looks good - let it rip.
389 	 */
390 	flush_icache_user_range(mm->brk, brk);
391 	return mm->brk = brk;
392 }
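
/*
 * Illustrative only: mm->context.end_brk bounds a window that the binary
 * loader typically reserves at execve() time, so a grow request that would
 * run past it is refused by returning the old break.  From userspace that
 * looks like:
 *
 *	void *cur = sbrk(0);
 *	int ret = brk((char *)cur + 16 * 1024 * 1024);
 *
 * where ret is -1 once the request runs past the reserved window.
 */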
393 
394 static int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
395 
396 static const struct ctl_table nommu_table[] = {
397 	{
398 		.procname	= "nr_trim_pages",
399 		.data		= &sysctl_nr_trim_pages,
400 		.maxlen		= sizeof(sysctl_nr_trim_pages),
401 		.mode		= 0644,
402 		.proc_handler	= proc_dointvec_minmax,
403 		.extra1		= SYSCTL_ZERO,
404 	},
405 };
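
/*
 * Illustrative only: the knob above is exposed as /proc/sys/vm/nr_trim_pages;
 * for example "echo 0 > /proc/sys/vm/nr_trim_pages" disables the trimming of
 * excess pages that do_mmap_private() would otherwise perform.
 */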
406 
407 /*
408  * initialise the percpu counter for VM and region record slabs
409  */
410 void __init mmap_init(void)
411 {
412 	int ret;
413 
414 	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
415 	VM_BUG_ON(ret);
416 	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
417 	register_sysctl_init("vm", nommu_table);
418 }
419 
420 /*
421  * validate the region tree
422  * - the caller must hold the region lock
423  */
424 #ifdef CONFIG_DEBUG_NOMMU_REGIONS
425 static noinline void validate_nommu_regions(void)
426 {
427 	struct vm_region *region, *last;
428 	struct rb_node *p, *lastp;
429 
430 	lastp = rb_first(&nommu_region_tree);
431 	if (!lastp)
432 		return;
433 
434 	last = rb_entry(lastp, struct vm_region, vm_rb);
435 	BUG_ON(last->vm_end <= last->vm_start);
436 	BUG_ON(last->vm_top < last->vm_end);
437 
438 	while ((p = rb_next(lastp))) {
439 		region = rb_entry(p, struct vm_region, vm_rb);
440 		last = rb_entry(lastp, struct vm_region, vm_rb);
441 
442 		BUG_ON(region->vm_end <= region->vm_start);
443 		BUG_ON(region->vm_top < region->vm_end);
444 		BUG_ON(region->vm_start < last->vm_top);
445 
446 		lastp = p;
447 	}
448 }
449 #else
450 static void validate_nommu_regions(void)
451 {
452 }
453 #endif
454 
455 /*
456  * add a region into the global tree
457  */
458 static void add_nommu_region(struct vm_region *region)
459 {
460 	struct vm_region *pregion;
461 	struct rb_node **p, *parent;
462 
463 	validate_nommu_regions();
464 
465 	parent = NULL;
466 	p = &nommu_region_tree.rb_node;
467 	while (*p) {
468 		parent = *p;
469 		pregion = rb_entry(parent, struct vm_region, vm_rb);
470 		if (region->vm_start < pregion->vm_start)
471 			p = &(*p)->rb_left;
472 		else if (region->vm_start > pregion->vm_start)
473 			p = &(*p)->rb_right;
474 		else if (pregion == region)
475 			return;
476 		else
477 			BUG();
478 	}
479 
480 	rb_link_node(&region->vm_rb, parent, p);
481 	rb_insert_color(&region->vm_rb, &nommu_region_tree);
482 
483 	validate_nommu_regions();
484 }
485 
486 /*
487  * delete a region from the global tree
488  */
489 static void delete_nommu_region(struct vm_region *region)
490 {
491 	BUG_ON(!nommu_region_tree.rb_node);
492 
493 	validate_nommu_regions();
494 	rb_erase(&region->vm_rb, &nommu_region_tree);
495 	validate_nommu_regions();
496 }
497 
498 /*
499  * free a contiguous series of pages
500  */
501 static void free_page_series(unsigned long from, unsigned long to)
502 {
503 	for (; from < to; from += PAGE_SIZE) {
504 		struct page *page = virt_to_page((void *)from);
505 
506 		atomic_long_dec(&mmap_pages_allocated);
507 		put_page(page);
508 	}
509 }
510 
511 /*
512  * release a reference to a region
513  * - the caller must hold the region semaphore for writing, which this releases
514  * - the region may not have been added to the tree yet, in which case vm_top
515  *   will equal vm_start
516  */
517 static void __put_nommu_region(struct vm_region *region)
518 	__releases(nommu_region_sem)
519 {
520 	BUG_ON(!nommu_region_tree.rb_node);
521 
522 	if (--region->vm_usage == 0) {
523 		if (region->vm_top > region->vm_start)
524 			delete_nommu_region(region);
525 		up_write(&nommu_region_sem);
526 
527 		if (region->vm_file)
528 			fput(region->vm_file);
529 
530 		/* IO memory and memory shared directly out of the pagecache
531 		 * from ramfs/tmpfs mustn't be released here */
532 		if (region->vm_flags & VM_MAPPED_COPY)
533 			free_page_series(region->vm_start, region->vm_top);
534 		kmem_cache_free(vm_region_jar, region);
535 	} else {
536 		up_write(&nommu_region_sem);
537 	}
538 }
539 
540 /*
541  * release a reference to a region
542  */
543 static void put_nommu_region(struct vm_region *region)
544 {
545 	down_write(&nommu_region_sem);
546 	__put_nommu_region(region);
547 }
548 
549 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
550 {
551 	vma->vm_mm = mm;
552 
553 	/* add the VMA to the mapping */
554 	if (vma->vm_file) {
555 		struct address_space *mapping = vma->vm_file->f_mapping;
556 
557 		i_mmap_lock_write(mapping);
558 		flush_dcache_mmap_lock(mapping);
559 		vma_interval_tree_insert(vma, &mapping->i_mmap);
560 		flush_dcache_mmap_unlock(mapping);
561 		i_mmap_unlock_write(mapping);
562 	}
563 }
564 
565 static void cleanup_vma_from_mm(struct vm_area_struct *vma)
566 {
567 	vma->vm_mm->map_count--;
568 	/* remove the VMA from the mapping */
569 	if (vma->vm_file) {
570 		struct address_space *mapping;
571 		mapping = vma->vm_file->f_mapping;
572 
573 		i_mmap_lock_write(mapping);
574 		flush_dcache_mmap_lock(mapping);
575 		vma_interval_tree_remove(vma, &mapping->i_mmap);
576 		flush_dcache_mmap_unlock(mapping);
577 		i_mmap_unlock_write(mapping);
578 	}
579 }
580 
581 /*
582  * delete a VMA from its owning mm_struct and address space
583  */
584 static int delete_vma_from_mm(struct vm_area_struct *vma)
585 {
586 	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
587 
588 	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
589 	if (vma_iter_prealloc(&vmi, NULL)) {
590 		pr_warn("Allocation of vma tree for process %d failed\n",
591 		       current->pid);
592 		return -ENOMEM;
593 	}
594 	cleanup_vma_from_mm(vma);
595 
596 	/* remove from the MM's tree and list */
597 	vma_iter_clear(&vmi);
598 	return 0;
599 }
600 /*
601  * destroy a VMA record
602  */
603 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
604 {
605 	vma_close(vma);
606 	if (vma->vm_file)
607 		fput(vma->vm_file);
608 	put_nommu_region(vma->vm_region);
609 	vm_area_free(vma);
610 }
611 
612 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
613 					     unsigned long start_addr,
614 					     unsigned long end_addr)
615 {
616 	unsigned long index = start_addr;
617 
618 	mmap_assert_locked(mm);
619 	return mt_find(&mm->mm_mt, &index, end_addr - 1);
620 }
621 EXPORT_SYMBOL(find_vma_intersection);
622 
623 /*
624  * look up the first VMA in which addr resides, NULL if none
625  * - should be called with mm->mmap_lock held at least for reading
626  */
627 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
628 {
629 	VMA_ITERATOR(vmi, mm, addr);
630 
631 	return vma_iter_load(&vmi);
632 }
633 EXPORT_SYMBOL(find_vma);
634 
635 /*
636  * At least xtensa ends up having protection faults even with no
637  * MMU. No stack expansion, at least.
638  */
639 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
640 			unsigned long addr, struct pt_regs *regs)
641 {
642 	struct vm_area_struct *vma;
643 
644 	mmap_read_lock(mm);
645 	vma = vma_lookup(mm, addr);
646 	if (!vma)
647 		mmap_read_unlock(mm);
648 	return vma;
649 }
650 
651 /*
652  * expand a stack to a given address
653  * - not supported under NOMMU conditions
654  */
655 int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
656 {
657 	return -ENOMEM;
658 }
659 
660 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
661 {
662 	mmap_read_unlock(mm);
663 	return NULL;
664 }
665 
666 /*
667  * look up the first VMA that exactly matches addr
668  * - should be called with mm->mmap_lock held at least for reading
669  */
670 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
671 					     unsigned long addr,
672 					     unsigned long len)
673 {
674 	struct vm_area_struct *vma;
675 	unsigned long end = addr + len;
676 	VMA_ITERATOR(vmi, mm, addr);
677 
678 	vma = vma_iter_load(&vmi);
679 	if (!vma)
680 		return NULL;
681 	if (vma->vm_start != addr)
682 		return NULL;
683 	if (vma->vm_end != end)
684 		return NULL;
685 
686 	return vma;
687 }
688 
689 /*
690  * determine whether a mapping should be permitted and, if so, what sort of
691  * mapping we're capable of supporting
692  */
693 static int validate_mmap_request(struct file *file,
694 				 unsigned long addr,
695 				 unsigned long len,
696 				 unsigned long prot,
697 				 unsigned long flags,
698 				 unsigned long pgoff,
699 				 unsigned long *_capabilities)
700 {
701 	unsigned long capabilities, rlen;
702 	int ret;
703 
704 	/* do the simple checks first */
705 	if (flags & MAP_FIXED)
706 		return -EINVAL;
707 
708 	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
709 	    (flags & MAP_TYPE) != MAP_SHARED)
710 		return -EINVAL;
711 
712 	if (!len)
713 		return -EINVAL;
714 
715 	/* Careful about overflows.. */
716 	rlen = PAGE_ALIGN(len);
717 	if (!rlen || rlen > TASK_SIZE)
718 		return -ENOMEM;
719 
720 	/* offset overflow? */
721 	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
722 		return -EOVERFLOW;
723 
724 	if (file) {
725 		/* files must support mmap */
726 		if (!file->f_op->mmap)
727 			return -ENODEV;
728 
729 		/* work out if what we've got could possibly be shared
730 		 * - we support chardevs that provide their own "memory"
731 		 * - we support files/blockdevs that are memory backed
732 		 */
733 		if (file->f_op->mmap_capabilities) {
734 			capabilities = file->f_op->mmap_capabilities(file);
735 		} else {
736 			/* no explicit capabilities set, so assume some
737 			 * defaults */
738 			switch (file_inode(file)->i_mode & S_IFMT) {
739 			case S_IFREG:
740 			case S_IFBLK:
741 				capabilities = NOMMU_MAP_COPY;
742 				break;
743 
744 			case S_IFCHR:
745 				capabilities =
746 					NOMMU_MAP_DIRECT |
747 					NOMMU_MAP_READ |
748 					NOMMU_MAP_WRITE;
749 				break;
750 
751 			default:
752 				return -EINVAL;
753 			}
754 		}
755 
756 		/* eliminate any capabilities that we can't support on this
757 		 * device */
758 		if (!file->f_op->get_unmapped_area)
759 			capabilities &= ~NOMMU_MAP_DIRECT;
760 		if (!(file->f_mode & FMODE_CAN_READ))
761 			capabilities &= ~NOMMU_MAP_COPY;
762 
763 		/* The file shall have been opened with read permission. */
764 		if (!(file->f_mode & FMODE_READ))
765 			return -EACCES;
766 
767 		if (flags & MAP_SHARED) {
768 			/* do checks for writing, appending and locking */
769 			if ((prot & PROT_WRITE) &&
770 			    !(file->f_mode & FMODE_WRITE))
771 				return -EACCES;
772 
773 			if (IS_APPEND(file_inode(file)) &&
774 			    (file->f_mode & FMODE_WRITE))
775 				return -EACCES;
776 
777 			if (!(capabilities & NOMMU_MAP_DIRECT))
778 				return -ENODEV;
779 
780 			/* we mustn't privatise shared mappings */
781 			capabilities &= ~NOMMU_MAP_COPY;
782 		} else {
783 			/* we're going to read the file into private memory we
784 			 * allocate */
785 			if (!(capabilities & NOMMU_MAP_COPY))
786 				return -ENODEV;
787 
788 			/* we don't permit a private writable mapping to be
789 			 * shared with the backing device */
790 			if (prot & PROT_WRITE)
791 				capabilities &= ~NOMMU_MAP_DIRECT;
792 		}
793 
794 		if (capabilities & NOMMU_MAP_DIRECT) {
795 			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
796 			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
797 			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
798 			    ) {
799 				capabilities &= ~NOMMU_MAP_DIRECT;
800 				if (flags & MAP_SHARED) {
801 					pr_warn("MAP_SHARED not completely supported on !MMU\n");
802 					return -EINVAL;
803 				}
804 			}
805 		}
806 
807 		/* handle executable mappings and implied executable
808 		 * mappings */
809 		if (path_noexec(&file->f_path)) {
810 			if (prot & PROT_EXEC)
811 				return -EPERM;
812 		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
813 			/* handle implication of PROT_EXEC by PROT_READ */
814 			if (current->personality & READ_IMPLIES_EXEC) {
815 				if (capabilities & NOMMU_MAP_EXEC)
816 					prot |= PROT_EXEC;
817 			}
818 		} else if ((prot & PROT_READ) &&
819 			 (prot & PROT_EXEC) &&
820 			 !(capabilities & NOMMU_MAP_EXEC)
821 			 ) {
822 			/* backing file is not executable, try to copy */
823 			capabilities &= ~NOMMU_MAP_DIRECT;
824 		}
825 	} else {
826 		/* anonymous mappings are always memory backed and can be
827 		 * privately mapped
828 		 */
829 		capabilities = NOMMU_MAP_COPY;
830 
831 		/* handle PROT_EXEC implication by PROT_READ */
832 		if ((prot & PROT_READ) &&
833 		    (current->personality & READ_IMPLIES_EXEC))
834 			prot |= PROT_EXEC;
835 	}
836 
837 	/* allow the security API to have its say */
838 	ret = security_mmap_addr(addr);
839 	if (ret < 0)
840 		return ret;
841 
842 	/* looks okay */
843 	*_capabilities = capabilities;
844 	return 0;
845 }
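
/*
 * Illustrative only (hypothetical driver, not part of this file): a character
 * device that exports its own memory and wants MAP_SHARED to work on !MMU
 * would typically advertise its abilities to the checks above, e.g.
 *
 *	static unsigned foo_mmap_capabilities(struct file *file)
 *	{
 *		return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
 *	}
 *
 * wired up via foo_fops.mmap_capabilities alongside .mmap and
 * .get_unmapped_area handlers; without NOMMU_MAP_DIRECT a MAP_SHARED request
 * on such a file fails with -ENODEV.
 */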
846 
847 /*
848  * we've determined that we can make the mapping, now translate what we
849  * now know into VMA flags
850  */
851 static unsigned long determine_vm_flags(struct file *file,
852 					unsigned long prot,
853 					unsigned long flags,
854 					unsigned long capabilities)
855 {
856 	unsigned long vm_flags;
857 
858 	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags);
859 
860 	if (!file) {
861 		/*
862 		 * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because
863 		 * there is no fork().
864 		 */
865 		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
866 	} else if (flags & MAP_PRIVATE) {
867 		/* MAP_PRIVATE file mapping */
868 		if (capabilities & NOMMU_MAP_DIRECT)
869 			vm_flags |= (capabilities & NOMMU_VMFLAGS);
870 		else
871 			vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
872 
873 		if (!(prot & PROT_WRITE) && !current->ptrace)
874 			/*
875 			 * R/O private file mapping which cannot be used to
876 			 * modify memory, especially also not via active ptrace
877 			 * (e.g., set breakpoints) or later by upgrading
878 			 * permissions (no mprotect()). We can try overlaying
879 			 * the file mapping, which will work e.g., on chardevs,
880 			 * ramfs/tmpfs/shmfs and romfs/cramfs.
881 			 */
882 			vm_flags |= VM_MAYOVERLAY;
883 	} else {
884 		/* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */
885 		vm_flags |= VM_SHARED | VM_MAYSHARE |
886 			    (capabilities & NOMMU_VMFLAGS);
887 	}
888 
889 	return vm_flags;
890 }
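
/*
 * Illustrative only: the first branch above means that on !MMU
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * yields memory that behaves like a MAP_PRIVATE mapping: with no fork()
 * there is no second process for anonymous "shared" memory to be shared
 * with.
 */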
891 
892 /*
893  * set up a shared mapping on a file (the driver or filesystem provides and
894  * pins the storage)
895  */
896 static int do_mmap_shared_file(struct vm_area_struct *vma)
897 {
898 	int ret;
899 
900 	ret = mmap_file(vma->vm_file, vma);
901 	if (ret == 0) {
902 		vma->vm_region->vm_top = vma->vm_region->vm_end;
903 		return 0;
904 	}
905 	if (ret != -ENOSYS)
906 		return ret;
907 
908 	/* getting -ENOSYS indicates that direct mmap isn't possible (as
909 	 * opposed to tried but failed) so we can only give a suitable error as
910 	 * it's not possible to make a private copy if MAP_SHARED was given */
911 	return -ENODEV;
912 }
913 
914 /*
915  * set up a private mapping or an anonymous shared mapping
916  */
917 static int do_mmap_private(struct vm_area_struct *vma,
918 			   struct vm_region *region,
919 			   unsigned long len,
920 			   unsigned long capabilities)
921 {
922 	unsigned long total, point;
923 	void *base;
924 	int ret, order;
925 
926 	/*
927 	 * Invoke the file's mapping function so that it can keep track of
928 	 * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
929 	 * it may attempt to share, which will make is_nommu_shared_mapping()
930 	 * happy.
931 	 */
932 	if (capabilities & NOMMU_MAP_DIRECT) {
933 		ret = mmap_file(vma->vm_file, vma);
934 		/* shouldn't return success if we're not sharing */
935 		if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
936 			ret = -ENOSYS;
937 		if (ret == 0) {
938 			vma->vm_region->vm_top = vma->vm_region->vm_end;
939 			return 0;
940 		}
941 		if (ret != -ENOSYS)
942 			return ret;
943 
944 		/* getting an ENOSYS error indicates that direct mmap isn't
945 		 * possible (as opposed to tried but failed) so we'll try to
946 		 * make a private copy of the data and map that instead */
947 	}
948 
949 
950 	/* allocate some memory to hold the mapping
951 	 * - the length has already been rounded up to a whole number of pages,
952 	 *   and alloc_pages_exact() returns a page-aligned address
953 	 */
954 	order = get_order(len);
955 	total = 1 << order;
956 	point = len >> PAGE_SHIFT;
957 
958 	/* we don't want to allocate a power-of-2 sized page set */
959 	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
960 		total = point;
961 
962 	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
963 	if (!base)
964 		goto enomem;
965 
966 	atomic_long_add(total, &mmap_pages_allocated);
967 
968 	vm_flags_set(vma, VM_MAPPED_COPY);
969 	region->vm_flags = vma->vm_flags;
970 	region->vm_start = (unsigned long) base;
971 	region->vm_end   = region->vm_start + len;
972 	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
973 
974 	vma->vm_start = region->vm_start;
975 	vma->vm_end   = region->vm_start + len;
976 
977 	if (vma->vm_file) {
978 		/* read the contents of a file into the copy */
979 		loff_t fpos;
980 
981 		fpos = vma->vm_pgoff;
982 		fpos <<= PAGE_SHIFT;
983 
984 		ret = kernel_read(vma->vm_file, base, len, &fpos);
985 		if (ret < 0)
986 			goto error_free;
987 
988 		/* clear the last little bit */
989 		if (ret < len)
990 			memset(base + ret, 0, len - ret);
991 
992 	} else {
993 		vma_set_anonymous(vma);
994 	}
995 
996 	return 0;
997 
998 error_free:
999 	free_page_series(region->vm_start, region->vm_top);
1000 	region->vm_start = vma->vm_start = 0;
1001 	region->vm_end   = vma->vm_end = 0;
1002 	region->vm_top   = 0;
1003 	return ret;
1004 
1005 enomem:
1006 	pr_err("Allocation of length %lu from process %d (%s) failed\n",
1007 	       len, current->pid, current->comm);
1008 	show_mem();
1009 	return -ENOMEM;
1010 }
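
/*
 * Illustrative only, assuming the default nr_trim_pages of 1: a 5-page
 * private mapping is first sized as an order-3 (8-page) allocation by
 * get_order(), the 3 excess pages are trimmed, alloc_pages_exact() then
 * hands back exactly 5 pages, and vm_top ends up at vm_start + 5 * PAGE_SIZE
 * rather than vm_start + 8 * PAGE_SIZE.
 */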
1011 
1012 /*
1013  * handle mapping creation for uClinux
1014  */
1015 unsigned long do_mmap(struct file *file,
1016 			unsigned long addr,
1017 			unsigned long len,
1018 			unsigned long prot,
1019 			unsigned long flags,
1020 			vm_flags_t vm_flags,
1021 			unsigned long pgoff,
1022 			unsigned long *populate,
1023 			struct list_head *uf)
1024 {
1025 	struct vm_area_struct *vma;
1026 	struct vm_region *region;
1027 	struct rb_node *rb;
1028 	unsigned long capabilities, result;
1029 	int ret;
1030 	VMA_ITERATOR(vmi, current->mm, 0);
1031 
1032 	*populate = 0;
1033 
1034 	/* decide whether we should attempt the mapping, and if so what sort of
1035 	 * mapping */
1036 	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1037 				    &capabilities);
1038 	if (ret < 0)
1039 		return ret;
1040 
1041 	/* we ignore the address hint */
1042 	addr = 0;
1043 	len = PAGE_ALIGN(len);
1044 
1045 	/* we've determined that we can make the mapping, now translate what we
1046 	 * now know into VMA flags */
1047 	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
1048 
1049 
1050 	/* we're going to need to record the mapping */
1051 	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1052 	if (!region)
1053 		goto error_getting_region;
1054 
1055 	vma = vm_area_alloc(current->mm);
1056 	if (!vma)
1057 		goto error_getting_vma;
1058 
1059 	region->vm_usage = 1;
1060 	region->vm_flags = vm_flags;
1061 	region->vm_pgoff = pgoff;
1062 
1063 	vm_flags_init(vma, vm_flags);
1064 	vma->vm_pgoff = pgoff;
1065 
1066 	if (file) {
1067 		region->vm_file = get_file(file);
1068 		vma->vm_file = get_file(file);
1069 	}
1070 
1071 	down_write(&nommu_region_sem);
1072 
1073 	/* if we want to share, we need to check for regions created by other
1074 	 * mmap() calls that overlap with our proposed mapping
1075 	 * - we can only share with a superset match on most regular files
1076 	 * - shared mappings on character devices and memory backed files are
1077 	 *   permitted to overlap inexactly as far as we are concerned for in
1078 	 *   these cases, sharing is handled in the driver or filesystem rather
1079 	 *   than here
1080 	 */
1081 	if (is_nommu_shared_mapping(vm_flags)) {
1082 		struct vm_region *pregion;
1083 		unsigned long pglen, rpglen, pgend, rpgend, start;
1084 
1085 		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1086 		pgend = pgoff + pglen;
1087 
1088 		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1089 			pregion = rb_entry(rb, struct vm_region, vm_rb);
1090 
1091 			if (!is_nommu_shared_mapping(pregion->vm_flags))
1092 				continue;
1093 
1094 			/* search for overlapping mappings on the same file */
1095 			if (file_inode(pregion->vm_file) !=
1096 			    file_inode(file))
1097 				continue;
1098 
1099 			if (pregion->vm_pgoff >= pgend)
1100 				continue;
1101 
1102 			rpglen = pregion->vm_end - pregion->vm_start;
1103 			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1104 			rpgend = pregion->vm_pgoff + rpglen;
1105 			if (pgoff >= rpgend)
1106 				continue;
1107 
1108 			/* handle inexactly overlapping matches between
1109 			 * mappings */
1110 			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1111 			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1112 				/* new mapping is not a subset of the region */
1113 				if (!(capabilities & NOMMU_MAP_DIRECT))
1114 					goto sharing_violation;
1115 				continue;
1116 			}
1117 
1118 			/* we've found a region we can share */
1119 			pregion->vm_usage++;
1120 			vma->vm_region = pregion;
1121 			start = pregion->vm_start;
1122 			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1123 			vma->vm_start = start;
1124 			vma->vm_end = start + len;
1125 
1126 			if (pregion->vm_flags & VM_MAPPED_COPY)
1127 				vm_flags_set(vma, VM_MAPPED_COPY);
1128 			else {
1129 				ret = do_mmap_shared_file(vma);
1130 				if (ret < 0) {
1131 					vma->vm_region = NULL;
1132 					vma->vm_start = 0;
1133 					vma->vm_end = 0;
1134 					pregion->vm_usage--;
1135 					pregion = NULL;
1136 					goto error_just_free;
1137 				}
1138 			}
1139 			fput(region->vm_file);
1140 			kmem_cache_free(vm_region_jar, region);
1141 			region = pregion;
1142 			result = start;
1143 			goto share;
1144 		}
1145 
1146 		/* obtain the address at which to make a shared mapping
1147 		 * - this is the hook for quasi-memory character devices to
1148 		 *   tell us the location of a shared mapping
1149 		 */
1150 		if (capabilities & NOMMU_MAP_DIRECT) {
1151 			addr = file->f_op->get_unmapped_area(file, addr, len,
1152 							     pgoff, flags);
1153 			if (IS_ERR_VALUE(addr)) {
1154 				ret = addr;
1155 				if (ret != -ENOSYS)
1156 					goto error_just_free;
1157 
1158 				/* the driver refused to tell us where to site
1159 				 * the mapping so we'll have to attempt to copy
1160 				 * it */
1161 				ret = -ENODEV;
1162 				if (!(capabilities & NOMMU_MAP_COPY))
1163 					goto error_just_free;
1164 
1165 				capabilities &= ~NOMMU_MAP_DIRECT;
1166 			} else {
1167 				vma->vm_start = region->vm_start = addr;
1168 				vma->vm_end = region->vm_end = addr + len;
1169 			}
1170 		}
1171 	}
1172 
1173 	vma->vm_region = region;
1174 
1175 	/* set up the mapping
1176 	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1177 	 */
1178 	if (file && vma->vm_flags & VM_SHARED)
1179 		ret = do_mmap_shared_file(vma);
1180 	else
1181 		ret = do_mmap_private(vma, region, len, capabilities);
1182 	if (ret < 0)
1183 		goto error_just_free;
1184 	add_nommu_region(region);
1185 
1186 	/* clear anonymous mappings that don't ask for uninitialized data */
1187 	if (!vma->vm_file &&
1188 	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1189 	     !(flags & MAP_UNINITIALIZED)))
1190 		memset((void *)region->vm_start, 0,
1191 		       region->vm_end - region->vm_start);
1192 
1193 	/* okay... we have a mapping; now we have to register it */
1194 	result = vma->vm_start;
1195 
1196 	current->mm->total_vm += len >> PAGE_SHIFT;
1197 
1198 share:
1199 	BUG_ON(!vma->vm_region);
1200 	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1201 	if (vma_iter_prealloc(&vmi, vma))
1202 		goto error_just_free;
1203 
1204 	setup_vma_to_mm(vma, current->mm);
1205 	current->mm->map_count++;
1206 	/* add the VMA to the tree */
1207 	vma_iter_store(&vmi, vma);
1208 
1209 	/* we flush the region from the icache only when the first executable
1210 	 * mapping of it is made  */
1211 	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1212 		flush_icache_user_range(region->vm_start, region->vm_end);
1213 		region->vm_icache_flushed = true;
1214 	}
1215 
1216 	up_write(&nommu_region_sem);
1217 
1218 	return result;
1219 
1220 error_just_free:
1221 	up_write(&nommu_region_sem);
1222 error:
1223 	vma_iter_free(&vmi);
1224 	if (region->vm_file)
1225 		fput(region->vm_file);
1226 	kmem_cache_free(vm_region_jar, region);
1227 	if (vma->vm_file)
1228 		fput(vma->vm_file);
1229 	vm_area_free(vma);
1230 	return ret;
1231 
1232 sharing_violation:
1233 	up_write(&nommu_region_sem);
1234 	pr_warn("Attempt to share mismatched mappings\n");
1235 	ret = -EINVAL;
1236 	goto error;
1237 
1238 error_getting_vma:
1239 	kmem_cache_free(vm_region_jar, region);
1240 	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1241 			len, current->pid);
1242 	show_mem();
1243 	return -ENOMEM;
1244 
1245 error_getting_region:
1246 	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1247 			len, current->pid);
1248 	show_mem();
1249 	return -ENOMEM;
1250 }
1251 
1252 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1253 			      unsigned long prot, unsigned long flags,
1254 			      unsigned long fd, unsigned long pgoff)
1255 {
1256 	struct file *file = NULL;
1257 	unsigned long retval = -EBADF;
1258 
1259 	audit_mmap_fd(fd, flags);
1260 	if (!(flags & MAP_ANONYMOUS)) {
1261 		file = fget(fd);
1262 		if (!file)
1263 			goto out;
1264 	}
1265 
1266 	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1267 
1268 	if (file)
1269 		fput(file);
1270 out:
1271 	return retval;
1272 }
1273 
1274 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1275 		unsigned long, prot, unsigned long, flags,
1276 		unsigned long, fd, unsigned long, pgoff)
1277 {
1278 	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1279 }
1280 
1281 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1282 struct mmap_arg_struct {
1283 	unsigned long addr;
1284 	unsigned long len;
1285 	unsigned long prot;
1286 	unsigned long flags;
1287 	unsigned long fd;
1288 	unsigned long offset;
1289 };
1290 
1291 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1292 {
1293 	struct mmap_arg_struct a;
1294 
1295 	if (copy_from_user(&a, arg, sizeof(a)))
1296 		return -EFAULT;
1297 	if (offset_in_page(a.offset))
1298 		return -EINVAL;
1299 
1300 	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1301 			       a.offset >> PAGE_SHIFT);
1302 }
1303 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1304 
1305 /*
1306  * split a vma into two pieces at address 'addr'; a new vma is allocated for
1307  * either the first part or the tail.
1308  */
1309 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
1310 		     unsigned long addr, int new_below)
1311 {
1312 	struct vm_area_struct *new;
1313 	struct vm_region *region;
1314 	unsigned long npages;
1315 	struct mm_struct *mm;
1316 
1317 	/* we're only permitted to split anonymous regions (these should have
1318 	 * only a single usage on the region) */
1319 	if (vma->vm_file)
1320 		return -ENOMEM;
1321 
1322 	mm = vma->vm_mm;
1323 	if (mm->map_count >= sysctl_max_map_count)
1324 		return -ENOMEM;
1325 
1326 	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1327 	if (!region)
1328 		return -ENOMEM;
1329 
1330 	new = vm_area_dup(vma);
1331 	if (!new)
1332 		goto err_vma_dup;
1333 
1334 	/* most fields are the same, copy all, and then fixup */
1335 	*region = *vma->vm_region;
1336 	new->vm_region = region;
1337 
1338 	npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1339 
1340 	if (new_below) {
1341 		region->vm_top = region->vm_end = new->vm_end = addr;
1342 	} else {
1343 		region->vm_start = new->vm_start = addr;
1344 		region->vm_pgoff = new->vm_pgoff += npages;
1345 	}
1346 
1347 	vma_iter_config(vmi, new->vm_start, new->vm_end);
1348 	if (vma_iter_prealloc(vmi, vma)) {
1349 		pr_warn("Allocation of vma tree for process %d failed\n",
1350 			current->pid);
1351 		goto err_vmi_preallocate;
1352 	}
1353 
1354 	if (new->vm_ops && new->vm_ops->open)
1355 		new->vm_ops->open(new);
1356 
1357 	down_write(&nommu_region_sem);
1358 	delete_nommu_region(vma->vm_region);
1359 	if (new_below) {
1360 		vma->vm_region->vm_start = vma->vm_start = addr;
1361 		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1362 	} else {
1363 		vma->vm_region->vm_end = vma->vm_end = addr;
1364 		vma->vm_region->vm_top = addr;
1365 	}
1366 	add_nommu_region(vma->vm_region);
1367 	add_nommu_region(new->vm_region);
1368 	up_write(&nommu_region_sem);
1369 
1370 	setup_vma_to_mm(vma, mm);
1371 	setup_vma_to_mm(new, mm);
1372 	vma_iter_store(vmi, new);
1373 	mm->map_count++;
1374 	return 0;
1375 
1376 err_vmi_preallocate:
1377 	vm_area_free(new);
1378 err_vma_dup:
1379 	kmem_cache_free(vm_region_jar, region);
1380 	return -ENOMEM;
1381 }
1382 
1383 /*
1384  * shrink a VMA by removing the specified chunk from either the beginning or
1385  * the end
1386  */
1387 static int vmi_shrink_vma(struct vma_iterator *vmi,
1388 		      struct vm_area_struct *vma,
1389 		      unsigned long from, unsigned long to)
1390 {
1391 	struct vm_region *region;
1392 
1393 	/* adjust the VMA's pointers, which may reposition it in the MM's tree
1394 	 * and list */
1395 	if (from > vma->vm_start) {
1396 		if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1397 			return -ENOMEM;
1398 		vma->vm_end = from;
1399 	} else {
1400 		if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1401 			return -ENOMEM;
1402 		vma->vm_start = to;
1403 	}
1404 
1405 	/* cut the backing region down to size */
1406 	region = vma->vm_region;
1407 	BUG_ON(region->vm_usage != 1);
1408 
1409 	down_write(&nommu_region_sem);
1410 	delete_nommu_region(region);
1411 	if (from > region->vm_start) {
1412 		to = region->vm_top;
1413 		region->vm_top = region->vm_end = from;
1414 	} else {
1415 		region->vm_start = to;
1416 	}
1417 	add_nommu_region(region);
1418 	up_write(&nommu_region_sem);
1419 
1420 	free_page_series(from, to);
1421 	return 0;
1422 }
1423 
1424 /*
1425  * release a mapping
1426  * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1427  *   VMA, though it need not cover the whole VMA
1428  */
1429 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1430 {
1431 	VMA_ITERATOR(vmi, mm, start);
1432 	struct vm_area_struct *vma;
1433 	unsigned long end;
1434 	int ret = 0;
1435 
1436 	len = PAGE_ALIGN(len);
1437 	if (len == 0)
1438 		return -EINVAL;
1439 
1440 	end = start + len;
1441 
1442 	/* find the first potentially overlapping VMA */
1443 	vma = vma_find(&vmi, end);
1444 	if (!vma) {
1445 		static int limit;
1446 		if (limit < 5) {
1447 			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1448 					current->pid, current->comm,
1449 					start, start + len - 1);
1450 			limit++;
1451 		}
1452 		return -EINVAL;
1453 	}
1454 
1455 	/* we're allowed to split an anonymous VMA but not a file-backed one */
1456 	if (vma->vm_file) {
1457 		do {
1458 			if (start > vma->vm_start)
1459 				return -EINVAL;
1460 			if (end == vma->vm_end)
1461 				goto erase_whole_vma;
1462 			vma = vma_find(&vmi, end);
1463 		} while (vma);
1464 		return -EINVAL;
1465 	} else {
1466 		/* the chunk must be a subset of the VMA found */
1467 		if (start == vma->vm_start && end == vma->vm_end)
1468 			goto erase_whole_vma;
1469 		if (start < vma->vm_start || end > vma->vm_end)
1470 			return -EINVAL;
1471 		if (offset_in_page(start))
1472 			return -EINVAL;
1473 		if (end != vma->vm_end && offset_in_page(end))
1474 			return -EINVAL;
1475 		if (start != vma->vm_start && end != vma->vm_end) {
1476 			ret = split_vma(&vmi, vma, start, 1);
1477 			if (ret < 0)
1478 				return ret;
1479 		}
1480 		return vmi_shrink_vma(&vmi, vma, start, end);
1481 	}
1482 
1483 erase_whole_vma:
1484 	if (delete_vma_from_mm(vma))
1485 		ret = -ENOMEM;
1486 	else
1487 		delete_vma(mm, vma);
1488 	return ret;
1489 }
1490 
1491 int vm_munmap(unsigned long addr, size_t len)
1492 {
1493 	struct mm_struct *mm = current->mm;
1494 	int ret;
1495 
1496 	mmap_write_lock(mm);
1497 	ret = do_munmap(mm, addr, len, NULL);
1498 	mmap_write_unlock(mm);
1499 	return ret;
1500 }
1501 EXPORT_SYMBOL(vm_munmap);
1502 
1503 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1504 {
1505 	return vm_munmap(addr, len);
1506 }
1507 
1508 /*
1509  * release all the mappings made in a process's VM space
1510  */
1511 void exit_mmap(struct mm_struct *mm)
1512 {
1513 	VMA_ITERATOR(vmi, mm, 0);
1514 	struct vm_area_struct *vma;
1515 
1516 	if (!mm)
1517 		return;
1518 
1519 	mm->total_vm = 0;
1520 
1521 	/*
1522 	 * Lock the mm to keep the locking asserts quiet even though this is the
1523 	 * only user of the mm
1524 	 */
1525 	mmap_write_lock(mm);
1526 	for_each_vma(vmi, vma) {
1527 		cleanup_vma_from_mm(vma);
1528 		delete_vma(mm, vma);
1529 		cond_resched();
1530 	}
1531 	__mt_destroy(&mm->mm_mt);
1532 	mmap_write_unlock(mm);
1533 }
1534 
1535 /*
1536  * expand (or shrink) an existing mapping, potentially moving it at the same
1537  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1538  *
1539  * under NOMMU conditions, we only permit changing a mapping's size, and only
1540  * as long as it stays within the region allocated by do_mmap_private() and the
1541  * block is not shareable
1542  *
1543  * MREMAP_FIXED is not supported under NOMMU conditions
1544  */
1545 static unsigned long do_mremap(unsigned long addr,
1546 			unsigned long old_len, unsigned long new_len,
1547 			unsigned long flags, unsigned long new_addr)
1548 {
1549 	struct vm_area_struct *vma;
1550 
1551 	/* insanity checks first */
1552 	old_len = PAGE_ALIGN(old_len);
1553 	new_len = PAGE_ALIGN(new_len);
1554 	if (old_len == 0 || new_len == 0)
1555 		return (unsigned long) -EINVAL;
1556 
1557 	if (offset_in_page(addr))
1558 		return -EINVAL;
1559 
1560 	if (flags & MREMAP_FIXED && new_addr != addr)
1561 		return (unsigned long) -EINVAL;
1562 
1563 	vma = find_vma_exact(current->mm, addr, old_len);
1564 	if (!vma)
1565 		return (unsigned long) -EINVAL;
1566 
1567 	if (vma->vm_end != vma->vm_start + old_len)
1568 		return (unsigned long) -EFAULT;
1569 
1570 	if (is_nommu_shared_mapping(vma->vm_flags))
1571 		return (unsigned long) -EPERM;
1572 
1573 	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1574 		return (unsigned long) -ENOMEM;
1575 
1576 	/* all checks complete - do it */
1577 	vma->vm_end = vma->vm_start + new_len;
1578 	return vma->vm_start;
1579 }
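
/*
 * Illustrative only: since the backing region can neither move nor grow, the
 * only mremap() calls that can succeed are those that stay within the space
 * do_mmap_private() originally set aside, e.g. for an 8-page anonymous
 * private mapping at p:
 *
 *	mremap(p, 8 * pagesz, 4 * pagesz, 0);	shrink: succeeds
 *	mremap(p, 4 * pagesz, 8 * pagesz, 0);	grow back: succeeds, space kept
 *	mremap(p, 8 * pagesz, 16 * pagesz, 0);	fails with -ENOMEM
 */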
1580 
1581 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1582 		unsigned long, new_len, unsigned long, flags,
1583 		unsigned long, new_addr)
1584 {
1585 	unsigned long ret;
1586 
1587 	mmap_write_lock(current->mm);
1588 	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1589 	mmap_write_unlock(current->mm);
1590 	return ret;
1591 }
1592 
1593 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1594 		unsigned long pfn, unsigned long size, pgprot_t prot)
1595 {
1596 	if (addr != (pfn << PAGE_SHIFT))
1597 		return -EINVAL;
1598 
1599 	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1600 	return 0;
1601 }
1602 EXPORT_SYMBOL(remap_pfn_range);
1603 
1604 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1605 {
1606 	unsigned long pfn = start >> PAGE_SHIFT;
1607 	unsigned long vm_len = vma->vm_end - vma->vm_start;
1608 
1609 	pfn += vma->vm_pgoff;
1610 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1611 }
1612 EXPORT_SYMBOL(vm_iomap_memory);
1613 
1614 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1615 			unsigned long pgoff)
1616 {
1617 	unsigned int size = vma->vm_end - vma->vm_start;
1618 
1619 	if (!(vma->vm_flags & VM_USERMAP))
1620 		return -EINVAL;
1621 
1622 	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1623 	vma->vm_end = vma->vm_start + size;
1624 
1625 	return 0;
1626 }
1627 EXPORT_SYMBOL(remap_vmalloc_range);
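
/*
 * Illustrative only (hypothetical driver, not part of this file): the typical
 * caller is a driver mmap() handler exposing a vmalloc-style buffer, e.g.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buf, 0);
 *	}
 *
 * With no page tables to populate, the !MMU version above simply repoints
 * vma->vm_start/vm_end at the buffer, provided the VMA carries VM_USERMAP.
 */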
1628 
1629 vm_fault_t filemap_fault(struct vm_fault *vmf)
1630 {
1631 	BUG();
1632 	return 0;
1633 }
1634 EXPORT_SYMBOL(filemap_fault);
1635 
1636 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1637 		pgoff_t start_pgoff, pgoff_t end_pgoff)
1638 {
1639 	BUG();
1640 	return 0;
1641 }
1642 EXPORT_SYMBOL(filemap_map_pages);
1643 
1644 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
1645 			      void *buf, int len, unsigned int gup_flags)
1646 {
1647 	struct vm_area_struct *vma;
1648 	int write = gup_flags & FOLL_WRITE;
1649 
1650 	if (mmap_read_lock_killable(mm))
1651 		return 0;
1652 
1653 	/* the access must start within one of the target process's mappings */
1654 	vma = find_vma(mm, addr);
1655 	if (vma) {
1656 		/* don't overrun this mapping */
1657 		if (addr + len >= vma->vm_end)
1658 			len = vma->vm_end - addr;
1659 
1660 		/* only read or write mappings where it is permitted */
1661 		if (write && vma->vm_flags & VM_MAYWRITE)
1662 			copy_to_user_page(vma, NULL, addr,
1663 					 (void *) addr, buf, len);
1664 		else if (!write && vma->vm_flags & VM_MAYREAD)
1665 			copy_from_user_page(vma, NULL, addr,
1666 					    buf, (void *) addr, len);
1667 		else
1668 			len = 0;
1669 	} else {
1670 		len = 0;
1671 	}
1672 
1673 	mmap_read_unlock(mm);
1674 
1675 	return len;
1676 }
1677 
1678 /**
1679  * access_remote_vm - access another process' address space
1680  * @mm:		the mm_struct of the target address space
1681  * @addr:	start address to access
1682  * @buf:	source or destination buffer
1683  * @len:	number of bytes to transfer
1684  * @gup_flags:	flags modifying lookup behaviour
1685  *
1686  * The caller must hold a reference on @mm.
1687  */
1688 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1689 		void *buf, int len, unsigned int gup_flags)
1690 {
1691 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
1692 }
1693 
1694 /*
1695  * Access another process' address space.
1696  * - source/target buffer must be kernel space
1697  */
1698 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1699 		unsigned int gup_flags)
1700 {
1701 	struct mm_struct *mm;
1702 
1703 	if (addr + len < addr)
1704 		return 0;
1705 
1706 	mm = get_task_mm(tsk);
1707 	if (!mm)
1708 		return 0;
1709 
1710 	len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1711 
1712 	mmput(mm);
1713 	return len;
1714 }
1715 EXPORT_SYMBOL_GPL(access_process_vm);
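
/*
 * Illustrative only: callers such as ptrace() data access and /proc/<pid>/mem
 * reads end up here; with a single shared address space the copy degenerates
 * into copy_{to,from}_user_page() on the raw address, gated by the target
 * VMA's VM_MAYWRITE/VM_MAYREAD bits.
 */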
1716 
1717 /**
1718  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1719  * @inode: The inode to check
1720  * @size: The current filesize of the inode
1721  * @newsize: The proposed filesize of the inode
1722  *
1723  * Check the shared mappings on an inode on behalf of a shrinking truncate to
1724  * make sure that any outstanding VMAs aren't broken and then shrink the
1725  * vm_regions that extend beyond the new size so that do_mmap() doesn't
1726  * automatically grant mappings that are too large.
1727  */
1728 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1729 				size_t newsize)
1730 {
1731 	struct vm_area_struct *vma;
1732 	struct vm_region *region;
1733 	pgoff_t low, high;
1734 	size_t r_size, r_top;
1735 
1736 	low = newsize >> PAGE_SHIFT;
1737 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1738 
1739 	down_write(&nommu_region_sem);
1740 	i_mmap_lock_read(inode->i_mapping);
1741 
1742 	/* search for VMAs that fall within the dead zone */
1743 	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1744 		/* found one - only interested if it's shared out of the page
1745 		 * cache */
1746 		if (vma->vm_flags & VM_SHARED) {
1747 			i_mmap_unlock_read(inode->i_mapping);
1748 			up_write(&nommu_region_sem);
1749 			return -ETXTBSY; /* not quite true, but near enough */
1750 		}
1751 	}
1752 
1753 	/* reduce any regions that overlap the dead zone - if in existence,
1754 	 * these will be pointed to by VMAs that don't overlap the dead zone
1755 	 *
1756 	 * we don't check for any regions that start beyond the EOF as there
1757 	 * shouldn't be any
1758 	 */
1759 	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1760 		if (!(vma->vm_flags & VM_SHARED))
1761 			continue;
1762 
1763 		region = vma->vm_region;
1764 		r_size = region->vm_top - region->vm_start;
1765 		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1766 
1767 		if (r_top > newsize) {
1768 			region->vm_top -= r_top - newsize;
1769 			if (region->vm_end > region->vm_top)
1770 				region->vm_end = region->vm_top;
1771 		}
1772 	}
1773 
1774 	i_mmap_unlock_read(inode->i_mapping);
1775 	up_write(&nommu_region_sem);
1776 	return 0;
1777 }
1778 
1779 /*
1780  * Initialise sysctl_user_reserve_kbytes.
1781  *
1782  * This is intended to prevent a user from starting a single memory-hogging
1783  * process so large that they cannot recover from it (kill the hog) in
1784  * OVERCOMMIT_NEVER mode.
1785  *
1786  * The default value is min(3% of free memory, 128MB)
1787  * 128MB is enough to recover with sshd/login, bash, and top/kill.
1788  */
1789 static int __meminit init_user_reserve(void)
1790 {
1791 	unsigned long free_kbytes;
1792 
1793 	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1794 
1795 	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1796 	return 0;
1797 }
1798 subsys_initcall(init_user_reserve);
1799 
1800 /*
1801  * Initialise sysctl_admin_reserve_kbytes.
1802  *
1803  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1804  * to log in and kill a memory hogging process.
1805  *
1806  * Systems with more than 256MB will reserve 8MB, enough to recover
1807  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1808  * only reserve 3% of free pages by default.
1809  */
1810 static int __meminit init_admin_reserve(void)
1811 {
1812 	unsigned long free_kbytes;
1813 
1814 	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1815 
1816 	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1817 	return 0;
1818 }
1819 subsys_initcall(init_admin_reserve);
1820