xref: /linux/mm/mmap.c (revision ecba1060583635ab55092072441ff903b5e9a659)
1 /*
2  * mm/mmap.c
3  *
4  * Written by obz.
5  *
6  * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
7  */
8 
9 #include <linux/slab.h>
10 #include <linux/backing-dev.h>
11 #include <linux/mm.h>
12 #include <linux/shm.h>
13 #include <linux/mman.h>
14 #include <linux/pagemap.h>
15 #include <linux/swap.h>
16 #include <linux/syscalls.h>
17 #include <linux/capability.h>
18 #include <linux/init.h>
19 #include <linux/file.h>
20 #include <linux/fs.h>
21 #include <linux/personality.h>
22 #include <linux/security.h>
23 #include <linux/ima.h>
24 #include <linux/hugetlb.h>
25 #include <linux/profile.h>
26 #include <linux/module.h>
27 #include <linux/mount.h>
28 #include <linux/mempolicy.h>
29 #include <linux/rmap.h>
30 #include <linux/mmu_notifier.h>
31 #include <linux/perf_counter.h>
32 
33 #include <asm/uaccess.h>
34 #include <asm/cacheflush.h>
35 #include <asm/tlb.h>
36 #include <asm/mmu_context.h>
37 
38 #include "internal.h"
39 
40 #ifndef arch_mmap_check
41 #define arch_mmap_check(addr, len, flags)	(0)
42 #endif
43 
44 #ifndef arch_rebalance_pgtables
45 #define arch_rebalance_pgtables(addr, len)		(addr)
46 #endif
47 
48 static void unmap_region(struct mm_struct *mm,
49 		struct vm_area_struct *vma, struct vm_area_struct *prev,
50 		unsigned long start, unsigned long end);
51 
52 /*
53  * WARNING: the debugging will use recursive algorithms so never enable this
54  * unless you know what you are doing.
55  */
56 #undef DEBUG_MM_RB
57 
58 /* Description of the effects of mapping type and prot in the current implementation.
59  * This is due to the limited x86 page protection hardware.  The expected
60  * behavior is in parens:
61  *
62  * map_type	prot
63  *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
64  * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
65  *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
66  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
67  *
68  * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
69  *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
70  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
71  *
72  */
73 pgprot_t protection_map[16] = {
74 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
75 	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
76 };
77 
78 pgprot_t vm_get_page_prot(unsigned long vm_flags)
79 {
80 	return __pgprot(pgprot_val(protection_map[vm_flags &
81 				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
82 			pgprot_val(arch_vm_get_page_prot(vm_flags)));
83 }
84 EXPORT_SYMBOL(vm_get_page_prot);
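/*
 * Illustrative example (editor's note, not in the original source):
 * for a MAP_PRIVATE mapping requested with PROT_READ|PROT_WRITE,
 * vm_flags contains VM_READ|VM_WRITE but not VM_SHARED, so the index
 * into protection_map[] is 0x3 and the protection used is __P011,
 * i.e. the copy-on-write variant shown in the table above.
 */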
85 
86 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
87 int sysctl_overcommit_ratio = 50;	/* default is 50% */
88 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
89 struct percpu_counter vm_committed_as;
90 
91 /* amount of vm to protect from userspace access */
92 unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
93 
94 /*
95  * Check that a process has enough memory to allocate a new virtual
96  * mapping. 0 means there is enough memory for the allocation to
97  * succeed and -ENOMEM implies there is not.
98  *
99  * We currently support three overcommit policies, which are set via the
100  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
101  *
102  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
103  * Additional code 2002 Jul 20 by Robert Love.
104  *
105  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
106  *
107  * Note this is a helper function intended to be used by LSMs which
108  * wish to use this logic.
109  */
110 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
111 {
112 	unsigned long free, allowed;
113 
114 	vm_acct_memory(pages);
115 
116 	/*
117 	 * Sometimes we want to use more memory than we have
118 	 */
119 	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
120 		return 0;
121 
122 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
123 		unsigned long n;
124 
125 		free = global_page_state(NR_FILE_PAGES);
126 		free += nr_swap_pages;
127 
128 		/*
129 		 * Any slabs which are created with the
130 		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
131 		 * which are reclaimable, under pressure.  The dentry
132 		 * cache and most inode caches should fall into this
133 		 */
134 		free += global_page_state(NR_SLAB_RECLAIMABLE);
135 
136 		/*
137 		 * Leave the last 3% for root
138 		 */
139 		if (!cap_sys_admin)
140 			free -= free / 32;
141 
142 		if (free > pages)
143 			return 0;
144 
145 		/*
146 		 * nr_free_pages() is very expensive on large systems,
147 		 * only call if we're about to fail.
148 		 */
149 		n = nr_free_pages();
150 
151 		/*
152 		 * Leave reserved pages. The pages are not for anonymous pages.
153 		 * Leave the reserved pages alone; they are not available for anonymous mappings.
154 		if (n <= totalreserve_pages)
155 			goto error;
156 		else
157 			n -= totalreserve_pages;
158 
159 		/*
160 		 * Leave the last 3% for root
161 		 */
162 		if (!cap_sys_admin)
163 			n -= n / 32;
164 		free += n;
165 
166 		if (free > pages)
167 			return 0;
168 
169 		goto error;
170 	}
171 
172 	allowed = (totalram_pages - hugetlb_total_pages())
173 	       	* sysctl_overcommit_ratio / 100;
174 	/*
175 	 * Leave the last 3% for root
176 	 */
177 	if (!cap_sys_admin)
178 		allowed -= allowed / 32;
179 	allowed += total_swap_pages;
180 
181 	/* Don't let a single process grow too big:
182 	   leave 3% of the size of this process for other processes */
183 	if (mm)
184 		allowed -= mm->total_vm / 32;
185 
186 	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
187 		return 0;
188 error:
189 	vm_unacct_memory(pages);
190 
191 	return -ENOMEM;
192 }
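/*
 * Worked example (editor's note, illustrative figures only): under
 * OVERCOMMIT_NEVER with the default overcommit_ratio of 50, a machine
 * with 1,000,000 non-hugetlb RAM pages and 250,000 swap pages gets
 * allowed = 1,000,000 * 50/100 = 500,000 pages; a non-root caller
 * loses 3% of that (500,000/32 = 15,625), leaving 484,375; adding the
 * swap gives 734,375, from which mm->total_vm/32 is subtracted before
 * the comparison against vm_committed_as.
 */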
193 
194 /*
195  * Requires inode->i_mapping->i_mmap_lock
196  */
197 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
198 		struct file *file, struct address_space *mapping)
199 {
200 	if (vma->vm_flags & VM_DENYWRITE)
201 		atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
202 	if (vma->vm_flags & VM_SHARED)
203 		mapping->i_mmap_writable--;
204 
205 	flush_dcache_mmap_lock(mapping);
206 	if (unlikely(vma->vm_flags & VM_NONLINEAR))
207 		list_del_init(&vma->shared.vm_set.list);
208 	else
209 		vma_prio_tree_remove(vma, &mapping->i_mmap);
210 	flush_dcache_mmap_unlock(mapping);
211 }
212 
213 /*
214  * Unlink a file-based vm structure from its prio_tree, to hide
215  * vma from rmap and vmtruncate before freeing its page tables.
216  */
217 void unlink_file_vma(struct vm_area_struct *vma)
218 {
219 	struct file *file = vma->vm_file;
220 
221 	if (file) {
222 		struct address_space *mapping = file->f_mapping;
223 		spin_lock(&mapping->i_mmap_lock);
224 		__remove_shared_vm_struct(vma, file, mapping);
225 		spin_unlock(&mapping->i_mmap_lock);
226 	}
227 }
228 
229 /*
230  * Close a vm structure and free it, returning the next.
231  */
232 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
233 {
234 	struct vm_area_struct *next = vma->vm_next;
235 
236 	might_sleep();
237 	if (vma->vm_ops && vma->vm_ops->close)
238 		vma->vm_ops->close(vma);
239 	if (vma->vm_file) {
240 		fput(vma->vm_file);
241 		if (vma->vm_flags & VM_EXECUTABLE)
242 			removed_exe_file_vma(vma->vm_mm);
243 	}
244 	mpol_put(vma_policy(vma));
245 	kmem_cache_free(vm_area_cachep, vma);
246 	return next;
247 }
248 
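/*
 * sys_brk: move the end of the data segment (the program break).
 * Shrinking is always allowed; growing is checked against RLIMIT_DATA
 * and against existing mappings before do_brk() extends the heap.
 * Returns the resulting break; on failure the break is left unchanged
 * and its current value is returned.
 */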
249 SYSCALL_DEFINE1(brk, unsigned long, brk)
250 {
251 	unsigned long rlim, retval;
252 	unsigned long newbrk, oldbrk;
253 	struct mm_struct *mm = current->mm;
254 	unsigned long min_brk;
255 
256 	down_write(&mm->mmap_sem);
257 
258 #ifdef CONFIG_COMPAT_BRK
259 	min_brk = mm->end_code;
260 #else
261 	min_brk = mm->start_brk;
262 #endif
263 	if (brk < min_brk)
264 		goto out;
265 
266 	/*
267 	 * Check against rlimit here. If this check is done later after the test
268 	 * of oldbrk with newbrk then it can escape the test and let the data
269 	 * segment grow beyond its set limit in the case where the limit is
270 	 * not page aligned -Ram Gupta
271 	 */
272 	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
273 	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
274 			(mm->end_data - mm->start_data) > rlim)
275 		goto out;
276 
277 	newbrk = PAGE_ALIGN(brk);
278 	oldbrk = PAGE_ALIGN(mm->brk);
279 	if (oldbrk == newbrk)
280 		goto set_brk;
281 
282 	/* Always allow shrinking brk. */
283 	if (brk <= mm->brk) {
284 		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
285 			goto set_brk;
286 		goto out;
287 	}
288 
289 	/* Check against existing mmap mappings. */
290 	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
291 		goto out;
292 
293 	/* Ok, looks good - let it rip. */
294 	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
295 		goto out;
296 set_brk:
297 	mm->brk = brk;
298 out:
299 	retval = mm->brk;
300 	up_write(&mm->mmap_sem);
301 	return retval;
302 }
303 
304 #ifdef DEBUG_MM_RB
305 static int browse_rb(struct rb_root *root)
306 {
307 	int i = 0, j;
308 	struct rb_node *nd, *pn = NULL;
309 	unsigned long prev = 0, pend = 0;
310 
311 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
312 		struct vm_area_struct *vma;
313 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
314 		if (vma->vm_start < prev)
315 			printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
316 		if (vma->vm_start < pend)
317 			printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
318 		if (vma->vm_start > vma->vm_end)
319 			printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
320 		i++;
321 		pn = nd;
322 		prev = vma->vm_start;
323 		pend = vma->vm_end;
324 	}
325 	j = 0;
326 	for (nd = pn; nd; nd = rb_prev(nd)) {
327 		j++;
328 	}
329 	if (i != j)
330 		printk("backwards %d, forwards %d\n", j, i), i = 0;
331 	return i;
332 }
333 
334 void validate_mm(struct mm_struct *mm)
335 {
336 	int bug = 0;
337 	int i = 0;
338 	struct vm_area_struct *tmp = mm->mmap;
339 	while (tmp) {
340 		tmp = tmp->vm_next;
341 		i++;
342 	}
343 	if (i != mm->map_count)
344 		printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
345 	i = browse_rb(&mm->mm_rb);
346 	if (i != mm->map_count)
347 		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
348 	BUG_ON(bug);
349 }
350 #else
351 #define validate_mm(mm) do { } while (0)
352 #endif
353 
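/*
 * Find the first vma with vm_end > addr (the vma covering addr, or the
 * next one above it), and report where a new vma for addr would go:
 * *pprev is set to the preceding vma, *rb_link to the rbtree slot a
 * new node would occupy, and *rb_parent to that slot's parent node.
 */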
354 static struct vm_area_struct *
355 find_vma_prepare(struct mm_struct *mm, unsigned long addr,
356 		struct vm_area_struct **pprev, struct rb_node ***rb_link,
357 		struct rb_node ** rb_parent)
358 {
359 	struct vm_area_struct * vma;
360 	struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
361 
362 	__rb_link = &mm->mm_rb.rb_node;
363 	rb_prev = __rb_parent = NULL;
364 	vma = NULL;
365 
366 	while (*__rb_link) {
367 		struct vm_area_struct *vma_tmp;
368 
369 		__rb_parent = *__rb_link;
370 		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
371 
372 		if (vma_tmp->vm_end > addr) {
373 			vma = vma_tmp;
374 			if (vma_tmp->vm_start <= addr)
375 				break;
376 			__rb_link = &__rb_parent->rb_left;
377 		} else {
378 			rb_prev = __rb_parent;
379 			__rb_link = &__rb_parent->rb_right;
380 		}
381 	}
382 
383 	*pprev = NULL;
384 	if (rb_prev)
385 		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
386 	*rb_link = __rb_link;
387 	*rb_parent = __rb_parent;
388 	return vma;
389 }
390 
391 static inline void
392 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
393 		struct vm_area_struct *prev, struct rb_node *rb_parent)
394 {
395 	if (prev) {
396 		vma->vm_next = prev->vm_next;
397 		prev->vm_next = vma;
398 	} else {
399 		mm->mmap = vma;
400 		if (rb_parent)
401 			vma->vm_next = rb_entry(rb_parent,
402 					struct vm_area_struct, vm_rb);
403 		else
404 			vma->vm_next = NULL;
405 	}
406 }
407 
408 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
409 		struct rb_node **rb_link, struct rb_node *rb_parent)
410 {
411 	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
412 	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
413 }
414 
415 static void __vma_link_file(struct vm_area_struct *vma)
416 {
417 	struct file *file;
418 
419 	file = vma->vm_file;
420 	if (file) {
421 		struct address_space *mapping = file->f_mapping;
422 
423 		if (vma->vm_flags & VM_DENYWRITE)
424 			atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
425 		if (vma->vm_flags & VM_SHARED)
426 			mapping->i_mmap_writable++;
427 
428 		flush_dcache_mmap_lock(mapping);
429 		if (unlikely(vma->vm_flags & VM_NONLINEAR))
430 			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
431 		else
432 			vma_prio_tree_insert(vma, &mapping->i_mmap);
433 		flush_dcache_mmap_unlock(mapping);
434 	}
435 }
436 
437 static void
438 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
439 	struct vm_area_struct *prev, struct rb_node **rb_link,
440 	struct rb_node *rb_parent)
441 {
442 	__vma_link_list(mm, vma, prev, rb_parent);
443 	__vma_link_rb(mm, vma, rb_link, rb_parent);
444 	__anon_vma_link(vma);
445 }
446 
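/*
 * Link a new vma into the mm: take the file's i_mmap_lock (recording
 * truncate_count) and the anon_vma lock if present, add the vma to the
 * linked list, the rbtree, the anon_vma list and the file's prio_tree,
 * then bump map_count.
 */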
447 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
448 			struct vm_area_struct *prev, struct rb_node **rb_link,
449 			struct rb_node *rb_parent)
450 {
451 	struct address_space *mapping = NULL;
452 
453 	if (vma->vm_file)
454 		mapping = vma->vm_file->f_mapping;
455 
456 	if (mapping) {
457 		spin_lock(&mapping->i_mmap_lock);
458 		vma->vm_truncate_count = mapping->truncate_count;
459 	}
460 	anon_vma_lock(vma);
461 
462 	__vma_link(mm, vma, prev, rb_link, rb_parent);
463 	__vma_link_file(vma);
464 
465 	anon_vma_unlock(vma);
466 	if (mapping)
467 		spin_unlock(&mapping->i_mmap_lock);
468 
469 	mm->map_count++;
470 	validate_mm(mm);
471 }
472 
473 /*
474  * Helper for vma_adjust in the split_vma insert case:
475  * insert vm structure into list and rbtree and anon_vma,
476  * but it has already been inserted into prio_tree earlier.
477  */
478 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
479 {
480 	struct vm_area_struct *__vma, *prev;
481 	struct rb_node **rb_link, *rb_parent;
482 
483 	__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
484 	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
485 	__vma_link(mm, vma, prev, rb_link, rb_parent);
486 	mm->map_count++;
487 }
488 
489 static inline void
490 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
491 		struct vm_area_struct *prev)
492 {
493 	prev->vm_next = vma->vm_next;
494 	rb_erase(&vma->vm_rb, &mm->mm_rb);
495 	if (mm->mmap_cache == vma)
496 		mm->mmap_cache = prev;
497 }
498 
499 /*
500  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
501  * is already present in an i_mmap tree without adjusting the tree.
502  * The following helper function should be used when such adjustments
503  * are necessary.  The "insert" vma (if any) is to be inserted
504  * before we drop the necessary locks.
505  */
506 void vma_adjust(struct vm_area_struct *vma, unsigned long start,
507 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
508 {
509 	struct mm_struct *mm = vma->vm_mm;
510 	struct vm_area_struct *next = vma->vm_next;
511 	struct vm_area_struct *importer = NULL;
512 	struct address_space *mapping = NULL;
513 	struct prio_tree_root *root = NULL;
514 	struct file *file = vma->vm_file;
515 	struct anon_vma *anon_vma = NULL;
516 	long adjust_next = 0;
517 	int remove_next = 0;
518 
519 	if (next && !insert) {
520 		if (end >= next->vm_end) {
521 			/*
522 			 * vma expands, overlapping all the next, and
523 			 * perhaps the one after too (mprotect case 6).
524 			 */
525 again:			remove_next = 1 + (end > next->vm_end);
526 			end = next->vm_end;
527 			anon_vma = next->anon_vma;
528 			importer = vma;
529 		} else if (end > next->vm_start) {
530 			/*
531 			 * vma expands, overlapping part of the next:
532 			 * mprotect case 5 shifting the boundary up.
533 			 */
534 			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
535 			anon_vma = next->anon_vma;
536 			importer = vma;
537 		} else if (end < vma->vm_end) {
538 			/*
539 			 * vma shrinks, and !insert tells it's not
540 			 * split_vma inserting another: so it must be
541 			 * mprotect case 4 shifting the boundary down.
542 			 */
543 			adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
544 			anon_vma = next->anon_vma;
545 			importer = next;
546 		}
547 	}
548 
549 	if (file) {
550 		mapping = file->f_mapping;
551 		if (!(vma->vm_flags & VM_NONLINEAR))
552 			root = &mapping->i_mmap;
553 		spin_lock(&mapping->i_mmap_lock);
554 		if (importer &&
555 		    vma->vm_truncate_count != next->vm_truncate_count) {
556 			/*
557 			 * unmap_mapping_range might be in progress:
558 			 * ensure that the expanding vma is rescanned.
559 			 */
560 			importer->vm_truncate_count = 0;
561 		}
562 		if (insert) {
563 			insert->vm_truncate_count = vma->vm_truncate_count;
564 			/*
565 			 * Put into prio_tree now, so instantiated pages
566 			 * are visible to arm/parisc __flush_dcache_page
567 			 * throughout; but we cannot insert into address
568 			 * space until vma start or end is updated.
569 			 */
570 			__vma_link_file(insert);
571 		}
572 	}
573 
574 	/*
575 	 * When changing only vma->vm_end, we don't really need
576 	 * anon_vma lock: but is that case worth optimizing out?
577 	 */
578 	if (vma->anon_vma)
579 		anon_vma = vma->anon_vma;
580 	if (anon_vma) {
581 		spin_lock(&anon_vma->lock);
582 		/*
583 		 * Easily overlooked: when mprotect shifts the boundary,
584 		 * make sure the expanding vma has anon_vma set if the
585 		 * shrinking vma had, to cover any anon pages imported.
586 		 */
587 		if (importer && !importer->anon_vma) {
588 			importer->anon_vma = anon_vma;
589 			__anon_vma_link(importer);
590 		}
591 	}
592 
593 	if (root) {
594 		flush_dcache_mmap_lock(mapping);
595 		vma_prio_tree_remove(vma, root);
596 		if (adjust_next)
597 			vma_prio_tree_remove(next, root);
598 	}
599 
600 	vma->vm_start = start;
601 	vma->vm_end = end;
602 	vma->vm_pgoff = pgoff;
603 	if (adjust_next) {
604 		next->vm_start += adjust_next << PAGE_SHIFT;
605 		next->vm_pgoff += adjust_next;
606 	}
607 
608 	if (root) {
609 		if (adjust_next)
610 			vma_prio_tree_insert(next, root);
611 		vma_prio_tree_insert(vma, root);
612 		flush_dcache_mmap_unlock(mapping);
613 	}
614 
615 	if (remove_next) {
616 		/*
617 		 * vma_merge has merged next into vma, and needs
618 		 * us to remove next before dropping the locks.
619 		 */
620 		__vma_unlink(mm, next, vma);
621 		if (file)
622 			__remove_shared_vm_struct(next, file, mapping);
623 		if (next->anon_vma)
624 			__anon_vma_merge(vma, next);
625 	} else if (insert) {
626 		/*
627 		 * split_vma has split insert from vma, and needs
628 		 * us to insert it before dropping the locks
629 		 * (it may either follow vma or precede it).
630 		 */
631 		__insert_vm_struct(mm, insert);
632 	}
633 
634 	if (anon_vma)
635 		spin_unlock(&anon_vma->lock);
636 	if (mapping)
637 		spin_unlock(&mapping->i_mmap_lock);
638 
639 	if (remove_next) {
640 		if (file) {
641 			fput(file);
642 			if (next->vm_flags & VM_EXECUTABLE)
643 				removed_exe_file_vma(mm);
644 		}
645 		mm->map_count--;
646 		mpol_put(vma_policy(next));
647 		kmem_cache_free(vm_area_cachep, next);
648 		/*
649 		 * In mprotect's case 6 (see comments on vma_merge),
650 		 * we must remove another next too. It would clutter
651 		 * up the code too much to do both in one go.
652 		 */
653 		if (remove_next == 2) {
654 			next = vma->vm_next;
655 			goto again;
656 		}
657 	}
658 
659 	validate_mm(mm);
660 }
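/*
 * Illustrative example (editor's note, not in the original source):
 * when brk() grows the heap and vma_merge() extends the existing
 * anonymous vma, this is called as vma_adjust(prev, prev->vm_start,
 * end, prev->vm_pgoff, NULL): there is no file, so no prio_tree work
 * is needed, and only vm_end moves forward, under the anon_vma lock
 * if one is attached.
 */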
661 
662 /* Flags that can be inherited from an existing mapping when merging */
663 #define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
664 
665 /*
666  * If the vma has a ->close operation then the driver probably needs to release
667  * per-vma resources, so we don't attempt to merge those.
668  */
669 static inline int is_mergeable_vma(struct vm_area_struct *vma,
670 			struct file *file, unsigned long vm_flags)
671 {
672 	if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
673 		return 0;
674 	if (vma->vm_file != file)
675 		return 0;
676 	if (vma->vm_ops && vma->vm_ops->close)
677 		return 0;
678 	return 1;
679 }
680 
681 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
682 					struct anon_vma *anon_vma2)
683 {
684 	return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
685 }
686 
687 /*
688  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
689  * in front of (at a lower virtual address and file offset than) the vma.
690  *
691  * We cannot merge two vmas if they have differently assigned (non-NULL)
692  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
693  *
694  * We don't check here for the merged mmap wrapping around the end of pagecache
695  * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
696  * wrap, nor mmaps which cover the final page at index -1UL.
697  */
698 static int
699 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
700 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
701 {
702 	if (is_mergeable_vma(vma, file, vm_flags) &&
703 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
704 		if (vma->vm_pgoff == vm_pgoff)
705 			return 1;
706 	}
707 	return 0;
708 }
709 
710 /*
711  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
712  * beyond (at a higher virtual address and file offset than) the vma.
713  *
714  * We cannot merge two vmas if they have differently assigned (non-NULL)
715  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
716  */
717 static int
718 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
719 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
720 {
721 	if (is_mergeable_vma(vma, file, vm_flags) &&
722 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
723 		pgoff_t vm_pglen;
724 		vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
725 		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
726 			return 1;
727 	}
728 	return 0;
729 }
730 
731 /*
732  * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
733  * whether that can be merged with its predecessor or its successor.
734  * Or both (it neatly fills a hole).
735  *
736  * In most cases - when called for mmap, brk or mremap - [addr,end) is
737  * certain not to be mapped by the time vma_merge is called; but when
738  * called for mprotect, it is certain to be already mapped (either at
739  * an offset within prev, or at the start of next), and the flags of
740  * this area are about to be changed to vm_flags - and the no-change
741  * case has already been eliminated.
742  *
743  * The following mprotect cases have to be considered, where AAAA is
744  * the area passed down from mprotect_fixup, never extending beyond one
745  * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
746  *
747  *     AAAA             AAAA                AAAA          AAAA
748  *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
749  *    cannot merge    might become    might become    might become
750  *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
751  *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
752  *    mremap move:                                    PPPPNNNNNNNN 8
753  *        AAAA
754  *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
755  *    might become    case 1 below    case 2 below    case 3 below
756  *
757  * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
758  * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
759  */
760 struct vm_area_struct *vma_merge(struct mm_struct *mm,
761 			struct vm_area_struct *prev, unsigned long addr,
762 			unsigned long end, unsigned long vm_flags,
763 		     	struct anon_vma *anon_vma, struct file *file,
764 			pgoff_t pgoff, struct mempolicy *policy)
765 {
766 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
767 	struct vm_area_struct *area, *next;
768 
769 	/*
770 	 * We later require that vma->vm_flags == vm_flags,
771 	 * so this tests vma->vm_flags & VM_SPECIAL, too.
772 	 */
773 	if (vm_flags & VM_SPECIAL)
774 		return NULL;
775 
776 	if (prev)
777 		next = prev->vm_next;
778 	else
779 		next = mm->mmap;
780 	area = next;
781 	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
782 		next = next->vm_next;
783 
784 	/*
785 	 * Can it merge with the predecessor?
786 	 */
787 	if (prev && prev->vm_end == addr &&
788   			mpol_equal(vma_policy(prev), policy) &&
789 			can_vma_merge_after(prev, vm_flags,
790 						anon_vma, file, pgoff)) {
791 		/*
792 		 * OK, it can.  Can we now merge in the successor as well?
793 		 */
794 		if (next && end == next->vm_start &&
795 				mpol_equal(policy, vma_policy(next)) &&
796 				can_vma_merge_before(next, vm_flags,
797 					anon_vma, file, pgoff+pglen) &&
798 				is_mergeable_anon_vma(prev->anon_vma,
799 						      next->anon_vma)) {
800 							/* cases 1, 6 */
801 			vma_adjust(prev, prev->vm_start,
802 				next->vm_end, prev->vm_pgoff, NULL);
803 		} else					/* cases 2, 5, 7 */
804 			vma_adjust(prev, prev->vm_start,
805 				end, prev->vm_pgoff, NULL);
806 		return prev;
807 	}
808 
809 	/*
810 	 * Can this new request be merged in front of next?
811 	 */
812 	if (next && end == next->vm_start &&
813  			mpol_equal(policy, vma_policy(next)) &&
814 			can_vma_merge_before(next, vm_flags,
815 					anon_vma, file, pgoff+pglen)) {
816 		if (prev && addr < prev->vm_end)	/* case 4 */
817 			vma_adjust(prev, prev->vm_start,
818 				addr, prev->vm_pgoff, NULL);
819 		else					/* cases 3, 8 */
820 			vma_adjust(area, addr, next->vm_end,
821 				next->vm_pgoff - pglen, NULL);
822 		return area;
823 	}
824 
825 	return NULL;
826 }
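/*
 * Illustrative example (editor's note): a new anonymous mapping that
 * exactly fills the hole between two mergeable anonymous vmas hits
 * case 1 above: prev is extended over the hole and over next via
 * vma_adjust(), and next is then unlinked and freed.
 */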
827 
828 /*
829  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
830  * neighbouring vmas for a suitable anon_vma, before it goes off
831  * to allocate a new anon_vma.  It checks because a repetitive
832  * sequence of mprotects and faults may otherwise lead to distinct
833  * anon_vmas being allocated, preventing vma merge in subsequent
834  * mprotect.
835  */
836 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
837 {
838 	struct vm_area_struct *near;
839 	unsigned long vm_flags;
840 
841 	near = vma->vm_next;
842 	if (!near)
843 		goto try_prev;
844 
845 	/*
846 	 * Since only mprotect tries to remerge vmas, match flags
847 	 * which might be mprotected into each other later on.
848 	 * Neither mlock nor madvise tries to remerge at present,
849 	 * so leave their flags as obstructing a merge.
850 	 */
851 	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
852 	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
853 
854 	if (near->anon_vma && vma->vm_end == near->vm_start &&
855  			mpol_equal(vma_policy(vma), vma_policy(near)) &&
856 			can_vma_merge_before(near, vm_flags,
857 				NULL, vma->vm_file, vma->vm_pgoff +
858 				((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
859 		return near->anon_vma;
860 try_prev:
861 	/*
862 	 * It is potentially slow to have to call find_vma_prev here.
863 	 * But it's only on the first write fault on the vma, not
864 	 * every time, and we could devise a way to avoid it later
865 	 * (e.g. stash info in next's anon_vma_node when assigning
866 	 * an anon_vma, or when trying vma_merge).  Another time.
867 	 */
868 	BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
869 	if (!near)
870 		goto none;
871 
872 	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
873 	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
874 
875 	if (near->anon_vma && near->vm_end == vma->vm_start &&
876   			mpol_equal(vma_policy(near), vma_policy(vma)) &&
877 			can_vma_merge_after(near, vm_flags,
878 				NULL, vma->vm_file, vma->vm_pgoff))
879 		return near->anon_vma;
880 none:
881 	/*
882 	 * There's no absolute need to look only at touching neighbours:
883 	 * we could search further afield for "compatible" anon_vmas.
884 	 * But it would probably just be a waste of time searching,
885 	 * or lead to too many vmas hanging off the same anon_vma.
886 	 * We're trying to allow mprotect remerging later on,
887 	 * not trying to minimize memory used for anon_vmas.
888 	 */
889 	return NULL;
890 }
891 
892 #ifdef CONFIG_PROC_FS
893 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
894 						struct file *file, long pages)
895 {
896 	const unsigned long stack_flags
897 		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
898 
899 	if (file) {
900 		mm->shared_vm += pages;
901 		if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
902 			mm->exec_vm += pages;
903 	} else if (flags & stack_flags)
904 		mm->stack_vm += pages;
905 	if (flags & (VM_RESERVED|VM_IO))
906 		mm->reserved_vm += pages;
907 }
908 #endif /* CONFIG_PROC_FS */
909 
910 /*
911  * The caller must hold down_write(current->mm->mmap_sem).
912  */
913 
914 unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
915 			unsigned long len, unsigned long prot,
916 			unsigned long flags, unsigned long pgoff)
917 {
918 	struct mm_struct * mm = current->mm;
919 	struct inode *inode;
920 	unsigned int vm_flags;
921 	int error;
922 	unsigned long reqprot = prot;
923 
924 	/*
925 	 * Does the application expect PROT_READ to imply PROT_EXEC?
926 	 *
927 	 * (the exception is when the underlying filesystem is noexec
928 	 *  mounted, in which case we don't add PROT_EXEC.)
929 	 */
930 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
931 		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
932 			prot |= PROT_EXEC;
933 
934 	if (!len)
935 		return -EINVAL;
936 
937 	if (!(flags & MAP_FIXED))
938 		addr = round_hint_to_min(addr);
939 
940 	error = arch_mmap_check(addr, len, flags);
941 	if (error)
942 		return error;
943 
944 	/* Careful about overflows.. */
945 	len = PAGE_ALIGN(len);
946 	if (!len || len > TASK_SIZE)
947 		return -ENOMEM;
948 
949 	/* offset overflow? */
950 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
951 		return -EOVERFLOW;
952 
953 	/* Too many mappings? */
954 	if (mm->map_count > sysctl_max_map_count)
955 		return -ENOMEM;
956 
957 	/* Obtain the address to map to. We verify (or select) it and ensure
958 	 * that it represents a valid section of the address space.
959 	 */
960 	addr = get_unmapped_area(file, addr, len, pgoff, flags);
961 	if (addr & ~PAGE_MASK)
962 		return addr;
963 
964 	/* Do simple checking here so the lower-level routines won't have
965 	 * to. We assume access permissions have been handled by the open
966 	 * of the memory object, so we don't do any here.
967 	 */
968 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
969 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
970 
971 	if (flags & MAP_LOCKED) {
972 		if (!can_do_mlock())
973 			return -EPERM;
974 		vm_flags |= VM_LOCKED;
975 	}
976 
977 	/* mlock MCL_FUTURE? */
978 	if (vm_flags & VM_LOCKED) {
979 		unsigned long locked, lock_limit;
980 		locked = len >> PAGE_SHIFT;
981 		locked += mm->locked_vm;
982 		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
983 		lock_limit >>= PAGE_SHIFT;
984 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
985 			return -EAGAIN;
986 	}
987 
988 	inode = file ? file->f_path.dentry->d_inode : NULL;
989 
990 	if (file) {
991 		switch (flags & MAP_TYPE) {
992 		case MAP_SHARED:
993 			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
994 				return -EACCES;
995 
996 			/*
997 			 * Make sure we don't allow writing to an append-only
998 			 * file..
999 			 */
1000 			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1001 				return -EACCES;
1002 
1003 			/*
1004 			 * Make sure there are no mandatory locks on the file.
1005 			 */
1006 			if (locks_verify_locked(inode))
1007 				return -EAGAIN;
1008 
1009 			vm_flags |= VM_SHARED | VM_MAYSHARE;
1010 			if (!(file->f_mode & FMODE_WRITE))
1011 				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1012 
1013 			/* fall through */
1014 		case MAP_PRIVATE:
1015 			if (!(file->f_mode & FMODE_READ))
1016 				return -EACCES;
1017 			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1018 				if (vm_flags & VM_EXEC)
1019 					return -EPERM;
1020 				vm_flags &= ~VM_MAYEXEC;
1021 			}
1022 
1023 			if (!file->f_op || !file->f_op->mmap)
1024 				return -ENODEV;
1025 			break;
1026 
1027 		default:
1028 			return -EINVAL;
1029 		}
1030 	} else {
1031 		switch (flags & MAP_TYPE) {
1032 		case MAP_SHARED:
1033 			/*
1034 			 * Ignore pgoff.
1035 			 */
1036 			pgoff = 0;
1037 			vm_flags |= VM_SHARED | VM_MAYSHARE;
1038 			break;
1039 		case MAP_PRIVATE:
1040 			/*
1041 			 * Set pgoff according to addr for anon_vma.
1042 			 */
1043 			pgoff = addr >> PAGE_SHIFT;
1044 			break;
1045 		default:
1046 			return -EINVAL;
1047 		}
1048 	}
1049 
1050 	error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
1051 	if (error)
1052 		return error;
1053 	error = ima_file_mmap(file, prot);
1054 	if (error)
1055 		return error;
1056 
1057 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
1058 }
1059 EXPORT_SYMBOL(do_mmap_pgoff);
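/*
 * Illustrative example (editor's note, not in the original source):
 * a userspace call such as
 *
 *	mmap(NULL, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
 *
 * typically reaches this point as do_mmap_pgoff(NULL, 0, 8192,
 * PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0), after the
 * architecture's mmap syscall wrapper has converted the byte offset to
 * a page offset and taken mmap_sem for writing.
 */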
1060 
1061 /*
1062  * Some shared mappings will want the pages marked read-only
1063  * to track write events. If so, we'll downgrade vm_page_prot
1064  * to the private version (using protection_map[] without the
1065  * VM_SHARED bit).
1066  */
1067 int vma_wants_writenotify(struct vm_area_struct *vma)
1068 {
1069 	unsigned int vm_flags = vma->vm_flags;
1070 
1071 	/* If it was private or non-writable, the write bit is already clear */
1072 	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1073 		return 0;
1074 
1075 	/* The backer wishes to know when pages are first written to? */
1076 	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
1077 		return 1;
1078 
1079 	/* The open routine did something to the protections already? */
1080 	if (pgprot_val(vma->vm_page_prot) !=
1081 	    pgprot_val(vm_get_page_prot(vm_flags)))
1082 		return 0;
1083 
1084 	/* Specialty mapping? */
1085 	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
1086 		return 0;
1087 
1088 	/* Can the mapping track the dirty pages? */
1089 	return vma->vm_file && vma->vm_file->f_mapping &&
1090 		mapping_cap_account_dirty(vma->vm_file->f_mapping);
1091 }
1092 
1093 /*
1094  * We account for memory if it's a private writeable mapping,
1095  * not hugepages and VM_NORESERVE wasn't set.
1096  */
1097 static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
1098 {
1099 	/*
1100 	 * hugetlb has its own accounting separate from the core VM
1101 	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1102 	 */
1103 	if (file && is_file_hugepages(file))
1104 		return 0;
1105 
1106 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1107 }
1108 
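/*
 * Carry out the mapping proper: clear any old mappings overlapping the
 * range, charge the memory if the mapping is accountable, try to merge
 * with a neighbouring vma, otherwise allocate a new vma, call the
 * file's ->mmap() (or shmem_zero_setup() for shared anonymous memory),
 * and finally link the vma into the mm.
 */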
1109 unsigned long mmap_region(struct file *file, unsigned long addr,
1110 			  unsigned long len, unsigned long flags,
1111 			  unsigned int vm_flags, unsigned long pgoff)
1112 {
1113 	struct mm_struct *mm = current->mm;
1114 	struct vm_area_struct *vma, *prev;
1115 	int correct_wcount = 0;
1116 	int error;
1117 	struct rb_node **rb_link, *rb_parent;
1118 	unsigned long charged = 0;
1119 	struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
1120 
1121 	/* Clear old maps */
1122 	error = -ENOMEM;
1123 munmap_back:
1124 	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1125 	if (vma && vma->vm_start < addr + len) {
1126 		if (do_munmap(mm, addr, len))
1127 			return -ENOMEM;
1128 		goto munmap_back;
1129 	}
1130 
1131 	/* Check against address space limit. */
1132 	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1133 		return -ENOMEM;
1134 
1135 	/*
1136 	 * Set 'VM_NORESERVE' if we should not account for the
1137 	 * memory use of this mapping.
1138 	 */
1139 	if ((flags & MAP_NORESERVE)) {
1140 		/* We honor MAP_NORESERVE if allowed to overcommit */
1141 		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1142 			vm_flags |= VM_NORESERVE;
1143 
1144 		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
1145 		if (file && is_file_hugepages(file))
1146 			vm_flags |= VM_NORESERVE;
1147 	}
1148 
1149 	/*
1150 	 * Private writable mapping: check memory availability
1151 	 */
1152 	if (accountable_mapping(file, vm_flags)) {
1153 		charged = len >> PAGE_SHIFT;
1154 		if (security_vm_enough_memory(charged))
1155 			return -ENOMEM;
1156 		vm_flags |= VM_ACCOUNT;
1157 	}
1158 
1159 	/*
1160 	 * Can we just expand an old mapping?
1161 	 */
1162 	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
1163 	if (vma)
1164 		goto out;
1165 
1166 	/*
1167 	 * Determine the object being mapped and call the appropriate
1168 	 * specific mapper. The address has already been validated, but
1169 	 * not unmapped; the old maps, however, have been removed from the list.
1170 	 */
1171 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1172 	if (!vma) {
1173 		error = -ENOMEM;
1174 		goto unacct_error;
1175 	}
1176 
1177 	vma->vm_mm = mm;
1178 	vma->vm_start = addr;
1179 	vma->vm_end = addr + len;
1180 	vma->vm_flags = vm_flags;
1181 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
1182 	vma->vm_pgoff = pgoff;
1183 
1184 	if (file) {
1185 		error = -EINVAL;
1186 		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1187 			goto free_vma;
1188 		if (vm_flags & VM_DENYWRITE) {
1189 			error = deny_write_access(file);
1190 			if (error)
1191 				goto free_vma;
1192 			correct_wcount = 1;
1193 		}
1194 		vma->vm_file = file;
1195 		get_file(file);
1196 		error = file->f_op->mmap(file, vma);
1197 		if (error)
1198 			goto unmap_and_free_vma;
1199 		if (vm_flags & VM_EXECUTABLE)
1200 			added_exe_file_vma(mm);
1201 	} else if (vm_flags & VM_SHARED) {
1202 		error = shmem_zero_setup(vma);
1203 		if (error)
1204 			goto free_vma;
1205 	}
1206 
1207 	/* Can addr have changed??
1208 	 *
1209 	 * Answer: Yes, several device drivers can do it in their
1210 	 *         f_op->mmap method. -DaveM
1211 	 */
1212 	addr = vma->vm_start;
1213 	pgoff = vma->vm_pgoff;
1214 	vm_flags = vma->vm_flags;
1215 
1216 	if (vma_wants_writenotify(vma))
1217 		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
1218 
1219 	vma_link(mm, vma, prev, rb_link, rb_parent);
1220 	file = vma->vm_file;
1221 
1222 	/* Once vma denies write, undo our temporary denial count */
1223 	if (correct_wcount)
1224 		atomic_inc(&inode->i_writecount);
1225 out:
1226 	perf_counter_mmap(vma);
1227 
1228 	mm->total_vm += len >> PAGE_SHIFT;
1229 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1230 	if (vm_flags & VM_LOCKED) {
1231 		/*
1232 		 * makes pages present; downgrades, drops, reacquires mmap_sem
1233 		 */
1234 		long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
1235 		if (nr_pages < 0)
1236 			return nr_pages;	/* vma gone! */
1237 		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
1238 	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
1239 		make_pages_present(addr, addr + len);
1240 	return addr;
1241 
1242 unmap_and_free_vma:
1243 	if (correct_wcount)
1244 		atomic_inc(&inode->i_writecount);
1245 	vma->vm_file = NULL;
1246 	fput(file);
1247 
1248 	/* Undo any partial mapping done by a device driver. */
1249 	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1250 	charged = 0;
1251 free_vma:
1252 	kmem_cache_free(vm_area_cachep, vma);
1253 unacct_error:
1254 	if (charged)
1255 		vm_unacct_memory(charged);
1256 	return error;
1257 }
1258 
1259 /* Get an address range which is currently unmapped.
1260  * For shmat() with addr=0.
1261  *
1262  * Ugly calling convention alert:
1263  * Return value with the low bits set means error value,
1264  *	i.e.
1265  *	if (ret & ~PAGE_MASK)
1266  *		error = ret;
1267  *
1268  * This function "knows" that -ENOMEM has the bits set.
1269  */
1270 #ifndef HAVE_ARCH_UNMAPPED_AREA
1271 unsigned long
1272 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1273 		unsigned long len, unsigned long pgoff, unsigned long flags)
1274 {
1275 	struct mm_struct *mm = current->mm;
1276 	struct vm_area_struct *vma;
1277 	unsigned long start_addr;
1278 
1279 	if (len > TASK_SIZE)
1280 		return -ENOMEM;
1281 
1282 	if (flags & MAP_FIXED)
1283 		return addr;
1284 
1285 	if (addr) {
1286 		addr = PAGE_ALIGN(addr);
1287 		vma = find_vma(mm, addr);
1288 		if (TASK_SIZE - len >= addr &&
1289 		    (!vma || addr + len <= vma->vm_start))
1290 			return addr;
1291 	}
1292 	if (len > mm->cached_hole_size) {
1293 	        start_addr = addr = mm->free_area_cache;
1294 	} else {
1295 	        start_addr = addr = TASK_UNMAPPED_BASE;
1296 	        mm->cached_hole_size = 0;
1297 	}
1298 
1299 full_search:
1300 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1301 		/* At this point:  (!vma || addr < vma->vm_end). */
1302 		if (TASK_SIZE - len < addr) {
1303 			/*
1304 			 * Start a new search - just in case we missed
1305 			 * some holes.
1306 			 */
1307 			if (start_addr != TASK_UNMAPPED_BASE) {
1308 				addr = TASK_UNMAPPED_BASE;
1309 			        start_addr = addr;
1310 				mm->cached_hole_size = 0;
1311 				goto full_search;
1312 			}
1313 			return -ENOMEM;
1314 		}
1315 		if (!vma || addr + len <= vma->vm_start) {
1316 			/*
1317 			 * Remember the place where we stopped the search:
1318 			 */
1319 			mm->free_area_cache = addr + len;
1320 			return addr;
1321 		}
1322 		if (addr + mm->cached_hole_size < vma->vm_start)
1323 		        mm->cached_hole_size = vma->vm_start - addr;
1324 		addr = vma->vm_end;
1325 	}
1326 }
1327 #endif
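/*
 * Illustrative example (editor's note, addresses are made up): on a
 * fresh mm with TASK_UNMAPPED_BASE at 0x40000000 and one existing 16MB
 * mapping starting there, a 1MB request walks from 0x40000000, skips
 * to that mapping's vm_end and returns 0x41000000, the first hole
 * large enough; free_area_cache is then advanced past the new area.
 */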
1328 
1329 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1330 {
1331 	/*
1332 	 * Is this a new hole at the lowest possible address?
1333 	 */
1334 	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
1335 		mm->free_area_cache = addr;
1336 		mm->cached_hole_size = ~0UL;
1337 	}
1338 }
1339 
1340 /*
1341  * This mmap-allocator allocates new areas top-down from below the
1342  * stack's low limit (the base):
1343  */
1344 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1345 unsigned long
1346 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1347 			  const unsigned long len, const unsigned long pgoff,
1348 			  const unsigned long flags)
1349 {
1350 	struct vm_area_struct *vma;
1351 	struct mm_struct *mm = current->mm;
1352 	unsigned long addr = addr0;
1353 
1354 	/* requested length too big for entire address space */
1355 	if (len > TASK_SIZE)
1356 		return -ENOMEM;
1357 
1358 	if (flags & MAP_FIXED)
1359 		return addr;
1360 
1361 	/* requesting a specific address */
1362 	if (addr) {
1363 		addr = PAGE_ALIGN(addr);
1364 		vma = find_vma(mm, addr);
1365 		if (TASK_SIZE - len >= addr &&
1366 				(!vma || addr + len <= vma->vm_start))
1367 			return addr;
1368 	}
1369 
1370 	/* check if free_area_cache is useful for us */
1371 	if (len <= mm->cached_hole_size) {
1372  	        mm->cached_hole_size = 0;
1373  		mm->free_area_cache = mm->mmap_base;
1374  	}
1375 
1376 	/* either no address requested or can't fit in requested address hole */
1377 	addr = mm->free_area_cache;
1378 
1379 	/* make sure it can fit in the remaining address space */
1380 	if (addr > len) {
1381 		vma = find_vma(mm, addr-len);
1382 		if (!vma || addr <= vma->vm_start)
1383 			/* remember the address as a hint for next time */
1384 			return (mm->free_area_cache = addr-len);
1385 	}
1386 
1387 	if (mm->mmap_base < len)
1388 		goto bottomup;
1389 
1390 	addr = mm->mmap_base-len;
1391 
1392 	do {
1393 		/*
1394 		 * Lookup failure means no vma is above this address,
1395 		 * else if new region fits below vma->vm_start,
1396 		 * return with success:
1397 		 */
1398 		vma = find_vma(mm, addr);
1399 		if (!vma || addr+len <= vma->vm_start)
1400 			/* remember the address as a hint for next time */
1401 			return (mm->free_area_cache = addr);
1402 
1403  		/* remember the largest hole we saw so far */
1404  		if (addr + mm->cached_hole_size < vma->vm_start)
1405  		        mm->cached_hole_size = vma->vm_start - addr;
1406 
1407 		/* try just below the current vma->vm_start */
1408 		addr = vma->vm_start-len;
1409 	} while (len < vma->vm_start);
1410 
1411 bottomup:
1412 	/*
1413 	 * A failed mmap() very likely causes application failure,
1414 	 * so fall back to the bottom-up function here. This scenario
1415 	 * can happen with large stack limits and large mmap()
1416 	 * allocations.
1417 	 */
1418 	mm->cached_hole_size = ~0UL;
1419   	mm->free_area_cache = TASK_UNMAPPED_BASE;
1420 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
1421 	/*
1422 	 * Restore the topdown base:
1423 	 */
1424 	mm->free_area_cache = mm->mmap_base;
1425 	mm->cached_hole_size = ~0UL;
1426 
1427 	return addr;
1428 }
1429 #endif
1430 
1431 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
1432 {
1433 	/*
1434 	 * Is this a new hole at the highest possible address?
1435 	 */
1436 	if (addr > mm->free_area_cache)
1437 		mm->free_area_cache = addr;
1438 
1439 	/* dont allow allocations above current base */
1440 	if (mm->free_area_cache > mm->mmap_base)
1441 		mm->free_area_cache = mm->mmap_base;
1442 }
1443 
1444 unsigned long
1445 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1446 		unsigned long pgoff, unsigned long flags)
1447 {
1448 	unsigned long (*get_area)(struct file *, unsigned long,
1449 				  unsigned long, unsigned long, unsigned long);
1450 
1451 	get_area = current->mm->get_unmapped_area;
1452 	if (file && file->f_op && file->f_op->get_unmapped_area)
1453 		get_area = file->f_op->get_unmapped_area;
1454 	addr = get_area(file, addr, len, pgoff, flags);
1455 	if (IS_ERR_VALUE(addr))
1456 		return addr;
1457 
1458 	if (addr > TASK_SIZE - len)
1459 		return -ENOMEM;
1460 	if (addr & ~PAGE_MASK)
1461 		return -EINVAL;
1462 
1463 	return arch_rebalance_pgtables(addr, len);
1464 }
1465 
1466 EXPORT_SYMBOL(get_unmapped_area);
1467 
1468 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1469 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1470 {
1471 	struct vm_area_struct *vma = NULL;
1472 
1473 	if (mm) {
1474 		/* Check the cache first. */
1475 		/* (Cache hit rate is typically around 35%.) */
1476 		vma = mm->mmap_cache;
1477 		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1478 			struct rb_node * rb_node;
1479 
1480 			rb_node = mm->mm_rb.rb_node;
1481 			vma = NULL;
1482 
1483 			while (rb_node) {
1484 				struct vm_area_struct * vma_tmp;
1485 
1486 				vma_tmp = rb_entry(rb_node,
1487 						struct vm_area_struct, vm_rb);
1488 
1489 				if (vma_tmp->vm_end > addr) {
1490 					vma = vma_tmp;
1491 					if (vma_tmp->vm_start <= addr)
1492 						break;
1493 					rb_node = rb_node->rb_left;
1494 				} else
1495 					rb_node = rb_node->rb_right;
1496 			}
1497 			if (vma)
1498 				mm->mmap_cache = vma;
1499 		}
1500 	}
1501 	return vma;
1502 }
1503 
1504 EXPORT_SYMBOL(find_vma);
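/*
 * Note (editor's summary): the vma returned by find_vma() is the one
 * with the lowest vm_end above addr; it need not actually contain
 * addr, so callers that require containment must also check
 * vma->vm_start <= addr.
 */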
1505 
1506 /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
1507 struct vm_area_struct *
1508 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1509 			struct vm_area_struct **pprev)
1510 {
1511 	struct vm_area_struct *vma = NULL, *prev = NULL;
1512 	struct rb_node *rb_node;
1513 	if (!mm)
1514 		goto out;
1515 
1516 	/* Guard against addr being lower than the first VMA */
1517 	vma = mm->mmap;
1518 
1519 	/* Go through the RB tree quickly. */
1520 	rb_node = mm->mm_rb.rb_node;
1521 
1522 	while (rb_node) {
1523 		struct vm_area_struct *vma_tmp;
1524 		vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1525 
1526 		if (addr < vma_tmp->vm_end) {
1527 			rb_node = rb_node->rb_left;
1528 		} else {
1529 			prev = vma_tmp;
1530 			if (!prev->vm_next || (addr < prev->vm_next->vm_end))
1531 				break;
1532 			rb_node = rb_node->rb_right;
1533 		}
1534 	}
1535 
1536 out:
1537 	*pprev = prev;
1538 	return prev ? prev->vm_next : vma;
1539 }
1540 
1541 /*
1542  * Verify that the stack growth is acceptable and
1543  * update accounting. This is shared with both the
1544  * grow-up and grow-down cases.
1545  */
1546 static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
1547 {
1548 	struct mm_struct *mm = vma->vm_mm;
1549 	struct rlimit *rlim = current->signal->rlim;
1550 	unsigned long new_start;
1551 
1552 	/* address space limit tests */
1553 	if (!may_expand_vm(mm, grow))
1554 		return -ENOMEM;
1555 
1556 	/* Stack limit test */
1557 	if (size > rlim[RLIMIT_STACK].rlim_cur)
1558 		return -ENOMEM;
1559 
1560 	/* mlock limit tests */
1561 	if (vma->vm_flags & VM_LOCKED) {
1562 		unsigned long locked;
1563 		unsigned long limit;
1564 		locked = mm->locked_vm + grow;
1565 		limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
1566 		if (locked > limit && !capable(CAP_IPC_LOCK))
1567 			return -ENOMEM;
1568 	}
1569 
1570 	/* Check to ensure the stack will not grow into a hugetlb-only region */
1571 	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1572 			vma->vm_end - size;
1573 	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1574 		return -EFAULT;
1575 
1576 	/*
1577 	 * Overcommit..  This must be the final test, as it will
1578 	 * update security statistics.
1579 	 */
1580 	if (security_vm_enough_memory_mm(mm, grow))
1581 		return -ENOMEM;
1582 
1583 	/* Ok, everything looks good - let it rip */
1584 	mm->total_vm += grow;
1585 	if (vma->vm_flags & VM_LOCKED)
1586 		mm->locked_vm += grow;
1587 	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
1588 	return 0;
1589 }
1590 
1591 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1592 /*
1593  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1594  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1595  */
1596 #ifndef CONFIG_IA64
1597 static
1598 #endif
1599 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1600 {
1601 	int error;
1602 
1603 	if (!(vma->vm_flags & VM_GROWSUP))
1604 		return -EFAULT;
1605 
1606 	/*
1607 	 * We must make sure the anon_vma is allocated
1608 	 * so that the anon_vma locking is not a noop.
1609 	 */
1610 	if (unlikely(anon_vma_prepare(vma)))
1611 		return -ENOMEM;
1612 	anon_vma_lock(vma);
1613 
1614 	/*
1615 	 * vma->vm_start/vm_end cannot change under us because the caller
1616 	 * is required to hold the mmap_sem in read mode.  We need the
1617 	 * anon_vma lock to serialize against concurrent expand_stacks.
1618 	 * Also guard against wrapping around to address 0.
1619 	 */
1620 	if (address < PAGE_ALIGN(address+4))
1621 		address = PAGE_ALIGN(address+4);
1622 	else {
1623 		anon_vma_unlock(vma);
1624 		return -ENOMEM;
1625 	}
1626 	error = 0;
1627 
1628 	/* Somebody else might have raced and expanded it already */
1629 	if (address > vma->vm_end) {
1630 		unsigned long size, grow;
1631 
1632 		size = address - vma->vm_start;
1633 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
1634 
1635 		error = acct_stack_growth(vma, size, grow);
1636 		if (!error)
1637 			vma->vm_end = address;
1638 	}
1639 	anon_vma_unlock(vma);
1640 	return error;
1641 }
1642 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
1643 
1644 /*
1645  * vma is the first one with address < vma->vm_start.  Have to extend vma.
1646  */
1647 static int expand_downwards(struct vm_area_struct *vma,
1648 				   unsigned long address)
1649 {
1650 	int error;
1651 
1652 	/*
1653 	 * We must make sure the anon_vma is allocated
1654 	 * so that the anon_vma locking is not a noop.
1655 	 */
1656 	if (unlikely(anon_vma_prepare(vma)))
1657 		return -ENOMEM;
1658 
1659 	address &= PAGE_MASK;
1660 	error = security_file_mmap(NULL, 0, 0, 0, address, 1);
1661 	if (error)
1662 		return error;
1663 
1664 	anon_vma_lock(vma);
1665 
1666 	/*
1667 	 * vma->vm_start/vm_end cannot change under us because the caller
1668 	 * is required to hold the mmap_sem in read mode.  We need the
1669 	 * anon_vma lock to serialize against concurrent expand_stacks.
1670 	 */
1671 
1672 	/* Somebody else might have raced and expanded it already */
1673 	if (address < vma->vm_start) {
1674 		unsigned long size, grow;
1675 
1676 		size = vma->vm_end - address;
1677 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
1678 
1679 		error = acct_stack_growth(vma, size, grow);
1680 		if (!error) {
1681 			vma->vm_start = address;
1682 			vma->vm_pgoff -= grow;
1683 		}
1684 	}
1685 	anon_vma_unlock(vma);
1686 	return error;
1687 }
1688 
1689 int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
1690 {
1691 	return expand_downwards(vma, address);
1692 }
1693 
1694 #ifdef CONFIG_STACK_GROWSUP
1695 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1696 {
1697 	return expand_upwards(vma, address);
1698 }
1699 
1700 struct vm_area_struct *
1701 find_extend_vma(struct mm_struct *mm, unsigned long addr)
1702 {
1703 	struct vm_area_struct *vma, *prev;
1704 
1705 	addr &= PAGE_MASK;
1706 	vma = find_vma_prev(mm, addr, &prev);
1707 	if (vma && (vma->vm_start <= addr))
1708 		return vma;
1709 	if (!prev || expand_stack(prev, addr))
1710 		return NULL;
1711 	if (prev->vm_flags & VM_LOCKED) {
1712 		if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
1713 			return NULL;	/* vma gone! */
1714 	}
1715 	return prev;
1716 }
1717 #else
1718 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1719 {
1720 	return expand_downwards(vma, address);
1721 }
1722 
1723 struct vm_area_struct *
1724 find_extend_vma(struct mm_struct * mm, unsigned long addr)
1725 {
1726 	struct vm_area_struct * vma;
1727 	unsigned long start;
1728 
1729 	addr &= PAGE_MASK;
1730 	vma = find_vma(mm,addr);
1731 	if (!vma)
1732 		return NULL;
1733 	if (vma->vm_start <= addr)
1734 		return vma;
1735 	if (!(vma->vm_flags & VM_GROWSDOWN))
1736 		return NULL;
1737 	start = vma->vm_start;
1738 	if (expand_stack(vma, addr))
1739 		return NULL;
1740 	if (vma->vm_flags & VM_LOCKED) {
1741 		if (mlock_vma_pages_range(vma, addr, start) < 0)
1742 			return NULL;	/* vma gone! */
1743 	}
1744 	return vma;
1745 }
1746 #endif
1747 
1748 /*
1749  * Ok - we have the memory areas we should free on the vma list,
1750  * so release them, and do the vma updates.
1751  *
1752  * Called with the mm semaphore held.
1753  */
1754 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
1755 {
1756 	/* Update high watermark before we lower total_vm */
1757 	update_hiwater_vm(mm);
1758 	do {
1759 		long nrpages = vma_pages(vma);
1760 
1761 		mm->total_vm -= nrpages;
1762 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
1763 		vma = remove_vma(vma);
1764 	} while (vma);
1765 	validate_mm(mm);
1766 }
1767 
1768 /*
1769  * Get rid of page table information in the indicated region.
1770  *
1771  * Called with the mm semaphore held.
1772  */
1773 static void unmap_region(struct mm_struct *mm,
1774 		struct vm_area_struct *vma, struct vm_area_struct *prev,
1775 		unsigned long start, unsigned long end)
1776 {
1777 	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
1778 	struct mmu_gather *tlb;
1779 	unsigned long nr_accounted = 0;
1780 
1781 	lru_add_drain();
1782 	tlb = tlb_gather_mmu(mm, 0);
1783 	update_hiwater_rss(mm);
1784 	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
1785 	vm_unacct_memory(nr_accounted);
1786 	free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
1787 				 next? next->vm_start: 0);
1788 	tlb_finish_mmu(tlb, start, end);
1789 }
1790 
1791 /*
1792  * Create a list of vma's touched by the unmap, removing them from the mm's
1793  * vma list as we go..
1794  */
1795 static void
1796 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1797 	struct vm_area_struct *prev, unsigned long end)
1798 {
1799 	struct vm_area_struct **insertion_point;
1800 	struct vm_area_struct *tail_vma = NULL;
1801 	unsigned long addr;
1802 
1803 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1804 	do {
1805 		rb_erase(&vma->vm_rb, &mm->mm_rb);
1806 		mm->map_count--;
1807 		tail_vma = vma;
1808 		vma = vma->vm_next;
1809 	} while (vma && vma->vm_start < end);
1810 	*insertion_point = vma;
1811 	tail_vma->vm_next = NULL;
1812 	if (mm->unmap_area == arch_unmap_area)
1813 		addr = prev ? prev->vm_end : mm->mmap_base;
1814 	else
1815 		addr = vma ?  vma->vm_start : mm->mmap_base;
1816 	mm->unmap_area(mm, addr);
1817 	mm->mmap_cache = NULL;		/* Kill the cache. */
1818 }
1819 
1820 /*
1821  * Split a vma into two pieces at address 'addr', a new vma is allocated
1822  * either for the first part or the tail.
1823  */
1824 int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1825 	      unsigned long addr, int new_below)
1826 {
1827 	struct mempolicy *pol;
1828 	struct vm_area_struct *new;
1829 
1830 	if (is_vm_hugetlb_page(vma) && (addr &
1831 					~(huge_page_mask(hstate_vma(vma)))))
1832 		return -EINVAL;
1833 
1834 	if (mm->map_count >= sysctl_max_map_count)
1835 		return -ENOMEM;
1836 
1837 	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1838 	if (!new)
1839 		return -ENOMEM;
1840 
1841 	/* most fields are the same, copy all, and then fixup */
1842 	*new = *vma;
1843 
1844 	if (new_below)
1845 		new->vm_end = addr;
1846 	else {
1847 		new->vm_start = addr;
1848 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
1849 	}
1850 
1851 	pol = mpol_dup(vma_policy(vma));
1852 	if (IS_ERR(pol)) {
1853 		kmem_cache_free(vm_area_cachep, new);
1854 		return PTR_ERR(pol);
1855 	}
1856 	vma_set_policy(new, pol);
1857 
1858 	if (new->vm_file) {
1859 		get_file(new->vm_file);
1860 		if (vma->vm_flags & VM_EXECUTABLE)
1861 			added_exe_file_vma(mm);
1862 	}
1863 
1864 	if (new->vm_ops && new->vm_ops->open)
1865 		new->vm_ops->open(new);
1866 
1867 	if (new_below)
1868 		vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
1869 			((addr - new->vm_start) >> PAGE_SHIFT), new);
1870 	else
1871 		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
1872 
1873 	return 0;
1874 }
1875 
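/*
 * Editor's illustrative sketch (not part of this file): carving the range
 * [start, end) out of a larger vma before changing only that range, in the
 * style of the mprotect/mlock paths.  Error handling is minimal and the
 * helper name is an assumption.
 */
static int isolate_vma_range(struct mm_struct *mm, struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	int error;

	if (start > vma->vm_start) {
		/* new vma becomes the part below start */
		error = split_vma(mm, vma, start, 1);
		if (error)
			return error;
	}
	if (end < vma->vm_end) {
		/* new vma becomes the part above end */
		error = split_vma(mm, vma, end, 0);
		if (error)
			return error;
	}
	/* vma now covers exactly [start, end) */
	return 0;
}
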
1876 /* Munmap is split into 2 main parts -- this part, which works out
1877  * what needs doing, and the helpers above, which do the actual
1878  * work.  This now handles partial unmappings.
1879  * Jeremy Fitzhardinge <jeremy@goop.org>
1880  */
1881 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1882 {
1883 	unsigned long end;
1884 	struct vm_area_struct *vma, *prev, *last;
1885 
1886 	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
1887 		return -EINVAL;
1888 
1889 	if ((len = PAGE_ALIGN(len)) == 0)
1890 		return -EINVAL;
1891 
1892 	/* Find the first overlapping VMA */
1893 	vma = find_vma_prev(mm, start, &prev);
1894 	if (!vma)
1895 		return 0;
1896 	/* we have  start < vma->vm_end  */
1897 
1898 	/* if it doesn't overlap, we have nothing.. */
1899 	end = start + len;
1900 	if (vma->vm_start >= end)
1901 		return 0;
1902 
1903 	/*
1904 	 * If we need to split any vma, do it now to save pain later.
1905 	 *
1906 	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
1907 	 * unmapped vm_area_struct will remain in use: so lower split_vma
1908 	 * places tmp vma above, and higher split_vma places tmp vma below.
1909 	 */
1910 	if (start > vma->vm_start) {
1911 		int error = split_vma(mm, vma, start, 0);
1912 		if (error)
1913 			return error;
1914 		prev = vma;
1915 	}
1916 
1917 	/* Does it split the last one? */
1918 	last = find_vma(mm, end);
1919 	if (last && end > last->vm_start) {
1920 		int error = split_vma(mm, last, end, 1);
1921 		if (error)
1922 			return error;
1923 	}
1924 	vma = prev? prev->vm_next: mm->mmap;
1925 
1926 	/*
1927 	 * unlock any mlock()ed ranges before detaching vmas
1928 	 */
1929 	if (mm->locked_vm) {
1930 		struct vm_area_struct *tmp = vma;
1931 		while (tmp && tmp->vm_start < end) {
1932 			if (tmp->vm_flags & VM_LOCKED) {
1933 				mm->locked_vm -= vma_pages(tmp);
1934 				munlock_vma_pages_all(tmp);
1935 			}
1936 			tmp = tmp->vm_next;
1937 		}
1938 	}
1939 
1940 	/*
1941 	 * Remove the vma's, and unmap the actual pages
1942 	 */
1943 	detach_vmas_to_be_unmapped(mm, vma, prev, end);
1944 	unmap_region(mm, vma, prev, start, end);
1945 
1946 	/* Fix up all other VM information */
1947 	remove_vma_list(mm, vma);
1948 
1949 	return 0;
1950 }
1951 
1952 EXPORT_SYMBOL(do_munmap);
1953 
1954 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1955 {
1956 	int ret;
1957 	struct mm_struct *mm = current->mm;
1958 
1959 	profile_munmap(addr);
1960 
1961 	down_write(&mm->mmap_sem);
1962 	ret = do_munmap(mm, addr, len);
1963 	up_write(&mm->mmap_sem);
1964 	return ret;
1965 }
1966 
1967 static inline void verify_mm_writelocked(struct mm_struct *mm)
1968 {
1969 #ifdef CONFIG_DEBUG_VM
1970 	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
1971 		WARN_ON(1);
1972 		up_read(&mm->mmap_sem);
1973 	}
1974 #endif
1975 }
1976 
1977 /*
1978  *  This is really a simplified "do_mmap".  It only handles
1979  *  anonymous maps.  Eventually we may be able to do some
1980  *  brk-specific accounting here.
1981  */
1982 unsigned long do_brk(unsigned long addr, unsigned long len)
1983 {
1984 	struct mm_struct * mm = current->mm;
1985 	struct vm_area_struct * vma, * prev;
1986 	unsigned long flags;
1987 	struct rb_node ** rb_link, * rb_parent;
1988 	pgoff_t pgoff = addr >> PAGE_SHIFT;
1989 	int error;
1990 
1991 	len = PAGE_ALIGN(len);
1992 	if (!len)
1993 		return addr;
1994 
1995 	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
1996 		return -EINVAL;
1997 
1998 	if (is_hugepage_only_range(mm, addr, len))
1999 		return -EINVAL;
2000 
2001 	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
2002 	if (error)
2003 		return error;
2004 
2005 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2006 
2007 	error = arch_mmap_check(addr, len, flags);
2008 	if (error)
2009 		return error;
2010 
2011 	/*
2012 	 * mlock MCL_FUTURE?
2013 	 */
2014 	if (mm->def_flags & VM_LOCKED) {
2015 		unsigned long locked, lock_limit;
2016 		locked = len >> PAGE_SHIFT;
2017 		locked += mm->locked_vm;
2018 		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2019 		lock_limit >>= PAGE_SHIFT;
2020 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2021 			return -EAGAIN;
2022 	}
2023 
2024 	/*
2025 	 * mm->mmap_sem is required to protect against another thread
2026 	 * changing the mappings in case we sleep.
2027 	 */
2028 	verify_mm_writelocked(mm);
2029 
2030 	/*
2031 	 * Clear old maps.  This also does some error checking for us.
2032 	 */
2033  munmap_back:
2034 	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2035 	if (vma && vma->vm_start < addr + len) {
2036 		if (do_munmap(mm, addr, len))
2037 			return -ENOMEM;
2038 		goto munmap_back;
2039 	}
2040 
2041 	/* Check against address space limits *after* clearing old maps... */
2042 	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
2043 		return -ENOMEM;
2044 
2045 	if (mm->map_count > sysctl_max_map_count)
2046 		return -ENOMEM;
2047 
2048 	if (security_vm_enough_memory(len >> PAGE_SHIFT))
2049 		return -ENOMEM;
2050 
2051 	/* Can we just expand an old private anonymous mapping? */
2052 	vma = vma_merge(mm, prev, addr, addr + len, flags,
2053 					NULL, NULL, pgoff, NULL);
2054 	if (vma)
2055 		goto out;
2056 
2057 	/*
2058 	 * create a vma struct for an anonymous mapping
2059 	 */
2060 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2061 	if (!vma) {
2062 		vm_unacct_memory(len >> PAGE_SHIFT);
2063 		return -ENOMEM;
2064 	}
2065 
2066 	vma->vm_mm = mm;
2067 	vma->vm_start = addr;
2068 	vma->vm_end = addr + len;
2069 	vma->vm_pgoff = pgoff;
2070 	vma->vm_flags = flags;
2071 	vma->vm_page_prot = vm_get_page_prot(flags);
2072 	vma_link(mm, vma, prev, rb_link, rb_parent);
2073 out:
2074 	mm->total_vm += len >> PAGE_SHIFT;
2075 	if (flags & VM_LOCKED) {
2076 		if (!mlock_vma_pages_range(vma, addr, addr + len))
2077 			mm->locked_vm += (len >> PAGE_SHIFT);
2078 	}
2079 	return addr;
2080 }
2081 
2082 EXPORT_SYMBOL(do_brk);
2083 
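/*
 * Editor's illustrative sketch (not part of this file): the usual calling
 * pattern for do_brk(), as in the ELF loader's bss setup -- the caller
 * takes mmap_sem for writing around the call.  The helper name is an
 * assumption; on success the return value equals addr.
 */
static unsigned long map_anon_zero_region(unsigned long addr, unsigned long len)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_brk(addr, len);
	up_write(&current->mm->mmap_sem);
	return ret;
}
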
2084 /* Release all mmaps. */
2085 void exit_mmap(struct mm_struct *mm)
2086 {
2087 	struct mmu_gather *tlb;
2088 	struct vm_area_struct *vma;
2089 	unsigned long nr_accounted = 0;
2090 	unsigned long end;
2091 
2092 	/* mm's last user has gone, and it's about to be pulled down */
2093 	mmu_notifier_release(mm);
2094 
2095 	if (mm->locked_vm) {
2096 		vma = mm->mmap;
2097 		while (vma) {
2098 			if (vma->vm_flags & VM_LOCKED)
2099 				munlock_vma_pages_all(vma);
2100 			vma = vma->vm_next;
2101 		}
2102 	}
2103 
2104 	arch_exit_mmap(mm);
2105 
2106 	vma = mm->mmap;
2107 	if (!vma)	/* Can happen if dup_mmap() received an OOM */
2108 		return;
2109 
2110 	lru_add_drain();
2111 	flush_cache_mm(mm);
2112 	tlb = tlb_gather_mmu(mm, 1);
2113 	/* update_hiwater_rss(mm) here? but nobody should be looking */
2114 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
2115 	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
2116 	vm_unacct_memory(nr_accounted);
2117 	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
2118 	tlb_finish_mmu(tlb, 0, end);
2119 
2120 	/*
2121 	 * Walk the list again, actually closing and freeing it,
2122 	 * with preemption enabled, without holding any MM locks.
2123 	 */
2124 	while (vma)
2125 		vma = remove_vma(vma);
2126 
2127 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2128 }
2129 
2130 /* Insert vm structure into process list sorted by address
2131  * and into the inode's i_mmap tree.  If vm_file is non-NULL
2132  * then i_mmap_lock is taken here.
2133  */
2134 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
2135 {
2136 	struct vm_area_struct * __vma, * prev;
2137 	struct rb_node ** rb_link, * rb_parent;
2138 
2139 	/*
2140 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
2141 	 * until its first write fault, when page's anon_vma and index
2142 	 * are set.  But now set the vm_pgoff it will almost certainly
2143 	 * end up with (unless mremap moves it elsewhere before that
2144 	 * first write fault), so /proc/pid/maps tells a consistent story.
2145 	 *
2146 	 * By setting it to reflect the virtual start address of the
2147 	 * vma, merges and splits can happen in a seamless way, just
2148 	 * using the existing file pgoff checks and manipulations.
2149 	 * Similarly in do_mmap_pgoff and in do_brk.
2150 	 */
2151 	if (!vma->vm_file) {
2152 		BUG_ON(vma->anon_vma);
2153 		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2154 	}
2155 	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
2156 	if (__vma && __vma->vm_start < vma->vm_end)
2157 		return -ENOMEM;
2158 	if ((vma->vm_flags & VM_ACCOUNT) &&
2159 	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
2160 		return -ENOMEM;
2161 	vma_link(mm, vma, prev, rb_link, rb_parent);
2162 	return 0;
2163 }
2164 
2165 /*
2166  * Copy the vma structure to a new location in the same mm,
2167  * prior to moving page table entries, to effect an mremap move.
2168  */
2169 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2170 	unsigned long addr, unsigned long len, pgoff_t pgoff)
2171 {
2172 	struct vm_area_struct *vma = *vmap;
2173 	unsigned long vma_start = vma->vm_start;
2174 	struct mm_struct *mm = vma->vm_mm;
2175 	struct vm_area_struct *new_vma, *prev;
2176 	struct rb_node **rb_link, *rb_parent;
2177 	struct mempolicy *pol;
2178 
2179 	/*
2180 	 * If anonymous vma has not yet been faulted, update new pgoff
2181 	 * to match new location, to increase its chance of merging.
2182 	 */
2183 	if (!vma->vm_file && !vma->anon_vma)
2184 		pgoff = addr >> PAGE_SHIFT;
2185 
2186 	find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2187 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2188 			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2189 	if (new_vma) {
2190 		/*
2191 		 * Source vma may have been merged into new_vma
2192 		 */
2193 		if (vma_start >= new_vma->vm_start &&
2194 		    vma_start < new_vma->vm_end)
2195 			*vmap = new_vma;
2196 	} else {
2197 		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2198 		if (new_vma) {
2199 			*new_vma = *vma;
2200 			pol = mpol_dup(vma_policy(vma));
2201 			if (IS_ERR(pol)) {
2202 				kmem_cache_free(vm_area_cachep, new_vma);
2203 				return NULL;
2204 			}
2205 			vma_set_policy(new_vma, pol);
2206 			new_vma->vm_start = addr;
2207 			new_vma->vm_end = addr + len;
2208 			new_vma->vm_pgoff = pgoff;
2209 			if (new_vma->vm_file) {
2210 				get_file(new_vma->vm_file);
2211 				if (vma->vm_flags & VM_EXECUTABLE)
2212 					added_exe_file_vma(mm);
2213 			}
2214 			if (new_vma->vm_ops && new_vma->vm_ops->open)
2215 				new_vma->vm_ops->open(new_vma);
2216 			vma_link(mm, new_vma, prev, rb_link, rb_parent);
2217 		}
2218 	}
2219 	return new_vma;
2220 }
2221 
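/*
 * Editor's illustrative sketch (not part of this file): a condensed,
 * error-handling-free paraphrase of how mremap's move_vma() uses
 * copy_vma() -- duplicate the vma at the new address, move the page
 * tables across, then unmap the old range.  Treat the exact
 * move_page_tables() usage here as an assumption.
 */
static unsigned long sketch_move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr,
		unsigned long len)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff = vma->vm_pgoff +
			((old_addr - vma->vm_start) >> PAGE_SHIFT);
	struct vm_area_struct *new_vma;

	new_vma = copy_vma(&vma, new_addr, len, pgoff);
	if (!new_vma)
		return -ENOMEM;

	move_page_tables(vma, old_addr, new_vma, new_addr, len);
	do_munmap(mm, old_addr, len);
	return new_addr;
}
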
2222 /*
2223  * Return true if the calling process may expand its vm space by the passed
2224  * number of pages
2225  */
2226 int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2227 {
2228 	unsigned long cur = mm->total_vm;	/* pages */
2229 	unsigned long lim;
2230 
2231 	lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
2232 
2233 	if (cur + npages > lim)
2234 		return 0;
2235 	return 1;
2236 }
2237 
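/*
 * Editor's sketch (assumption, not from this file): the typical guard a
 * caller applies before charging new pages against mm->total_vm.
 */
static int charge_vm_pages(struct mm_struct *mm, unsigned long npages)
{
	if (!may_expand_vm(mm, npages))
		return -ENOMEM;
	mm->total_vm += npages;
	return 0;
}
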
2238 
2239 static int special_mapping_fault(struct vm_area_struct *vma,
2240 				struct vm_fault *vmf)
2241 {
2242 	pgoff_t pgoff;
2243 	struct page **pages;
2244 
2245 	/*
2246 	 * special mappings have no vm_file, and in that case, the mm
2247 	 * uses vm_pgoff internally. So we have to subtract it from here.
2248 	 * We are allowed to do this because we are the mm; do not copy
2249 	 * this code into drivers!
2250 	 */
2251 	pgoff = vmf->pgoff - vma->vm_pgoff;
2252 
2253 	for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2254 		pgoff--;
2255 
2256 	if (*pages) {
2257 		struct page *page = *pages;
2258 		get_page(page);
2259 		vmf->page = page;
2260 		return 0;
2261 	}
2262 
2263 	return VM_FAULT_SIGBUS;
2264 }
2265 
2266 /*
2267  * Having a close hook prevents vma merging regardless of flags.
2268  */
2269 static void special_mapping_close(struct vm_area_struct *vma)
2270 {
2271 }
2272 
2273 static struct vm_operations_struct special_mapping_vmops = {
2274 	.close = special_mapping_close,
2275 	.fault = special_mapping_fault,
2276 };
2277 
2278 /*
2279  * Called with mm->mmap_sem held for writing.
2280  * Insert a new vma covering the given region, with the given flags.
2281  * Its pages are supplied by the given array of struct page *.
2282  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2283  * The region past the last page supplied will always produce SIGBUS.
2284  * The array pointer and the pages it points to are assumed to stay alive
2285  * for as long as this mapping might exist.
2286  */
2287 int install_special_mapping(struct mm_struct *mm,
2288 			    unsigned long addr, unsigned long len,
2289 			    unsigned long vm_flags, struct page **pages)
2290 {
2291 	struct vm_area_struct *vma;
2292 
2293 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2294 	if (unlikely(vma == NULL))
2295 		return -ENOMEM;
2296 
2297 	vma->vm_mm = mm;
2298 	vma->vm_start = addr;
2299 	vma->vm_end = addr + len;
2300 
2301 	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
2302 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2303 
2304 	vma->vm_ops = &special_mapping_vmops;
2305 	vma->vm_private_data = pages;
2306 
2307 	if (unlikely(insert_vm_struct(mm, vma))) {
2308 		kmem_cache_free(vm_area_cachep, vma);
2309 		return -ENOMEM;
2310 	}
2311 
2312 	mm->total_vm += len >> PAGE_SHIFT;
2313 
2314 	perf_counter_mmap(vma);
2315 
2316 	return 0;
2317 }
2318 
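/*
 * Editor's illustrative sketch (not part of this file): installing a
 * one-page special mapping, vDSO-style.  The NULL-terminated page array
 * must outlive the mapping; all names below are assumptions.
 */
static struct page *example_special_pages[2];	/* [0] = page, [1] = NULL */

static int map_example_special_page(struct mm_struct *mm, unsigned long addr)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYEXEC,
				      example_special_pages);
	up_write(&mm->mmap_sem);
	return ret;
}
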
2319 static DEFINE_MUTEX(mm_all_locks_mutex);
2320 
2321 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2322 {
2323 	if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
2324 		/*
2325 		 * The LSB of head.next can't change from under us
2326 		 * because we hold the mm_all_locks_mutex.
2327 		 */
2328 		spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
2329 		/*
2330 		 * We can safely modify head.next after taking the
2331 		 * anon_vma->lock. If some other vma in this mm shares
2332 		 * the same anon_vma we won't take it again.
2333 		 *
2334 		 * No need of atomic instructions here, head.next
2335 		 * can't change from under us thanks to the
2336 		 * anon_vma->lock.
2337 		 */
2338 		if (__test_and_set_bit(0, (unsigned long *)
2339 				       &anon_vma->head.next))
2340 			BUG();
2341 	}
2342 }
2343 
2344 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2345 {
2346 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2347 		/*
2348 		 * AS_MM_ALL_LOCKS can't change from under us because
2349 		 * we hold the mm_all_locks_mutex.
2350 		 *
2351 		 * Operations on ->flags have to be atomic because
2352 		 * even if AS_MM_ALL_LOCKS is stable thanks to the
2353 		 * mm_all_locks_mutex, there may be other cpus
2354 		 * changing other bitflags in parallel to us.
2355 		 */
2356 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2357 			BUG();
2358 		spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
2359 	}
2360 }
2361 
2362 /*
2363  * This operation locks against the VM for all pte/vma/mm related
2364  * operations that could ever happen on a certain mm. This includes
2365  * vmtruncate, try_to_unmap, and all page faults.
2366  *
2367  * The caller must take the mmap_sem in write mode before calling
2368  * mm_take_all_locks(). The caller isn't allowed to release the
2369  * mmap_sem until mm_drop_all_locks() returns.
2370  *
2371  * mmap_sem in write mode is required in order to block all operations
2372  * that could modify pagetables and free pages without need of
2373  * altering the vma layout (for example populate_range() with
2374  * nonlinear vmas). It's also needed in write mode to prevent new
2375  * anon_vmas from being associated with existing vmas.
2376  *
2377  * A single task can't take more than one mm_take_all_locks() in a row
2378  * or it would deadlock.
2379  *
2380  * The LSB in anon_vma->head.next and the AS_MM_ALL_LOCKS bitflag in
2381  * mapping->flags avoid taking the same lock twice, if more than one
2382  * vma in this mm is backed by the same anon_vma or address_space.
2383  *
2384  * We can take all the locks in random order because the VM code
2385  * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
2386  * takes more than one of them in a row. Secondly we're protected
2387  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
2388  *
2389  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2390  * that may have to take thousands of locks.
2391  *
2392  * mm_take_all_locks() can fail if it's interrupted by signals.
2393  */
2394 int mm_take_all_locks(struct mm_struct *mm)
2395 {
2396 	struct vm_area_struct *vma;
2397 	int ret = -EINTR;
2398 
2399 	BUG_ON(down_read_trylock(&mm->mmap_sem));
2400 
2401 	mutex_lock(&mm_all_locks_mutex);
2402 
2403 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
2404 		if (signal_pending(current))
2405 			goto out_unlock;
2406 		if (vma->vm_file && vma->vm_file->f_mapping)
2407 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
2408 	}
2409 
2410 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
2411 		if (signal_pending(current))
2412 			goto out_unlock;
2413 		if (vma->anon_vma)
2414 			vm_lock_anon_vma(mm, vma->anon_vma);
2415 	}
2416 
2417 	ret = 0;
2418 
2419 out_unlock:
2420 	if (ret)
2421 		mm_drop_all_locks(mm);
2422 
2423 	return ret;
2424 }
2425 
2426 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2427 {
2428 	if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
2429 		/*
2430 		 * The LSB of head.next can't change to 0 from under
2431 		 * us because we hold the mm_all_locks_mutex.
2432 		 *
2433 		 * We must however clear the bitflag before unlocking
2434 		 * the vma so the users using the anon_vma->head will
2435 		 * never see our bitflag.
2436 		 *
2437 		 * No need of atomic instructions here, head.next
2438 		 * can't change from under us until we release the
2439 		 * anon_vma->lock.
2440 		 */
2441 		if (!__test_and_clear_bit(0, (unsigned long *)
2442 					  &anon_vma->head.next))
2443 			BUG();
2444 		spin_unlock(&anon_vma->lock);
2445 	}
2446 }
2447 
2448 static void vm_unlock_mapping(struct address_space *mapping)
2449 {
2450 	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2451 		/*
2452 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
2453 		 * because we hold the mm_all_locks_mutex.
2454 		 */
2455 		spin_unlock(&mapping->i_mmap_lock);
2456 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2457 					&mapping->flags))
2458 			BUG();
2459 	}
2460 }
2461 
2462 /*
2463  * The mmap_sem cannot be released by the caller until
2464  * mm_drop_all_locks() returns.
2465  */
2466 void mm_drop_all_locks(struct mm_struct *mm)
2467 {
2468 	struct vm_area_struct *vma;
2469 
2470 	BUG_ON(down_read_trylock(&mm->mmap_sem));
2471 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2472 
2473 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
2474 		if (vma->anon_vma)
2475 			vm_unlock_anon_vma(vma->anon_vma);
2476 		if (vma->vm_file && vma->vm_file->f_mapping)
2477 			vm_unlock_mapping(vma->vm_file->f_mapping);
2478 	}
2479 
2480 	mutex_unlock(&mm_all_locks_mutex);
2481 }
2482 
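/*
 * Editor's illustrative sketch (not part of this file): the canonical
 * pairing of mm_take_all_locks()/mm_drop_all_locks() under a write-held
 * mmap_sem, in the style of mmu notifier registration.  The work in the
 * middle is a placeholder.
 */
static int do_work_with_all_mm_locks(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);	/* -EINTR if interrupted by a signal */
	if (ret)
		goto out;

	/* ... mm-wide update that must exclude faults, rmap, truncate ... */

	mm_drop_all_locks(mm);
out:
	up_write(&mm->mmap_sem);
	return ret;
}
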
2483 /*
2484  * initialise the percpu counter used for committed-VM accounting
2485  */
2486 void __init mmap_init(void)
2487 {
2488 	int ret;
2489 
2490 	ret = percpu_counter_init(&vm_committed_as, 0);
2491 	VM_BUG_ON(ret);
2492 }
2493