xref: /linux/mm/memory.c (revision 60b2737de1b1ddfdb90f3ba622634eb49d6f3603)
1 /*
2  *  linux/mm/memory.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6 
7 /*
8  * demand-loading started 01.12.91 - seems it is high on the list of
9  * things wanted, and it should be easy to implement. - Linus
10  */
11 
12 /*
13  * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
14  * pages started 02.12.91, seems to work. - Linus.
15  *
16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17  * would have taken more than the 6M I have free, but it worked well as
18  * far as I could see.
19  *
20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21  */
22 
23 /*
24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
25  * thought has to go into this. Oh, well..
26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27  *		Found it. Everything seems to work now.
28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
29  */
30 
31 /*
32  * 05.04.94  -  Multi-page memory management added for v1.1.
33  * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
34  *
35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36  *		(Gerhard.Wichert@pdb.siemens.de)
37  *
38  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
39  */
40 
41 #include <linux/kernel_stat.h>
42 #include <linux/mm.h>
43 #include <linux/hugetlb.h>
44 #include <linux/mman.h>
45 #include <linux/swap.h>
46 #include <linux/highmem.h>
47 #include <linux/pagemap.h>
48 #include <linux/rmap.h>
49 #include <linux/module.h>
50 #include <linux/init.h>
51 
52 #include <asm/pgalloc.h>
53 #include <asm/uaccess.h>
54 #include <asm/tlb.h>
55 #include <asm/tlbflush.h>
56 #include <asm/pgtable.h>
57 
58 #include <linux/swapops.h>
59 #include <linux/elf.h>
60 
61 #ifndef CONFIG_DISCONTIGMEM
62 /* use the per-pgdat data instead for discontigmem - mbligh */
63 unsigned long max_mapnr;
64 struct page *mem_map;
65 
66 EXPORT_SYMBOL(max_mapnr);
67 EXPORT_SYMBOL(mem_map);
68 #endif
69 
70 unsigned long num_physpages;
71 /*
72  * A number of key systems in x86 including ioremap() rely on the assumption
73  * that high_memory defines the upper bound on direct map memory, then end
74  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
75  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
76  * and ZONE_HIGHMEM.
77  */
78 void * high_memory;
79 unsigned long vmalloc_earlyreserve;
80 
81 EXPORT_SYMBOL(num_physpages);
82 EXPORT_SYMBOL(high_memory);
83 EXPORT_SYMBOL(vmalloc_earlyreserve);
84 
85 /*
86  * If a p?d_bad entry is found while walking page tables, report
87  * the error, before resetting entry to p?d_none.  Usually (but
88  * very seldom) called out from the p?d_none_or_clear_bad macros.
89  */
90 
91 void pgd_clear_bad(pgd_t *pgd)
92 {
93 	pgd_ERROR(*pgd);
94 	pgd_clear(pgd);
95 }
96 
97 void pud_clear_bad(pud_t *pud)
98 {
99 	pud_ERROR(*pud);
100 	pud_clear(pud);
101 }
102 
103 void pmd_clear_bad(pmd_t *pmd)
104 {
105 	pmd_ERROR(*pmd);
106 	pmd_clear(pmd);
107 }
108 
109 /*
110  * Note: this doesn't free the actual pages themselves. That
111  * has been handled earlier when unmapping all the memory regions.
112  */
113 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
114 {
115 	struct page *page = pmd_page(*pmd);
116 	pmd_clear(pmd);
117 	pte_free_tlb(tlb, page);
118 	dec_page_state(nr_page_table_pages);
119 	tlb->mm->nr_ptes--;
120 }
121 
122 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
123 				unsigned long addr, unsigned long end,
124 				unsigned long floor, unsigned long ceiling)
125 {
126 	pmd_t *pmd;
127 	unsigned long next;
128 	unsigned long start;
129 
130 	start = addr;
131 	pmd = pmd_offset(pud, addr);
132 	do {
133 		next = pmd_addr_end(addr, end);
134 		if (pmd_none_or_clear_bad(pmd))
135 			continue;
136 		free_pte_range(tlb, pmd);
137 	} while (pmd++, addr = next, addr != end);
138 
139 	start &= PUD_MASK;
140 	if (start < floor)
141 		return;
142 	if (ceiling) {
143 		ceiling &= PUD_MASK;
144 		if (!ceiling)
145 			return;
146 	}
147 	if (end - 1 > ceiling - 1)
148 		return;
149 
150 	pmd = pmd_offset(pud, start);
151 	pud_clear(pud);
152 	pmd_free_tlb(tlb, pmd);
153 }
154 
155 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
156 				unsigned long addr, unsigned long end,
157 				unsigned long floor, unsigned long ceiling)
158 {
159 	pud_t *pud;
160 	unsigned long next;
161 	unsigned long start;
162 
163 	start = addr;
164 	pud = pud_offset(pgd, addr);
165 	do {
166 		next = pud_addr_end(addr, end);
167 		if (pud_none_or_clear_bad(pud))
168 			continue;
169 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
170 	} while (pud++, addr = next, addr != end);
171 
172 	start &= PGDIR_MASK;
173 	if (start < floor)
174 		return;
175 	if (ceiling) {
176 		ceiling &= PGDIR_MASK;
177 		if (!ceiling)
178 			return;
179 	}
180 	if (end - 1 > ceiling - 1)
181 		return;
182 
183 	pud = pud_offset(pgd, start);
184 	pgd_clear(pgd);
185 	pud_free_tlb(tlb, pud);
186 }
187 
188 /*
189  * This function frees user-level page tables of a process.
190  *
191  * Must be called with pagetable lock held.
192  */
193 void free_pgd_range(struct mmu_gather **tlb,
194 			unsigned long addr, unsigned long end,
195 			unsigned long floor, unsigned long ceiling)
196 {
197 	pgd_t *pgd;
198 	unsigned long next;
199 	unsigned long start;
200 
201 	/*
202 	 * The next few lines have given us lots of grief...
203 	 *
204 	 * Why are we testing PMD* at this top level?  Because often
205 	 * there will be no work to do at all, and we'd prefer not to
206 	 * go all the way down to the bottom just to discover that.
207 	 *
208 	 * Why all these "- 1"s?  Because 0 represents both the bottom
209 	 * of the address space and the top of it (using -1 for the
210 	 * top wouldn't help much: the masks would do the wrong thing).
211 	 * The rule is that addr 0 and floor 0 refer to the bottom of
212 	 * the address space, but end 0 and ceiling 0 refer to the top
213 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
214 	 * that end 0 case should be mythical).
215 	 *
216 	 * Wherever addr is brought up or ceiling brought down, we must
217 	 * be careful to reject "the opposite 0" before it confuses the
218 	 * subsequent tests.  But what about where end is brought down
219 	 * by PMD_SIZE below? no, end can't go down to 0 there.
220 	 *
221 	 * Whereas we round start (addr) and ceiling down, by different
222 	 * masks at different levels, in order to test whether a table
223 	 * now has no other vmas using it, so can be freed, we don't
224 	 * bother to round floor or end up - the tests don't need that.
225 	 */
226 
227 	addr &= PMD_MASK;
228 	if (addr < floor) {
229 		addr += PMD_SIZE;
230 		if (!addr)
231 			return;
232 	}
233 	if (ceiling) {
234 		ceiling &= PMD_MASK;
235 		if (!ceiling)
236 			return;
237 	}
238 	if (end - 1 > ceiling - 1)
239 		end -= PMD_SIZE;
240 	if (addr > end - 1)
241 		return;
242 
243 	start = addr;
244 	pgd = pgd_offset((*tlb)->mm, addr);
245 	do {
246 		next = pgd_addr_end(addr, end);
247 		if (pgd_none_or_clear_bad(pgd))
248 			continue;
249 		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
250 	} while (pgd++, addr = next, addr != end);
251 
252 	if (!tlb_is_full_mm(*tlb))
253 		flush_tlb_pgtables((*tlb)->mm, start, end);
254 }
255 
256 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
257 		unsigned long floor, unsigned long ceiling)
258 {
259 	while (vma) {
260 		struct vm_area_struct *next = vma->vm_next;
261 		unsigned long addr = vma->vm_start;
262 
263 		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
264 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
265 				floor, next? next->vm_start: ceiling);
266 		} else {
267 			/*
268 			 * Optimization: gather nearby vmas into one call down
269 			 */
270 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
271 			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
272 							HPAGE_SIZE)) {
273 				vma = next;
274 				next = vma->vm_next;
275 			}
276 			free_pgd_range(tlb, addr, vma->vm_end,
277 				floor, next? next->vm_start: ceiling);
278 		}
279 		vma = next;
280 	}
281 }
282 
283 pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
284 				unsigned long address)
285 {
286 	if (!pmd_present(*pmd)) {
287 		struct page *new;
288 
289 		spin_unlock(&mm->page_table_lock);
290 		new = pte_alloc_one(mm, address);
291 		spin_lock(&mm->page_table_lock);
292 		if (!new)
293 			return NULL;
294 		/*
295 		 * Because we dropped the lock, we should re-check the
296 		 * entry, as somebody else could have populated it..
297 		 */
298 		if (pmd_present(*pmd)) {
299 			pte_free(new);
300 			goto out;
301 		}
302 		mm->nr_ptes++;
303 		inc_page_state(nr_page_table_pages);
304 		pmd_populate(mm, pmd, new);
305 	}
306 out:
307 	return pte_offset_map(pmd, address);
308 }
309 
310 pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
311 {
312 	if (!pmd_present(*pmd)) {
313 		pte_t *new;
314 
315 		spin_unlock(&mm->page_table_lock);
316 		new = pte_alloc_one_kernel(mm, address);
317 		spin_lock(&mm->page_table_lock);
318 		if (!new)
319 			return NULL;
320 
321 		/*
322 		 * Because we dropped the lock, we should re-check the
323 		 * entry, as somebody else could have populated it..
324 		 */
325 		if (pmd_present(*pmd)) {
326 			pte_free_kernel(new);
327 			goto out;
328 		}
329 		pmd_populate_kernel(mm, pmd, new);
330 	}
331 out:
332 	return pte_offset_kernel(pmd, address);
333 }
334 
335 /*
336  * copy one vm_area from one task to the other. Assumes the page tables
337  * already present in the new task to be cleared in the whole range
338  * covered by this vma.
339  *
340  * dst->page_table_lock is held on entry and exit,
341  * but may be dropped within p[mg]d_alloc() and pte_alloc_map().
342  */
343 
344 static inline void
345 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
346 		pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags,
347 		unsigned long addr)
348 {
349 	pte_t pte = *src_pte;
350 	struct page *page;
351 	unsigned long pfn;
352 
353 	/* pte contains position in swap or file, so copy. */
354 	if (unlikely(!pte_present(pte))) {
355 		if (!pte_file(pte)) {
356 			swap_duplicate(pte_to_swp_entry(pte));
357 			/* make sure dst_mm is on swapoff's mmlist. */
358 			if (unlikely(list_empty(&dst_mm->mmlist))) {
359 				spin_lock(&mmlist_lock);
360 				list_add(&dst_mm->mmlist, &src_mm->mmlist);
361 				spin_unlock(&mmlist_lock);
362 			}
363 		}
364 		set_pte_at(dst_mm, addr, dst_pte, pte);
365 		return;
366 	}
367 
368 	pfn = pte_pfn(pte);
369 	/* the pte points outside of valid memory, the
370 	 * mapping is assumed to be good, meaningful
371 	 * and not mapped via rmap - duplicate the
372 	 * mapping as is.
373 	 */
374 	page = NULL;
375 	if (pfn_valid(pfn))
376 		page = pfn_to_page(pfn);
377 
378 	if (!page || PageReserved(page)) {
379 		set_pte_at(dst_mm, addr, dst_pte, pte);
380 		return;
381 	}
382 
383 	/*
384 	 * If it's a COW mapping, write protect it both
385 	 * in the parent and the child
386 	 */
387 	if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
388 		ptep_set_wrprotect(src_mm, addr, src_pte);
389 		pte = *src_pte;
390 	}
391 
392 	/*
393 	 * If it's a shared mapping, mark it clean in
394 	 * the child
395 	 */
396 	if (vm_flags & VM_SHARED)
397 		pte = pte_mkclean(pte);
398 	pte = pte_mkold(pte);
399 	get_page(page);
400 	inc_mm_counter(dst_mm, rss);
401 	if (PageAnon(page))
402 		inc_mm_counter(dst_mm, anon_rss);
403 	set_pte_at(dst_mm, addr, dst_pte, pte);
404 	page_dup_rmap(page);
405 }
406 
407 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
408 		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
409 		unsigned long addr, unsigned long end)
410 {
411 	pte_t *src_pte, *dst_pte;
412 	unsigned long vm_flags = vma->vm_flags;
413 	int progress;
414 
415 again:
416 	dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
417 	if (!dst_pte)
418 		return -ENOMEM;
419 	src_pte = pte_offset_map_nested(src_pmd, addr);
420 
421 	progress = 0;
422 	spin_lock(&src_mm->page_table_lock);
423 	do {
424 		/*
425 		 * We are holding two locks at this point - either of them
426 		 * could generate latencies in another task on another CPU.
427 		 */
428 		if (progress >= 32 && (need_resched() ||
429 		    need_lockbreak(&src_mm->page_table_lock) ||
430 		    need_lockbreak(&dst_mm->page_table_lock)))
431 			break;
432 		if (pte_none(*src_pte)) {
433 			progress++;
434 			continue;
435 		}
436 		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vm_flags, addr);
437 		progress += 8;
438 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
439 	spin_unlock(&src_mm->page_table_lock);
440 
441 	pte_unmap_nested(src_pte - 1);
442 	pte_unmap(dst_pte - 1);
443 	cond_resched_lock(&dst_mm->page_table_lock);
444 	if (addr != end)
445 		goto again;
446 	return 0;
447 }
448 
449 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
450 		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
451 		unsigned long addr, unsigned long end)
452 {
453 	pmd_t *src_pmd, *dst_pmd;
454 	unsigned long next;
455 
456 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
457 	if (!dst_pmd)
458 		return -ENOMEM;
459 	src_pmd = pmd_offset(src_pud, addr);
460 	do {
461 		next = pmd_addr_end(addr, end);
462 		if (pmd_none_or_clear_bad(src_pmd))
463 			continue;
464 		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
465 						vma, addr, next))
466 			return -ENOMEM;
467 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
468 	return 0;
469 }
470 
471 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
472 		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
473 		unsigned long addr, unsigned long end)
474 {
475 	pud_t *src_pud, *dst_pud;
476 	unsigned long next;
477 
478 	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
479 	if (!dst_pud)
480 		return -ENOMEM;
481 	src_pud = pud_offset(src_pgd, addr);
482 	do {
483 		next = pud_addr_end(addr, end);
484 		if (pud_none_or_clear_bad(src_pud))
485 			continue;
486 		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
487 						vma, addr, next))
488 			return -ENOMEM;
489 	} while (dst_pud++, src_pud++, addr = next, addr != end);
490 	return 0;
491 }
492 
493 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
494 		struct vm_area_struct *vma)
495 {
496 	pgd_t *src_pgd, *dst_pgd;
497 	unsigned long next;
498 	unsigned long addr = vma->vm_start;
499 	unsigned long end = vma->vm_end;
500 
501 	if (is_vm_hugetlb_page(vma))
502 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
503 
504 	dst_pgd = pgd_offset(dst_mm, addr);
505 	src_pgd = pgd_offset(src_mm, addr);
506 	do {
507 		next = pgd_addr_end(addr, end);
508 		if (pgd_none_or_clear_bad(src_pgd))
509 			continue;
510 		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
511 						vma, addr, next))
512 			return -ENOMEM;
513 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
514 	return 0;
515 }
516 
517 static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
518 				unsigned long addr, unsigned long end,
519 				struct zap_details *details)
520 {
521 	pte_t *pte;
522 
523 	pte = pte_offset_map(pmd, addr);
524 	do {
525 		pte_t ptent = *pte;
526 		if (pte_none(ptent))
527 			continue;
528 		if (pte_present(ptent)) {
529 			struct page *page = NULL;
530 			unsigned long pfn = pte_pfn(ptent);
531 			if (pfn_valid(pfn)) {
532 				page = pfn_to_page(pfn);
533 				if (PageReserved(page))
534 					page = NULL;
535 			}
536 			if (unlikely(details) && page) {
537 				/*
538 				 * unmap_shared_mapping_pages() wants to
539 				 * invalidate cache without truncating:
540 				 * unmap shared but keep private pages.
541 				 */
542 				if (details->check_mapping &&
543 				    details->check_mapping != page->mapping)
544 					continue;
545 				/*
546 				 * Each page->index must be checked when
547 				 * invalidating or truncating nonlinear.
548 				 */
549 				if (details->nonlinear_vma &&
550 				    (page->index < details->first_index ||
551 				     page->index > details->last_index))
552 					continue;
553 			}
554 			ptent = ptep_get_and_clear(tlb->mm, addr, pte);
555 			tlb_remove_tlb_entry(tlb, pte, addr);
556 			if (unlikely(!page))
557 				continue;
558 			if (unlikely(details) && details->nonlinear_vma
559 			    && linear_page_index(details->nonlinear_vma,
560 						addr) != page->index)
561 				set_pte_at(tlb->mm, addr, pte,
562 					   pgoff_to_pte(page->index));
563 			if (pte_dirty(ptent))
564 				set_page_dirty(page);
565 			if (PageAnon(page))
566 				dec_mm_counter(tlb->mm, anon_rss);
567 			else if (pte_young(ptent))
568 				mark_page_accessed(page);
569 			tlb->freed++;
570 			page_remove_rmap(page);
571 			tlb_remove_page(tlb, page);
572 			continue;
573 		}
574 		/*
575 		 * If details->check_mapping, we leave swap entries;
576 		 * if details->nonlinear_vma, we leave file entries.
577 		 */
578 		if (unlikely(details))
579 			continue;
580 		if (!pte_file(ptent))
581 			free_swap_and_cache(pte_to_swp_entry(ptent));
582 		pte_clear(tlb->mm, addr, pte);
583 	} while (pte++, addr += PAGE_SIZE, addr != end);
584 	pte_unmap(pte - 1);
585 }
586 
587 static inline void zap_pmd_range(struct mmu_gather *tlb, pud_t *pud,
588 				unsigned long addr, unsigned long end,
589 				struct zap_details *details)
590 {
591 	pmd_t *pmd;
592 	unsigned long next;
593 
594 	pmd = pmd_offset(pud, addr);
595 	do {
596 		next = pmd_addr_end(addr, end);
597 		if (pmd_none_or_clear_bad(pmd))
598 			continue;
599 		zap_pte_range(tlb, pmd, addr, next, details);
600 	} while (pmd++, addr = next, addr != end);
601 }
602 
603 static inline void zap_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
604 				unsigned long addr, unsigned long end,
605 				struct zap_details *details)
606 {
607 	pud_t *pud;
608 	unsigned long next;
609 
610 	pud = pud_offset(pgd, addr);
611 	do {
612 		next = pud_addr_end(addr, end);
613 		if (pud_none_or_clear_bad(pud))
614 			continue;
615 		zap_pmd_range(tlb, pud, addr, next, details);
616 	} while (pud++, addr = next, addr != end);
617 }
618 
619 static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
620 				unsigned long addr, unsigned long end,
621 				struct zap_details *details)
622 {
623 	pgd_t *pgd;
624 	unsigned long next;
625 
626 	if (details && !details->check_mapping && !details->nonlinear_vma)
627 		details = NULL;
628 
629 	BUG_ON(addr >= end);
630 	tlb_start_vma(tlb, vma);
631 	pgd = pgd_offset(vma->vm_mm, addr);
632 	do {
633 		next = pgd_addr_end(addr, end);
634 		if (pgd_none_or_clear_bad(pgd))
635 			continue;
636 		zap_pud_range(tlb, pgd, addr, next, details);
637 	} while (pgd++, addr = next, addr != end);
638 	tlb_end_vma(tlb, vma);
639 }
640 
641 #ifdef CONFIG_PREEMPT
642 # define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
643 #else
644 /* No preempt: go for improved straight-line efficiency */
645 # define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
646 #endif
647 
648 /**
649  * unmap_vmas - unmap a range of memory covered by a list of vma's
650  * @tlbp: address of the caller's struct mmu_gather
651  * @mm: the controlling mm_struct
652  * @vma: the starting vma
653  * @start_addr: virtual address at which to start unmapping
654  * @end_addr: virtual address at which to end unmapping
655  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
656  * @details: details of nonlinear truncation or shared cache invalidation
657  *
658  * Returns the end address of the unmapping (restart addr if interrupted).
659  *
660  * Unmap all pages in the vma list.  Called under page_table_lock.
661  *
662  * We aim to not hold page_table_lock for too long (for scheduling latency
663  * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
664  * return the ending mmu_gather to the caller.
665  *
666  * Only addresses between `start' and `end' will be unmapped.
667  *
668  * The VMA list must be sorted in ascending virtual address order.
669  *
670  * unmap_vmas() assumes that the caller will flush the whole unmapped address
671  * range after unmap_vmas() returns.  So the only responsibility here is to
672  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
673  * drops the lock and schedules.
674  */
675 unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
676 		struct vm_area_struct *vma, unsigned long start_addr,
677 		unsigned long end_addr, unsigned long *nr_accounted,
678 		struct zap_details *details)
679 {
680 	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
681 	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
682 	int tlb_start_valid = 0;
683 	unsigned long start = start_addr;
684 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
685 	int fullmm = tlb_is_full_mm(*tlbp);
686 
687 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
688 		unsigned long end;
689 
690 		start = max(vma->vm_start, start_addr);
691 		if (start >= vma->vm_end)
692 			continue;
693 		end = min(vma->vm_end, end_addr);
694 		if (end <= vma->vm_start)
695 			continue;
696 
697 		if (vma->vm_flags & VM_ACCOUNT)
698 			*nr_accounted += (end - start) >> PAGE_SHIFT;
699 
700 		while (start != end) {
701 			unsigned long block;
702 
703 			if (!tlb_start_valid) {
704 				tlb_start = start;
705 				tlb_start_valid = 1;
706 			}
707 
708 			if (is_vm_hugetlb_page(vma)) {
709 				block = end - start;
710 				unmap_hugepage_range(vma, start, end);
711 			} else {
712 				block = min(zap_bytes, end - start);
713 				unmap_page_range(*tlbp, vma, start,
714 						start + block, details);
715 			}
716 
717 			start += block;
718 			zap_bytes -= block;
719 			if ((long)zap_bytes > 0)
720 				continue;
721 
722 			tlb_finish_mmu(*tlbp, tlb_start, start);
723 
724 			if (need_resched() ||
725 				need_lockbreak(&mm->page_table_lock) ||
726 				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
727 				if (i_mmap_lock) {
728 					/* must reset count of rss freed */
729 					*tlbp = tlb_gather_mmu(mm, fullmm);
730 					goto out;
731 				}
732 				spin_unlock(&mm->page_table_lock);
733 				cond_resched();
734 				spin_lock(&mm->page_table_lock);
735 			}
736 
737 			*tlbp = tlb_gather_mmu(mm, fullmm);
738 			tlb_start_valid = 0;
739 			zap_bytes = ZAP_BLOCK_SIZE;
740 		}
741 	}
742 out:
743 	return start;	/* which is now the end (or restart) address */
744 }
745 
746 /**
747  * zap_page_range - remove user pages in a given range
748  * @vma: vm_area_struct holding the applicable pages
749  * @address: starting address of pages to zap
750  * @size: number of bytes to zap
751  * @details: details of nonlinear truncation or shared cache invalidation
752  */
753 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
754 		unsigned long size, struct zap_details *details)
755 {
756 	struct mm_struct *mm = vma->vm_mm;
757 	struct mmu_gather *tlb;
758 	unsigned long end = address + size;
759 	unsigned long nr_accounted = 0;
760 
761 	if (is_vm_hugetlb_page(vma)) {
762 		zap_hugepage_range(vma, address, size);
763 		return end;
764 	}
765 
766 	lru_add_drain();
767 	spin_lock(&mm->page_table_lock);
768 	tlb = tlb_gather_mmu(mm, 0);
769 	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
770 	tlb_finish_mmu(tlb, address, end);
771 	spin_unlock(&mm->page_table_lock);
772 	return end;
773 }
774 
775 /*
776  * Do a quick page-table lookup for a single page.
777  * mm->page_table_lock must be held.
778  */
779 static struct page *
780 __follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
781 {
782 	pgd_t *pgd;
783 	pud_t *pud;
784 	pmd_t *pmd;
785 	pte_t *ptep, pte;
786 	unsigned long pfn;
787 	struct page *page;
788 
789 	page = follow_huge_addr(mm, address, write);
790 	if (! IS_ERR(page))
791 		return page;
792 
793 	pgd = pgd_offset(mm, address);
794 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
795 		goto out;
796 
797 	pud = pud_offset(pgd, address);
798 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
799 		goto out;
800 
801 	pmd = pmd_offset(pud, address);
802 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
803 		goto out;
804 	if (pmd_huge(*pmd))
805 		return follow_huge_pmd(mm, address, pmd, write);
806 
807 	ptep = pte_offset_map(pmd, address);
808 	if (!ptep)
809 		goto out;
810 
811 	pte = *ptep;
812 	pte_unmap(ptep);
813 	if (pte_present(pte)) {
814 		if (write && !pte_write(pte))
815 			goto out;
816 		if (read && !pte_read(pte))
817 			goto out;
818 		pfn = pte_pfn(pte);
819 		if (pfn_valid(pfn)) {
820 			page = pfn_to_page(pfn);
821 			if (write && !pte_dirty(pte) && !PageDirty(page))
822 				set_page_dirty(page);
823 			mark_page_accessed(page);
824 			return page;
825 		}
826 	}
827 
828 out:
829 	return NULL;
830 }
831 
832 struct page *
833 follow_page(struct mm_struct *mm, unsigned long address, int write)
834 {
835 	return __follow_page(mm, address, /*read*/0, write);
836 }
837 
838 int
839 check_user_page_readable(struct mm_struct *mm, unsigned long address)
840 {
841 	return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
842 }
843 EXPORT_SYMBOL(check_user_page_readable);
844 
845 static inline int
846 untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
847 			 unsigned long address)
848 {
849 	pgd_t *pgd;
850 	pud_t *pud;
851 	pmd_t *pmd;
852 
853 	/* Check if the vma is for an anonymous mapping. */
854 	if (vma->vm_ops && vma->vm_ops->nopage)
855 		return 0;
856 
857 	/* Check if page directory entry exists. */
858 	pgd = pgd_offset(mm, address);
859 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
860 		return 1;
861 
862 	pud = pud_offset(pgd, address);
863 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
864 		return 1;
865 
866 	/* Check if page middle directory entry exists. */
867 	pmd = pmd_offset(pud, address);
868 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
869 		return 1;
870 
871 	/* There is a pte slot for 'address' in 'mm'. */
872 	return 0;
873 }
874 
875 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
876 		unsigned long start, int len, int write, int force,
877 		struct page **pages, struct vm_area_struct **vmas)
878 {
879 	int i;
880 	unsigned int flags;
881 
882 	/*
883 	 * Require read or write permissions.
884 	 * If 'force' is set, we only require the "MAY" flags.
885 	 */
886 	flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
887 	flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
888 	i = 0;
889 
890 	do {
891 		struct vm_area_struct *	vma;
892 
893 		vma = find_extend_vma(mm, start);
894 		if (!vma && in_gate_area(tsk, start)) {
895 			unsigned long pg = start & PAGE_MASK;
896 			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
897 			pgd_t *pgd;
898 			pud_t *pud;
899 			pmd_t *pmd;
900 			pte_t *pte;
901 			if (write) /* user gate pages are read-only */
902 				return i ? : -EFAULT;
903 			if (pg > TASK_SIZE)
904 				pgd = pgd_offset_k(pg);
905 			else
906 				pgd = pgd_offset_gate(mm, pg);
907 			BUG_ON(pgd_none(*pgd));
908 			pud = pud_offset(pgd, pg);
909 			BUG_ON(pud_none(*pud));
910 			pmd = pmd_offset(pud, pg);
911 			BUG_ON(pmd_none(*pmd));
912 			pte = pte_offset_map(pmd, pg);
913 			BUG_ON(pte_none(*pte));
914 			if (pages) {
915 				pages[i] = pte_page(*pte);
916 				get_page(pages[i]);
917 			}
918 			pte_unmap(pte);
919 			if (vmas)
920 				vmas[i] = gate_vma;
921 			i++;
922 			start += PAGE_SIZE;
923 			len--;
924 			continue;
925 		}
926 
927 		if (!vma || (vma->vm_flags & VM_IO)
928 				|| !(flags & vma->vm_flags))
929 			return i ? : -EFAULT;
930 
931 		if (is_vm_hugetlb_page(vma)) {
932 			i = follow_hugetlb_page(mm, vma, pages, vmas,
933 						&start, &len, i);
934 			continue;
935 		}
936 		spin_lock(&mm->page_table_lock);
937 		do {
938 			struct page *page;
939 			int lookup_write = write;
940 
941 			cond_resched_lock(&mm->page_table_lock);
942 			while (!(page = follow_page(mm, start, lookup_write))) {
943 				/*
944 				 * Shortcut for anonymous pages. We don't want
945 				 * to force the creation of pages tables for
946 				 * insanely big anonymously mapped areas that
947 				 * nobody touched so far. This is important
948 				 * for doing a core dump for these mappings.
949 				 */
950 				if (!lookup_write &&
951 				    untouched_anonymous_page(mm,vma,start)) {
952 					page = ZERO_PAGE(start);
953 					break;
954 				}
955 				spin_unlock(&mm->page_table_lock);
956 				switch (handle_mm_fault(mm,vma,start,write)) {
957 				case VM_FAULT_MINOR:
958 					tsk->min_flt++;
959 					break;
960 				case VM_FAULT_MAJOR:
961 					tsk->maj_flt++;
962 					break;
963 				case VM_FAULT_SIGBUS:
964 					return i ? i : -EFAULT;
965 				case VM_FAULT_OOM:
966 					return i ? i : -ENOMEM;
967 				default:
968 					BUG();
969 				}
970 				/*
971 				 * Now that we have performed a write fault
972 				 * and surely no longer have a shared page we
973 				 * shouldn't write, we shouldn't ignore an
974 				 * unwritable page in the page table if
975 				 * we are forcing write access.
976 				 */
977 				lookup_write = write && !force;
978 				spin_lock(&mm->page_table_lock);
979 			}
980 			if (pages) {
981 				pages[i] = page;
982 				flush_dcache_page(page);
983 				if (!PageReserved(page))
984 					page_cache_get(page);
985 			}
986 			if (vmas)
987 				vmas[i] = vma;
988 			i++;
989 			start += PAGE_SIZE;
990 			len--;
991 		} while (len && start < vma->vm_end);
992 		spin_unlock(&mm->page_table_lock);
993 	} while (len);
994 	return i;
995 }
996 EXPORT_SYMBOL(get_user_pages);
997 
998 static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
999 			unsigned long addr, unsigned long end, pgprot_t prot)
1000 {
1001 	pte_t *pte;
1002 
1003 	pte = pte_alloc_map(mm, pmd, addr);
1004 	if (!pte)
1005 		return -ENOMEM;
1006 	do {
1007 		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(addr), prot));
1008 		BUG_ON(!pte_none(*pte));
1009 		set_pte_at(mm, addr, pte, zero_pte);
1010 	} while (pte++, addr += PAGE_SIZE, addr != end);
1011 	pte_unmap(pte - 1);
1012 	return 0;
1013 }
1014 
1015 static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
1016 			unsigned long addr, unsigned long end, pgprot_t prot)
1017 {
1018 	pmd_t *pmd;
1019 	unsigned long next;
1020 
1021 	pmd = pmd_alloc(mm, pud, addr);
1022 	if (!pmd)
1023 		return -ENOMEM;
1024 	do {
1025 		next = pmd_addr_end(addr, end);
1026 		if (zeromap_pte_range(mm, pmd, addr, next, prot))
1027 			return -ENOMEM;
1028 	} while (pmd++, addr = next, addr != end);
1029 	return 0;
1030 }
1031 
1032 static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1033 			unsigned long addr, unsigned long end, pgprot_t prot)
1034 {
1035 	pud_t *pud;
1036 	unsigned long next;
1037 
1038 	pud = pud_alloc(mm, pgd, addr);
1039 	if (!pud)
1040 		return -ENOMEM;
1041 	do {
1042 		next = pud_addr_end(addr, end);
1043 		if (zeromap_pmd_range(mm, pud, addr, next, prot))
1044 			return -ENOMEM;
1045 	} while (pud++, addr = next, addr != end);
1046 	return 0;
1047 }
1048 
1049 int zeromap_page_range(struct vm_area_struct *vma,
1050 			unsigned long addr, unsigned long size, pgprot_t prot)
1051 {
1052 	pgd_t *pgd;
1053 	unsigned long next;
1054 	unsigned long end = addr + size;
1055 	struct mm_struct *mm = vma->vm_mm;
1056 	int err;
1057 
1058 	BUG_ON(addr >= end);
1059 	pgd = pgd_offset(mm, addr);
1060 	flush_cache_range(vma, addr, end);
1061 	spin_lock(&mm->page_table_lock);
1062 	do {
1063 		next = pgd_addr_end(addr, end);
1064 		err = zeromap_pud_range(mm, pgd, addr, next, prot);
1065 		if (err)
1066 			break;
1067 	} while (pgd++, addr = next, addr != end);
1068 	spin_unlock(&mm->page_table_lock);
1069 	return err;
1070 }
1071 
1072 /*
1073  * maps a range of physical memory into the requested pages. the old
1074  * mappings are removed. any references to nonexistent pages results
1075  * in null mappings (currently treated as "copy-on-access")
1076  */
1077 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1078 			unsigned long addr, unsigned long end,
1079 			unsigned long pfn, pgprot_t prot)
1080 {
1081 	pte_t *pte;
1082 
1083 	pte = pte_alloc_map(mm, pmd, addr);
1084 	if (!pte)
1085 		return -ENOMEM;
1086 	do {
1087 		BUG_ON(!pte_none(*pte));
1088 		if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
1089 			set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
1090 		pfn++;
1091 	} while (pte++, addr += PAGE_SIZE, addr != end);
1092 	pte_unmap(pte - 1);
1093 	return 0;
1094 }
1095 
1096 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1097 			unsigned long addr, unsigned long end,
1098 			unsigned long pfn, pgprot_t prot)
1099 {
1100 	pmd_t *pmd;
1101 	unsigned long next;
1102 
1103 	pfn -= addr >> PAGE_SHIFT;
1104 	pmd = pmd_alloc(mm, pud, addr);
1105 	if (!pmd)
1106 		return -ENOMEM;
1107 	do {
1108 		next = pmd_addr_end(addr, end);
1109 		if (remap_pte_range(mm, pmd, addr, next,
1110 				pfn + (addr >> PAGE_SHIFT), prot))
1111 			return -ENOMEM;
1112 	} while (pmd++, addr = next, addr != end);
1113 	return 0;
1114 }
1115 
1116 static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1117 			unsigned long addr, unsigned long end,
1118 			unsigned long pfn, pgprot_t prot)
1119 {
1120 	pud_t *pud;
1121 	unsigned long next;
1122 
1123 	pfn -= addr >> PAGE_SHIFT;
1124 	pud = pud_alloc(mm, pgd, addr);
1125 	if (!pud)
1126 		return -ENOMEM;
1127 	do {
1128 		next = pud_addr_end(addr, end);
1129 		if (remap_pmd_range(mm, pud, addr, next,
1130 				pfn + (addr >> PAGE_SHIFT), prot))
1131 			return -ENOMEM;
1132 	} while (pud++, addr = next, addr != end);
1133 	return 0;
1134 }
1135 
1136 /*  Note: this is only safe if the mm semaphore is held when called. */
1137 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1138 		    unsigned long pfn, unsigned long size, pgprot_t prot)
1139 {
1140 	pgd_t *pgd;
1141 	unsigned long next;
1142 	unsigned long end = addr + size;
1143 	struct mm_struct *mm = vma->vm_mm;
1144 	int err;
1145 
1146 	/*
1147 	 * Physically remapped pages are special. Tell the
1148 	 * rest of the world about it:
1149 	 *   VM_IO tells people not to look at these pages
1150 	 *	(accesses can have side effects).
1151 	 *   VM_RESERVED tells swapout not to try to touch
1152 	 *	this region.
1153 	 */
1154 	vma->vm_flags |= VM_IO | VM_RESERVED;
1155 
1156 	BUG_ON(addr >= end);
1157 	pfn -= addr >> PAGE_SHIFT;
1158 	pgd = pgd_offset(mm, addr);
1159 	flush_cache_range(vma, addr, end);
1160 	spin_lock(&mm->page_table_lock);
1161 	do {
1162 		next = pgd_addr_end(addr, end);
1163 		err = remap_pud_range(mm, pgd, addr, next,
1164 				pfn + (addr >> PAGE_SHIFT), prot);
1165 		if (err)
1166 			break;
1167 	} while (pgd++, addr = next, addr != end);
1168 	spin_unlock(&mm->page_table_lock);
1169 	return err;
1170 }
1171 EXPORT_SYMBOL(remap_pfn_range);
1172 
1173 /*
1174  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1175  * servicing faults for write access.  In the normal case, do always want
1176  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1177  * that do not have writing enabled, when used by access_process_vm.
1178  */
1179 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1180 {
1181 	if (likely(vma->vm_flags & VM_WRITE))
1182 		pte = pte_mkwrite(pte);
1183 	return pte;
1184 }
1185 
1186 /*
1187  * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
1188  */
1189 static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address,
1190 		pte_t *page_table)
1191 {
1192 	pte_t entry;
1193 
1194 	entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
1195 			      vma);
1196 	ptep_establish(vma, address, page_table, entry);
1197 	update_mmu_cache(vma, address, entry);
1198 	lazy_mmu_prot_update(entry);
1199 }
1200 
1201 /*
1202  * This routine handles present pages, when users try to write
1203  * to a shared page. It is done by copying the page to a new address
1204  * and decrementing the shared-page counter for the old page.
1205  *
1206  * Goto-purists beware: the only reason for goto's here is that it results
1207  * in better assembly code.. The "default" path will see no jumps at all.
1208  *
1209  * Note that this routine assumes that the protection checks have been
1210  * done by the caller (the low-level page fault routine in most cases).
1211  * Thus we can safely just mark it writable once we've done any necessary
1212  * COW.
1213  *
1214  * We also mark the page dirty at this point even though the page will
1215  * change only once the write actually happens. This avoids a few races,
1216  * and potentially makes it more efficient.
1217  *
1218  * We hold the mm semaphore and the page_table_lock on entry and exit
1219  * with the page_table_lock released.
1220  */
1221 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
1222 	unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
1223 {
1224 	struct page *old_page, *new_page;
1225 	unsigned long pfn = pte_pfn(pte);
1226 	pte_t entry;
1227 
1228 	if (unlikely(!pfn_valid(pfn))) {
1229 		/*
1230 		 * This should really halt the system so it can be debugged or
1231 		 * at least the kernel stops what it's doing before it corrupts
1232 		 * data, but for the moment just pretend this is OOM.
1233 		 */
1234 		pte_unmap(page_table);
1235 		printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
1236 				address);
1237 		spin_unlock(&mm->page_table_lock);
1238 		return VM_FAULT_OOM;
1239 	}
1240 	old_page = pfn_to_page(pfn);
1241 
1242 	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
1243 		int reuse = can_share_swap_page(old_page);
1244 		unlock_page(old_page);
1245 		if (reuse) {
1246 			flush_cache_page(vma, address, pfn);
1247 			entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
1248 					      vma);
1249 			ptep_set_access_flags(vma, address, page_table, entry, 1);
1250 			update_mmu_cache(vma, address, entry);
1251 			lazy_mmu_prot_update(entry);
1252 			pte_unmap(page_table);
1253 			spin_unlock(&mm->page_table_lock);
1254 			return VM_FAULT_MINOR;
1255 		}
1256 	}
1257 	pte_unmap(page_table);
1258 
1259 	/*
1260 	 * Ok, we need to copy. Oh, well..
1261 	 */
1262 	if (!PageReserved(old_page))
1263 		page_cache_get(old_page);
1264 	spin_unlock(&mm->page_table_lock);
1265 
1266 	if (unlikely(anon_vma_prepare(vma)))
1267 		goto no_new_page;
1268 	if (old_page == ZERO_PAGE(address)) {
1269 		new_page = alloc_zeroed_user_highpage(vma, address);
1270 		if (!new_page)
1271 			goto no_new_page;
1272 	} else {
1273 		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1274 		if (!new_page)
1275 			goto no_new_page;
1276 		copy_user_highpage(new_page, old_page, address);
1277 	}
1278 	/*
1279 	 * Re-check the pte - we dropped the lock
1280 	 */
1281 	spin_lock(&mm->page_table_lock);
1282 	page_table = pte_offset_map(pmd, address);
1283 	if (likely(pte_same(*page_table, pte))) {
1284 		if (PageAnon(old_page))
1285 			dec_mm_counter(mm, anon_rss);
1286 		if (PageReserved(old_page))
1287 			inc_mm_counter(mm, rss);
1288 		else
1289 			page_remove_rmap(old_page);
1290 		flush_cache_page(vma, address, pfn);
1291 		break_cow(vma, new_page, address, page_table);
1292 		lru_cache_add_active(new_page);
1293 		page_add_anon_rmap(new_page, vma, address);
1294 
1295 		/* Free the old page.. */
1296 		new_page = old_page;
1297 	}
1298 	pte_unmap(page_table);
1299 	page_cache_release(new_page);
1300 	page_cache_release(old_page);
1301 	spin_unlock(&mm->page_table_lock);
1302 	return VM_FAULT_MINOR;
1303 
1304 no_new_page:
1305 	page_cache_release(old_page);
1306 	return VM_FAULT_OOM;
1307 }
1308 
1309 /*
1310  * Helper functions for unmap_mapping_range().
1311  *
1312  * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
1313  *
1314  * We have to restart searching the prio_tree whenever we drop the lock,
1315  * since the iterator is only valid while the lock is held, and anyway
1316  * a later vma might be split and reinserted earlier while lock dropped.
1317  *
1318  * The list of nonlinear vmas could be handled more efficiently, using
1319  * a placeholder, but handle it in the same way until a need is shown.
1320  * It is important to search the prio_tree before nonlinear list: a vma
1321  * may become nonlinear and be shifted from prio_tree to nonlinear list
1322  * while the lock is dropped; but never shifted from list to prio_tree.
1323  *
1324  * In order to make forward progress despite restarting the search,
1325  * vm_truncate_count is used to mark a vma as now dealt with, so we can
1326  * quickly skip it next time around.  Since the prio_tree search only
1327  * shows us those vmas affected by unmapping the range in question, we
1328  * can't efficiently keep all vmas in step with mapping->truncate_count:
1329  * so instead reset them all whenever it wraps back to 0 (then go to 1).
1330  * mapping->truncate_count and vma->vm_truncate_count are protected by
1331  * i_mmap_lock.
1332  *
1333  * In order to make forward progress despite repeatedly restarting some
1334  * large vma, note the restart_addr from unmap_vmas when it breaks out:
1335  * and restart from that address when we reach that vma again.  It might
1336  * have been split or merged, shrunk or extended, but never shifted: so
1337  * restart_addr remains valid so long as it remains in the vma's range.
1338  * unmap_mapping_range forces truncate_count to leap over page-aligned
1339  * values so we can save vma's restart_addr in its truncate_count field.
1340  */
1341 #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
1342 
1343 static void reset_vma_truncate_counts(struct address_space *mapping)
1344 {
1345 	struct vm_area_struct *vma;
1346 	struct prio_tree_iter iter;
1347 
1348 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
1349 		vma->vm_truncate_count = 0;
1350 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1351 		vma->vm_truncate_count = 0;
1352 }
1353 
1354 static int unmap_mapping_range_vma(struct vm_area_struct *vma,
1355 		unsigned long start_addr, unsigned long end_addr,
1356 		struct zap_details *details)
1357 {
1358 	unsigned long restart_addr;
1359 	int need_break;
1360 
1361 again:
1362 	restart_addr = vma->vm_truncate_count;
1363 	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
1364 		start_addr = restart_addr;
1365 		if (start_addr >= end_addr) {
1366 			/* Top of vma has been split off since last time */
1367 			vma->vm_truncate_count = details->truncate_count;
1368 			return 0;
1369 		}
1370 	}
1371 
1372 	restart_addr = zap_page_range(vma, start_addr,
1373 					end_addr - start_addr, details);
1374 
1375 	/*
1376 	 * We cannot rely on the break test in unmap_vmas:
1377 	 * on the one hand, we don't want to restart our loop
1378 	 * just because that broke out for the page_table_lock;
1379 	 * on the other hand, it does no test when vma is small.
1380 	 */
1381 	need_break = need_resched() ||
1382 			need_lockbreak(details->i_mmap_lock);
1383 
1384 	if (restart_addr >= end_addr) {
1385 		/* We have now completed this vma: mark it so */
1386 		vma->vm_truncate_count = details->truncate_count;
1387 		if (!need_break)
1388 			return 0;
1389 	} else {
1390 		/* Note restart_addr in vma's truncate_count field */
1391 		vma->vm_truncate_count = restart_addr;
1392 		if (!need_break)
1393 			goto again;
1394 	}
1395 
1396 	spin_unlock(details->i_mmap_lock);
1397 	cond_resched();
1398 	spin_lock(details->i_mmap_lock);
1399 	return -EINTR;
1400 }
1401 
1402 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
1403 					    struct zap_details *details)
1404 {
1405 	struct vm_area_struct *vma;
1406 	struct prio_tree_iter iter;
1407 	pgoff_t vba, vea, zba, zea;
1408 
1409 restart:
1410 	vma_prio_tree_foreach(vma, &iter, root,
1411 			details->first_index, details->last_index) {
1412 		/* Skip quickly over those we have already dealt with */
1413 		if (vma->vm_truncate_count == details->truncate_count)
1414 			continue;
1415 
1416 		vba = vma->vm_pgoff;
1417 		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
1418 		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
1419 		zba = details->first_index;
1420 		if (zba < vba)
1421 			zba = vba;
1422 		zea = details->last_index;
1423 		if (zea > vea)
1424 			zea = vea;
1425 
1426 		if (unmap_mapping_range_vma(vma,
1427 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
1428 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
1429 				details) < 0)
1430 			goto restart;
1431 	}
1432 }
1433 
1434 static inline void unmap_mapping_range_list(struct list_head *head,
1435 					    struct zap_details *details)
1436 {
1437 	struct vm_area_struct *vma;
1438 
1439 	/*
1440 	 * In nonlinear VMAs there is no correspondence between virtual address
1441 	 * offset and file offset.  So we must perform an exhaustive search
1442 	 * across *all* the pages in each nonlinear VMA, not just the pages
1443 	 * whose virtual address lies outside the file truncation point.
1444 	 */
1445 restart:
1446 	list_for_each_entry(vma, head, shared.vm_set.list) {
1447 		/* Skip quickly over those we have already dealt with */
1448 		if (vma->vm_truncate_count == details->truncate_count)
1449 			continue;
1450 		details->nonlinear_vma = vma;
1451 		if (unmap_mapping_range_vma(vma, vma->vm_start,
1452 					vma->vm_end, details) < 0)
1453 			goto restart;
1454 	}
1455 }
1456 
1457 /**
1458  * unmap_mapping_range - unmap the portion of all mmaps
1459  * in the specified address_space corresponding to the specified
1460  * page range in the underlying file.
1461  * @address_space: the address space containing mmaps to be unmapped.
1462  * @holebegin: byte in first page to unmap, relative to the start of
1463  * the underlying file.  This will be rounded down to a PAGE_SIZE
1464  * boundary.  Note that this is different from vmtruncate(), which
1465  * must keep the partial page.  In contrast, we must get rid of
1466  * partial pages.
1467  * @holelen: size of prospective hole in bytes.  This will be rounded
1468  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
1469  * end of the file.
1470  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
1471  * but 0 when invalidating pagecache, don't throw away private data.
1472  */
1473 void unmap_mapping_range(struct address_space *mapping,
1474 		loff_t const holebegin, loff_t const holelen, int even_cows)
1475 {
1476 	struct zap_details details;
1477 	pgoff_t hba = holebegin >> PAGE_SHIFT;
1478 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1479 
1480 	/* Check for overflow. */
1481 	if (sizeof(holelen) > sizeof(hlen)) {
1482 		long long holeend =
1483 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1484 		if (holeend & ~(long long)ULONG_MAX)
1485 			hlen = ULONG_MAX - hba + 1;
1486 	}
1487 
1488 	details.check_mapping = even_cows? NULL: mapping;
1489 	details.nonlinear_vma = NULL;
1490 	details.first_index = hba;
1491 	details.last_index = hba + hlen - 1;
1492 	if (details.last_index < details.first_index)
1493 		details.last_index = ULONG_MAX;
1494 	details.i_mmap_lock = &mapping->i_mmap_lock;
1495 
1496 	spin_lock(&mapping->i_mmap_lock);
1497 
1498 	/* serialize i_size write against truncate_count write */
1499 	smp_wmb();
1500 	/* Protect against page faults, and endless unmapping loops */
1501 	mapping->truncate_count++;
1502 	/*
1503 	 * For archs where spin_lock has inclusive semantics like ia64
1504 	 * this smp_mb() will prevent to read pagetable contents
1505 	 * before the truncate_count increment is visible to
1506 	 * other cpus.
1507 	 */
1508 	smp_mb();
1509 	if (unlikely(is_restart_addr(mapping->truncate_count))) {
1510 		if (mapping->truncate_count == 0)
1511 			reset_vma_truncate_counts(mapping);
1512 		mapping->truncate_count++;
1513 	}
1514 	details.truncate_count = mapping->truncate_count;
1515 
1516 	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
1517 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
1518 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
1519 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
1520 	spin_unlock(&mapping->i_mmap_lock);
1521 }
1522 EXPORT_SYMBOL(unmap_mapping_range);
1523 
1524 /*
1525  * Handle all mappings that got truncated by a "truncate()"
1526  * system call.
1527  *
1528  * NOTE! We have to be ready to update the memory sharing
1529  * between the file and the memory map for a potential last
1530  * incomplete page.  Ugly, but necessary.
1531  */
1532 int vmtruncate(struct inode * inode, loff_t offset)
1533 {
1534 	struct address_space *mapping = inode->i_mapping;
1535 	unsigned long limit;
1536 
1537 	if (inode->i_size < offset)
1538 		goto do_expand;
1539 	/*
1540 	 * truncation of in-use swapfiles is disallowed - it would cause
1541 	 * subsequent swapout to scribble on the now-freed blocks.
1542 	 */
1543 	if (IS_SWAPFILE(inode))
1544 		goto out_busy;
1545 	i_size_write(inode, offset);
1546 	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
1547 	truncate_inode_pages(mapping, offset);
1548 	goto out_truncate;
1549 
1550 do_expand:
1551 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1552 	if (limit != RLIM_INFINITY && offset > limit)
1553 		goto out_sig;
1554 	if (offset > inode->i_sb->s_maxbytes)
1555 		goto out_big;
1556 	i_size_write(inode, offset);
1557 
1558 out_truncate:
1559 	if (inode->i_op && inode->i_op->truncate)
1560 		inode->i_op->truncate(inode);
1561 	return 0;
1562 out_sig:
1563 	send_sig(SIGXFSZ, current, 0);
1564 out_big:
1565 	return -EFBIG;
1566 out_busy:
1567 	return -ETXTBSY;
1568 }
1569 
1570 EXPORT_SYMBOL(vmtruncate);
1571 
1572 /*
1573  * Primitive swap readahead code. We simply read an aligned block of
1574  * (1 << page_cluster) entries in the swap area. This method is chosen
1575  * because it doesn't cost us any seek time.  We also make sure to queue
1576  * the 'original' request together with the readahead ones...
1577  *
1578  * This has been extended to use the NUMA policies from the mm triggering
1579  * the readahead.
1580  *
1581  * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
1582  */
1583 void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
1584 {
1585 #ifdef CONFIG_NUMA
1586 	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
1587 #endif
1588 	int i, num;
1589 	struct page *new_page;
1590 	unsigned long offset;
1591 
1592 	/*
1593 	 * Get the number of handles we should do readahead io to.
1594 	 */
1595 	num = valid_swaphandles(entry, &offset);
1596 	for (i = 0; i < num; offset++, i++) {
1597 		/* Ok, do the async read-ahead now */
1598 		new_page = read_swap_cache_async(swp_entry(swp_type(entry),
1599 							   offset), vma, addr);
1600 		if (!new_page)
1601 			break;
1602 		page_cache_release(new_page);
1603 #ifdef CONFIG_NUMA
1604 		/*
1605 		 * Find the next applicable VMA for the NUMA policy.
1606 		 */
1607 		addr += PAGE_SIZE;
1608 		if (addr == 0)
1609 			vma = NULL;
1610 		if (vma) {
1611 			if (addr >= vma->vm_end) {
1612 				vma = next_vma;
1613 				next_vma = vma ? vma->vm_next : NULL;
1614 			}
1615 			if (vma && addr < vma->vm_start)
1616 				vma = NULL;
1617 		} else {
1618 			if (next_vma && addr >= next_vma->vm_start) {
1619 				vma = next_vma;
1620 				next_vma = vma->vm_next;
1621 			}
1622 		}
1623 #endif
1624 	}
1625 	lru_add_drain();	/* Push any new pages onto the LRU now */
1626 }
1627 
1628 /*
1629  * We hold the mm semaphore and the page_table_lock on entry and
1630  * should release the pagetable lock on exit..
1631  */
1632 static int do_swap_page(struct mm_struct * mm,
1633 	struct vm_area_struct * vma, unsigned long address,
1634 	pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
1635 {
1636 	struct page *page;
1637 	swp_entry_t entry = pte_to_swp_entry(orig_pte);
1638 	pte_t pte;
1639 	int ret = VM_FAULT_MINOR;
1640 
1641 	pte_unmap(page_table);
1642 	spin_unlock(&mm->page_table_lock);
1643 	page = lookup_swap_cache(entry);
1644 	if (!page) {
1645  		swapin_readahead(entry, address, vma);
1646  		page = read_swap_cache_async(entry, vma, address);
1647 		if (!page) {
1648 			/*
1649 			 * Back out if somebody else faulted in this pte while
1650 			 * we released the page table lock.
1651 			 */
1652 			spin_lock(&mm->page_table_lock);
1653 			page_table = pte_offset_map(pmd, address);
1654 			if (likely(pte_same(*page_table, orig_pte)))
1655 				ret = VM_FAULT_OOM;
1656 			else
1657 				ret = VM_FAULT_MINOR;
1658 			pte_unmap(page_table);
1659 			spin_unlock(&mm->page_table_lock);
1660 			goto out;
1661 		}
1662 
1663 		/* Had to read the page from swap area: Major fault */
1664 		ret = VM_FAULT_MAJOR;
1665 		inc_page_state(pgmajfault);
1666 		grab_swap_token();
1667 	}
1668 
1669 	mark_page_accessed(page);
1670 	lock_page(page);
1671 
1672 	/*
1673 	 * Back out if somebody else faulted in this pte while we
1674 	 * released the page table lock.
1675 	 */
1676 	spin_lock(&mm->page_table_lock);
1677 	page_table = pte_offset_map(pmd, address);
1678 	if (unlikely(!pte_same(*page_table, orig_pte))) {
1679 		ret = VM_FAULT_MINOR;
1680 		goto out_nomap;
1681 	}
1682 
1683 	if (unlikely(!PageUptodate(page))) {
1684 		ret = VM_FAULT_SIGBUS;
1685 		goto out_nomap;
1686 	}
1687 
1688 	/* The page isn't present yet, go ahead with the fault. */
1689 
1690 	inc_mm_counter(mm, rss);
1691 	pte = mk_pte(page, vma->vm_page_prot);
1692 	if (write_access && can_share_swap_page(page)) {
1693 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
1694 		write_access = 0;
1695 	}
1696 
1697 	flush_icache_page(vma, page);
1698 	set_pte_at(mm, address, page_table, pte);
1699 	page_add_anon_rmap(page, vma, address);
1700 
1701 	swap_free(entry);
1702 	if (vm_swap_full())
1703 		remove_exclusive_swap_page(page);
1704 	unlock_page(page);
1705 
1706 	if (write_access) {
1707 		if (do_wp_page(mm, vma, address,
1708 				page_table, pmd, pte) == VM_FAULT_OOM)
1709 			ret = VM_FAULT_OOM;
1710 		goto out;
1711 	}
1712 
1713 	/* No need to invalidate - it was non-present before */
1714 	update_mmu_cache(vma, address, pte);
1715 	lazy_mmu_prot_update(pte);
1716 	pte_unmap(page_table);
1717 	spin_unlock(&mm->page_table_lock);
1718 out:
1719 	return ret;
1720 out_nomap:
1721 	pte_unmap(page_table);
1722 	spin_unlock(&mm->page_table_lock);
1723 	unlock_page(page);
1724 	page_cache_release(page);
1725 	goto out;
1726 }
1727 
1728 /*
1729  * We are called with the MM semaphore and page_table_lock
1730  * spinlock held to protect against concurrent faults in
1731  * multithreaded programs.
1732  */
1733 static int
1734 do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1735 		pte_t *page_table, pmd_t *pmd, int write_access,
1736 		unsigned long addr)
1737 {
1738 	pte_t entry;
1739 	struct page * page = ZERO_PAGE(addr);
1740 
1741 	/* Read-only mapping of ZERO_PAGE. */
1742 	entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
1743 
1744 	/* ..except if it's a write access */
1745 	if (write_access) {
1746 		/* Allocate our own private page. */
1747 		pte_unmap(page_table);
1748 		spin_unlock(&mm->page_table_lock);
1749 
1750 		if (unlikely(anon_vma_prepare(vma)))
1751 			goto no_mem;
1752 		page = alloc_zeroed_user_highpage(vma, addr);
1753 		if (!page)
1754 			goto no_mem;
1755 
1756 		spin_lock(&mm->page_table_lock);
1757 		page_table = pte_offset_map(pmd, addr);
1758 
1759 		if (!pte_none(*page_table)) {
1760 			pte_unmap(page_table);
1761 			page_cache_release(page);
1762 			spin_unlock(&mm->page_table_lock);
1763 			goto out;
1764 		}
1765 		inc_mm_counter(mm, rss);
1766 		entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
1767 							 vma->vm_page_prot)),
1768 				      vma);
1769 		lru_cache_add_active(page);
1770 		SetPageReferenced(page);
1771 		page_add_anon_rmap(page, vma, addr);
1772 	}
1773 
1774 	set_pte_at(mm, addr, page_table, entry);
1775 	pte_unmap(page_table);
1776 
1777 	/* No need to invalidate - it was non-present before */
1778 	update_mmu_cache(vma, addr, entry);
1779 	lazy_mmu_prot_update(entry);
1780 	spin_unlock(&mm->page_table_lock);
1781 out:
1782 	return VM_FAULT_MINOR;
1783 no_mem:
1784 	return VM_FAULT_OOM;
1785 }
1786 
1787 /*
1788  * do_no_page() tries to create a new page mapping. It aggressively
1789  * tries to share with existing pages, but makes a separate copy if
1790  * the "write_access" parameter is true in order to avoid the next
1791  * page fault.
1792  *
1793  * As this is called only for pages that do not currently exist, we
1794  * do not need to flush old virtual caches or the TLB.
1795  *
1796  * This is called with the MM semaphore held and the page table
1797  * spinlock held. Exit with the spinlock released.
1798  */
1799 static int
1800 do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1801 	unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
1802 {
1803 	struct page * new_page;
1804 	struct address_space *mapping = NULL;
1805 	pte_t entry;
1806 	unsigned int sequence = 0;
1807 	int ret = VM_FAULT_MINOR;
1808 	int anon = 0;
1809 
1810 	if (!vma->vm_ops || !vma->vm_ops->nopage)
1811 		return do_anonymous_page(mm, vma, page_table,
1812 					pmd, write_access, address);
1813 	pte_unmap(page_table);
1814 	spin_unlock(&mm->page_table_lock);
1815 
1816 	if (vma->vm_file) {
1817 		mapping = vma->vm_file->f_mapping;
1818 		sequence = mapping->truncate_count;
1819 		smp_rmb(); /* serializes i_size against truncate_count */
1820 	}
1821 retry:
1822 	cond_resched();
1823 	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
1824 	/*
1825 	 * No smp_rmb is needed here as long as there's a full
1826 	 * spin_lock/unlock sequence inside the ->nopage callback
1827 	 * (for the pagecache lookup) that acts as an implicit
1828 	 * smp_mb() and prevents the i_size read to happen
1829 	 * after the next truncate_count read.
1830 	 */
1831 
1832 	/* no page was available -- either SIGBUS or OOM */
1833 	if (new_page == NOPAGE_SIGBUS)
1834 		return VM_FAULT_SIGBUS;
1835 	if (new_page == NOPAGE_OOM)
1836 		return VM_FAULT_OOM;
1837 
1838 	/*
1839 	 * Should we do an early C-O-W break?
1840 	 */
1841 	if (write_access && !(vma->vm_flags & VM_SHARED)) {
1842 		struct page *page;
1843 
1844 		if (unlikely(anon_vma_prepare(vma)))
1845 			goto oom;
1846 		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1847 		if (!page)
1848 			goto oom;
1849 		copy_user_highpage(page, new_page, address);
1850 		page_cache_release(new_page);
1851 		new_page = page;
1852 		anon = 1;
1853 	}
1854 
1855 	spin_lock(&mm->page_table_lock);
1856 	/*
1857 	 * For a file-backed vma, someone could have truncated or otherwise
1858 	 * invalidated this page.  If unmap_mapping_range got called,
1859 	 * retry getting the page.
1860 	 */
1861 	if (mapping && unlikely(sequence != mapping->truncate_count)) {
1862 		sequence = mapping->truncate_count;
1863 		spin_unlock(&mm->page_table_lock);
1864 		page_cache_release(new_page);
1865 		goto retry;
1866 	}
1867 	page_table = pte_offset_map(pmd, address);
1868 
1869 	/*
1870 	 * This silly early PAGE_DIRTY setting removes a race
1871 	 * due to the bad i386 page protection. But it's valid
1872 	 * for other architectures too.
1873 	 *
1874 	 * Note that if write_access is true, we either now have
1875 	 * an exclusive copy of the page, or this is a shared mapping,
1876 	 * so we can make it writable and dirty to avoid having to
1877 	 * handle that later.
1878 	 */
1879 	/* Only go through if we didn't race with anybody else... */
1880 	if (pte_none(*page_table)) {
1881 		if (!PageReserved(new_page))
1882 			inc_mm_counter(mm, rss);
1883 
1884 		flush_icache_page(vma, new_page);
1885 		entry = mk_pte(new_page, vma->vm_page_prot);
1886 		if (write_access)
1887 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1888 		set_pte_at(mm, address, page_table, entry);
1889 		if (anon) {
1890 			lru_cache_add_active(new_page);
1891 			page_add_anon_rmap(new_page, vma, address);
1892 		} else
1893 			page_add_file_rmap(new_page);
1894 		pte_unmap(page_table);
1895 	} else {
1896 		/* One of our sibling threads was faster, back out. */
1897 		pte_unmap(page_table);
1898 		page_cache_release(new_page);
1899 		spin_unlock(&mm->page_table_lock);
1900 		goto out;
1901 	}
1902 
1903 	/* no need to invalidate: a not-present page shouldn't be cached */
1904 	update_mmu_cache(vma, address, entry);
1905 	lazy_mmu_prot_update(entry);
1906 	spin_unlock(&mm->page_table_lock);
1907 out:
1908 	return ret;
1909 oom:
1910 	page_cache_release(new_page);
1911 	ret = VM_FAULT_OOM;
1912 	goto out;
1913 }
1914 
1915 /*
1916  * Fault of a previously existing named mapping. Repopulate the pte
1917  * from the encoded file_pte if possible. This enables swappable
1918  * nonlinear vmas.
1919  */
1920 static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
1921 	unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
1922 {
1923 	unsigned long pgoff;
1924 	int err;
1925 
1926 	BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
1927 	/*
1928 	 * Fall back to the linear mapping if the fs does not support
1929 	 * ->populate:
1930 	 */
1931 	if (!vma->vm_ops || !vma->vm_ops->populate ||
1932 			(write_access && !(vma->vm_flags & VM_SHARED))) {
1933 		pte_clear(mm, address, pte);
1934 		return do_no_page(mm, vma, address, write_access, pte, pmd);
1935 	}
1936 
1937 	pgoff = pte_to_pgoff(*pte);
1938 
1939 	pte_unmap(pte);
1940 	spin_unlock(&mm->page_table_lock);
1941 
1942 	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
1943 	if (err == -ENOMEM)
1944 		return VM_FAULT_OOM;
1945 	if (err)
1946 		return VM_FAULT_SIGBUS;
1947 	return VM_FAULT_MAJOR;
1948 }
1949 
1950 /*
1951  * These routines also need to handle stuff like marking pages dirty
1952  * and/or accessed for architectures that don't do it in hardware (most
1953  * RISC architectures).  The early dirtying is also good on the i386.
1954  *
1955  * There is also a hook called "update_mmu_cache()" that architectures
1956  * with external mmu caches can use to update those (ie the Sparc or
1957  * PowerPC hashed page tables that act as extended TLBs).
1958  *
1959  * Note the "page_table_lock". It is to protect against kswapd removing
1960  * pages from under us. Note that kswapd only ever _removes_ pages, never
1961  * adds them. As such, once we have noticed that the page is not present,
1962  * we can drop the lock early.
1963  *
1964  * The adding of pages is protected by the MM semaphore (which we hold),
1965  * so we don't need to worry about a page being suddenly been added into
1966  * our VM.
1967  *
1968  * We enter with the pagetable spinlock held, we are supposed to
1969  * release it when done.
1970  */
1971 static inline int handle_pte_fault(struct mm_struct *mm,
1972 	struct vm_area_struct * vma, unsigned long address,
1973 	int write_access, pte_t *pte, pmd_t *pmd)
1974 {
1975 	pte_t entry;
1976 
1977 	entry = *pte;
1978 	if (!pte_present(entry)) {
1979 		/*
1980 		 * If it truly wasn't present, we know that kswapd
1981 		 * and the PTE updates will not touch it later. So
1982 		 * drop the lock.
1983 		 */
1984 		if (pte_none(entry))
1985 			return do_no_page(mm, vma, address, write_access, pte, pmd);
1986 		if (pte_file(entry))
1987 			return do_file_page(mm, vma, address, write_access, pte, pmd);
1988 		return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
1989 	}
1990 
1991 	if (write_access) {
1992 		if (!pte_write(entry))
1993 			return do_wp_page(mm, vma, address, pte, pmd, entry);
1994 
1995 		entry = pte_mkdirty(entry);
1996 	}
1997 	entry = pte_mkyoung(entry);
1998 	ptep_set_access_flags(vma, address, pte, entry, write_access);
1999 	update_mmu_cache(vma, address, entry);
2000 	lazy_mmu_prot_update(entry);
2001 	pte_unmap(pte);
2002 	spin_unlock(&mm->page_table_lock);
2003 	return VM_FAULT_MINOR;
2004 }
2005 
2006 /*
2007  * By the time we get here, we already hold the mm semaphore
2008  */
2009 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
2010 		unsigned long address, int write_access)
2011 {
2012 	pgd_t *pgd;
2013 	pud_t *pud;
2014 	pmd_t *pmd;
2015 	pte_t *pte;
2016 
2017 	__set_current_state(TASK_RUNNING);
2018 
2019 	inc_page_state(pgfault);
2020 
2021 	if (is_vm_hugetlb_page(vma))
2022 		return VM_FAULT_SIGBUS;	/* mapping truncation does this. */
2023 
2024 	/*
2025 	 * We need the page table lock to synchronize with kswapd
2026 	 * and the SMP-safe atomic PTE updates.
2027 	 */
2028 	pgd = pgd_offset(mm, address);
2029 	spin_lock(&mm->page_table_lock);
2030 
2031 	pud = pud_alloc(mm, pgd, address);
2032 	if (!pud)
2033 		goto oom;
2034 
2035 	pmd = pmd_alloc(mm, pud, address);
2036 	if (!pmd)
2037 		goto oom;
2038 
2039 	pte = pte_alloc_map(mm, pmd, address);
2040 	if (!pte)
2041 		goto oom;
2042 
2043 	return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
2044 
2045  oom:
2046 	spin_unlock(&mm->page_table_lock);
2047 	return VM_FAULT_OOM;
2048 }
2049 
2050 #ifndef __PAGETABLE_PUD_FOLDED
2051 /*
2052  * Allocate page upper directory.
2053  *
2054  * We've already handled the fast-path in-line, and we own the
2055  * page table lock.
2056  */
2057 pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2058 {
2059 	pud_t *new;
2060 
2061 	spin_unlock(&mm->page_table_lock);
2062 	new = pud_alloc_one(mm, address);
2063 	spin_lock(&mm->page_table_lock);
2064 	if (!new)
2065 		return NULL;
2066 
2067 	/*
2068 	 * Because we dropped the lock, we should re-check the
2069 	 * entry, as somebody else could have populated it..
2070 	 */
2071 	if (pgd_present(*pgd)) {
2072 		pud_free(new);
2073 		goto out;
2074 	}
2075 	pgd_populate(mm, pgd, new);
2076  out:
2077 	return pud_offset(pgd, address);
2078 }
2079 #endif /* __PAGETABLE_PUD_FOLDED */
2080 
2081 #ifndef __PAGETABLE_PMD_FOLDED
2082 /*
2083  * Allocate page middle directory.
2084  *
2085  * We've already handled the fast-path in-line, and we own the
2086  * page table lock.
2087  */
2088 pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2089 {
2090 	pmd_t *new;
2091 
2092 	spin_unlock(&mm->page_table_lock);
2093 	new = pmd_alloc_one(mm, address);
2094 	spin_lock(&mm->page_table_lock);
2095 	if (!new)
2096 		return NULL;
2097 
2098 	/*
2099 	 * Because we dropped the lock, we should re-check the
2100 	 * entry, as somebody else could have populated it..
2101 	 */
2102 #ifndef __ARCH_HAS_4LEVEL_HACK
2103 	if (pud_present(*pud)) {
2104 		pmd_free(new);
2105 		goto out;
2106 	}
2107 	pud_populate(mm, pud, new);
2108 #else
2109 	if (pgd_present(*pud)) {
2110 		pmd_free(new);
2111 		goto out;
2112 	}
2113 	pgd_populate(mm, pud, new);
2114 #endif /* __ARCH_HAS_4LEVEL_HACK */
2115 
2116  out:
2117 	return pmd_offset(pud, address);
2118 }
2119 #endif /* __PAGETABLE_PMD_FOLDED */
2120 
2121 int make_pages_present(unsigned long addr, unsigned long end)
2122 {
2123 	int ret, len, write;
2124 	struct vm_area_struct * vma;
2125 
2126 	vma = find_vma(current->mm, addr);
2127 	if (!vma)
2128 		return -1;
2129 	write = (vma->vm_flags & VM_WRITE) != 0;
2130 	if (addr >= end)
2131 		BUG();
2132 	if (end > vma->vm_end)
2133 		BUG();
2134 	len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
2135 	ret = get_user_pages(current, current->mm, addr,
2136 			len, write, 0, NULL, NULL);
2137 	if (ret < 0)
2138 		return ret;
2139 	return ret == len ? 0 : -1;
2140 }
2141 
2142 /*
2143  * Map a vmalloc()-space virtual address to the physical page.
2144  */
2145 struct page * vmalloc_to_page(void * vmalloc_addr)
2146 {
2147 	unsigned long addr = (unsigned long) vmalloc_addr;
2148 	struct page *page = NULL;
2149 	pgd_t *pgd = pgd_offset_k(addr);
2150 	pud_t *pud;
2151 	pmd_t *pmd;
2152 	pte_t *ptep, pte;
2153 
2154 	if (!pgd_none(*pgd)) {
2155 		pud = pud_offset(pgd, addr);
2156 		if (!pud_none(*pud)) {
2157 			pmd = pmd_offset(pud, addr);
2158 			if (!pmd_none(*pmd)) {
2159 				ptep = pte_offset_map(pmd, addr);
2160 				pte = *ptep;
2161 				if (pte_present(pte))
2162 					page = pte_page(pte);
2163 				pte_unmap(ptep);
2164 			}
2165 		}
2166 	}
2167 	return page;
2168 }
2169 
2170 EXPORT_SYMBOL(vmalloc_to_page);
2171 
2172 /*
2173  * Map a vmalloc()-space virtual address to the physical page frame number.
2174  */
2175 unsigned long vmalloc_to_pfn(void * vmalloc_addr)
2176 {
2177 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
2178 }
2179 
2180 EXPORT_SYMBOL(vmalloc_to_pfn);
2181 
2182 /*
2183  * update_mem_hiwater
2184  *	- update per process rss and vm high water data
2185  */
2186 void update_mem_hiwater(struct task_struct *tsk)
2187 {
2188 	if (tsk->mm) {
2189 		unsigned long rss = get_mm_counter(tsk->mm, rss);
2190 
2191 		if (tsk->mm->hiwater_rss < rss)
2192 			tsk->mm->hiwater_rss = rss;
2193 		if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
2194 			tsk->mm->hiwater_vm = tsk->mm->total_vm;
2195 	}
2196 }
2197 
2198 #if !defined(__HAVE_ARCH_GATE_AREA)
2199 
2200 #if defined(AT_SYSINFO_EHDR)
2201 struct vm_area_struct gate_vma;
2202 
2203 static int __init gate_vma_init(void)
2204 {
2205 	gate_vma.vm_mm = NULL;
2206 	gate_vma.vm_start = FIXADDR_USER_START;
2207 	gate_vma.vm_end = FIXADDR_USER_END;
2208 	gate_vma.vm_page_prot = PAGE_READONLY;
2209 	gate_vma.vm_flags = 0;
2210 	return 0;
2211 }
2212 __initcall(gate_vma_init);
2213 #endif
2214 
2215 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
2216 {
2217 #ifdef AT_SYSINFO_EHDR
2218 	return &gate_vma;
2219 #else
2220 	return NULL;
2221 #endif
2222 }
2223 
2224 int in_gate_area_no_task(unsigned long addr)
2225 {
2226 #ifdef AT_SYSINFO_EHDR
2227 	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
2228 		return 1;
2229 #endif
2230 	return 0;
2231 }
2232 
2233 #endif	/* __HAVE_ARCH_GATE_AREA */
2234