xref: /linux/mm/memory.c (revision 03668a4debf4f50de55c34b6e66dae63e1c73716)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  linux/mm/memory.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
51da177e4SLinus Torvalds  */
61da177e4SLinus Torvalds 
71da177e4SLinus Torvalds /*
81da177e4SLinus Torvalds  * demand-loading started 01.12.91 - seems it is high on the list of
91da177e4SLinus Torvalds  * things wanted, and it should be easy to implement. - Linus
101da177e4SLinus Torvalds  */
111da177e4SLinus Torvalds 
121da177e4SLinus Torvalds /*
131da177e4SLinus Torvalds  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
141da177e4SLinus Torvalds  * pages started 02.12.91, seems to work. - Linus.
151da177e4SLinus Torvalds  *
161da177e4SLinus Torvalds  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
171da177e4SLinus Torvalds  * would have taken more than the 6M I have free, but it worked well as
181da177e4SLinus Torvalds  * far as I could see.
191da177e4SLinus Torvalds  *
201da177e4SLinus Torvalds  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
211da177e4SLinus Torvalds  */
221da177e4SLinus Torvalds 
231da177e4SLinus Torvalds /*
241da177e4SLinus Torvalds  * Real VM (paging to/from disk) started 18.12.91. Much more work and
251da177e4SLinus Torvalds  * thought has to go into this. Oh, well..
261da177e4SLinus Torvalds  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
271da177e4SLinus Torvalds  *		Found it. Everything seems to work now.
281da177e4SLinus Torvalds  * 20.12.91  -  Ok, making the swap-device changeable like the root.
291da177e4SLinus Torvalds  */
301da177e4SLinus Torvalds 
311da177e4SLinus Torvalds /*
321da177e4SLinus Torvalds  * 05.04.94  -  Multi-page memory management added for v1.1.
331da177e4SLinus Torvalds  * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
341da177e4SLinus Torvalds  *
351da177e4SLinus Torvalds  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
361da177e4SLinus Torvalds  *		(Gerhard.Wichert@pdb.siemens.de)
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
391da177e4SLinus Torvalds  */
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds #include <linux/kernel_stat.h>
421da177e4SLinus Torvalds #include <linux/mm.h>
431da177e4SLinus Torvalds #include <linux/hugetlb.h>
441da177e4SLinus Torvalds #include <linux/mman.h>
451da177e4SLinus Torvalds #include <linux/swap.h>
461da177e4SLinus Torvalds #include <linux/highmem.h>
471da177e4SLinus Torvalds #include <linux/pagemap.h>
481da177e4SLinus Torvalds #include <linux/rmap.h>
491da177e4SLinus Torvalds #include <linux/module.h>
500ff92245SShailabh Nagar #include <linux/delayacct.h>
511da177e4SLinus Torvalds #include <linux/init.h>
52edc79b2aSPeter Zijlstra #include <linux/writeback.h>
538a9f3ccdSBalbir Singh #include <linux/memcontrol.h>
54cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
553dc14741SHugh Dickins #include <linux/kallsyms.h>
563dc14741SHugh Dickins #include <linux/swapops.h>
573dc14741SHugh Dickins #include <linux/elf.h>
581da177e4SLinus Torvalds 
591da177e4SLinus Torvalds #include <asm/pgalloc.h>
601da177e4SLinus Torvalds #include <asm/uaccess.h>
611da177e4SLinus Torvalds #include <asm/tlb.h>
621da177e4SLinus Torvalds #include <asm/tlbflush.h>
631da177e4SLinus Torvalds #include <asm/pgtable.h>
641da177e4SLinus Torvalds 
6542b77728SJan Beulich #include "internal.h"
6642b77728SJan Beulich 
67d41dee36SAndy Whitcroft #ifndef CONFIG_NEED_MULTIPLE_NODES
681da177e4SLinus Torvalds /* use the per-pgdat data instead for discontigmem - mbligh */
691da177e4SLinus Torvalds unsigned long max_mapnr;
701da177e4SLinus Torvalds struct page *mem_map;
711da177e4SLinus Torvalds 
721da177e4SLinus Torvalds EXPORT_SYMBOL(max_mapnr);
731da177e4SLinus Torvalds EXPORT_SYMBOL(mem_map);
741da177e4SLinus Torvalds #endif
751da177e4SLinus Torvalds 
761da177e4SLinus Torvalds unsigned long num_physpages;
771da177e4SLinus Torvalds /*
781da177e4SLinus Torvalds  * A number of key systems in x86 including ioremap() rely on the assumption
791da177e4SLinus Torvalds  * that high_memory defines the upper bound on direct map memory, the end
801da177e4SLinus Torvalds  * of ZONE_NORMAL.  Under CONFIG_DISCONTIGMEM this means that max_low_pfn and
811da177e4SLinus Torvalds  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
821da177e4SLinus Torvalds  * and ZONE_HIGHMEM.
831da177e4SLinus Torvalds  */
841da177e4SLinus Torvalds void * high_memory;
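/*
 * Illustrative sketch only (the value is set by arch code, not here); on an
 * x86-32 style setup where the direct map ends with ZONE_NORMAL it amounts
 * to roughly:
 *
 *	high_memory = __va(max_low_pfn << PAGE_SHIFT);
 *
 * i.e. the first byte past the directly mapped range.
 */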
851da177e4SLinus Torvalds 
861da177e4SLinus Torvalds EXPORT_SYMBOL(num_physpages);
871da177e4SLinus Torvalds EXPORT_SYMBOL(high_memory);
881da177e4SLinus Torvalds 
8932a93233SIngo Molnar /*
9032a93233SIngo Molnar  * Randomize the address space (stacks, mmaps, brk, etc.).
9132a93233SIngo Molnar  *
9232a93233SIngo Molnar  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
9332a93233SIngo Molnar  *   as ancient (libc5 based) binaries can segfault. )
9432a93233SIngo Molnar  */
9532a93233SIngo Molnar int randomize_va_space __read_mostly =
9632a93233SIngo Molnar #ifdef CONFIG_COMPAT_BRK
9732a93233SIngo Molnar 					1;
9832a93233SIngo Molnar #else
9932a93233SIngo Molnar 					2;
10032a93233SIngo Molnar #endif
101a62eaf15SAndi Kleen 
102a62eaf15SAndi Kleen static int __init disable_randmaps(char *s)
103a62eaf15SAndi Kleen {
104a62eaf15SAndi Kleen 	randomize_va_space = 0;
1059b41046cSOGAWA Hirofumi 	return 1;
106a62eaf15SAndi Kleen }
107a62eaf15SAndi Kleen __setup("norandmaps", disable_randmaps);
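/*
 * Usage sketch (illustrative, not defined in this file): booting with
 * "norandmaps" on the kernel command line forces randomize_va_space to 0,
 * and the same knob is exposed at runtime as the sysctl
 * /proc/sys/kernel/randomize_va_space (0, 1 or 2 as above).
 */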
108a62eaf15SAndi Kleen 
109a62eaf15SAndi Kleen 
1101da177e4SLinus Torvalds /*
1111da177e4SLinus Torvalds  * If a p?d_bad entry is found while walking page tables, report
1121da177e4SLinus Torvalds  * the error, before resetting entry to p?d_none.  Usually (but
1131da177e4SLinus Torvalds  * very seldom) called out from the p?d_none_or_clear_bad macros.
1141da177e4SLinus Torvalds  */
1151da177e4SLinus Torvalds 
1161da177e4SLinus Torvalds void pgd_clear_bad(pgd_t *pgd)
1171da177e4SLinus Torvalds {
1181da177e4SLinus Torvalds 	pgd_ERROR(*pgd);
1191da177e4SLinus Torvalds 	pgd_clear(pgd);
1201da177e4SLinus Torvalds }
1211da177e4SLinus Torvalds 
1221da177e4SLinus Torvalds void pud_clear_bad(pud_t *pud)
1231da177e4SLinus Torvalds {
1241da177e4SLinus Torvalds 	pud_ERROR(*pud);
1251da177e4SLinus Torvalds 	pud_clear(pud);
1261da177e4SLinus Torvalds }
1271da177e4SLinus Torvalds 
1281da177e4SLinus Torvalds void pmd_clear_bad(pmd_t *pmd)
1291da177e4SLinus Torvalds {
1301da177e4SLinus Torvalds 	pmd_ERROR(*pmd);
1311da177e4SLinus Torvalds 	pmd_clear(pmd);
1321da177e4SLinus Torvalds }
1331da177e4SLinus Torvalds 
1341da177e4SLinus Torvalds /*
1351da177e4SLinus Torvalds  * Note: this doesn't free the actual pages themselves. That
1361da177e4SLinus Torvalds  * has been handled earlier when unmapping all the memory regions.
1371da177e4SLinus Torvalds  */
138e0da382cSHugh Dickins static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
1391da177e4SLinus Torvalds {
1402f569afdSMartin Schwidefsky 	pgtable_t token = pmd_pgtable(*pmd);
1411da177e4SLinus Torvalds 	pmd_clear(pmd);
1422f569afdSMartin Schwidefsky 	pte_free_tlb(tlb, token);
1431da177e4SLinus Torvalds 	tlb->mm->nr_ptes--;
1441da177e4SLinus Torvalds }
1451da177e4SLinus Torvalds 
146e0da382cSHugh Dickins static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
147e0da382cSHugh Dickins 				unsigned long addr, unsigned long end,
148e0da382cSHugh Dickins 				unsigned long floor, unsigned long ceiling)
1491da177e4SLinus Torvalds {
1501da177e4SLinus Torvalds 	pmd_t *pmd;
1511da177e4SLinus Torvalds 	unsigned long next;
152e0da382cSHugh Dickins 	unsigned long start;
1531da177e4SLinus Torvalds 
154e0da382cSHugh Dickins 	start = addr;
1551da177e4SLinus Torvalds 	pmd = pmd_offset(pud, addr);
1561da177e4SLinus Torvalds 	do {
1571da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
1581da177e4SLinus Torvalds 		if (pmd_none_or_clear_bad(pmd))
1591da177e4SLinus Torvalds 			continue;
160e0da382cSHugh Dickins 		free_pte_range(tlb, pmd);
1611da177e4SLinus Torvalds 	} while (pmd++, addr = next, addr != end);
1621da177e4SLinus Torvalds 
163e0da382cSHugh Dickins 	start &= PUD_MASK;
164e0da382cSHugh Dickins 	if (start < floor)
165e0da382cSHugh Dickins 		return;
166e0da382cSHugh Dickins 	if (ceiling) {
167e0da382cSHugh Dickins 		ceiling &= PUD_MASK;
168e0da382cSHugh Dickins 		if (!ceiling)
169e0da382cSHugh Dickins 			return;
1701da177e4SLinus Torvalds 	}
171e0da382cSHugh Dickins 	if (end - 1 > ceiling - 1)
172e0da382cSHugh Dickins 		return;
173e0da382cSHugh Dickins 
174e0da382cSHugh Dickins 	pmd = pmd_offset(pud, start);
175e0da382cSHugh Dickins 	pud_clear(pud);
176e0da382cSHugh Dickins 	pmd_free_tlb(tlb, pmd);
1771da177e4SLinus Torvalds }
1781da177e4SLinus Torvalds 
179e0da382cSHugh Dickins static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
180e0da382cSHugh Dickins 				unsigned long addr, unsigned long end,
181e0da382cSHugh Dickins 				unsigned long floor, unsigned long ceiling)
1821da177e4SLinus Torvalds {
1831da177e4SLinus Torvalds 	pud_t *pud;
1841da177e4SLinus Torvalds 	unsigned long next;
185e0da382cSHugh Dickins 	unsigned long start;
1861da177e4SLinus Torvalds 
187e0da382cSHugh Dickins 	start = addr;
1881da177e4SLinus Torvalds 	pud = pud_offset(pgd, addr);
1891da177e4SLinus Torvalds 	do {
1901da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
1911da177e4SLinus Torvalds 		if (pud_none_or_clear_bad(pud))
1921da177e4SLinus Torvalds 			continue;
193e0da382cSHugh Dickins 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
1941da177e4SLinus Torvalds 	} while (pud++, addr = next, addr != end);
1951da177e4SLinus Torvalds 
196e0da382cSHugh Dickins 	start &= PGDIR_MASK;
197e0da382cSHugh Dickins 	if (start < floor)
198e0da382cSHugh Dickins 		return;
199e0da382cSHugh Dickins 	if (ceiling) {
200e0da382cSHugh Dickins 		ceiling &= PGDIR_MASK;
201e0da382cSHugh Dickins 		if (!ceiling)
202e0da382cSHugh Dickins 			return;
2031da177e4SLinus Torvalds 	}
204e0da382cSHugh Dickins 	if (end - 1 > ceiling - 1)
205e0da382cSHugh Dickins 		return;
206e0da382cSHugh Dickins 
207e0da382cSHugh Dickins 	pud = pud_offset(pgd, start);
208e0da382cSHugh Dickins 	pgd_clear(pgd);
209e0da382cSHugh Dickins 	pud_free_tlb(tlb, pud);
2101da177e4SLinus Torvalds }
2111da177e4SLinus Torvalds 
2121da177e4SLinus Torvalds /*
213e0da382cSHugh Dickins  * This function frees user-level page tables of a process.
214e0da382cSHugh Dickins  *
2151da177e4SLinus Torvalds  * Must be called with pagetable lock held.
2161da177e4SLinus Torvalds  */
21742b77728SJan Beulich void free_pgd_range(struct mmu_gather *tlb,
218e0da382cSHugh Dickins 			unsigned long addr, unsigned long end,
219e0da382cSHugh Dickins 			unsigned long floor, unsigned long ceiling)
2201da177e4SLinus Torvalds {
2211da177e4SLinus Torvalds 	pgd_t *pgd;
2221da177e4SLinus Torvalds 	unsigned long next;
223e0da382cSHugh Dickins 	unsigned long start;
2241da177e4SLinus Torvalds 
225e0da382cSHugh Dickins 	/*
226e0da382cSHugh Dickins 	 * The next few lines have given us lots of grief...
227e0da382cSHugh Dickins 	 *
228e0da382cSHugh Dickins 	 * Why are we testing PMD* at this top level?  Because often
229e0da382cSHugh Dickins 	 * there will be no work to do at all, and we'd prefer not to
230e0da382cSHugh Dickins 	 * go all the way down to the bottom just to discover that.
231e0da382cSHugh Dickins 	 *
232e0da382cSHugh Dickins 	 * Why all these "- 1"s?  Because 0 represents both the bottom
233e0da382cSHugh Dickins 	 * of the address space and the top of it (using -1 for the
234e0da382cSHugh Dickins 	 * top wouldn't help much: the masks would do the wrong thing).
235e0da382cSHugh Dickins 	 * The rule is that addr 0 and floor 0 refer to the bottom of
236e0da382cSHugh Dickins 	 * the address space, but end 0 and ceiling 0 refer to the top
237e0da382cSHugh Dickins 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
238e0da382cSHugh Dickins 	 * that end 0 case should be mythical).
239e0da382cSHugh Dickins 	 *
240e0da382cSHugh Dickins 	 * Wherever addr is brought up or ceiling brought down, we must
241e0da382cSHugh Dickins 	 * be careful to reject "the opposite 0" before it confuses the
242e0da382cSHugh Dickins 	 * subsequent tests.  But what about where end is brought down
243e0da382cSHugh Dickins 	 * by PMD_SIZE below? No, end can't go down to 0 there.
244e0da382cSHugh Dickins 	 *
245e0da382cSHugh Dickins 	 * Whereas we round start (addr) and ceiling down, by different
246e0da382cSHugh Dickins 	 * masks at different levels, in order to test whether a table
247e0da382cSHugh Dickins 	 * now has no other vmas using it, so can be freed, we don't
248e0da382cSHugh Dickins 	 * bother to round floor or end up - the tests don't need that.
249e0da382cSHugh Dickins 	 */
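	/*
	 * Worked example (a sketch assuming 4K pages and a 2M PMD_SIZE, as
	 * on x86-64): addr = 0x1234000, end = 0x1600000, floor = 0x1000000,
	 * ceiling = 0x2000000.
	 *
	 *	addr &= PMD_MASK	-> 0x1200000, still >= floor: keep going
	 *	ceiling &= PMD_MASK	-> 0x2000000, non-zero: keep going
	 *	end - 1 > ceiling - 1?	   no, so end stays 0x1600000
	 *	addr > end - 1?		   no, so the pgd walk below covers
	 *				   [0x1200000, 0x1600000)
	 */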
250e0da382cSHugh Dickins 
251e0da382cSHugh Dickins 	addr &= PMD_MASK;
252e0da382cSHugh Dickins 	if (addr < floor) {
253e0da382cSHugh Dickins 		addr += PMD_SIZE;
254e0da382cSHugh Dickins 		if (!addr)
255e0da382cSHugh Dickins 			return;
256e0da382cSHugh Dickins 	}
257e0da382cSHugh Dickins 	if (ceiling) {
258e0da382cSHugh Dickins 		ceiling &= PMD_MASK;
259e0da382cSHugh Dickins 		if (!ceiling)
260e0da382cSHugh Dickins 			return;
261e0da382cSHugh Dickins 	}
262e0da382cSHugh Dickins 	if (end - 1 > ceiling - 1)
263e0da382cSHugh Dickins 		end -= PMD_SIZE;
264e0da382cSHugh Dickins 	if (addr > end - 1)
265e0da382cSHugh Dickins 		return;
266e0da382cSHugh Dickins 
267e0da382cSHugh Dickins 	start = addr;
26842b77728SJan Beulich 	pgd = pgd_offset(tlb->mm, addr);
2691da177e4SLinus Torvalds 	do {
2701da177e4SLinus Torvalds 		next = pgd_addr_end(addr, end);
2711da177e4SLinus Torvalds 		if (pgd_none_or_clear_bad(pgd))
2721da177e4SLinus Torvalds 			continue;
27342b77728SJan Beulich 		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
2741da177e4SLinus Torvalds 	} while (pgd++, addr = next, addr != end);
275e0da382cSHugh Dickins }
276e0da382cSHugh Dickins 
27742b77728SJan Beulich void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
278e0da382cSHugh Dickins 		unsigned long floor, unsigned long ceiling)
279e0da382cSHugh Dickins {
280e0da382cSHugh Dickins 	while (vma) {
281e0da382cSHugh Dickins 		struct vm_area_struct *next = vma->vm_next;
282e0da382cSHugh Dickins 		unsigned long addr = vma->vm_start;
283e0da382cSHugh Dickins 
2848f4f8c16SHugh Dickins 		/*
2858f4f8c16SHugh Dickins 		 * Hide vma from rmap and vmtruncate before freeing pgtables
2868f4f8c16SHugh Dickins 		 */
2878f4f8c16SHugh Dickins 		anon_vma_unlink(vma);
2888f4f8c16SHugh Dickins 		unlink_file_vma(vma);
2898f4f8c16SHugh Dickins 
2909da61aefSDavid Gibson 		if (is_vm_hugetlb_page(vma)) {
2913bf5ee95SHugh Dickins 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
2923bf5ee95SHugh Dickins 				floor, next? next->vm_start: ceiling);
2933bf5ee95SHugh Dickins 		} else {
2943bf5ee95SHugh Dickins 			/*
2953bf5ee95SHugh Dickins 			 * Optimization: gather nearby vmas into one call down
2963bf5ee95SHugh Dickins 			 */
2973bf5ee95SHugh Dickins 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
2984866920bSDavid Gibson 			       && !is_vm_hugetlb_page(next)) {
299e0da382cSHugh Dickins 				vma = next;
300e0da382cSHugh Dickins 				next = vma->vm_next;
3018f4f8c16SHugh Dickins 				anon_vma_unlink(vma);
3028f4f8c16SHugh Dickins 				unlink_file_vma(vma);
303e0da382cSHugh Dickins 			}
3043bf5ee95SHugh Dickins 			free_pgd_range(tlb, addr, vma->vm_end,
305e0da382cSHugh Dickins 				floor, next? next->vm_start: ceiling);
3063bf5ee95SHugh Dickins 		}
307e0da382cSHugh Dickins 		vma = next;
308e0da382cSHugh Dickins 	}
3091da177e4SLinus Torvalds }
3101da177e4SLinus Torvalds 
3111bb3630eSHugh Dickins int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
3121da177e4SLinus Torvalds {
3132f569afdSMartin Schwidefsky 	pgtable_t new = pte_alloc_one(mm, address);
3141da177e4SLinus Torvalds 	if (!new)
3151bb3630eSHugh Dickins 		return -ENOMEM;
3161bb3630eSHugh Dickins 
317362a61adSNick Piggin 	/*
318362a61adSNick Piggin 	 * Ensure all pte setup (eg. pte page lock and page clearing) are
319362a61adSNick Piggin 	 * visible before the pte is made visible to other CPUs by being
320362a61adSNick Piggin 	 * put into page tables.
321362a61adSNick Piggin 	 *
322362a61adSNick Piggin 	 * The other side of the story is the pointer chasing in the page
323362a61adSNick Piggin 	 * table walking code (when walking the page table without locking;
324362a61adSNick Piggin 	 * ie. most of the time). Fortunately, these data accesses consist
325362a61adSNick Piggin 	 * of a chain of data-dependent loads, meaning most CPUs (alpha
326362a61adSNick Piggin 	 * being the notable exception) will already guarantee loads are
327362a61adSNick Piggin 	 * seen in-order. See the alpha page table accessors for the
328362a61adSNick Piggin 	 * smp_read_barrier_depends() barriers in page table walking code.
329362a61adSNick Piggin 	 */
330362a61adSNick Piggin 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
331362a61adSNick Piggin 
332c74df32cSHugh Dickins 	spin_lock(&mm->page_table_lock);
3332f569afdSMartin Schwidefsky 	if (!pmd_present(*pmd)) {	/* Has another populated it? */
3341da177e4SLinus Torvalds 		mm->nr_ptes++;
3351da177e4SLinus Torvalds 		pmd_populate(mm, pmd, new);
3362f569afdSMartin Schwidefsky 		new = NULL;
3371da177e4SLinus Torvalds 	}
338c74df32cSHugh Dickins 	spin_unlock(&mm->page_table_lock);
3392f569afdSMartin Schwidefsky 	if (new)
3402f569afdSMartin Schwidefsky 		pte_free(mm, new);
3411bb3630eSHugh Dickins 	return 0;
3421da177e4SLinus Torvalds }
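/*
 * Ordering sketch for the smp_wmb() in __pte_alloc() above (illustrative,
 * not executable as-is):
 *
 *	CPU 0 (__pte_alloc)		CPU 1 (lockless walker)
 *	initialize new pte page
 *	smp_wmb()
 *	pmd_populate(mm, pmd, new)
 *					if (!pmd_none(*pmd))
 *						pte = pte_offset_map(pmd, addr);
 *
 * CPU 1's second access depends on the first (pmd value -> pte page), so
 * every architecture except alpha orders the loads for free; alpha supplies
 * smp_read_barrier_depends() in its page table accessors instead.
 */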
3431da177e4SLinus Torvalds 
3441bb3630eSHugh Dickins int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
3451da177e4SLinus Torvalds {
3461bb3630eSHugh Dickins 	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
3471da177e4SLinus Torvalds 	if (!new)
3481bb3630eSHugh Dickins 		return -ENOMEM;
3491da177e4SLinus Torvalds 
350362a61adSNick Piggin 	smp_wmb(); /* See comment in __pte_alloc */
351362a61adSNick Piggin 
352872fec16SHugh Dickins 	spin_lock(&init_mm.page_table_lock);
3532f569afdSMartin Schwidefsky 	if (!pmd_present(*pmd)) {	/* Has another populated it? */
354872fec16SHugh Dickins 		pmd_populate_kernel(&init_mm, pmd, new);
3552f569afdSMartin Schwidefsky 		new = NULL;
3562f569afdSMartin Schwidefsky 	}
357872fec16SHugh Dickins 	spin_unlock(&init_mm.page_table_lock);
3582f569afdSMartin Schwidefsky 	if (new)
3592f569afdSMartin Schwidefsky 		pte_free_kernel(&init_mm, new);
3601bb3630eSHugh Dickins 	return 0;
3611da177e4SLinus Torvalds }
3621da177e4SLinus Torvalds 
363ae859762SHugh Dickins static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
364ae859762SHugh Dickins {
365ae859762SHugh Dickins 	if (file_rss)
366ae859762SHugh Dickins 		add_mm_counter(mm, file_rss, file_rss);
367ae859762SHugh Dickins 	if (anon_rss)
368ae859762SHugh Dickins 		add_mm_counter(mm, anon_rss, anon_rss);
369ae859762SHugh Dickins }
370ae859762SHugh Dickins 
3711da177e4SLinus Torvalds /*
3726aab341eSLinus Torvalds  * This function is called to print an error when a bad pte
3736aab341eSLinus Torvalds  * is found. For example, we might have a PFN-mapped pte in
3746aab341eSLinus Torvalds  * a region that doesn't allow it.
375b5810039SNick Piggin  *
376b5810039SNick Piggin  * The calling function must still handle the error.
377b5810039SNick Piggin  */
3783dc14741SHugh Dickins static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
3793dc14741SHugh Dickins 			  pte_t pte, struct page *page)
380b5810039SNick Piggin {
3813dc14741SHugh Dickins 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
3823dc14741SHugh Dickins 	pud_t *pud = pud_offset(pgd, addr);
3833dc14741SHugh Dickins 	pmd_t *pmd = pmd_offset(pud, addr);
3843dc14741SHugh Dickins 	struct address_space *mapping;
3853dc14741SHugh Dickins 	pgoff_t index;
386d936cf9bSHugh Dickins 	static unsigned long resume;
387d936cf9bSHugh Dickins 	static unsigned long nr_shown;
388d936cf9bSHugh Dickins 	static unsigned long nr_unshown;
389d936cf9bSHugh Dickins 
390d936cf9bSHugh Dickins 	/*
391d936cf9bSHugh Dickins 	 * Allow a burst of 60 reports, then keep quiet for that minute;
392d936cf9bSHugh Dickins 	 * or allow a steady drip of one report per second.
393d936cf9bSHugh Dickins 	 */
394d936cf9bSHugh Dickins 	if (nr_shown == 60) {
395d936cf9bSHugh Dickins 		if (time_before(jiffies, resume)) {
396d936cf9bSHugh Dickins 			nr_unshown++;
397d936cf9bSHugh Dickins 			return;
398d936cf9bSHugh Dickins 		}
399d936cf9bSHugh Dickins 		if (nr_unshown) {
4001e9e6365SHugh Dickins 			printk(KERN_ALERT
4011e9e6365SHugh Dickins 				"BUG: Bad page map: %lu messages suppressed\n",
402d936cf9bSHugh Dickins 				nr_unshown);
403d936cf9bSHugh Dickins 			nr_unshown = 0;
404d936cf9bSHugh Dickins 		}
405d936cf9bSHugh Dickins 		nr_shown = 0;
406d936cf9bSHugh Dickins 	}
407d936cf9bSHugh Dickins 	if (nr_shown++ == 0)
408d936cf9bSHugh Dickins 		resume = jiffies + 60 * HZ;
4093dc14741SHugh Dickins 
4103dc14741SHugh Dickins 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
4113dc14741SHugh Dickins 	index = linear_page_index(vma, addr);
4123dc14741SHugh Dickins 
4131e9e6365SHugh Dickins 	printk(KERN_ALERT
4141e9e6365SHugh Dickins 		"BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
4153dc14741SHugh Dickins 		current->comm,
4163dc14741SHugh Dickins 		(long long)pte_val(pte), (long long)pmd_val(*pmd));
4173dc14741SHugh Dickins 	if (page) {
4181e9e6365SHugh Dickins 		printk(KERN_ALERT
4193dc14741SHugh Dickins 		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
4203dc14741SHugh Dickins 		page, (void *)page->flags, page_count(page),
4213dc14741SHugh Dickins 		page_mapcount(page), page->mapping, page->index);
4223dc14741SHugh Dickins 	}
4231e9e6365SHugh Dickins 	printk(KERN_ALERT
4243dc14741SHugh Dickins 		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
4253dc14741SHugh Dickins 		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
4263dc14741SHugh Dickins 	/*
4273dc14741SHugh Dickins 	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
4283dc14741SHugh Dickins 	 */
4293dc14741SHugh Dickins 	if (vma->vm_ops)
4301e9e6365SHugh Dickins 		print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
4313dc14741SHugh Dickins 				(unsigned long)vma->vm_ops->fault);
4323dc14741SHugh Dickins 	if (vma->vm_file && vma->vm_file->f_op)
4331e9e6365SHugh Dickins 		print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
4343dc14741SHugh Dickins 				(unsigned long)vma->vm_file->f_op->mmap);
435b5810039SNick Piggin 	dump_stack();
4363dc14741SHugh Dickins 	add_taint(TAINT_BAD_PAGE);
437b5810039SNick Piggin }
438b5810039SNick Piggin 
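/*
 * A mapping counts as "COW" when it is private (no VM_SHARED) yet could
 * ever be written (VM_MAYWRITE): whether or not it is writable right now,
 * a write to it must be satisfied by a private copy rather than by the
 * underlying object.
 */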
43967121172SLinus Torvalds static inline int is_cow_mapping(unsigned int flags)
44067121172SLinus Torvalds {
44167121172SLinus Torvalds 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
44267121172SLinus Torvalds }
44367121172SLinus Torvalds 
444b5810039SNick Piggin /*
4457e675137SNick Piggin  * vm_normal_page -- This function gets the "struct page" associated with a pte.
4466aab341eSLinus Torvalds  *
4477e675137SNick Piggin  * "Special" mappings do not wish to be associated with a "struct page" (either
4487e675137SNick Piggin  * it doesn't exist, or it exists but they don't want to touch it). In this
4497e675137SNick Piggin  * case, NULL is returned here. "Normal" mappings do have a struct page.
450b379d790SJared Hulbert  *
4517e675137SNick Piggin  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
4527e675137SNick Piggin  * pte bit, in which case this function is trivial. Secondly, an architecture
4537e675137SNick Piggin  * may not have a spare pte bit, which requires a more complicated scheme,
4547e675137SNick Piggin  * described below.
4557e675137SNick Piggin  *
4567e675137SNick Piggin  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
4577e675137SNick Piggin  * special mapping (even if there are underlying and valid "struct pages").
4587e675137SNick Piggin  * COWed pages of a VM_PFNMAP are always normal.
4596aab341eSLinus Torvalds  *
460b379d790SJared Hulbert  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
461b379d790SJared Hulbert  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
4627e675137SNick Piggin  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
4637e675137SNick Piggin  * mapping will always honor the rule
4646aab341eSLinus Torvalds  *
4656aab341eSLinus Torvalds  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
4666aab341eSLinus Torvalds  *
4677e675137SNick Piggin  * And for normal mappings this is false.
468b379d790SJared Hulbert  *
4697e675137SNick Piggin  * This restricts such mappings to be a linear translation from virtual address
4707e675137SNick Piggin  * to pfn. To get around this restriction, we allow arbitrary mappings so long
4717e675137SNick Piggin  * as the vma is not a COW mapping; in that case, we know that all ptes are
4727e675137SNick Piggin  * special (because none can have been COWed).
473b379d790SJared Hulbert  *
474b379d790SJared Hulbert  *
4757e675137SNick Piggin  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
4767e675137SNick Piggin  *
477b379d790SJared Hulbert  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
478b379d790SJared Hulbert  * page" backing, however the difference is that _all_ pages with a struct
479b379d790SJared Hulbert  * page (that is, those where pfn_valid is true) are refcounted and considered
480b379d790SJared Hulbert  * normal pages by the VM. The disadvantage is that pages are refcounted
481b379d790SJared Hulbert  * (which can be slower and simply not an option for some PFNMAP users). The
482b379d790SJared Hulbert  * advantage is that we don't have to follow the strict linearity rule of
483b379d790SJared Hulbert  * PFNMAP mappings in order to support COWable mappings.
484b379d790SJared Hulbert  *
485ee498ed7SHugh Dickins  */
4867e675137SNick Piggin #ifdef __HAVE_ARCH_PTE_SPECIAL
4877e675137SNick Piggin # define HAVE_PTE_SPECIAL 1
4887e675137SNick Piggin #else
4897e675137SNick Piggin # define HAVE_PTE_SPECIAL 0
4907e675137SNick Piggin #endif
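/*
 * Worked example of the linearity rule above (numbers are illustrative):
 * a driver that did remap_pfn_range(vma, vma->vm_start, 0x80000, ...) gets
 * vm_pgoff == 0x80000, so for addr == vma->vm_start + 3 * PAGE_SIZE the
 * pte is expected to hold pfn 0x80003; a pte whose pfn differs from
 * vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) in such a vma is
 * either a COWed page (normal) or a bug.
 */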
4917e675137SNick Piggin struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
4927e675137SNick Piggin 				pte_t pte)
493ee498ed7SHugh Dickins {
49422b31eecSHugh Dickins 	unsigned long pfn = pte_pfn(pte);
4957e675137SNick Piggin 
4967e675137SNick Piggin 	if (HAVE_PTE_SPECIAL) {
49722b31eecSHugh Dickins 		if (likely(!pte_special(pte)))
49822b31eecSHugh Dickins 			goto check_pfn;
49922b31eecSHugh Dickins 		if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
50022b31eecSHugh Dickins 			print_bad_pte(vma, addr, pte, NULL);
5017e675137SNick Piggin 		return NULL;
5027e675137SNick Piggin 	}
5037e675137SNick Piggin 
5047e675137SNick Piggin 	/* !HAVE_PTE_SPECIAL case follows: */
5057e675137SNick Piggin 
506b379d790SJared Hulbert 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
507b379d790SJared Hulbert 		if (vma->vm_flags & VM_MIXEDMAP) {
508b379d790SJared Hulbert 			if (!pfn_valid(pfn))
509b379d790SJared Hulbert 				return NULL;
510b379d790SJared Hulbert 			goto out;
511b379d790SJared Hulbert 		} else {
5127e675137SNick Piggin 			unsigned long off;
5137e675137SNick Piggin 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
5146aab341eSLinus Torvalds 			if (pfn == vma->vm_pgoff + off)
5156aab341eSLinus Torvalds 				return NULL;
51667121172SLinus Torvalds 			if (!is_cow_mapping(vma->vm_flags))
517fb155c16SLinus Torvalds 				return NULL;
5186aab341eSLinus Torvalds 		}
519b379d790SJared Hulbert 	}
5206aab341eSLinus Torvalds 
52122b31eecSHugh Dickins check_pfn:
52222b31eecSHugh Dickins 	if (unlikely(pfn > highest_memmap_pfn)) {
52322b31eecSHugh Dickins 		print_bad_pte(vma, addr, pte, NULL);
52422b31eecSHugh Dickins 		return NULL;
52522b31eecSHugh Dickins 	}
5266aab341eSLinus Torvalds 
5276aab341eSLinus Torvalds 	/*
5287e675137SNick Piggin 	 * NOTE! We still have PageReserved() pages in the page tables.
5297e675137SNick Piggin 	 * eg. VDSO mappings can cause them to exist.
5306aab341eSLinus Torvalds 	 */
531b379d790SJared Hulbert out:
5326aab341eSLinus Torvalds 	return pfn_to_page(pfn);
533ee498ed7SHugh Dickins }
534ee498ed7SHugh Dickins 
535ee498ed7SHugh Dickins /*
5361da177e4SLinus Torvalds  * copy one vm_area from one task to the other. Assumes the page tables
5371da177e4SLinus Torvalds  * already present in the new task to be cleared in the whole range
5381da177e4SLinus Torvalds  * covered by this vma.
5391da177e4SLinus Torvalds  */
5401da177e4SLinus Torvalds 
5418c103762SHugh Dickins static inline void
5421da177e4SLinus Torvalds copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
543b5810039SNick Piggin 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
5448c103762SHugh Dickins 		unsigned long addr, int *rss)
5451da177e4SLinus Torvalds {
546b5810039SNick Piggin 	unsigned long vm_flags = vma->vm_flags;
5471da177e4SLinus Torvalds 	pte_t pte = *src_pte;
5481da177e4SLinus Torvalds 	struct page *page;
5491da177e4SLinus Torvalds 
5501da177e4SLinus Torvalds 	/* pte contains position in swap or file, so copy. */
5511da177e4SLinus Torvalds 	if (unlikely(!pte_present(pte))) {
5521da177e4SLinus Torvalds 		if (!pte_file(pte)) {
5530697212aSChristoph Lameter 			swp_entry_t entry = pte_to_swp_entry(pte);
5540697212aSChristoph Lameter 
5550697212aSChristoph Lameter 			swap_duplicate(entry);
5561da177e4SLinus Torvalds 			/* make sure dst_mm is on swapoff's mmlist. */
5571da177e4SLinus Torvalds 			if (unlikely(list_empty(&dst_mm->mmlist))) {
5581da177e4SLinus Torvalds 				spin_lock(&mmlist_lock);
559f412ac08SHugh Dickins 				if (list_empty(&dst_mm->mmlist))
560f412ac08SHugh Dickins 					list_add(&dst_mm->mmlist,
561f412ac08SHugh Dickins 						 &src_mm->mmlist);
5621da177e4SLinus Torvalds 				spin_unlock(&mmlist_lock);
5631da177e4SLinus Torvalds 			}
5640697212aSChristoph Lameter 			if (is_write_migration_entry(entry) &&
5650697212aSChristoph Lameter 					is_cow_mapping(vm_flags)) {
5660697212aSChristoph Lameter 				/*
5670697212aSChristoph Lameter 				 * COW mappings require pages in both parent
5680697212aSChristoph Lameter 				 * and child to be set to read.
5690697212aSChristoph Lameter 				 */
5700697212aSChristoph Lameter 				make_migration_entry_read(&entry);
5710697212aSChristoph Lameter 				pte = swp_entry_to_pte(entry);
5720697212aSChristoph Lameter 				set_pte_at(src_mm, addr, src_pte, pte);
5730697212aSChristoph Lameter 			}
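			/*
			 * Net effect (sketch): parent and child both hold a
			 * read migration entry, so once the migration
			 * finishes the pte is installed write-protected and
			 * the first writer takes an ordinary COW fault.
			 */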
5741da177e4SLinus Torvalds 		}
575ae859762SHugh Dickins 		goto out_set_pte;
5761da177e4SLinus Torvalds 	}
5771da177e4SLinus Torvalds 
5781da177e4SLinus Torvalds 	/*
5791da177e4SLinus Torvalds 	 * If it's a COW mapping, write protect it both
5801da177e4SLinus Torvalds 	 * in the parent and the child
5811da177e4SLinus Torvalds 	 */
58267121172SLinus Torvalds 	if (is_cow_mapping(vm_flags)) {
5831da177e4SLinus Torvalds 		ptep_set_wrprotect(src_mm, addr, src_pte);
5843dc90795SZachary Amsden 		pte = pte_wrprotect(pte);
5851da177e4SLinus Torvalds 	}
5861da177e4SLinus Torvalds 
5871da177e4SLinus Torvalds 	/*
5881da177e4SLinus Torvalds 	 * If it's a shared mapping, mark it clean in
5891da177e4SLinus Torvalds 	 * the child
5901da177e4SLinus Torvalds 	 */
5911da177e4SLinus Torvalds 	if (vm_flags & VM_SHARED)
5921da177e4SLinus Torvalds 		pte = pte_mkclean(pte);
5931da177e4SLinus Torvalds 	pte = pte_mkold(pte);
5946aab341eSLinus Torvalds 
5956aab341eSLinus Torvalds 	page = vm_normal_page(vma, addr, pte);
5966aab341eSLinus Torvalds 	if (page) {
5971da177e4SLinus Torvalds 		get_page(page);
598c97a9e10SNick Piggin 		page_dup_rmap(page, vma, addr);
5998c103762SHugh Dickins 		rss[!!PageAnon(page)]++;
6006aab341eSLinus Torvalds 	}
601ae859762SHugh Dickins 
602ae859762SHugh Dickins out_set_pte:
603ae859762SHugh Dickins 	set_pte_at(dst_mm, addr, dst_pte, pte);
6041da177e4SLinus Torvalds }
6051da177e4SLinus Torvalds 
6061da177e4SLinus Torvalds static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
6071da177e4SLinus Torvalds 		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
6081da177e4SLinus Torvalds 		unsigned long addr, unsigned long end)
6091da177e4SLinus Torvalds {
6101da177e4SLinus Torvalds 	pte_t *src_pte, *dst_pte;
611c74df32cSHugh Dickins 	spinlock_t *src_ptl, *dst_ptl;
612e040f218SHugh Dickins 	int progress = 0;
6138c103762SHugh Dickins 	int rss[2];
6141da177e4SLinus Torvalds 
6151da177e4SLinus Torvalds again:
616ae859762SHugh Dickins 	rss[1] = rss[0] = 0;
617c74df32cSHugh Dickins 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
6181da177e4SLinus Torvalds 	if (!dst_pte)
6191da177e4SLinus Torvalds 		return -ENOMEM;
6201da177e4SLinus Torvalds 	src_pte = pte_offset_map_nested(src_pmd, addr);
6214c21e2f2SHugh Dickins 	src_ptl = pte_lockptr(src_mm, src_pmd);
622f20dc5f7SIngo Molnar 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
6236606c3e0SZachary Amsden 	arch_enter_lazy_mmu_mode();
6241da177e4SLinus Torvalds 
6251da177e4SLinus Torvalds 	do {
6261da177e4SLinus Torvalds 		/*
6271da177e4SLinus Torvalds 		 * We are holding two locks at this point - either of them
6281da177e4SLinus Torvalds 		 * could generate latencies in another task on another CPU.
6291da177e4SLinus Torvalds 		 */
630e040f218SHugh Dickins 		if (progress >= 32) {
631e040f218SHugh Dickins 			progress = 0;
632e040f218SHugh Dickins 			if (need_resched() ||
63395c354feSNick Piggin 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
6341da177e4SLinus Torvalds 				break;
635e040f218SHugh Dickins 		}
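		/*
		 * Cost accounting for the check above: an empty pte adds 1,
		 * a copied pte adds 8 (below), so with the threshold of 32
		 * we look for a lock break roughly every 4 copied ptes or
		 * every 32 empty ones.
		 */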
6361da177e4SLinus Torvalds 		if (pte_none(*src_pte)) {
6371da177e4SLinus Torvalds 			progress++;
6381da177e4SLinus Torvalds 			continue;
6391da177e4SLinus Torvalds 		}
6408c103762SHugh Dickins 		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
6411da177e4SLinus Torvalds 		progress += 8;
6421da177e4SLinus Torvalds 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
6431da177e4SLinus Torvalds 
6446606c3e0SZachary Amsden 	arch_leave_lazy_mmu_mode();
645c74df32cSHugh Dickins 	spin_unlock(src_ptl);
6461da177e4SLinus Torvalds 	pte_unmap_nested(src_pte - 1);
647ae859762SHugh Dickins 	add_mm_rss(dst_mm, rss[0], rss[1]);
648c74df32cSHugh Dickins 	pte_unmap_unlock(dst_pte - 1, dst_ptl);
649c74df32cSHugh Dickins 	cond_resched();
6501da177e4SLinus Torvalds 	if (addr != end)
6511da177e4SLinus Torvalds 		goto again;
6521da177e4SLinus Torvalds 	return 0;
6531da177e4SLinus Torvalds }
6541da177e4SLinus Torvalds 
6551da177e4SLinus Torvalds static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
6561da177e4SLinus Torvalds 		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
6571da177e4SLinus Torvalds 		unsigned long addr, unsigned long end)
6581da177e4SLinus Torvalds {
6591da177e4SLinus Torvalds 	pmd_t *src_pmd, *dst_pmd;
6601da177e4SLinus Torvalds 	unsigned long next;
6611da177e4SLinus Torvalds 
6621da177e4SLinus Torvalds 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
6631da177e4SLinus Torvalds 	if (!dst_pmd)
6641da177e4SLinus Torvalds 		return -ENOMEM;
6651da177e4SLinus Torvalds 	src_pmd = pmd_offset(src_pud, addr);
6661da177e4SLinus Torvalds 	do {
6671da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
6681da177e4SLinus Torvalds 		if (pmd_none_or_clear_bad(src_pmd))
6691da177e4SLinus Torvalds 			continue;
6701da177e4SLinus Torvalds 		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
6711da177e4SLinus Torvalds 						vma, addr, next))
6721da177e4SLinus Torvalds 			return -ENOMEM;
6731da177e4SLinus Torvalds 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
6741da177e4SLinus Torvalds 	return 0;
6751da177e4SLinus Torvalds }
6761da177e4SLinus Torvalds 
6771da177e4SLinus Torvalds static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
6781da177e4SLinus Torvalds 		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
6791da177e4SLinus Torvalds 		unsigned long addr, unsigned long end)
6801da177e4SLinus Torvalds {
6811da177e4SLinus Torvalds 	pud_t *src_pud, *dst_pud;
6821da177e4SLinus Torvalds 	unsigned long next;
6831da177e4SLinus Torvalds 
6841da177e4SLinus Torvalds 	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
6851da177e4SLinus Torvalds 	if (!dst_pud)
6861da177e4SLinus Torvalds 		return -ENOMEM;
6871da177e4SLinus Torvalds 	src_pud = pud_offset(src_pgd, addr);
6881da177e4SLinus Torvalds 	do {
6891da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
6901da177e4SLinus Torvalds 		if (pud_none_or_clear_bad(src_pud))
6911da177e4SLinus Torvalds 			continue;
6921da177e4SLinus Torvalds 		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
6931da177e4SLinus Torvalds 						vma, addr, next))
6941da177e4SLinus Torvalds 			return -ENOMEM;
6951da177e4SLinus Torvalds 	} while (dst_pud++, src_pud++, addr = next, addr != end);
6961da177e4SLinus Torvalds 	return 0;
6971da177e4SLinus Torvalds }
6981da177e4SLinus Torvalds 
6991da177e4SLinus Torvalds int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
7001da177e4SLinus Torvalds 		struct vm_area_struct *vma)
7011da177e4SLinus Torvalds {
7021da177e4SLinus Torvalds 	pgd_t *src_pgd, *dst_pgd;
7031da177e4SLinus Torvalds 	unsigned long next;
7041da177e4SLinus Torvalds 	unsigned long addr = vma->vm_start;
7051da177e4SLinus Torvalds 	unsigned long end = vma->vm_end;
706cddb8a5cSAndrea Arcangeli 	int ret;
7071da177e4SLinus Torvalds 
708d992895bSNick Piggin 	/*
709d992895bSNick Piggin 	 * Don't copy ptes where a page fault will fill them correctly.
710d992895bSNick Piggin 	 * Fork becomes much lighter when there are big shared or private
711d992895bSNick Piggin 	 * readonly mappings. The tradeoff is that copy_page_range is more
712d992895bSNick Piggin 	 * efficient than faulting.
713d992895bSNick Piggin 	 */
7144d7672b4SLinus Torvalds 	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
715d992895bSNick Piggin 		if (!vma->anon_vma)
716d992895bSNick Piggin 			return 0;
717d992895bSNick Piggin 	}
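	/*
	 * Example of the shortcut above (illustrative): a vma created by
	 * mmap(PROT_READ, MAP_PRIVATE) over a file and never written to has
	 * no anon_vma, so fork() skips walking its ptes entirely and lets
	 * the child's page faults repopulate them on demand.
	 */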
718d992895bSNick Piggin 
7191da177e4SLinus Torvalds 	if (is_vm_hugetlb_page(vma))
7201da177e4SLinus Torvalds 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
7211da177e4SLinus Torvalds 
72234801ba9Svenkatesh.pallipadi@intel.com 	if (unlikely(is_pfn_mapping(vma))) {
7232ab64037Svenkatesh.pallipadi@intel.com 		/*
7242ab64037Svenkatesh.pallipadi@intel.com 		 * We do not free on error cases below as remove_vma
7252ab64037Svenkatesh.pallipadi@intel.com 		 * gets called on error from higher level routine
7262ab64037Svenkatesh.pallipadi@intel.com 		 */
7272ab64037Svenkatesh.pallipadi@intel.com 		ret = track_pfn_vma_copy(vma);
7282ab64037Svenkatesh.pallipadi@intel.com 		if (ret)
7292ab64037Svenkatesh.pallipadi@intel.com 			return ret;
7302ab64037Svenkatesh.pallipadi@intel.com 	}
7312ab64037Svenkatesh.pallipadi@intel.com 
732cddb8a5cSAndrea Arcangeli 	/*
733cddb8a5cSAndrea Arcangeli 	 * We need to invalidate the secondary MMU mappings only when
734cddb8a5cSAndrea Arcangeli 	 * there could be a permission downgrade on the ptes of the
735cddb8a5cSAndrea Arcangeli 	 * parent mm. And a permission downgrade will only happen if
736cddb8a5cSAndrea Arcangeli 	 * is_cow_mapping() returns true.
737cddb8a5cSAndrea Arcangeli 	 */
738cddb8a5cSAndrea Arcangeli 	if (is_cow_mapping(vma->vm_flags))
739cddb8a5cSAndrea Arcangeli 		mmu_notifier_invalidate_range_start(src_mm, addr, end);
740cddb8a5cSAndrea Arcangeli 
741cddb8a5cSAndrea Arcangeli 	ret = 0;
7421da177e4SLinus Torvalds 	dst_pgd = pgd_offset(dst_mm, addr);
7431da177e4SLinus Torvalds 	src_pgd = pgd_offset(src_mm, addr);
7441da177e4SLinus Torvalds 	do {
7451da177e4SLinus Torvalds 		next = pgd_addr_end(addr, end);
7461da177e4SLinus Torvalds 		if (pgd_none_or_clear_bad(src_pgd))
7471da177e4SLinus Torvalds 			continue;
748cddb8a5cSAndrea Arcangeli 		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
749cddb8a5cSAndrea Arcangeli 					    vma, addr, next))) {
750cddb8a5cSAndrea Arcangeli 			ret = -ENOMEM;
751cddb8a5cSAndrea Arcangeli 			break;
752cddb8a5cSAndrea Arcangeli 		}
7531da177e4SLinus Torvalds 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
754cddb8a5cSAndrea Arcangeli 
755cddb8a5cSAndrea Arcangeli 	if (is_cow_mapping(vma->vm_flags))
756cddb8a5cSAndrea Arcangeli 		mmu_notifier_invalidate_range_end(src_mm,
757cddb8a5cSAndrea Arcangeli 						  vma->vm_start, end);
758cddb8a5cSAndrea Arcangeli 	return ret;
7591da177e4SLinus Torvalds }
7601da177e4SLinus Torvalds 
76151c6f666SRobin Holt static unsigned long zap_pte_range(struct mmu_gather *tlb,
762b5810039SNick Piggin 				struct vm_area_struct *vma, pmd_t *pmd,
7631da177e4SLinus Torvalds 				unsigned long addr, unsigned long end,
76451c6f666SRobin Holt 				long *zap_work, struct zap_details *details)
7651da177e4SLinus Torvalds {
766b5810039SNick Piggin 	struct mm_struct *mm = tlb->mm;
7671da177e4SLinus Torvalds 	pte_t *pte;
768508034a3SHugh Dickins 	spinlock_t *ptl;
769ae859762SHugh Dickins 	int file_rss = 0;
770ae859762SHugh Dickins 	int anon_rss = 0;
7711da177e4SLinus Torvalds 
772508034a3SHugh Dickins 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
7736606c3e0SZachary Amsden 	arch_enter_lazy_mmu_mode();
7741da177e4SLinus Torvalds 	do {
7751da177e4SLinus Torvalds 		pte_t ptent = *pte;
77651c6f666SRobin Holt 		if (pte_none(ptent)) {
77751c6f666SRobin Holt 			(*zap_work)--;
7781da177e4SLinus Torvalds 			continue;
77951c6f666SRobin Holt 		}
78051c6f666SRobin Holt 
78151c6f666SRobin Holt 		(*zap_work) -= PAGE_SIZE;
78251c6f666SRobin Holt 
7836f5e6b9eSHugh Dickins 		if (pte_present(ptent)) {
7846f5e6b9eSHugh Dickins 			struct page *page;
7856f5e6b9eSHugh Dickins 
7866aab341eSLinus Torvalds 			page = vm_normal_page(vma, addr, ptent);
7871da177e4SLinus Torvalds 			if (unlikely(details) && page) {
7881da177e4SLinus Torvalds 				/*
7891da177e4SLinus Torvalds 				 * unmap_shared_mapping_pages() wants to
7901da177e4SLinus Torvalds 				 * invalidate cache without truncating:
7911da177e4SLinus Torvalds 				 * unmap shared but keep private pages.
7921da177e4SLinus Torvalds 				 */
7931da177e4SLinus Torvalds 				if (details->check_mapping &&
7941da177e4SLinus Torvalds 				    details->check_mapping != page->mapping)
7951da177e4SLinus Torvalds 					continue;
7961da177e4SLinus Torvalds 				/*
7971da177e4SLinus Torvalds 				 * Each page->index must be checked when
7981da177e4SLinus Torvalds 				 * invalidating or truncating nonlinear.
7991da177e4SLinus Torvalds 				 */
8001da177e4SLinus Torvalds 				if (details->nonlinear_vma &&
8011da177e4SLinus Torvalds 				    (page->index < details->first_index ||
8021da177e4SLinus Torvalds 				     page->index > details->last_index))
8031da177e4SLinus Torvalds 					continue;
8041da177e4SLinus Torvalds 			}
805b5810039SNick Piggin 			ptent = ptep_get_and_clear_full(mm, addr, pte,
806a600388dSZachary Amsden 							tlb->fullmm);
8071da177e4SLinus Torvalds 			tlb_remove_tlb_entry(tlb, pte, addr);
8081da177e4SLinus Torvalds 			if (unlikely(!page))
8091da177e4SLinus Torvalds 				continue;
8101da177e4SLinus Torvalds 			if (unlikely(details) && details->nonlinear_vma
8111da177e4SLinus Torvalds 			    && linear_page_index(details->nonlinear_vma,
8121da177e4SLinus Torvalds 						addr) != page->index)
813b5810039SNick Piggin 				set_pte_at(mm, addr, pte,
8141da177e4SLinus Torvalds 					   pgoff_to_pte(page->index));
8151da177e4SLinus Torvalds 			if (PageAnon(page))
81686d912f4SHugh Dickins 				anon_rss--;
8176237bcd9SHugh Dickins 			else {
8186237bcd9SHugh Dickins 				if (pte_dirty(ptent))
8196237bcd9SHugh Dickins 					set_page_dirty(page);
8204917e5d0SJohannes Weiner 				if (pte_young(ptent) &&
8214917e5d0SJohannes Weiner 				    likely(!VM_SequentialReadHint(vma)))
822bf3f3bc5SNick Piggin 					mark_page_accessed(page);
82386d912f4SHugh Dickins 				file_rss--;
8246237bcd9SHugh Dickins 			}
825edc315fdSHugh Dickins 			page_remove_rmap(page);
8263dc14741SHugh Dickins 			if (unlikely(page_mapcount(page) < 0))
8273dc14741SHugh Dickins 				print_bad_pte(vma, addr, ptent, page);
8281da177e4SLinus Torvalds 			tlb_remove_page(tlb, page);
8291da177e4SLinus Torvalds 			continue;
8301da177e4SLinus Torvalds 		}
8311da177e4SLinus Torvalds 		/*
8321da177e4SLinus Torvalds 		 * If details->check_mapping, we leave swap entries;
8331da177e4SLinus Torvalds 		 * if details->nonlinear_vma, we leave file entries.
8341da177e4SLinus Torvalds 		 */
8351da177e4SLinus Torvalds 		if (unlikely(details))
8361da177e4SLinus Torvalds 			continue;
8372509ef26SHugh Dickins 		if (pte_file(ptent)) {
8382509ef26SHugh Dickins 			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
8392509ef26SHugh Dickins 				print_bad_pte(vma, addr, ptent, NULL);
8402509ef26SHugh Dickins 		} else if
8412509ef26SHugh Dickins 		  (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
8422509ef26SHugh Dickins 			print_bad_pte(vma, addr, ptent, NULL);
8439888a1caSZachary Amsden 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
84451c6f666SRobin Holt 	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
845ae859762SHugh Dickins 
84686d912f4SHugh Dickins 	add_mm_rss(mm, file_rss, anon_rss);
8476606c3e0SZachary Amsden 	arch_leave_lazy_mmu_mode();
848508034a3SHugh Dickins 	pte_unmap_unlock(pte - 1, ptl);
84951c6f666SRobin Holt 
85051c6f666SRobin Holt 	return addr;
8511da177e4SLinus Torvalds }
8521da177e4SLinus Torvalds 
85351c6f666SRobin Holt static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
854b5810039SNick Piggin 				struct vm_area_struct *vma, pud_t *pud,
8551da177e4SLinus Torvalds 				unsigned long addr, unsigned long end,
85651c6f666SRobin Holt 				long *zap_work, struct zap_details *details)
8571da177e4SLinus Torvalds {
8581da177e4SLinus Torvalds 	pmd_t *pmd;
8591da177e4SLinus Torvalds 	unsigned long next;
8601da177e4SLinus Torvalds 
8611da177e4SLinus Torvalds 	pmd = pmd_offset(pud, addr);
8621da177e4SLinus Torvalds 	do {
8631da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
86451c6f666SRobin Holt 		if (pmd_none_or_clear_bad(pmd)) {
86551c6f666SRobin Holt 			(*zap_work)--;
8661da177e4SLinus Torvalds 			continue;
86751c6f666SRobin Holt 		}
86851c6f666SRobin Holt 		next = zap_pte_range(tlb, vma, pmd, addr, next,
86951c6f666SRobin Holt 						zap_work, details);
87051c6f666SRobin Holt 	} while (pmd++, addr = next, (addr != end && *zap_work > 0));
87151c6f666SRobin Holt 
87251c6f666SRobin Holt 	return addr;
8731da177e4SLinus Torvalds }
8741da177e4SLinus Torvalds 
87551c6f666SRobin Holt static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
876b5810039SNick Piggin 				struct vm_area_struct *vma, pgd_t *pgd,
8771da177e4SLinus Torvalds 				unsigned long addr, unsigned long end,
87851c6f666SRobin Holt 				long *zap_work, struct zap_details *details)
8791da177e4SLinus Torvalds {
8801da177e4SLinus Torvalds 	pud_t *pud;
8811da177e4SLinus Torvalds 	unsigned long next;
8821da177e4SLinus Torvalds 
8831da177e4SLinus Torvalds 	pud = pud_offset(pgd, addr);
8841da177e4SLinus Torvalds 	do {
8851da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
88651c6f666SRobin Holt 		if (pud_none_or_clear_bad(pud)) {
88751c6f666SRobin Holt 			(*zap_work)--;
8881da177e4SLinus Torvalds 			continue;
88951c6f666SRobin Holt 		}
89051c6f666SRobin Holt 		next = zap_pmd_range(tlb, vma, pud, addr, next,
89151c6f666SRobin Holt 						zap_work, details);
89251c6f666SRobin Holt 	} while (pud++, addr = next, (addr != end && *zap_work > 0));
89351c6f666SRobin Holt 
89451c6f666SRobin Holt 	return addr;
8951da177e4SLinus Torvalds }
8961da177e4SLinus Torvalds 
89751c6f666SRobin Holt static unsigned long unmap_page_range(struct mmu_gather *tlb,
89851c6f666SRobin Holt 				struct vm_area_struct *vma,
8991da177e4SLinus Torvalds 				unsigned long addr, unsigned long end,
90051c6f666SRobin Holt 				long *zap_work, struct zap_details *details)
9011da177e4SLinus Torvalds {
9021da177e4SLinus Torvalds 	pgd_t *pgd;
9031da177e4SLinus Torvalds 	unsigned long next;
9041da177e4SLinus Torvalds 
9051da177e4SLinus Torvalds 	if (details && !details->check_mapping && !details->nonlinear_vma)
9061da177e4SLinus Torvalds 		details = NULL;
9071da177e4SLinus Torvalds 
9081da177e4SLinus Torvalds 	BUG_ON(addr >= end);
9091da177e4SLinus Torvalds 	tlb_start_vma(tlb, vma);
9101da177e4SLinus Torvalds 	pgd = pgd_offset(vma->vm_mm, addr);
9111da177e4SLinus Torvalds 	do {
9121da177e4SLinus Torvalds 		next = pgd_addr_end(addr, end);
91351c6f666SRobin Holt 		if (pgd_none_or_clear_bad(pgd)) {
91451c6f666SRobin Holt 			(*zap_work)--;
9151da177e4SLinus Torvalds 			continue;
91651c6f666SRobin Holt 		}
91751c6f666SRobin Holt 		next = zap_pud_range(tlb, vma, pgd, addr, next,
91851c6f666SRobin Holt 						zap_work, details);
91951c6f666SRobin Holt 	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
9201da177e4SLinus Torvalds 	tlb_end_vma(tlb, vma);
92151c6f666SRobin Holt 
92251c6f666SRobin Holt 	return addr;
9231da177e4SLinus Torvalds }
9241da177e4SLinus Torvalds 
9251da177e4SLinus Torvalds #ifdef CONFIG_PREEMPT
9261da177e4SLinus Torvalds # define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
9271da177e4SLinus Torvalds #else
9281da177e4SLinus Torvalds /* No preempt: go for improved straight-line efficiency */
9291da177e4SLinus Torvalds # define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
9301da177e4SLinus Torvalds #endif
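/*
 * With 4K pages (an assumption for the arithmetic only) this comes to 32K
 * of address range per zap_work refill on preemptible kernels and 4M
 * otherwise, before unmap_vmas() below considers rescheduling.
 */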
9311da177e4SLinus Torvalds 
9321da177e4SLinus Torvalds /**
9331da177e4SLinus Torvalds  * unmap_vmas - unmap a range of memory covered by a list of vma's
9341da177e4SLinus Torvalds  * @tlbp: address of the caller's struct mmu_gather
9351da177e4SLinus Torvalds  * @vma: the starting vma
9361da177e4SLinus Torvalds  * @start_addr: virtual address at which to start unmapping
9371da177e4SLinus Torvalds  * @end_addr: virtual address at which to end unmapping
9381da177e4SLinus Torvalds  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
9391da177e4SLinus Torvalds  * @details: details of nonlinear truncation or shared cache invalidation
9401da177e4SLinus Torvalds  *
941ee39b37bSHugh Dickins  * Returns the end address of the unmapping (restart addr if interrupted).
9421da177e4SLinus Torvalds  *
943508034a3SHugh Dickins  * Unmap all pages in the vma list.
9441da177e4SLinus Torvalds  *
945508034a3SHugh Dickins  * We aim to not hold locks for too long (for scheduling latency reasons).
946508034a3SHugh Dickins  * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
9471da177e4SLinus Torvalds  * return the ending mmu_gather to the caller.
9481da177e4SLinus Torvalds  *
9491da177e4SLinus Torvalds  * Only addresses between `start' and `end' will be unmapped.
9501da177e4SLinus Torvalds  *
9511da177e4SLinus Torvalds  * The VMA list must be sorted in ascending virtual address order.
9521da177e4SLinus Torvalds  *
9531da177e4SLinus Torvalds  * unmap_vmas() assumes that the caller will flush the whole unmapped address
9541da177e4SLinus Torvalds  * range after unmap_vmas() returns.  So the only responsibility here is to
9551da177e4SLinus Torvalds  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
9561da177e4SLinus Torvalds  * drops the lock and schedules.
9571da177e4SLinus Torvalds  */
958508034a3SHugh Dickins unsigned long unmap_vmas(struct mmu_gather **tlbp,
9591da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long start_addr,
9601da177e4SLinus Torvalds 		unsigned long end_addr, unsigned long *nr_accounted,
9611da177e4SLinus Torvalds 		struct zap_details *details)
9621da177e4SLinus Torvalds {
96351c6f666SRobin Holt 	long zap_work = ZAP_BLOCK_SIZE;
9641da177e4SLinus Torvalds 	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
9651da177e4SLinus Torvalds 	int tlb_start_valid = 0;
966ee39b37bSHugh Dickins 	unsigned long start = start_addr;
9671da177e4SLinus Torvalds 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
9684d6ddfa9SHugh Dickins 	int fullmm = (*tlbp)->fullmm;
969cddb8a5cSAndrea Arcangeli 	struct mm_struct *mm = vma->vm_mm;
9701da177e4SLinus Torvalds 
971cddb8a5cSAndrea Arcangeli 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
9721da177e4SLinus Torvalds 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
9731da177e4SLinus Torvalds 		unsigned long end;
9741da177e4SLinus Torvalds 
9751da177e4SLinus Torvalds 		start = max(vma->vm_start, start_addr);
9761da177e4SLinus Torvalds 		if (start >= vma->vm_end)
9771da177e4SLinus Torvalds 			continue;
9781da177e4SLinus Torvalds 		end = min(vma->vm_end, end_addr);
9791da177e4SLinus Torvalds 		if (end <= vma->vm_start)
9801da177e4SLinus Torvalds 			continue;
9811da177e4SLinus Torvalds 
9821da177e4SLinus Torvalds 		if (vma->vm_flags & VM_ACCOUNT)
9831da177e4SLinus Torvalds 			*nr_accounted += (end - start) >> PAGE_SHIFT;
9841da177e4SLinus Torvalds 
98534801ba9Svenkatesh.pallipadi@intel.com 		if (unlikely(is_pfn_mapping(vma)))
9862ab64037Svenkatesh.pallipadi@intel.com 			untrack_pfn_vma(vma, 0, 0);
9872ab64037Svenkatesh.pallipadi@intel.com 
9881da177e4SLinus Torvalds 		while (start != end) {
9891da177e4SLinus Torvalds 			if (!tlb_start_valid) {
9901da177e4SLinus Torvalds 				tlb_start = start;
9911da177e4SLinus Torvalds 				tlb_start_valid = 1;
9921da177e4SLinus Torvalds 			}
9931da177e4SLinus Torvalds 
99451c6f666SRobin Holt 			if (unlikely(is_vm_hugetlb_page(vma))) {
995a137e1ccSAndi Kleen 				/*
996a137e1ccSAndi Kleen 				 * It is undesirable to test vma->vm_file as it
997a137e1ccSAndi Kleen 				 * should be non-null for a valid hugetlb area.
998a137e1ccSAndi Kleen 				 * However, vm_file will be NULL in the error
999a137e1ccSAndi Kleen 				 * cleanup path of do_mmap_pgoff. When the
1000a137e1ccSAndi Kleen 				 * hugetlbfs ->mmap method fails,
1001a137e1ccSAndi Kleen 				 * do_mmap_pgoff() nullifies vma->vm_file
1002a137e1ccSAndi Kleen 				 * before calling this function to clean up.
1003a137e1ccSAndi Kleen 				 * Since no pte has actually been set up, it is
1004a137e1ccSAndi Kleen 				 * safe to do nothing in this case.
1005a137e1ccSAndi Kleen 				 */
1006a137e1ccSAndi Kleen 				if (vma->vm_file) {
100704f2cbe3SMel Gorman 					unmap_hugepage_range(vma, start, end, NULL);
100851c6f666SRobin Holt 					zap_work -= (end - start) /
1009a5516438SAndi Kleen 					pages_per_huge_page(hstate_vma(vma));
1010a137e1ccSAndi Kleen 				}
1011a137e1ccSAndi Kleen 
101251c6f666SRobin Holt 				start = end;
101351c6f666SRobin Holt 			} else
101451c6f666SRobin Holt 				start = unmap_page_range(*tlbp, vma,
101551c6f666SRobin Holt 						start, end, &zap_work, details);
10161da177e4SLinus Torvalds 
101751c6f666SRobin Holt 			if (zap_work > 0) {
101851c6f666SRobin Holt 				BUG_ON(start != end);
101951c6f666SRobin Holt 				break;
102051c6f666SRobin Holt 			}
10211da177e4SLinus Torvalds 
10221da177e4SLinus Torvalds 			tlb_finish_mmu(*tlbp, tlb_start, start);
10231da177e4SLinus Torvalds 
10241da177e4SLinus Torvalds 			if (need_resched() ||
102595c354feSNick Piggin 				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
10261da177e4SLinus Torvalds 				if (i_mmap_lock) {
1027508034a3SHugh Dickins 					*tlbp = NULL;
10281da177e4SLinus Torvalds 					goto out;
10291da177e4SLinus Torvalds 				}
10301da177e4SLinus Torvalds 				cond_resched();
10311da177e4SLinus Torvalds 			}
10321da177e4SLinus Torvalds 
1033508034a3SHugh Dickins 			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
10341da177e4SLinus Torvalds 			tlb_start_valid = 0;
103551c6f666SRobin Holt 			zap_work = ZAP_BLOCK_SIZE;
10361da177e4SLinus Torvalds 		}
10371da177e4SLinus Torvalds 	}
10381da177e4SLinus Torvalds out:
1039cddb8a5cSAndrea Arcangeli 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1040ee39b37bSHugh Dickins 	return start;	/* which is now the end (or restart) address */
10411da177e4SLinus Torvalds }
10421da177e4SLinus Torvalds 
10431da177e4SLinus Torvalds /**
10441da177e4SLinus Torvalds  * zap_page_range - remove user pages in a given range
10451da177e4SLinus Torvalds  * @vma: vm_area_struct holding the applicable pages
10461da177e4SLinus Torvalds  * @address: starting address of pages to zap
10471da177e4SLinus Torvalds  * @size: number of bytes to zap
10481da177e4SLinus Torvalds  * @details: details of nonlinear truncation or shared cache invalidation
10491da177e4SLinus Torvalds  */
1050ee39b37bSHugh Dickins unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
10511da177e4SLinus Torvalds 		unsigned long size, struct zap_details *details)
10521da177e4SLinus Torvalds {
10531da177e4SLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
10541da177e4SLinus Torvalds 	struct mmu_gather *tlb;
10551da177e4SLinus Torvalds 	unsigned long end = address + size;
10561da177e4SLinus Torvalds 	unsigned long nr_accounted = 0;
10571da177e4SLinus Torvalds 
10581da177e4SLinus Torvalds 	lru_add_drain();
10591da177e4SLinus Torvalds 	tlb = tlb_gather_mmu(mm, 0);
1060365e9c87SHugh Dickins 	update_hiwater_rss(mm);
1061508034a3SHugh Dickins 	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
1062508034a3SHugh Dickins 	if (tlb)
10638f4f8c16SHugh Dickins 		tlb_finish_mmu(tlb, address, end);
1064ee39b37bSHugh Dickins 	return end;
10651da177e4SLinus Torvalds }
10661da177e4SLinus Torvalds 
1067c627f9ccSJack Steiner /**
1068c627f9ccSJack Steiner  * zap_vma_ptes - remove ptes mapping the vma
1069c627f9ccSJack Steiner  * @vma: vm_area_struct holding ptes to be zapped
1070c627f9ccSJack Steiner  * @address: starting address of pages to zap
1071c627f9ccSJack Steiner  * @size: number of bytes to zap
1072c627f9ccSJack Steiner  *
1073c627f9ccSJack Steiner  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1074c627f9ccSJack Steiner  *
1075c627f9ccSJack Steiner  * The entire address range must be fully contained within the vma.
1076c627f9ccSJack Steiner  *
1077c627f9ccSJack Steiner  * Returns 0 if successful.
1078c627f9ccSJack Steiner  */
1079c627f9ccSJack Steiner int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1080c627f9ccSJack Steiner 		unsigned long size)
1081c627f9ccSJack Steiner {
1082c627f9ccSJack Steiner 	if (address < vma->vm_start || address + size > vma->vm_end ||
1083c627f9ccSJack Steiner 	    		!(vma->vm_flags & VM_PFNMAP))
1084c627f9ccSJack Steiner 		return -1;
1085c627f9ccSJack Steiner 	zap_page_range(vma, address, size, NULL);
1086c627f9ccSJack Steiner 	return 0;
1087c627f9ccSJack Steiner }
1088c627f9ccSJack Steiner EXPORT_SYMBOL_GPL(zap_vma_ptes);
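
/*
 * Illustrative sketch, not part of this file: a hypothetical driver that
 * established a VM_PFNMAP mapping could use zap_vma_ptes() to revoke the
 * user's ptes when the backing resource goes away.  The name
 * my_dev_revoke_mapping() is invented for the example; the caller is
 * assumed to hold the mm semaphore and to pass a range fully inside the
 * vma, per the rules documented above.
 */
static void my_dev_revoke_mapping(struct vm_area_struct *vma)
{
	if (zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start))
		printk(KERN_WARNING "my_dev: vma is not a VM_PFNMAP mapping\n");
}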
1089c627f9ccSJack Steiner 
10901da177e4SLinus Torvalds /*
10911da177e4SLinus Torvalds  * Do a quick page-table lookup for a single page.
10921da177e4SLinus Torvalds  */
10936aab341eSLinus Torvalds struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1094deceb6cdSHugh Dickins 			unsigned int flags)
10951da177e4SLinus Torvalds {
10961da177e4SLinus Torvalds 	pgd_t *pgd;
10971da177e4SLinus Torvalds 	pud_t *pud;
10981da177e4SLinus Torvalds 	pmd_t *pmd;
10991da177e4SLinus Torvalds 	pte_t *ptep, pte;
1100deceb6cdSHugh Dickins 	spinlock_t *ptl;
11011da177e4SLinus Torvalds 	struct page *page;
11026aab341eSLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
11031da177e4SLinus Torvalds 
1104deceb6cdSHugh Dickins 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
1105deceb6cdSHugh Dickins 	if (!IS_ERR(page)) {
1106deceb6cdSHugh Dickins 		BUG_ON(flags & FOLL_GET);
1107deceb6cdSHugh Dickins 		goto out;
1108deceb6cdSHugh Dickins 	}
11091da177e4SLinus Torvalds 
1110deceb6cdSHugh Dickins 	page = NULL;
11111da177e4SLinus Torvalds 	pgd = pgd_offset(mm, address);
11121da177e4SLinus Torvalds 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1113deceb6cdSHugh Dickins 		goto no_page_table;
11141da177e4SLinus Torvalds 
11151da177e4SLinus Torvalds 	pud = pud_offset(pgd, address);
1116ceb86879SAndi Kleen 	if (pud_none(*pud))
1117ceb86879SAndi Kleen 		goto no_page_table;
1118ceb86879SAndi Kleen 	if (pud_huge(*pud)) {
1119ceb86879SAndi Kleen 		BUG_ON(flags & FOLL_GET);
1120ceb86879SAndi Kleen 		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
1121ceb86879SAndi Kleen 		goto out;
1122ceb86879SAndi Kleen 	}
1123ceb86879SAndi Kleen 	if (unlikely(pud_bad(*pud)))
1124deceb6cdSHugh Dickins 		goto no_page_table;
11251da177e4SLinus Torvalds 
11261da177e4SLinus Torvalds 	pmd = pmd_offset(pud, address);
1127aeed5fceSHugh Dickins 	if (pmd_none(*pmd))
1128deceb6cdSHugh Dickins 		goto no_page_table;
1129deceb6cdSHugh Dickins 	if (pmd_huge(*pmd)) {
1130deceb6cdSHugh Dickins 		BUG_ON(flags & FOLL_GET);
1131deceb6cdSHugh Dickins 		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
1132deceb6cdSHugh Dickins 		goto out;
1133deceb6cdSHugh Dickins 	}
1134aeed5fceSHugh Dickins 	if (unlikely(pmd_bad(*pmd)))
1135aeed5fceSHugh Dickins 		goto no_page_table;
1136aeed5fceSHugh Dickins 
1137deceb6cdSHugh Dickins 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
11381da177e4SLinus Torvalds 
11391da177e4SLinus Torvalds 	pte = *ptep;
1140deceb6cdSHugh Dickins 	if (!pte_present(pte))
114189f5b7daSLinus Torvalds 		goto no_page;
1142deceb6cdSHugh Dickins 	if ((flags & FOLL_WRITE) && !pte_write(pte))
1143deceb6cdSHugh Dickins 		goto unlock;
11446aab341eSLinus Torvalds 	page = vm_normal_page(vma, address, pte);
11456aab341eSLinus Torvalds 	if (unlikely(!page))
114689f5b7daSLinus Torvalds 		goto bad_page;
1147deceb6cdSHugh Dickins 
1148deceb6cdSHugh Dickins 	if (flags & FOLL_GET)
1149deceb6cdSHugh Dickins 		get_page(page);
1150deceb6cdSHugh Dickins 	if (flags & FOLL_TOUCH) {
1151deceb6cdSHugh Dickins 		if ((flags & FOLL_WRITE) &&
1152deceb6cdSHugh Dickins 		    !pte_dirty(pte) && !PageDirty(page))
1153f33ea7f4SNick Piggin 			set_page_dirty(page);
1154bd775c42SKOSAKI Motohiro 		/*
1155bd775c42SKOSAKI Motohiro 		 * pte_mkyoung() would be more correct here, but atomic care
1156bd775c42SKOSAKI Motohiro 		 * is needed to avoid losing the dirty bit: it is easier to use
1157bd775c42SKOSAKI Motohiro 		 * mark_page_accessed().
1158bd775c42SKOSAKI Motohiro 		 */
11591da177e4SLinus Torvalds 		mark_page_accessed(page);
11601da177e4SLinus Torvalds 	}
1161deceb6cdSHugh Dickins unlock:
1162deceb6cdSHugh Dickins 	pte_unmap_unlock(ptep, ptl);
11631da177e4SLinus Torvalds out:
1164deceb6cdSHugh Dickins 	return page;
1165deceb6cdSHugh Dickins 
116689f5b7daSLinus Torvalds bad_page:
116789f5b7daSLinus Torvalds 	pte_unmap_unlock(ptep, ptl);
116889f5b7daSLinus Torvalds 	return ERR_PTR(-EFAULT);
116989f5b7daSLinus Torvalds 
117089f5b7daSLinus Torvalds no_page:
117189f5b7daSLinus Torvalds 	pte_unmap_unlock(ptep, ptl);
117289f5b7daSLinus Torvalds 	if (!pte_none(pte))
117389f5b7daSLinus Torvalds 		return page;
117489f5b7daSLinus Torvalds 	/* Fall through to ZERO_PAGE handling */
1175deceb6cdSHugh Dickins no_page_table:
1176deceb6cdSHugh Dickins 	/*
1177deceb6cdSHugh Dickins 	 * When core dumping an enormous anonymous area that nobody
1178deceb6cdSHugh Dickins 	 * has touched so far, we don't want to allocate page tables.
1179deceb6cdSHugh Dickins 	 */
1180deceb6cdSHugh Dickins 	if (flags & FOLL_ANON) {
1181557ed1faSNick Piggin 		page = ZERO_PAGE(0);
1182deceb6cdSHugh Dickins 		if (flags & FOLL_GET)
1183deceb6cdSHugh Dickins 			get_page(page);
1184deceb6cdSHugh Dickins 		BUG_ON(flags & FOLL_WRITE);
11851da177e4SLinus Torvalds 	}
1186deceb6cdSHugh Dickins 	return page;
11871da177e4SLinus Torvalds }
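
/*
 * Illustrative sketch, not part of this file: follow_page() is an internal
 * helper and must be called with mmap_sem held.  A caller that needs a
 * short-lived reference to the page behind a single user address might use
 * it as below; touch_one_user_page() is an invented name for the example.
 */
static int touch_one_user_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	/* FOLL_GET takes a reference, FOLL_TOUCH marks the page accessed */
	page = follow_page(vma, addr, FOLL_GET | FOLL_TOUCH);
	if (!page || IS_ERR(page))
		return -EFAULT;	/* not present or a bad pte: fault it in first */
	/* ... inspect the page here ... */
	put_page(page);		/* drop the reference that FOLL_GET took */
	return 0;
}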
11881da177e4SLinus Torvalds 
1189672ca28eSLinus Torvalds /* Can we do the FOLL_ANON optimization? */
1190672ca28eSLinus Torvalds static inline int use_zero_page(struct vm_area_struct *vma)
1191672ca28eSLinus Torvalds {
1192672ca28eSLinus Torvalds 	/*
1193672ca28eSLinus Torvalds 	 * We don't want to optimize FOLL_ANON for make_pages_present()
1194672ca28eSLinus Torvalds 	 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
1195672ca28eSLinus Torvalds 	 * we want to get the page from the page tables to make sure
1196672ca28eSLinus Torvalds 	 * that we serialize and update with any other user of that
1197672ca28eSLinus Torvalds 	 * mapping.
1198672ca28eSLinus Torvalds 	 */
1199672ca28eSLinus Torvalds 	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
1200672ca28eSLinus Torvalds 		return 0;
1201672ca28eSLinus Torvalds 	/*
12020d71d10aSNick Piggin 	 * And if we have a fault routine, it's not an anonymous region.
1203672ca28eSLinus Torvalds 	 */
12040d71d10aSNick Piggin 	return !vma->vm_ops || !vma->vm_ops->fault;
1205672ca28eSLinus Torvalds }
1206672ca28eSLinus Torvalds 
1207b291f000SNick Piggin 
1208b291f000SNick Piggin 
1209b291f000SNick Piggin int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1210b291f000SNick Piggin 		     unsigned long start, int len, int flags,
12111da177e4SLinus Torvalds 		struct page **pages, struct vm_area_struct **vmas)
12121da177e4SLinus Torvalds {
12131da177e4SLinus Torvalds 	int i;
1214b291f000SNick Piggin 	unsigned int vm_flags = 0;
1215b291f000SNick Piggin 	int write = !!(flags & GUP_FLAGS_WRITE);
1216b291f000SNick Piggin 	int force = !!(flags & GUP_FLAGS_FORCE);
1217b291f000SNick Piggin 	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
12184779280dSYing Han 	int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
12191da177e4SLinus Torvalds 
1220900cf086SJonathan Corbet 	if (len <= 0)
1221900cf086SJonathan Corbet 		return 0;
12221da177e4SLinus Torvalds 	/*
12231da177e4SLinus Torvalds 	 * Require read or write permissions.
12241da177e4SLinus Torvalds 	 * If 'force' is set, we only require the "MAY" flags.
12251da177e4SLinus Torvalds 	 */
1226deceb6cdSHugh Dickins 	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1227deceb6cdSHugh Dickins 	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
12281da177e4SLinus Torvalds 	i = 0;
12291da177e4SLinus Torvalds 
12301da177e4SLinus Torvalds 	do {
12311da177e4SLinus Torvalds 		struct vm_area_struct *vma;
1232deceb6cdSHugh Dickins 		unsigned int foll_flags;
12331da177e4SLinus Torvalds 
12341da177e4SLinus Torvalds 		vma = find_extend_vma(mm, start);
12351da177e4SLinus Torvalds 		if (!vma && in_gate_area(tsk, start)) {
12361da177e4SLinus Torvalds 			unsigned long pg = start & PAGE_MASK;
12371da177e4SLinus Torvalds 			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
12381da177e4SLinus Torvalds 			pgd_t *pgd;
12391da177e4SLinus Torvalds 			pud_t *pud;
12401da177e4SLinus Torvalds 			pmd_t *pmd;
12411da177e4SLinus Torvalds 			pte_t *pte;
1242b291f000SNick Piggin 
1243b291f000SNick Piggin 			/* user gate pages are read-only */
1244b291f000SNick Piggin 			if (!ignore && write)
12451da177e4SLinus Torvalds 				return i ? : -EFAULT;
12461da177e4SLinus Torvalds 			if (pg > TASK_SIZE)
12471da177e4SLinus Torvalds 				pgd = pgd_offset_k(pg);
12481da177e4SLinus Torvalds 			else
12491da177e4SLinus Torvalds 				pgd = pgd_offset_gate(mm, pg);
12501da177e4SLinus Torvalds 			BUG_ON(pgd_none(*pgd));
12511da177e4SLinus Torvalds 			pud = pud_offset(pgd, pg);
12521da177e4SLinus Torvalds 			BUG_ON(pud_none(*pud));
12531da177e4SLinus Torvalds 			pmd = pmd_offset(pud, pg);
1254690dbe1cSHugh Dickins 			if (pmd_none(*pmd))
1255690dbe1cSHugh Dickins 				return i ? : -EFAULT;
12561da177e4SLinus Torvalds 			pte = pte_offset_map(pmd, pg);
1257690dbe1cSHugh Dickins 			if (pte_none(*pte)) {
1258690dbe1cSHugh Dickins 				pte_unmap(pte);
1259690dbe1cSHugh Dickins 				return i ? : -EFAULT;
1260690dbe1cSHugh Dickins 			}
12611da177e4SLinus Torvalds 			if (pages) {
1262fa2a455bSNick Piggin 				struct page *page = vm_normal_page(gate_vma, start, *pte);
12636aab341eSLinus Torvalds 				pages[i] = page;
12646aab341eSLinus Torvalds 				if (page)
12656aab341eSLinus Torvalds 					get_page(page);
12661da177e4SLinus Torvalds 			}
12671da177e4SLinus Torvalds 			pte_unmap(pte);
12681da177e4SLinus Torvalds 			if (vmas)
12691da177e4SLinus Torvalds 				vmas[i] = gate_vma;
12701da177e4SLinus Torvalds 			i++;
12711da177e4SLinus Torvalds 			start += PAGE_SIZE;
12721da177e4SLinus Torvalds 			len--;
12731da177e4SLinus Torvalds 			continue;
12741da177e4SLinus Torvalds 		}
12751da177e4SLinus Torvalds 
1276b291f000SNick Piggin 		if (!vma ||
1277b291f000SNick Piggin 		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1278b291f000SNick Piggin 		    (!ignore && !(vm_flags & vma->vm_flags)))
12791da177e4SLinus Torvalds 			return i ? : -EFAULT;
12801da177e4SLinus Torvalds 
12811da177e4SLinus Torvalds 		if (is_vm_hugetlb_page(vma)) {
12821da177e4SLinus Torvalds 			i = follow_hugetlb_page(mm, vma, pages, vmas,
12835b23dbe8SAdam Litke 						&start, &len, i, write);
12841da177e4SLinus Torvalds 			continue;
12851da177e4SLinus Torvalds 		}
1286deceb6cdSHugh Dickins 
1287deceb6cdSHugh Dickins 		foll_flags = FOLL_TOUCH;
1288deceb6cdSHugh Dickins 		if (pages)
1289deceb6cdSHugh Dickins 			foll_flags |= FOLL_GET;
1290672ca28eSLinus Torvalds 		if (!write && use_zero_page(vma))
1291deceb6cdSHugh Dickins 			foll_flags |= FOLL_ANON;
1292deceb6cdSHugh Dickins 
12931da177e4SLinus Torvalds 		do {
129408ef4729SHugh Dickins 			struct page *page;
12951da177e4SLinus Torvalds 
1296462e00ccSEthan Solomita 			/*
12974779280dSYing Han 			 * If we have a pending SIGKILL, don't keep faulting
12984779280dSYing Han 			 * pages and potentially allocating memory, unless
12994779280dSYing Han 			 * current is handling munlock--e.g., on exit. In
13004779280dSYing Han 			 * that case, we are not allocating memory.  Rather,
13014779280dSYing Han 			 * we're only unlocking already resident/mapped pages.
1302462e00ccSEthan Solomita 			 */
13034779280dSYing Han 			if (unlikely(!ignore_sigkill &&
13044779280dSYing Han 					fatal_signal_pending(current)))
13054779280dSYing Han 				return i ? i : -ERESTARTSYS;
1306462e00ccSEthan Solomita 
1307deceb6cdSHugh Dickins 			if (write)
1308deceb6cdSHugh Dickins 				foll_flags |= FOLL_WRITE;
1309deceb6cdSHugh Dickins 
1310deceb6cdSHugh Dickins 			cond_resched();
13116aab341eSLinus Torvalds 			while (!(page = follow_page(vma, start, foll_flags))) {
1312a68d2ebcSLinus Torvalds 				int ret;
131383c54070SNick Piggin 				ret = handle_mm_fault(mm, vma, start,
1314deceb6cdSHugh Dickins 						foll_flags & FOLL_WRITE);
131583c54070SNick Piggin 				if (ret & VM_FAULT_ERROR) {
131683c54070SNick Piggin 					if (ret & VM_FAULT_OOM)
131783c54070SNick Piggin 						return i ? i : -ENOMEM;
131883c54070SNick Piggin 					else if (ret & VM_FAULT_SIGBUS)
131983c54070SNick Piggin 						return i ? i : -EFAULT;
132083c54070SNick Piggin 					BUG();
132183c54070SNick Piggin 				}
132283c54070SNick Piggin 				if (ret & VM_FAULT_MAJOR)
132383c54070SNick Piggin 					tsk->maj_flt++;
132483c54070SNick Piggin 				else
132583c54070SNick Piggin 					tsk->min_flt++;
132683c54070SNick Piggin 
1327f33ea7f4SNick Piggin 				/*
132883c54070SNick Piggin 				 * The VM_FAULT_WRITE bit tells us that
132983c54070SNick Piggin 				 * do_wp_page has broken COW when necessary,
133083c54070SNick Piggin 				 * even if maybe_mkwrite decided not to set
133183c54070SNick Piggin 				 * pte_write. We can thus safely do subsequent
1332878b63acSHugh Dickins 				 * page lookups as if they were reads. But only
1333878b63acSHugh Dickins 				 * do so when looping for pte_write is futile:
1334878b63acSHugh Dickins 				 * in some cases userspace may also be wanting
1335878b63acSHugh Dickins 				 * to write to the gotten user page, which a
1336878b63acSHugh Dickins 				 * read fault here might prevent (a readonly
1337878b63acSHugh Dickins 				 * page might get reCOWed by userspace write).
1338f33ea7f4SNick Piggin 				 */
1339878b63acSHugh Dickins 				if ((ret & VM_FAULT_WRITE) &&
1340878b63acSHugh Dickins 				    !(vma->vm_flags & VM_WRITE))
1341deceb6cdSHugh Dickins 					foll_flags &= ~FOLL_WRITE;
1342a68d2ebcSLinus Torvalds 
13437f7bbbe5SBenjamin Herrenschmidt 				cond_resched();
13441da177e4SLinus Torvalds 			}
134589f5b7daSLinus Torvalds 			if (IS_ERR(page))
134689f5b7daSLinus Torvalds 				return i ? i : PTR_ERR(page);
13471da177e4SLinus Torvalds 			if (pages) {
134808ef4729SHugh Dickins 				pages[i] = page;
134903beb076SJames Bottomley 
1350a6f36be3SRussell King 				flush_anon_page(vma, page, start);
135108ef4729SHugh Dickins 				flush_dcache_page(page);
13521da177e4SLinus Torvalds 			}
13531da177e4SLinus Torvalds 			if (vmas)
13541da177e4SLinus Torvalds 				vmas[i] = vma;
13551da177e4SLinus Torvalds 			i++;
13561da177e4SLinus Torvalds 			start += PAGE_SIZE;
13571da177e4SLinus Torvalds 			len--;
13581da177e4SLinus Torvalds 		} while (len && start < vma->vm_end);
13591da177e4SLinus Torvalds 	} while (len);
13601da177e4SLinus Torvalds 	return i;
13611da177e4SLinus Torvalds }
1362b291f000SNick Piggin 
1363d2bf6be8SNick Piggin /**
1364d2bf6be8SNick Piggin  * get_user_pages() - pin user pages in memory
1365d2bf6be8SNick Piggin  * @tsk:	task_struct of target task
1366d2bf6be8SNick Piggin  * @mm:		mm_struct of target mm
1367d2bf6be8SNick Piggin  * @start:	starting user address
1368d2bf6be8SNick Piggin  * @len:	number of pages from start to pin
1369d2bf6be8SNick Piggin  * @write:	whether pages will be written to by the caller
1370d2bf6be8SNick Piggin  * @force:	whether to force write access even if user mapping is
1371d2bf6be8SNick Piggin  *		readonly. This will result in the page being COWed even
1372d2bf6be8SNick Piggin  *		in MAP_SHARED mappings. You do not want this.
1373d2bf6be8SNick Piggin  * @pages:	array that receives pointers to the pages pinned.
1374d2bf6be8SNick Piggin  *		Should be at least nr_pages long. Or NULL, if caller
1375d2bf6be8SNick Piggin  *		only intends to ensure the pages are faulted in.
1376d2bf6be8SNick Piggin  * @vmas:	array of pointers to vmas corresponding to each page.
1377d2bf6be8SNick Piggin  *		Or NULL if the caller does not require them.
1378d2bf6be8SNick Piggin  *
1379d2bf6be8SNick Piggin  * Returns number of pages pinned. This may be fewer than the number
1380d2bf6be8SNick Piggin  * requested. If len is 0 or negative, returns 0. If no pages
1381d2bf6be8SNick Piggin  * were pinned, returns -errno. Each page returned must be released
1382d2bf6be8SNick Piggin  * with a put_page() call when it is finished with. vmas will only
1383d2bf6be8SNick Piggin  * remain valid while mmap_sem is held.
1384d2bf6be8SNick Piggin  *
1385d2bf6be8SNick Piggin  * Must be called with mmap_sem held for read or write.
1386d2bf6be8SNick Piggin  *
1387d2bf6be8SNick Piggin  * get_user_pages walks a process's page tables and takes a reference to
1388d2bf6be8SNick Piggin  * each struct page that each user address corresponds to at a given
1389d2bf6be8SNick Piggin  * instant. That is, it takes the page that would be accessed if a user
1390d2bf6be8SNick Piggin  * thread accesses the given user virtual address at that instant.
1391d2bf6be8SNick Piggin  *
1392d2bf6be8SNick Piggin  * This does not guarantee that the page exists in the user mappings when
1393d2bf6be8SNick Piggin  * get_user_pages returns, and there may even be a completely different
1394d2bf6be8SNick Piggin  * page there in some cases (eg. if mmapped pagecache has been invalidated
1395d2bf6be8SNick Piggin  * and subsequently re-faulted). However, it does guarantee that the page
1396d2bf6be8SNick Piggin  * won't be freed completely. And mostly callers simply care that the page
1397d2bf6be8SNick Piggin  * contains data that was valid *at some point in time*. Typically, an IO
1398d2bf6be8SNick Piggin  * or similar operation cannot guarantee anything stronger anyway because
1399d2bf6be8SNick Piggin  * locks can't be held over the syscall boundary.
1400d2bf6be8SNick Piggin  *
1401d2bf6be8SNick Piggin  * If write=0, the page must not be written to. If the page is written to,
1402d2bf6be8SNick Piggin  * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
1403d2bf6be8SNick Piggin  * after the page is finished with, and before put_page is called.
1404d2bf6be8SNick Piggin  *
1405d2bf6be8SNick Piggin  * get_user_pages is typically used for fewer-copy IO operations, to get a
1406d2bf6be8SNick Piggin  * handle on the memory by some means other than accesses via the user virtual
1407d2bf6be8SNick Piggin  * addresses. The pages may be submitted for DMA to devices or accessed via
1408d2bf6be8SNick Piggin  * their kernel linear mapping (via the kmap APIs). Care should be taken to
1409d2bf6be8SNick Piggin  * use the correct cache flushing APIs.
1410d2bf6be8SNick Piggin  *
1411d2bf6be8SNick Piggin  * See also get_user_pages_fast, for performance critical applications.
1412d2bf6be8SNick Piggin  */
1413b291f000SNick Piggin int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1414b291f000SNick Piggin 		unsigned long start, int len, int write, int force,
1415b291f000SNick Piggin 		struct page **pages, struct vm_area_struct **vmas)
1416b291f000SNick Piggin {
1417b291f000SNick Piggin 	int flags = 0;
1418b291f000SNick Piggin 
1419b291f000SNick Piggin 	if (write)
1420b291f000SNick Piggin 		flags |= GUP_FLAGS_WRITE;
1421b291f000SNick Piggin 	if (force)
1422b291f000SNick Piggin 		flags |= GUP_FLAGS_FORCE;
1423b291f000SNick Piggin 
1424b291f000SNick Piggin 	return __get_user_pages(tsk, mm,
1425b291f000SNick Piggin 				start, len, flags,
1426b291f000SNick Piggin 				pages, vmas);
1427b291f000SNick Piggin }
1428b291f000SNick Piggin 
14291da177e4SLinus Torvalds EXPORT_SYMBOL(get_user_pages);
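
/*
 * Illustrative sketch, not part of this file: a typical get_user_pages()
 * caller following the rules documented above.  pin_user_buffer() is an
 * invented name; a real caller would size the pages[] array from the user
 * buffer it was handed.
 */
static int pin_user_buffer(unsigned long uaddr, int nr_pages, int write,
			   struct page **pages)
{
	int i, got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, uaddr, nr_pages,
			     write, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... do the IO against pages[0..got-1] here ... */

	for (i = 0; i < got; i++) {
		if (write)
			set_page_dirty_lock(pages[i]);	/* we wrote into it */
		put_page(pages[i]);			/* release the pin */
	}
	return got;
}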
14301da177e4SLinus Torvalds 
1431920c7a5dSHarvey Harrison pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1432920c7a5dSHarvey Harrison 			spinlock_t **ptl)
1433c9cfcddfSLinus Torvalds {
1434c9cfcddfSLinus Torvalds 	pgd_t * pgd = pgd_offset(mm, addr);
1435c9cfcddfSLinus Torvalds 	pud_t * pud = pud_alloc(mm, pgd, addr);
1436c9cfcddfSLinus Torvalds 	if (pud) {
143749c91fb0STrond Myklebust 		pmd_t * pmd = pmd_alloc(mm, pud, addr);
1438c9cfcddfSLinus Torvalds 		if (pmd)
1439c9cfcddfSLinus Torvalds 			return pte_alloc_map_lock(mm, pmd, addr, ptl);
1440c9cfcddfSLinus Torvalds 	}
1441c9cfcddfSLinus Torvalds 	return NULL;
1442c9cfcddfSLinus Torvalds }
1443c9cfcddfSLinus Torvalds 
14441da177e4SLinus Torvalds /*
1445238f58d8SLinus Torvalds  * This is the old fallback for page remapping.
1446238f58d8SLinus Torvalds  *
1447238f58d8SLinus Torvalds  * For historical reasons, it only allows reserved pages. Only
1448238f58d8SLinus Torvalds  * old drivers should use this, and they needed to mark their
1449238f58d8SLinus Torvalds  * pages reserved for the old functions anyway.
1450238f58d8SLinus Torvalds  */
1451423bad60SNick Piggin static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1452423bad60SNick Piggin 			struct page *page, pgprot_t prot)
1453238f58d8SLinus Torvalds {
1454423bad60SNick Piggin 	struct mm_struct *mm = vma->vm_mm;
1455238f58d8SLinus Torvalds 	int retval;
1456238f58d8SLinus Torvalds 	pte_t *pte;
1457238f58d8SLinus Torvalds 	spinlock_t *ptl;
1458238f58d8SLinus Torvalds 
1459238f58d8SLinus Torvalds 	retval = -EINVAL;
1460a145dd41SLinus Torvalds 	if (PageAnon(page))
14615b4e655eSKAMEZAWA Hiroyuki 		goto out;
1462238f58d8SLinus Torvalds 	retval = -ENOMEM;
1463238f58d8SLinus Torvalds 	flush_dcache_page(page);
1464c9cfcddfSLinus Torvalds 	pte = get_locked_pte(mm, addr, &ptl);
1465238f58d8SLinus Torvalds 	if (!pte)
14665b4e655eSKAMEZAWA Hiroyuki 		goto out;
1467238f58d8SLinus Torvalds 	retval = -EBUSY;
1468238f58d8SLinus Torvalds 	if (!pte_none(*pte))
1469238f58d8SLinus Torvalds 		goto out_unlock;
1470238f58d8SLinus Torvalds 
1471238f58d8SLinus Torvalds 	/* Ok, finally just insert the thing.. */
1472238f58d8SLinus Torvalds 	get_page(page);
1473238f58d8SLinus Torvalds 	inc_mm_counter(mm, file_rss);
1474238f58d8SLinus Torvalds 	page_add_file_rmap(page);
1475238f58d8SLinus Torvalds 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1476238f58d8SLinus Torvalds 
1477238f58d8SLinus Torvalds 	retval = 0;
14788a9f3ccdSBalbir Singh 	pte_unmap_unlock(pte, ptl);
14798a9f3ccdSBalbir Singh 	return retval;
1480238f58d8SLinus Torvalds out_unlock:
1481238f58d8SLinus Torvalds 	pte_unmap_unlock(pte, ptl);
1482238f58d8SLinus Torvalds out:
1483238f58d8SLinus Torvalds 	return retval;
1484238f58d8SLinus Torvalds }
1485238f58d8SLinus Torvalds 
1486bfa5bf6dSRolf Eike Beer /**
1487bfa5bf6dSRolf Eike Beer  * vm_insert_page - insert single page into user vma
1488bfa5bf6dSRolf Eike Beer  * @vma: user vma to map to
1489bfa5bf6dSRolf Eike Beer  * @addr: target user address of this page
1490bfa5bf6dSRolf Eike Beer  * @page: source kernel page
1491bfa5bf6dSRolf Eike Beer  *
1492a145dd41SLinus Torvalds  * This allows drivers to insert individual pages they've allocated
1493a145dd41SLinus Torvalds  * into a user vma.
1494a145dd41SLinus Torvalds  *
1495a145dd41SLinus Torvalds  * The page has to be a nice clean _individual_ kernel allocation.
1496a145dd41SLinus Torvalds  * If you allocate a compound page, you need to have marked it as
1497a145dd41SLinus Torvalds  * such (__GFP_COMP), or manually just split the page up yourself
14988dfcc9baSNick Piggin  * (see split_page()).
1499a145dd41SLinus Torvalds  *
1500a145dd41SLinus Torvalds  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1501a145dd41SLinus Torvalds  * took an arbitrary page protection parameter. This doesn't allow
1502a145dd41SLinus Torvalds  * that. Your vma protection will have to be set up correctly, which
1503a145dd41SLinus Torvalds  * means that if you want a shared writable mapping, you'd better
1504a145dd41SLinus Torvalds  * ask for a shared writable mapping!
1505a145dd41SLinus Torvalds  *
1506a145dd41SLinus Torvalds  * The page does not need to be reserved.
1507a145dd41SLinus Torvalds  */
1508423bad60SNick Piggin int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1509423bad60SNick Piggin 			struct page *page)
1510a145dd41SLinus Torvalds {
1511a145dd41SLinus Torvalds 	if (addr < vma->vm_start || addr >= vma->vm_end)
1512a145dd41SLinus Torvalds 		return -EFAULT;
1513a145dd41SLinus Torvalds 	if (!page_count(page))
1514a145dd41SLinus Torvalds 		return -EINVAL;
15154d7672b4SLinus Torvalds 	vma->vm_flags |= VM_INSERTPAGE;
1516423bad60SNick Piggin 	return insert_page(vma, addr, page, vma->vm_page_prot);
1517a145dd41SLinus Torvalds }
1518e3c3374fSLinus Torvalds EXPORT_SYMBOL(vm_insert_page);
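
/*
 * Illustrative sketch, not part of this file: a hypothetical driver mmap
 * handler mapping an array of individually allocated pages into user space
 * with vm_insert_page().  "struct my_dev" and its pages[] array are
 * assumptions for the example; the pages would have come from alloc_page()
 * and the handler trusts that the vma is no larger than the array.
 */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;
	unsigned long addr;
	int i = 0, err;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		/* vma->vm_page_prot must already be what userspace asked for */
		err = vm_insert_page(vma, addr, dev->pages[i++]);
		if (err)
			return err;
	}
	return 0;
}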
1519a145dd41SLinus Torvalds 
1520423bad60SNick Piggin static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1521423bad60SNick Piggin 			unsigned long pfn, pgprot_t prot)
1522423bad60SNick Piggin {
1523423bad60SNick Piggin 	struct mm_struct *mm = vma->vm_mm;
1524423bad60SNick Piggin 	int retval;
1525423bad60SNick Piggin 	pte_t *pte, entry;
1526423bad60SNick Piggin 	spinlock_t *ptl;
1527423bad60SNick Piggin 
1528423bad60SNick Piggin 	retval = -ENOMEM;
1529423bad60SNick Piggin 	pte = get_locked_pte(mm, addr, &ptl);
1530423bad60SNick Piggin 	if (!pte)
1531423bad60SNick Piggin 		goto out;
1532423bad60SNick Piggin 	retval = -EBUSY;
1533423bad60SNick Piggin 	if (!pte_none(*pte))
1534423bad60SNick Piggin 		goto out_unlock;
1535423bad60SNick Piggin 
1536423bad60SNick Piggin 	/* Ok, finally just insert the thing.. */
1537423bad60SNick Piggin 	entry = pte_mkspecial(pfn_pte(pfn, prot));
1538423bad60SNick Piggin 	set_pte_at(mm, addr, pte, entry);
1539423bad60SNick Piggin 	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
1540423bad60SNick Piggin 
1541423bad60SNick Piggin 	retval = 0;
1542423bad60SNick Piggin out_unlock:
1543423bad60SNick Piggin 	pte_unmap_unlock(pte, ptl);
1544423bad60SNick Piggin out:
1545423bad60SNick Piggin 	return retval;
1546423bad60SNick Piggin }
1547423bad60SNick Piggin 
1548e0dc0d8fSNick Piggin /**
1549e0dc0d8fSNick Piggin  * vm_insert_pfn - insert single pfn into user vma
1550e0dc0d8fSNick Piggin  * @vma: user vma to map to
1551e0dc0d8fSNick Piggin  * @addr: target user address of this page
1552e0dc0d8fSNick Piggin  * @pfn: source kernel pfn
1553e0dc0d8fSNick Piggin  *
1554e0dc0d8fSNick Piggin  * Similar to vm_insert_page, this allows drivers to insert individual pages
1555e0dc0d8fSNick Piggin  * they've allocated into a user vma. Same comments apply.
1556e0dc0d8fSNick Piggin  *
1557e0dc0d8fSNick Piggin  * This function should only be called from a vm_ops->fault handler, and
1558e0dc0d8fSNick Piggin  * in that case the handler should return VM_FAULT_NOPAGE.
15590d71d10aSNick Piggin  *
15600d71d10aSNick Piggin  * vma cannot be a COW mapping.
15610d71d10aSNick Piggin  *
15620d71d10aSNick Piggin  * As this is called only for pages that do not currently exist, we
15630d71d10aSNick Piggin  * do not need to flush old virtual caches or the TLB.
1564e0dc0d8fSNick Piggin  */
1565e0dc0d8fSNick Piggin int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1566e0dc0d8fSNick Piggin 			unsigned long pfn)
1567e0dc0d8fSNick Piggin {
15682ab64037Svenkatesh.pallipadi@intel.com 	int ret;
1569e4b866edSvenkatesh.pallipadi@intel.com 	pgprot_t pgprot = vma->vm_page_prot;
15707e675137SNick Piggin 	/*
15717e675137SNick Piggin 	 * Technically, architectures with pte_special can avoid all these
15727e675137SNick Piggin 	 * restrictions (same for remap_pfn_range).  However we would like
15737e675137SNick Piggin 	 * consistency in testing and feature parity among all, so we should
15747e675137SNick Piggin 	 * try to keep these invariants in place for everybody.
15757e675137SNick Piggin 	 */
1576b379d790SJared Hulbert 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1577b379d790SJared Hulbert 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1578b379d790SJared Hulbert 						(VM_PFNMAP|VM_MIXEDMAP));
1579b379d790SJared Hulbert 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1580b379d790SJared Hulbert 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1581e0dc0d8fSNick Piggin 
1582423bad60SNick Piggin 	if (addr < vma->vm_start || addr >= vma->vm_end)
1583423bad60SNick Piggin 		return -EFAULT;
1584e4b866edSvenkatesh.pallipadi@intel.com 	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
15852ab64037Svenkatesh.pallipadi@intel.com 		return -EINVAL;
15862ab64037Svenkatesh.pallipadi@intel.com 
1587e4b866edSvenkatesh.pallipadi@intel.com 	ret = insert_pfn(vma, addr, pfn, pgprot);
15882ab64037Svenkatesh.pallipadi@intel.com 
15892ab64037Svenkatesh.pallipadi@intel.com 	if (ret)
15902ab64037Svenkatesh.pallipadi@intel.com 		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
15912ab64037Svenkatesh.pallipadi@intel.com 
15922ab64037Svenkatesh.pallipadi@intel.com 	return ret;
1593e0dc0d8fSNick Piggin }
1594e0dc0d8fSNick Piggin EXPORT_SYMBOL(vm_insert_pfn);
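
/*
 * Illustrative sketch, not part of this file: a hypothetical ->fault
 * handler for a VM_PFNMAP vma backed by device memory, using
 * vm_insert_pfn() as described above.  "struct my_dev" with base_pfn and
 * nr_pages fields is an assumption made for the example.
 */
static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_dev *dev = vma->vm_private_data;

	if (vmf->pgoff >= dev->nr_pages)
		return VM_FAULT_SIGBUS;
	if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
			  dev->base_pfn + vmf->pgoff))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;	/* the pte has already been installed */
}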
1595e0dc0d8fSNick Piggin 
1596423bad60SNick Piggin int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1597423bad60SNick Piggin 			unsigned long pfn)
1598423bad60SNick Piggin {
1599423bad60SNick Piggin 	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
1600423bad60SNick Piggin 
1601423bad60SNick Piggin 	if (addr < vma->vm_start || addr >= vma->vm_end)
1602423bad60SNick Piggin 		return -EFAULT;
1603423bad60SNick Piggin 
1604423bad60SNick Piggin 	/*
1605423bad60SNick Piggin 	 * If we don't have pte special, then we have to use the pfn_valid()
1606423bad60SNick Piggin 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
1607423bad60SNick Piggin 	 * refcount the page if pfn_valid is true (hence insert_page rather
1608423bad60SNick Piggin 	 * than insert_pfn).
1609423bad60SNick Piggin 	 */
1610423bad60SNick Piggin 	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
1611423bad60SNick Piggin 		struct page *page;
1612423bad60SNick Piggin 
1613423bad60SNick Piggin 		page = pfn_to_page(pfn);
1614423bad60SNick Piggin 		return insert_page(vma, addr, page, vma->vm_page_prot);
1615423bad60SNick Piggin 	}
1616423bad60SNick Piggin 	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
1617423bad60SNick Piggin }
1618423bad60SNick Piggin EXPORT_SYMBOL(vm_insert_mixed);
1619423bad60SNick Piggin 
1620a145dd41SLinus Torvalds /*
16211da177e4SLinus Torvalds  * Maps a range of physical memory into the requested pages. The old
16221da177e4SLinus Torvalds  * mappings are removed. Any references to nonexistent pages result
16231da177e4SLinus Torvalds  * in null mappings (currently treated as "copy-on-access").
16241da177e4SLinus Torvalds  */
16251da177e4SLinus Torvalds static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
16261da177e4SLinus Torvalds 			unsigned long addr, unsigned long end,
16271da177e4SLinus Torvalds 			unsigned long pfn, pgprot_t prot)
16281da177e4SLinus Torvalds {
16291da177e4SLinus Torvalds 	pte_t *pte;
1630c74df32cSHugh Dickins 	spinlock_t *ptl;
16311da177e4SLinus Torvalds 
1632c74df32cSHugh Dickins 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
16331da177e4SLinus Torvalds 	if (!pte)
16341da177e4SLinus Torvalds 		return -ENOMEM;
16356606c3e0SZachary Amsden 	arch_enter_lazy_mmu_mode();
16361da177e4SLinus Torvalds 	do {
16371da177e4SLinus Torvalds 		BUG_ON(!pte_none(*pte));
16387e675137SNick Piggin 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
16391da177e4SLinus Torvalds 		pfn++;
16401da177e4SLinus Torvalds 	} while (pte++, addr += PAGE_SIZE, addr != end);
16416606c3e0SZachary Amsden 	arch_leave_lazy_mmu_mode();
1642c74df32cSHugh Dickins 	pte_unmap_unlock(pte - 1, ptl);
16431da177e4SLinus Torvalds 	return 0;
16441da177e4SLinus Torvalds }
16451da177e4SLinus Torvalds 
16461da177e4SLinus Torvalds static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
16471da177e4SLinus Torvalds 			unsigned long addr, unsigned long end,
16481da177e4SLinus Torvalds 			unsigned long pfn, pgprot_t prot)
16491da177e4SLinus Torvalds {
16501da177e4SLinus Torvalds 	pmd_t *pmd;
16511da177e4SLinus Torvalds 	unsigned long next;
16521da177e4SLinus Torvalds 
16531da177e4SLinus Torvalds 	pfn -= addr >> PAGE_SHIFT;
16541da177e4SLinus Torvalds 	pmd = pmd_alloc(mm, pud, addr);
16551da177e4SLinus Torvalds 	if (!pmd)
16561da177e4SLinus Torvalds 		return -ENOMEM;
16571da177e4SLinus Torvalds 	do {
16581da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
16591da177e4SLinus Torvalds 		if (remap_pte_range(mm, pmd, addr, next,
16601da177e4SLinus Torvalds 				pfn + (addr >> PAGE_SHIFT), prot))
16611da177e4SLinus Torvalds 			return -ENOMEM;
16621da177e4SLinus Torvalds 	} while (pmd++, addr = next, addr != end);
16631da177e4SLinus Torvalds 	return 0;
16641da177e4SLinus Torvalds }
16651da177e4SLinus Torvalds 
16661da177e4SLinus Torvalds static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
16671da177e4SLinus Torvalds 			unsigned long addr, unsigned long end,
16681da177e4SLinus Torvalds 			unsigned long pfn, pgprot_t prot)
16691da177e4SLinus Torvalds {
16701da177e4SLinus Torvalds 	pud_t *pud;
16711da177e4SLinus Torvalds 	unsigned long next;
16721da177e4SLinus Torvalds 
16731da177e4SLinus Torvalds 	pfn -= addr >> PAGE_SHIFT;
16741da177e4SLinus Torvalds 	pud = pud_alloc(mm, pgd, addr);
16751da177e4SLinus Torvalds 	if (!pud)
16761da177e4SLinus Torvalds 		return -ENOMEM;
16771da177e4SLinus Torvalds 	do {
16781da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
16791da177e4SLinus Torvalds 		if (remap_pmd_range(mm, pud, addr, next,
16801da177e4SLinus Torvalds 				pfn + (addr >> PAGE_SHIFT), prot))
16811da177e4SLinus Torvalds 			return -ENOMEM;
16821da177e4SLinus Torvalds 	} while (pud++, addr = next, addr != end);
16831da177e4SLinus Torvalds 	return 0;
16841da177e4SLinus Torvalds }
16851da177e4SLinus Torvalds 
1686bfa5bf6dSRolf Eike Beer /**
1687bfa5bf6dSRolf Eike Beer  * remap_pfn_range - remap kernel memory to userspace
1688bfa5bf6dSRolf Eike Beer  * @vma: user vma to map to
1689bfa5bf6dSRolf Eike Beer  * @addr: target user address to start at
1690bfa5bf6dSRolf Eike Beer  * @pfn: physical address of kernel memory
1691bfa5bf6dSRolf Eike Beer  * @size: size of map area
1692bfa5bf6dSRolf Eike Beer  * @prot: page protection flags for this mapping
1693bfa5bf6dSRolf Eike Beer  *
1694bfa5bf6dSRolf Eike Beer  *  Note: this is only safe if the mm semaphore is held when called.
1695bfa5bf6dSRolf Eike Beer  */
16961da177e4SLinus Torvalds int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
16971da177e4SLinus Torvalds 		    unsigned long pfn, unsigned long size, pgprot_t prot)
16981da177e4SLinus Torvalds {
16991da177e4SLinus Torvalds 	pgd_t *pgd;
17001da177e4SLinus Torvalds 	unsigned long next;
17012d15cab8SHugh Dickins 	unsigned long end = addr + PAGE_ALIGN(size);
17021da177e4SLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
17031da177e4SLinus Torvalds 	int err;
17041da177e4SLinus Torvalds 
17051da177e4SLinus Torvalds 	/*
17061da177e4SLinus Torvalds 	 * Physically remapped pages are special. Tell the
17071da177e4SLinus Torvalds 	 * rest of the world about it:
17081da177e4SLinus Torvalds 	 *   VM_IO tells people not to look at these pages
17091da177e4SLinus Torvalds 	 *	(accesses can have side effects).
17100b14c179SHugh Dickins 	 *   VM_RESERVED is specified all over the place, because
17110b14c179SHugh Dickins 	 *	in 2.4 it kept swapout's vma scan off this vma; but
17120b14c179SHugh Dickins 	 *	in 2.6 the LRU scan won't even find its pages, so this
17130b14c179SHugh Dickins 	 *	flag means no more than count its pages in reserved_vm,
17140b14c179SHugh Dickins  * 	and omit it from the core dump, even when VM_IO is turned off.
17156aab341eSLinus Torvalds 	 *   VM_PFNMAP tells the core MM that the base pages are just
17166aab341eSLinus Torvalds 	 *	raw PFN mappings, and do not have a "struct page" associated
17176aab341eSLinus Torvalds 	 *	with them.
1718fb155c16SLinus Torvalds 	 *
1719fb155c16SLinus Torvalds 	 * There's a horrible special case to handle copy-on-write
1720fb155c16SLinus Torvalds 	 * behaviour that some programs depend on. We mark the "original"
1721fb155c16SLinus Torvalds 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
17221da177e4SLinus Torvalds 	 */
17234bb9c5c0SPallipadi, Venkatesh 	if (addr == vma->vm_start && end == vma->vm_end) {
17246aab341eSLinus Torvalds 		vma->vm_pgoff = pfn;
1725895791daSPallipadi, Venkatesh 		vma->vm_flags |= VM_PFN_AT_MMAP;
17264bb9c5c0SPallipadi, Venkatesh 	} else if (is_cow_mapping(vma->vm_flags))
17273c8bb73aSvenkatesh.pallipadi@intel.com 		return -EINVAL;
1728fb155c16SLinus Torvalds 
1729fb155c16SLinus Torvalds 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
17301da177e4SLinus Torvalds 
1731e4b866edSvenkatesh.pallipadi@intel.com 	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
1732a3670613Svenkatesh.pallipadi@intel.com 	if (err) {
1733a3670613Svenkatesh.pallipadi@intel.com 		/*
1734a3670613Svenkatesh.pallipadi@intel.com 		 * To indicate that track_pfn related cleanup is not
1735a3670613Svenkatesh.pallipadi@intel.com 		 * needed from higher level routine calling unmap_vmas
1736a3670613Svenkatesh.pallipadi@intel.com 		 */
1737a3670613Svenkatesh.pallipadi@intel.com 		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
1738895791daSPallipadi, Venkatesh 		vma->vm_flags &= ~VM_PFN_AT_MMAP;
17392ab64037Svenkatesh.pallipadi@intel.com 		return -EINVAL;
1740a3670613Svenkatesh.pallipadi@intel.com 	}
17412ab64037Svenkatesh.pallipadi@intel.com 
17421da177e4SLinus Torvalds 	BUG_ON(addr >= end);
17431da177e4SLinus Torvalds 	pfn -= addr >> PAGE_SHIFT;
17441da177e4SLinus Torvalds 	pgd = pgd_offset(mm, addr);
17451da177e4SLinus Torvalds 	flush_cache_range(vma, addr, end);
17461da177e4SLinus Torvalds 	do {
17471da177e4SLinus Torvalds 		next = pgd_addr_end(addr, end);
17481da177e4SLinus Torvalds 		err = remap_pud_range(mm, pgd, addr, next,
17491da177e4SLinus Torvalds 				pfn + (addr >> PAGE_SHIFT), prot);
17501da177e4SLinus Torvalds 		if (err)
17511da177e4SLinus Torvalds 			break;
17521da177e4SLinus Torvalds 	} while (pgd++, addr = next, addr != end);
17532ab64037Svenkatesh.pallipadi@intel.com 
17542ab64037Svenkatesh.pallipadi@intel.com 	if (err)
17552ab64037Svenkatesh.pallipadi@intel.com 		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
17562ab64037Svenkatesh.pallipadi@intel.com 
17571da177e4SLinus Torvalds 	return err;
17581da177e4SLinus Torvalds }
17591da177e4SLinus Torvalds EXPORT_SYMBOL(remap_pfn_range);
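
/*
 * Illustrative sketch, not part of this file: the classic remap_pfn_range()
 * call from a driver's mmap handler, mapping an assumed physical region
 * (dev->phys_base, dev->size) over the whole vma.  The mmap path already
 * holds the mm semaphore; pgprot_noncached() is shown as the usual choice
 * for MMIO but depends on the device and architecture.
 */
static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;
	unsigned long len = vma->vm_end - vma->vm_start;

	if (len > dev->size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       dev->phys_base >> PAGE_SHIFT,
			       len, vma->vm_page_prot);
}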
17601da177e4SLinus Torvalds 
1761aee16b3cSJeremy Fitzhardinge static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
1762aee16b3cSJeremy Fitzhardinge 				     unsigned long addr, unsigned long end,
1763aee16b3cSJeremy Fitzhardinge 				     pte_fn_t fn, void *data)
1764aee16b3cSJeremy Fitzhardinge {
1765aee16b3cSJeremy Fitzhardinge 	pte_t *pte;
1766aee16b3cSJeremy Fitzhardinge 	int err;
17672f569afdSMartin Schwidefsky 	pgtable_t token;
176894909914SBorislav Petkov 	spinlock_t *uninitialized_var(ptl);
1769aee16b3cSJeremy Fitzhardinge 
1770aee16b3cSJeremy Fitzhardinge 	pte = (mm == &init_mm) ?
1771aee16b3cSJeremy Fitzhardinge 		pte_alloc_kernel(pmd, addr) :
1772aee16b3cSJeremy Fitzhardinge 		pte_alloc_map_lock(mm, pmd, addr, &ptl);
1773aee16b3cSJeremy Fitzhardinge 	if (!pte)
1774aee16b3cSJeremy Fitzhardinge 		return -ENOMEM;
1775aee16b3cSJeremy Fitzhardinge 
1776aee16b3cSJeremy Fitzhardinge 	BUG_ON(pmd_huge(*pmd));
1777aee16b3cSJeremy Fitzhardinge 
177838e0edb1SJeremy Fitzhardinge 	arch_enter_lazy_mmu_mode();
177938e0edb1SJeremy Fitzhardinge 
17802f569afdSMartin Schwidefsky 	token = pmd_pgtable(*pmd);
1781aee16b3cSJeremy Fitzhardinge 
1782aee16b3cSJeremy Fitzhardinge 	do {
17832f569afdSMartin Schwidefsky 		err = fn(pte, token, addr, data);
1784aee16b3cSJeremy Fitzhardinge 		if (err)
1785aee16b3cSJeremy Fitzhardinge 			break;
1786aee16b3cSJeremy Fitzhardinge 	} while (pte++, addr += PAGE_SIZE, addr != end);
1787aee16b3cSJeremy Fitzhardinge 
178838e0edb1SJeremy Fitzhardinge 	arch_leave_lazy_mmu_mode();
178938e0edb1SJeremy Fitzhardinge 
1790aee16b3cSJeremy Fitzhardinge 	if (mm != &init_mm)
1791aee16b3cSJeremy Fitzhardinge 		pte_unmap_unlock(pte-1, ptl);
1792aee16b3cSJeremy Fitzhardinge 	return err;
1793aee16b3cSJeremy Fitzhardinge }
1794aee16b3cSJeremy Fitzhardinge 
1795aee16b3cSJeremy Fitzhardinge static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
1796aee16b3cSJeremy Fitzhardinge 				     unsigned long addr, unsigned long end,
1797aee16b3cSJeremy Fitzhardinge 				     pte_fn_t fn, void *data)
1798aee16b3cSJeremy Fitzhardinge {
1799aee16b3cSJeremy Fitzhardinge 	pmd_t *pmd;
1800aee16b3cSJeremy Fitzhardinge 	unsigned long next;
1801aee16b3cSJeremy Fitzhardinge 	int err;
1802aee16b3cSJeremy Fitzhardinge 
1803ceb86879SAndi Kleen 	BUG_ON(pud_huge(*pud));
1804ceb86879SAndi Kleen 
1805aee16b3cSJeremy Fitzhardinge 	pmd = pmd_alloc(mm, pud, addr);
1806aee16b3cSJeremy Fitzhardinge 	if (!pmd)
1807aee16b3cSJeremy Fitzhardinge 		return -ENOMEM;
1808aee16b3cSJeremy Fitzhardinge 	do {
1809aee16b3cSJeremy Fitzhardinge 		next = pmd_addr_end(addr, end);
1810aee16b3cSJeremy Fitzhardinge 		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
1811aee16b3cSJeremy Fitzhardinge 		if (err)
1812aee16b3cSJeremy Fitzhardinge 			break;
1813aee16b3cSJeremy Fitzhardinge 	} while (pmd++, addr = next, addr != end);
1814aee16b3cSJeremy Fitzhardinge 	return err;
1815aee16b3cSJeremy Fitzhardinge }
1816aee16b3cSJeremy Fitzhardinge 
1817aee16b3cSJeremy Fitzhardinge static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
1818aee16b3cSJeremy Fitzhardinge 				     unsigned long addr, unsigned long end,
1819aee16b3cSJeremy Fitzhardinge 				     pte_fn_t fn, void *data)
1820aee16b3cSJeremy Fitzhardinge {
1821aee16b3cSJeremy Fitzhardinge 	pud_t *pud;
1822aee16b3cSJeremy Fitzhardinge 	unsigned long next;
1823aee16b3cSJeremy Fitzhardinge 	int err;
1824aee16b3cSJeremy Fitzhardinge 
1825aee16b3cSJeremy Fitzhardinge 	pud = pud_alloc(mm, pgd, addr);
1826aee16b3cSJeremy Fitzhardinge 	if (!pud)
1827aee16b3cSJeremy Fitzhardinge 		return -ENOMEM;
1828aee16b3cSJeremy Fitzhardinge 	do {
1829aee16b3cSJeremy Fitzhardinge 		next = pud_addr_end(addr, end);
1830aee16b3cSJeremy Fitzhardinge 		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
1831aee16b3cSJeremy Fitzhardinge 		if (err)
1832aee16b3cSJeremy Fitzhardinge 			break;
1833aee16b3cSJeremy Fitzhardinge 	} while (pud++, addr = next, addr != end);
1834aee16b3cSJeremy Fitzhardinge 	return err;
1835aee16b3cSJeremy Fitzhardinge }
1836aee16b3cSJeremy Fitzhardinge 
1837aee16b3cSJeremy Fitzhardinge /*
1838aee16b3cSJeremy Fitzhardinge  * Scan a region of virtual memory, filling in page tables as necessary
1839aee16b3cSJeremy Fitzhardinge  * and calling a provided function on each leaf page table.
1840aee16b3cSJeremy Fitzhardinge  */
1841aee16b3cSJeremy Fitzhardinge int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
1842aee16b3cSJeremy Fitzhardinge 			unsigned long size, pte_fn_t fn, void *data)
1843aee16b3cSJeremy Fitzhardinge {
1844aee16b3cSJeremy Fitzhardinge 	pgd_t *pgd;
1845aee16b3cSJeremy Fitzhardinge 	unsigned long next;
1846cddb8a5cSAndrea Arcangeli 	unsigned long start = addr, end = addr + size;
1847aee16b3cSJeremy Fitzhardinge 	int err;
1848aee16b3cSJeremy Fitzhardinge 
1849aee16b3cSJeremy Fitzhardinge 	BUG_ON(addr >= end);
1850cddb8a5cSAndrea Arcangeli 	mmu_notifier_invalidate_range_start(mm, start, end);
1851aee16b3cSJeremy Fitzhardinge 	pgd = pgd_offset(mm, addr);
1852aee16b3cSJeremy Fitzhardinge 	do {
1853aee16b3cSJeremy Fitzhardinge 		next = pgd_addr_end(addr, end);
1854aee16b3cSJeremy Fitzhardinge 		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
1855aee16b3cSJeremy Fitzhardinge 		if (err)
1856aee16b3cSJeremy Fitzhardinge 			break;
1857aee16b3cSJeremy Fitzhardinge 	} while (pgd++, addr = next, addr != end);
1858cddb8a5cSAndrea Arcangeli 	mmu_notifier_invalidate_range_end(mm, start, end);
1859aee16b3cSJeremy Fitzhardinge 	return err;
1860aee16b3cSJeremy Fitzhardinge }
1861aee16b3cSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(apply_to_page_range);
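
/*
 * Illustrative sketch, not part of this file: a pte_fn_t callback of the
 * kind apply_to_page_range() expects, installing an incrementing pfn at
 * each leaf pte of a kernel-address range.  The names fill_pte_range() and
 * struct fill_data are invented; a real user would also flush the TLB (and
 * caches where needed) after changing live ptes.
 */
struct fill_data {
	unsigned long pfn;
	pgprot_t prot;
};

static int fill_one_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data)
{
	struct fill_data *fd = data;

	set_pte_at(&init_mm, addr, pte, pfn_pte(fd->pfn++, fd->prot));
	return 0;	/* returning non-zero would stop the walk */
}

static int fill_pte_range(unsigned long addr, unsigned long size,
			  unsigned long first_pfn, pgprot_t prot)
{
	struct fill_data fd = { .pfn = first_pfn, .prot = prot };

	return apply_to_page_range(&init_mm, addr, size, fill_one_pte, &fd);
}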
1862aee16b3cSJeremy Fitzhardinge 
18631da177e4SLinus Torvalds /*
18648f4e2101SHugh Dickins  * handle_pte_fault chooses page fault handler according to an entry
18658f4e2101SHugh Dickins  * which was read non-atomically.  Before making any commitment, on
18668f4e2101SHugh Dickins  * those architectures or configurations (e.g. i386 with PAE) which
18678f4e2101SHugh Dickins  * might give a mix of unmatched parts, do_swap_page and do_file_page
18688f4e2101SHugh Dickins  * must check under lock before unmapping the pte and proceeding
18698f4e2101SHugh Dickins  * (but do_wp_page is only called after already making such a check;
18708f4e2101SHugh Dickins  * and do_anonymous_page and do_no_page can safely check later on).
18718f4e2101SHugh Dickins  */
18724c21e2f2SHugh Dickins static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
18738f4e2101SHugh Dickins 				pte_t *page_table, pte_t orig_pte)
18748f4e2101SHugh Dickins {
18758f4e2101SHugh Dickins 	int same = 1;
18768f4e2101SHugh Dickins #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
18778f4e2101SHugh Dickins 	if (sizeof(pte_t) > sizeof(unsigned long)) {
18784c21e2f2SHugh Dickins 		spinlock_t *ptl = pte_lockptr(mm, pmd);
18794c21e2f2SHugh Dickins 		spin_lock(ptl);
18808f4e2101SHugh Dickins 		same = pte_same(*page_table, orig_pte);
18814c21e2f2SHugh Dickins 		spin_unlock(ptl);
18828f4e2101SHugh Dickins 	}
18838f4e2101SHugh Dickins #endif
18848f4e2101SHugh Dickins 	pte_unmap(page_table);
18858f4e2101SHugh Dickins 	return same;
18868f4e2101SHugh Dickins }
18878f4e2101SHugh Dickins 
18888f4e2101SHugh Dickins /*
18891da177e4SLinus Torvalds  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
18901da177e4SLinus Torvalds  * servicing faults for write access.  In the normal case, we always want
18911da177e4SLinus Torvalds  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
18921da177e4SLinus Torvalds  * that do not have writing enabled, when used by access_process_vm.
18931da177e4SLinus Torvalds  */
18941da177e4SLinus Torvalds static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
18951da177e4SLinus Torvalds {
18961da177e4SLinus Torvalds 	if (likely(vma->vm_flags & VM_WRITE))
18971da177e4SLinus Torvalds 		pte = pte_mkwrite(pte);
18981da177e4SLinus Torvalds 	return pte;
18991da177e4SLinus Torvalds }
19001da177e4SLinus Torvalds 
19019de455b2SAtsushi Nemoto static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
19026aab341eSLinus Torvalds {
19036aab341eSLinus Torvalds 	/*
19046aab341eSLinus Torvalds 	 * If the source page was a PFN mapping, we don't have
19056aab341eSLinus Torvalds 	 * a "struct page" for it. We do a best-effort copy by
19066aab341eSLinus Torvalds 	 * just copying from the original user address. If that
19076aab341eSLinus Torvalds 	 * fails, we just zero-fill it. Live with it.
19086aab341eSLinus Torvalds 	 */
19096aab341eSLinus Torvalds 	if (unlikely(!src)) {
19106aab341eSLinus Torvalds 		void *kaddr = kmap_atomic(dst, KM_USER0);
19115d2a2dbbSLinus Torvalds 		void __user *uaddr = (void __user *)(va & PAGE_MASK);
19125d2a2dbbSLinus Torvalds 
19135d2a2dbbSLinus Torvalds 		/*
19145d2a2dbbSLinus Torvalds 		 * This really shouldn't fail, because the page is there
19155d2a2dbbSLinus Torvalds 		 * in the page tables. But it might just be unreadable,
19165d2a2dbbSLinus Torvalds 		 * in which case we just give up and fill the result with
19175d2a2dbbSLinus Torvalds 		 * zeroes.
19185d2a2dbbSLinus Torvalds 		 */
19195d2a2dbbSLinus Torvalds 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
19206aab341eSLinus Torvalds 			memset(kaddr, 0, PAGE_SIZE);
19216aab341eSLinus Torvalds 		kunmap_atomic(kaddr, KM_USER0);
1922c4ec7b0dSDmitriy Monakhov 		flush_dcache_page(dst);
19230ed361deSNick Piggin 	} else
19249de455b2SAtsushi Nemoto 		copy_user_highpage(dst, src, va, vma);
19256aab341eSLinus Torvalds }
19266aab341eSLinus Torvalds 
19271da177e4SLinus Torvalds /*
19281da177e4SLinus Torvalds  * This routine handles present pages, when users try to write
19291da177e4SLinus Torvalds  * to a shared page. It is done by copying the page to a new address
19301da177e4SLinus Torvalds  * and decrementing the shared-page counter for the old page.
19311da177e4SLinus Torvalds  *
19321da177e4SLinus Torvalds  * Note that this routine assumes that the protection checks have been
19331da177e4SLinus Torvalds  * done by the caller (the low-level page fault routine in most cases).
19341da177e4SLinus Torvalds  * Thus we can safely just mark it writable once we've done any necessary
19351da177e4SLinus Torvalds  * COW.
19361da177e4SLinus Torvalds  *
19371da177e4SLinus Torvalds  * We also mark the page dirty at this point even though the page will
19381da177e4SLinus Torvalds  * change only once the write actually happens. This avoids a few races,
19391da177e4SLinus Torvalds  * and potentially makes it more efficient.
19401da177e4SLinus Torvalds  *
19418f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
19428f4e2101SHugh Dickins  * but allow concurrent faults), with pte both mapped and locked.
19438f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
19441da177e4SLinus Torvalds  */
19451da177e4SLinus Torvalds static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
194665500d23SHugh Dickins 		unsigned long address, pte_t *page_table, pmd_t *pmd,
19478f4e2101SHugh Dickins 		spinlock_t *ptl, pte_t orig_pte)
19481da177e4SLinus Torvalds {
1949e5bbe4dfSHugh Dickins 	struct page *old_page, *new_page;
19501da177e4SLinus Torvalds 	pte_t entry;
195183c54070SNick Piggin 	int reuse = 0, ret = 0;
1952a200ee18SPeter Zijlstra 	int page_mkwrite = 0;
1953d08b3851SPeter Zijlstra 	struct page *dirty_page = NULL;
19541da177e4SLinus Torvalds 
19556aab341eSLinus Torvalds 	old_page = vm_normal_page(vma, address, orig_pte);
1956251b97f5SPeter Zijlstra 	if (!old_page) {
1957251b97f5SPeter Zijlstra 		/*
1958251b97f5SPeter Zijlstra 		 * VM_MIXEDMAP !pfn_valid() case
1959251b97f5SPeter Zijlstra 		 *
1960251b97f5SPeter Zijlstra 		 * We should not cow pages in a shared writeable mapping.
1961251b97f5SPeter Zijlstra 		 * Just mark the pages writable as we can't do any dirty
1962251b97f5SPeter Zijlstra 		 * accounting on raw pfn maps.
1963251b97f5SPeter Zijlstra 		 */
1964251b97f5SPeter Zijlstra 		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
1965251b97f5SPeter Zijlstra 				     (VM_WRITE|VM_SHARED))
1966251b97f5SPeter Zijlstra 			goto reuse;
1967920fc356SHugh Dickins 		goto gotten;
1968251b97f5SPeter Zijlstra 	}
19691da177e4SLinus Torvalds 
1970d08b3851SPeter Zijlstra 	/*
1971ee6a6457SPeter Zijlstra 	 * Take out anonymous pages first, anonymous shared vmas are
1972ee6a6457SPeter Zijlstra 	 * not dirty accountable.
1973d08b3851SPeter Zijlstra 	 */
1974ee6a6457SPeter Zijlstra 	if (PageAnon(old_page)) {
1975ab967d86SHugh Dickins 		if (!trylock_page(old_page)) {
1976ab967d86SHugh Dickins 			page_cache_get(old_page);
1977ab967d86SHugh Dickins 			pte_unmap_unlock(page_table, ptl);
1978ab967d86SHugh Dickins 			lock_page(old_page);
1979ab967d86SHugh Dickins 			page_table = pte_offset_map_lock(mm, pmd, address,
1980ab967d86SHugh Dickins 							 &ptl);
1981ab967d86SHugh Dickins 			if (!pte_same(*page_table, orig_pte)) {
1982ab967d86SHugh Dickins 				unlock_page(old_page);
1983ab967d86SHugh Dickins 				page_cache_release(old_page);
1984ab967d86SHugh Dickins 				goto unlock;
1985ab967d86SHugh Dickins 			}
1986ab967d86SHugh Dickins 			page_cache_release(old_page);
1987ab967d86SHugh Dickins 		}
19887b1fe597SHugh Dickins 		reuse = reuse_swap_page(old_page);
1989ee6a6457SPeter Zijlstra 		unlock_page(old_page);
1990ee6a6457SPeter Zijlstra 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
1991d08b3851SPeter Zijlstra 					(VM_WRITE|VM_SHARED))) {
1992ee6a6457SPeter Zijlstra 		/*
1993ee6a6457SPeter Zijlstra 		 * Only catch write-faults on shared writable pages,
1994ee6a6457SPeter Zijlstra 		 * read-only shared pages can get COWed by
1995ee6a6457SPeter Zijlstra 		 * get_user_pages(.write=1, .force=1).
1996ee6a6457SPeter Zijlstra 		 */
19979637a5efSDavid Howells 		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
1998c2ec175cSNick Piggin 			struct vm_fault vmf;
1999c2ec175cSNick Piggin 			int tmp;
2000c2ec175cSNick Piggin 
2001c2ec175cSNick Piggin 			vmf.virtual_address = (void __user *)(address &
2002c2ec175cSNick Piggin 								PAGE_MASK);
2003c2ec175cSNick Piggin 			vmf.pgoff = old_page->index;
2004c2ec175cSNick Piggin 			vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2005c2ec175cSNick Piggin 			vmf.page = old_page;
2006c2ec175cSNick Piggin 
20079637a5efSDavid Howells 			/*
20089637a5efSDavid Howells 			 * Notify the address space that the page is about to
20099637a5efSDavid Howells 			 * become writable so that it can prohibit this or wait
20109637a5efSDavid Howells 			 * for the page to get into an appropriate state.
20119637a5efSDavid Howells 			 *
20129637a5efSDavid Howells 			 * We do this without the lock held, so that it can
20139637a5efSDavid Howells 			 * sleep if it needs to.
20149637a5efSDavid Howells 			 */
20159637a5efSDavid Howells 			page_cache_get(old_page);
20169637a5efSDavid Howells 			pte_unmap_unlock(page_table, ptl);
20179637a5efSDavid Howells 
2018c2ec175cSNick Piggin 			tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
2019c2ec175cSNick Piggin 			if (unlikely(tmp &
2020c2ec175cSNick Piggin 					(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
2021c2ec175cSNick Piggin 				ret = tmp;
20229637a5efSDavid Howells 				goto unwritable_page;
2023c2ec175cSNick Piggin 			}
2024b827e496SNick Piggin 			if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
2025b827e496SNick Piggin 				lock_page(old_page);
2026b827e496SNick Piggin 				if (!old_page->mapping) {
2027b827e496SNick Piggin 					ret = 0; /* retry the fault */
2028b827e496SNick Piggin 					unlock_page(old_page);
2029b827e496SNick Piggin 					goto unwritable_page;
2030b827e496SNick Piggin 				}
2031b827e496SNick Piggin 			} else
2032b827e496SNick Piggin 				VM_BUG_ON(!PageLocked(old_page));
20339637a5efSDavid Howells 
20349637a5efSDavid Howells 			/*
20359637a5efSDavid Howells 			 * Since we dropped the lock we need to revalidate
20369637a5efSDavid Howells 			 * the PTE as someone else may have changed it.  If
20379637a5efSDavid Howells 			 * they did, we just return, as we can count on the
20389637a5efSDavid Howells 			 * MMU to tell us if they didn't also make it writable.
20399637a5efSDavid Howells 			 */
20409637a5efSDavid Howells 			page_table = pte_offset_map_lock(mm, pmd, address,
20419637a5efSDavid Howells 							 &ptl);
2042b827e496SNick Piggin 			if (!pte_same(*page_table, orig_pte)) {
2043b827e496SNick Piggin 				unlock_page(old_page);
2044c3704cebSHugh Dickins 				page_cache_release(old_page);
20459637a5efSDavid Howells 				goto unlock;
2046b827e496SNick Piggin 			}
2047a200ee18SPeter Zijlstra 
2048a200ee18SPeter Zijlstra 			page_mkwrite = 1;
20499637a5efSDavid Howells 		}
2050d08b3851SPeter Zijlstra 		dirty_page = old_page;
2051d08b3851SPeter Zijlstra 		get_page(dirty_page);
20529637a5efSDavid Howells 		reuse = 1;
20539637a5efSDavid Howells 	}
20549637a5efSDavid Howells 
20551da177e4SLinus Torvalds 	if (reuse) {
2056251b97f5SPeter Zijlstra reuse:
2057eca35133SBen Collins 		flush_cache_page(vma, address, pte_pfn(orig_pte));
205865500d23SHugh Dickins 		entry = pte_mkyoung(orig_pte);
205965500d23SHugh Dickins 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2060954ffcb3SKAMEZAWA Hiroyuki 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
20611da177e4SLinus Torvalds 			update_mmu_cache(vma, address, entry);
206265500d23SHugh Dickins 		ret |= VM_FAULT_WRITE;
206365500d23SHugh Dickins 		goto unlock;
20641da177e4SLinus Torvalds 	}
20651da177e4SLinus Torvalds 
20661da177e4SLinus Torvalds 	/*
20671da177e4SLinus Torvalds 	 * Ok, we need to copy. Oh, well..
20681da177e4SLinus Torvalds 	 */
20691da177e4SLinus Torvalds 	page_cache_get(old_page);
2070920fc356SHugh Dickins gotten:
20718f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
20721da177e4SLinus Torvalds 
20731da177e4SLinus Torvalds 	if (unlikely(anon_vma_prepare(vma)))
207465500d23SHugh Dickins 		goto oom;
2075557ed1faSNick Piggin 	VM_BUG_ON(old_page == ZERO_PAGE(0));
2076769848c0SMel Gorman 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
20771da177e4SLinus Torvalds 	if (!new_page)
207865500d23SHugh Dickins 		goto oom;
2079b291f000SNick Piggin 	/*
2080b291f000SNick Piggin 	 * Don't let another task, with possibly unlocked vma,
2081b291f000SNick Piggin 	 * keep the mlocked page.
2082b291f000SNick Piggin 	 */
2083ab92661dSCarsten Otte 	if ((vma->vm_flags & VM_LOCKED) && old_page) {
2084b291f000SNick Piggin 		lock_page(old_page);	/* for LRU manipulation */
2085b291f000SNick Piggin 		clear_page_mlock(old_page);
2086b291f000SNick Piggin 		unlock_page(old_page);
2087b291f000SNick Piggin 	}
20889de455b2SAtsushi Nemoto 	cow_user_page(new_page, old_page, address, vma);
20890ed361deSNick Piggin 	__SetPageUptodate(new_page);
209065500d23SHugh Dickins 
20912c26fdd7SKAMEZAWA Hiroyuki 	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
20928a9f3ccdSBalbir Singh 		goto oom_free_new;
20938a9f3ccdSBalbir Singh 
20941da177e4SLinus Torvalds 	/*
20951da177e4SLinus Torvalds 	 * Re-check the pte - we dropped the lock
20961da177e4SLinus Torvalds 	 */
20978f4e2101SHugh Dickins 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
209865500d23SHugh Dickins 	if (likely(pte_same(*page_table, orig_pte))) {
2099920fc356SHugh Dickins 		if (old_page) {
21004294621fSHugh Dickins 			if (!PageAnon(old_page)) {
21014294621fSHugh Dickins 				dec_mm_counter(mm, file_rss);
2102920fc356SHugh Dickins 				inc_mm_counter(mm, anon_rss);
21034294621fSHugh Dickins 			}
2104920fc356SHugh Dickins 		} else
2105920fc356SHugh Dickins 			inc_mm_counter(mm, anon_rss);
2106eca35133SBen Collins 		flush_cache_page(vma, address, pte_pfn(orig_pte));
210765500d23SHugh Dickins 		entry = mk_pte(new_page, vma->vm_page_prot);
210865500d23SHugh Dickins 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
21094ce072f1SSiddha, Suresh B 		/*
21104ce072f1SSiddha, Suresh B 		 * Clear the pte entry and flush it first, before updating the
21114ce072f1SSiddha, Suresh B 		 * pte with the new entry. This will avoid a race condition
21124ce072f1SSiddha, Suresh B 		 * seen in the presence of one thread doing SMC (self-modifying
21134ce072f1SSiddha, Suresh B 		 * code) and another thread doing COW.
21144ce072f1SSiddha, Suresh B 		 */
2115cddb8a5cSAndrea Arcangeli 		ptep_clear_flush_notify(vma, address, page_table);
21169617d95eSNick Piggin 		page_add_new_anon_rmap(new_page, vma, address);
211764d6519dSLee Schermerhorn 		set_pte_at(mm, address, page_table, entry);
211864d6519dSLee Schermerhorn 		update_mmu_cache(vma, address, entry);
2119945754a1SNick Piggin 		if (old_page) {
2120945754a1SNick Piggin 			/*
2121945754a1SNick Piggin 			 * Only after switching the pte to the new page may
2122945754a1SNick Piggin 			 * we remove the mapcount here. Otherwise another
2123945754a1SNick Piggin 			 * process may come and find the rmap count decremented
2124945754a1SNick Piggin 			 * before the pte is switched to the new page, and
2125945754a1SNick Piggin 			 * "reuse" the old page writing into it while our pte
2126945754a1SNick Piggin 			 * here still points into it and can be read by other
2127945754a1SNick Piggin 			 * threads.
2128945754a1SNick Piggin 			 *
2129945754a1SNick Piggin 			 * The critical issue is to order this
2130945754a1SNick Piggin 			 * page_remove_rmap with the ptep_clear_flush above.
2131945754a1SNick Piggin 			 * Those stores are ordered by (if nothing else,)
2132945754a1SNick Piggin 			 * the barrier present in the atomic_add_negative
2133945754a1SNick Piggin 			 * in page_remove_rmap.
2134945754a1SNick Piggin 			 *
2135945754a1SNick Piggin 			 * Then the TLB flush in ptep_clear_flush ensures that
2136945754a1SNick Piggin 			 * no process can access the old page before the
2137945754a1SNick Piggin 			 * decremented mapcount is visible. And the old page
2138945754a1SNick Piggin 			 * cannot be reused until after the decremented
2139945754a1SNick Piggin 			 * mapcount is visible. So transitively, TLBs to
2140945754a1SNick Piggin 			 * old page will be flushed before it can be reused.
2141945754a1SNick Piggin 			 */
2142edc315fdSHugh Dickins 			page_remove_rmap(old_page);
2143945754a1SNick Piggin 		}
2144945754a1SNick Piggin 
21451da177e4SLinus Torvalds 		/* Free the old page.. */
21461da177e4SLinus Torvalds 		new_page = old_page;
2147f33ea7f4SNick Piggin 		ret |= VM_FAULT_WRITE;
21488a9f3ccdSBalbir Singh 	} else
21498a9f3ccdSBalbir Singh 		mem_cgroup_uncharge_page(new_page);
21508a9f3ccdSBalbir Singh 
2151920fc356SHugh Dickins 	if (new_page)
21521da177e4SLinus Torvalds 		page_cache_release(new_page);
2153920fc356SHugh Dickins 	if (old_page)
21541da177e4SLinus Torvalds 		page_cache_release(old_page);
215565500d23SHugh Dickins unlock:
21568f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
2157d08b3851SPeter Zijlstra 	if (dirty_page) {
215879352894SNick Piggin 		/*
215979352894SNick Piggin 		 * Yes, Virginia, this is actually required to prevent a race
216079352894SNick Piggin 		 * with clear_page_dirty_for_io(), which could clear the page
216179352894SNick Piggin 		 * dirty bit after it clears all dirty ptes, but before a racing
216279352894SNick Piggin 		 * do_wp_page installs a dirty pte.
216379352894SNick Piggin 		 *
216479352894SNick Piggin 		 * do_no_page is protected similarly.
216579352894SNick Piggin 		 */
2166b827e496SNick Piggin 		if (!page_mkwrite) {
216779352894SNick Piggin 			wait_on_page_locked(dirty_page);
2168a200ee18SPeter Zijlstra 			set_page_dirty_balance(dirty_page, page_mkwrite);
2169b827e496SNick Piggin 		}
2170d08b3851SPeter Zijlstra 		put_page(dirty_page);
2171b827e496SNick Piggin 		if (page_mkwrite) {
2172b827e496SNick Piggin 			struct address_space *mapping = dirty_page->mapping;
2173b827e496SNick Piggin 
2174b827e496SNick Piggin 			set_page_dirty(dirty_page);
2175b827e496SNick Piggin 			unlock_page(dirty_page);
2176b827e496SNick Piggin 			page_cache_release(dirty_page);
2177b827e496SNick Piggin 			if (mapping)	{
2178b827e496SNick Piggin 				/*
2179b827e496SNick Piggin 				 * Some device drivers do not set page.mapping
2180b827e496SNick Piggin 				 * but still dirty their pages
2181b827e496SNick Piggin 				 */
2182b827e496SNick Piggin 				balance_dirty_pages_ratelimited(mapping);
2183b827e496SNick Piggin 			}
2184b827e496SNick Piggin 		}
2185b827e496SNick Piggin 
2186b827e496SNick Piggin 		/* file_update_time outside page_lock */
2187b827e496SNick Piggin 		if (vma->vm_file)
2188b827e496SNick Piggin 			file_update_time(vma->vm_file);
2189d08b3851SPeter Zijlstra 	}
2190f33ea7f4SNick Piggin 	return ret;
21918a9f3ccdSBalbir Singh oom_free_new:
21926dbf6d3bSHugh Dickins 	page_cache_release(new_page);
219365500d23SHugh Dickins oom:
2194b827e496SNick Piggin 	if (old_page) {
2195b827e496SNick Piggin 		if (page_mkwrite) {
2196b827e496SNick Piggin 			unlock_page(old_page);
21971da177e4SLinus Torvalds 			page_cache_release(old_page);
2198b827e496SNick Piggin 		}
2199b827e496SNick Piggin 		page_cache_release(old_page);
2200b827e496SNick Piggin 	}
22011da177e4SLinus Torvalds 	return VM_FAULT_OOM;
22029637a5efSDavid Howells 
22039637a5efSDavid Howells unwritable_page:
22049637a5efSDavid Howells 	page_cache_release(old_page);
2205c2ec175cSNick Piggin 	return ret;
22061da177e4SLinus Torvalds }
22071da177e4SLinus Torvalds 
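/*
 * Editor's illustrative sketch, not part of memory.c: the contract that
 * the page_mkwrite handling in do_wp_page() above expects from a
 * driver's or filesystem's ->page_mkwrite().  The function name is
 * hypothetical.  The callback may sleep (the pte lock is dropped before
 * it is called); if it returns VM_FAULT_LOCKED it must return with the
 * page locked, and a page whose ->mapping has gone NULL signals that it
 * was truncated while we slept.
 */
static int example_page_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	if (!page->mapping) {
		/* Truncated under us: tell the caller to retry the fault. */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	/* Work to make the page writable (allocate blocks, etc.) goes here. */
	return VM_FAULT_LOCKED;
}
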
22081da177e4SLinus Torvalds /*
22091da177e4SLinus Torvalds  * Helper functions for unmap_mapping_range().
22101da177e4SLinus Torvalds  *
22111da177e4SLinus Torvalds  * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
22121da177e4SLinus Torvalds  *
22131da177e4SLinus Torvalds  * We have to restart searching the prio_tree whenever we drop the lock,
22141da177e4SLinus Torvalds  * since the iterator is only valid while the lock is held, and anyway
22151da177e4SLinus Torvalds  * a later vma might be split and reinserted earlier while the lock is dropped.
22161da177e4SLinus Torvalds  *
22171da177e4SLinus Torvalds  * The list of nonlinear vmas could be handled more efficiently, using
22181da177e4SLinus Torvalds  * a placeholder, but handle it in the same way until a need is shown.
22191da177e4SLinus Torvalds  * It is important to search the prio_tree before nonlinear list: a vma
22201da177e4SLinus Torvalds  * may become nonlinear and be shifted from prio_tree to nonlinear list
22211da177e4SLinus Torvalds  * while the lock is dropped; but never shifted from list to prio_tree.
22221da177e4SLinus Torvalds  *
22231da177e4SLinus Torvalds  * In order to make forward progress despite restarting the search,
22241da177e4SLinus Torvalds  * vm_truncate_count is used to mark a vma as now dealt with, so we can
22251da177e4SLinus Torvalds  * quickly skip it next time around.  Since the prio_tree search only
22261da177e4SLinus Torvalds  * shows us those vmas affected by unmapping the range in question, we
22271da177e4SLinus Torvalds  * can't efficiently keep all vmas in step with mapping->truncate_count:
22281da177e4SLinus Torvalds  * so instead reset them all whenever it wraps back to 0 (then go to 1).
22291da177e4SLinus Torvalds  * mapping->truncate_count and vma->vm_truncate_count are protected by
22301da177e4SLinus Torvalds  * i_mmap_lock.
22311da177e4SLinus Torvalds  *
22321da177e4SLinus Torvalds  * In order to make forward progress despite repeatedly restarting some
2233ee39b37bSHugh Dickins  * large vma, note the restart_addr from unmap_vmas when it breaks out:
22341da177e4SLinus Torvalds  * and restart from that address when we reach that vma again.  It might
22351da177e4SLinus Torvalds  * have been split or merged, shrunk or extended, but never shifted: so
22361da177e4SLinus Torvalds  * restart_addr remains valid so long as it remains in the vma's range.
22371da177e4SLinus Torvalds  * unmap_mapping_range forces truncate_count to leap over page-aligned
22381da177e4SLinus Torvalds  * values so we can save vma's restart_addr in its truncate_count field.
22391da177e4SLinus Torvalds  */
22401da177e4SLinus Torvalds #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
22411da177e4SLinus Torvalds 
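/*
 * Editor's worked example, not part of memory.c: with 4K pages,
 * ~PAGE_MASK is 0xfff, so is_restart_addr(0x2000) is true (a
 * page-aligned restart address stashed by unmap_mapping_range_vma)
 * while is_restart_addr(0x2001) is false (an ordinary truncate_count).
 * unmap_mapping_range() below bumps mapping->truncate_count past any
 * page-aligned value, so the two uses can never be confused.
 */
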
22421da177e4SLinus Torvalds static void reset_vma_truncate_counts(struct address_space *mapping)
22431da177e4SLinus Torvalds {
22441da177e4SLinus Torvalds 	struct vm_area_struct *vma;
22451da177e4SLinus Torvalds 	struct prio_tree_iter iter;
22461da177e4SLinus Torvalds 
22471da177e4SLinus Torvalds 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
22481da177e4SLinus Torvalds 		vma->vm_truncate_count = 0;
22491da177e4SLinus Torvalds 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
22501da177e4SLinus Torvalds 		vma->vm_truncate_count = 0;
22511da177e4SLinus Torvalds }
22521da177e4SLinus Torvalds 
22531da177e4SLinus Torvalds static int unmap_mapping_range_vma(struct vm_area_struct *vma,
22541da177e4SLinus Torvalds 		unsigned long start_addr, unsigned long end_addr,
22551da177e4SLinus Torvalds 		struct zap_details *details)
22561da177e4SLinus Torvalds {
22571da177e4SLinus Torvalds 	unsigned long restart_addr;
22581da177e4SLinus Torvalds 	int need_break;
22591da177e4SLinus Torvalds 
2260d00806b1SNick Piggin 	/*
2261d00806b1SNick Piggin 	 * files that support invalidating or truncating portions of the
2262d0217ac0SNick Piggin 	 * file from under mmapped areas must have their ->fault function
226383c54070SNick Piggin 	 * return a locked page (and set VM_FAULT_LOCKED in the return).
226483c54070SNick Piggin 	 * This provides synchronisation against concurrent unmapping here.
2265d00806b1SNick Piggin 	 */
2266d00806b1SNick Piggin 
22671da177e4SLinus Torvalds again:
22681da177e4SLinus Torvalds 	restart_addr = vma->vm_truncate_count;
22691da177e4SLinus Torvalds 	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
22701da177e4SLinus Torvalds 		start_addr = restart_addr;
22711da177e4SLinus Torvalds 		if (start_addr >= end_addr) {
22721da177e4SLinus Torvalds 			/* Top of vma has been split off since last time */
22731da177e4SLinus Torvalds 			vma->vm_truncate_count = details->truncate_count;
22741da177e4SLinus Torvalds 			return 0;
22751da177e4SLinus Torvalds 		}
22761da177e4SLinus Torvalds 	}
22771da177e4SLinus Torvalds 
2278ee39b37bSHugh Dickins 	restart_addr = zap_page_range(vma, start_addr,
2279ee39b37bSHugh Dickins 					end_addr - start_addr, details);
228095c354feSNick Piggin 	need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
22811da177e4SLinus Torvalds 
2282ee39b37bSHugh Dickins 	if (restart_addr >= end_addr) {
22831da177e4SLinus Torvalds 		/* We have now completed this vma: mark it so */
22841da177e4SLinus Torvalds 		vma->vm_truncate_count = details->truncate_count;
22851da177e4SLinus Torvalds 		if (!need_break)
22861da177e4SLinus Torvalds 			return 0;
22871da177e4SLinus Torvalds 	} else {
22881da177e4SLinus Torvalds 		/* Note restart_addr in vma's truncate_count field */
2289ee39b37bSHugh Dickins 		vma->vm_truncate_count = restart_addr;
22901da177e4SLinus Torvalds 		if (!need_break)
22911da177e4SLinus Torvalds 			goto again;
22921da177e4SLinus Torvalds 	}
22931da177e4SLinus Torvalds 
22941da177e4SLinus Torvalds 	spin_unlock(details->i_mmap_lock);
22951da177e4SLinus Torvalds 	cond_resched();
22961da177e4SLinus Torvalds 	spin_lock(details->i_mmap_lock);
22971da177e4SLinus Torvalds 	return -EINTR;
22981da177e4SLinus Torvalds }
22991da177e4SLinus Torvalds 
23001da177e4SLinus Torvalds static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
23011da177e4SLinus Torvalds 					    struct zap_details *details)
23021da177e4SLinus Torvalds {
23031da177e4SLinus Torvalds 	struct vm_area_struct *vma;
23041da177e4SLinus Torvalds 	struct prio_tree_iter iter;
23051da177e4SLinus Torvalds 	pgoff_t vba, vea, zba, zea;
23061da177e4SLinus Torvalds 
23071da177e4SLinus Torvalds restart:
23081da177e4SLinus Torvalds 	vma_prio_tree_foreach(vma, &iter, root,
23091da177e4SLinus Torvalds 			details->first_index, details->last_index) {
23101da177e4SLinus Torvalds 		/* Skip quickly over those we have already dealt with */
23111da177e4SLinus Torvalds 		if (vma->vm_truncate_count == details->truncate_count)
23121da177e4SLinus Torvalds 			continue;
23131da177e4SLinus Torvalds 
23141da177e4SLinus Torvalds 		vba = vma->vm_pgoff;
23151da177e4SLinus Torvalds 		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
23161da177e4SLinus Torvalds 		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
23171da177e4SLinus Torvalds 		zba = details->first_index;
23181da177e4SLinus Torvalds 		if (zba < vba)
23191da177e4SLinus Torvalds 			zba = vba;
23201da177e4SLinus Torvalds 		zea = details->last_index;
23211da177e4SLinus Torvalds 		if (zea > vea)
23221da177e4SLinus Torvalds 			zea = vea;
23231da177e4SLinus Torvalds 
23241da177e4SLinus Torvalds 		if (unmap_mapping_range_vma(vma,
23251da177e4SLinus Torvalds 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
23261da177e4SLinus Torvalds 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
23271da177e4SLinus Torvalds 				details) < 0)
23281da177e4SLinus Torvalds 			goto restart;
23291da177e4SLinus Torvalds 	}
23301da177e4SLinus Torvalds }
23311da177e4SLinus Torvalds 
23321da177e4SLinus Torvalds static inline void unmap_mapping_range_list(struct list_head *head,
23331da177e4SLinus Torvalds 					    struct zap_details *details)
23341da177e4SLinus Torvalds {
23351da177e4SLinus Torvalds 	struct vm_area_struct *vma;
23361da177e4SLinus Torvalds 
23371da177e4SLinus Torvalds 	/*
23381da177e4SLinus Torvalds 	 * In nonlinear VMAs there is no correspondence between virtual address
23391da177e4SLinus Torvalds 	 * offset and file offset.  So we must perform an exhaustive search
23401da177e4SLinus Torvalds 	 * across *all* the pages in each nonlinear VMA, not just the pages
23411da177e4SLinus Torvalds 	 * whose virtual address lies outside the file truncation point.
23421da177e4SLinus Torvalds 	 */
23431da177e4SLinus Torvalds restart:
23441da177e4SLinus Torvalds 	list_for_each_entry(vma, head, shared.vm_set.list) {
23451da177e4SLinus Torvalds 		/* Skip quickly over those we have already dealt with */
23461da177e4SLinus Torvalds 		if (vma->vm_truncate_count == details->truncate_count)
23471da177e4SLinus Torvalds 			continue;
23481da177e4SLinus Torvalds 		details->nonlinear_vma = vma;
23491da177e4SLinus Torvalds 		if (unmap_mapping_range_vma(vma, vma->vm_start,
23501da177e4SLinus Torvalds 					vma->vm_end, details) < 0)
23511da177e4SLinus Torvalds 			goto restart;
23521da177e4SLinus Torvalds 	}
23531da177e4SLinus Torvalds }
23541da177e4SLinus Torvalds 
23551da177e4SLinus Torvalds /**
235672fd4a35SRobert P. J. Day  * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
23573d41088fSMartin Waitz  * @mapping: the address space containing mmaps to be unmapped.
23581da177e4SLinus Torvalds  * @holebegin: byte in first page to unmap, relative to the start of
23591da177e4SLinus Torvalds  * the underlying file.  This will be rounded down to a PAGE_SIZE
23601da177e4SLinus Torvalds  * boundary.  Note that this is different from vmtruncate(), which
23611da177e4SLinus Torvalds  * must keep the partial page.  In contrast, we must get rid of
23621da177e4SLinus Torvalds  * partial pages.
23631da177e4SLinus Torvalds  * @holelen: size of prospective hole in bytes.  This will be rounded
23641da177e4SLinus Torvalds  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
23651da177e4SLinus Torvalds  * end of the file.
23661da177e4SLinus Torvalds  * @even_cows: 1 when truncating a file: unmap even private COWed pages;
23671da177e4SLinus Torvalds  * 0 when invalidating pagecache: don't throw away private data.
23681da177e4SLinus Torvalds  */
23691da177e4SLinus Torvalds void unmap_mapping_range(struct address_space *mapping,
23701da177e4SLinus Torvalds 		loff_t const holebegin, loff_t const holelen, int even_cows)
23711da177e4SLinus Torvalds {
23721da177e4SLinus Torvalds 	struct zap_details details;
23731da177e4SLinus Torvalds 	pgoff_t hba = holebegin >> PAGE_SHIFT;
23741da177e4SLinus Torvalds 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
23751da177e4SLinus Torvalds 
23761da177e4SLinus Torvalds 	/* Check for overflow. */
23771da177e4SLinus Torvalds 	if (sizeof(holelen) > sizeof(hlen)) {
23781da177e4SLinus Torvalds 		long long holeend =
23791da177e4SLinus Torvalds 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
23801da177e4SLinus Torvalds 		if (holeend & ~(long long)ULONG_MAX)
23811da177e4SLinus Torvalds 			hlen = ULONG_MAX - hba + 1;
23821da177e4SLinus Torvalds 	}
23831da177e4SLinus Torvalds 
23841da177e4SLinus Torvalds 	details.check_mapping = even_cows? NULL: mapping;
23851da177e4SLinus Torvalds 	details.nonlinear_vma = NULL;
23861da177e4SLinus Torvalds 	details.first_index = hba;
23871da177e4SLinus Torvalds 	details.last_index = hba + hlen - 1;
23881da177e4SLinus Torvalds 	if (details.last_index < details.first_index)
23891da177e4SLinus Torvalds 		details.last_index = ULONG_MAX;
23901da177e4SLinus Torvalds 	details.i_mmap_lock = &mapping->i_mmap_lock;
23911da177e4SLinus Torvalds 
23921da177e4SLinus Torvalds 	spin_lock(&mapping->i_mmap_lock);
23931da177e4SLinus Torvalds 
2394d00806b1SNick Piggin 	/* Protect against endless unmapping loops */
23951da177e4SLinus Torvalds 	mapping->truncate_count++;
23961da177e4SLinus Torvalds 	if (unlikely(is_restart_addr(mapping->truncate_count))) {
23971da177e4SLinus Torvalds 		if (mapping->truncate_count == 0)
23981da177e4SLinus Torvalds 			reset_vma_truncate_counts(mapping);
23991da177e4SLinus Torvalds 		mapping->truncate_count++;
24001da177e4SLinus Torvalds 	}
24011da177e4SLinus Torvalds 	details.truncate_count = mapping->truncate_count;
24021da177e4SLinus Torvalds 
24031da177e4SLinus Torvalds 	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
24041da177e4SLinus Torvalds 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
24051da177e4SLinus Torvalds 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
24061da177e4SLinus Torvalds 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
24071da177e4SLinus Torvalds 	spin_unlock(&mapping->i_mmap_lock);
24081da177e4SLinus Torvalds }
24091da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_mapping_range);
24101da177e4SLinus Torvalds 
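/*
 * Editor's illustrative sketch, not part of memory.c: how a
 * hole-punching path in a filesystem might use unmap_mapping_range().
 * The helper name and calling convention are hypothetical; only the
 * rounding and even_cows behaviour documented above are relied upon.
 */
static void example_unmap_hole(struct inode *inode, loff_t start, loff_t len)
{
	/* Throw away private COWs too, as for truncation (even_cows = 1). */
	unmap_mapping_range(inode->i_mapping, start, len, 1);
}
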
2411bfa5bf6dSRolf Eike Beer /**
2412bfa5bf6dSRolf Eike Beer  * vmtruncate - unmap mappings "freed" by truncate() syscall
2413bfa5bf6dSRolf Eike Beer  * @inode: inode of the file used
2414bfa5bf6dSRolf Eike Beer  * @offset: file offset to start truncating
24151da177e4SLinus Torvalds  *
24161da177e4SLinus Torvalds  * NOTE! We have to be ready to update the memory sharing
24171da177e4SLinus Torvalds  * between the file and the memory map for a potential last
24181da177e4SLinus Torvalds  * incomplete page.  Ugly, but necessary.
24191da177e4SLinus Torvalds  */
24201da177e4SLinus Torvalds int vmtruncate(struct inode * inode, loff_t offset)
24211da177e4SLinus Torvalds {
242261d5048fSChristoph Hellwig 	if (inode->i_size < offset) {
24231da177e4SLinus Torvalds 		unsigned long limit;
24241da177e4SLinus Torvalds 
24251da177e4SLinus Torvalds 		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
24261da177e4SLinus Torvalds 		if (limit != RLIM_INFINITY && offset > limit)
24271da177e4SLinus Torvalds 			goto out_sig;
24281da177e4SLinus Torvalds 		if (offset > inode->i_sb->s_maxbytes)
24291da177e4SLinus Torvalds 			goto out_big;
24301da177e4SLinus Torvalds 		i_size_write(inode, offset);
243161d5048fSChristoph Hellwig 	} else {
243261d5048fSChristoph Hellwig 		struct address_space *mapping = inode->i_mapping;
24331da177e4SLinus Torvalds 
243461d5048fSChristoph Hellwig 		/*
243561d5048fSChristoph Hellwig 		 * truncation of in-use swapfiles is disallowed - it would
243661d5048fSChristoph Hellwig 		 * cause subsequent swapout to scribble on the now-freed
243761d5048fSChristoph Hellwig 		 * blocks.
243861d5048fSChristoph Hellwig 		 */
243961d5048fSChristoph Hellwig 		if (IS_SWAPFILE(inode))
244061d5048fSChristoph Hellwig 			return -ETXTBSY;
244161d5048fSChristoph Hellwig 		i_size_write(inode, offset);
244261d5048fSChristoph Hellwig 
244361d5048fSChristoph Hellwig 		/*
244461d5048fSChristoph Hellwig 		 * unmap_mapping_range is called twice, first simply for
244561d5048fSChristoph Hellwig 		 * efficiency so that truncate_inode_pages does fewer
244661d5048fSChristoph Hellwig 		 * single-page unmaps.  However after this first call, and
244761d5048fSChristoph Hellwig 		 * before truncate_inode_pages finishes, it is possible for
244861d5048fSChristoph Hellwig 		 * private pages to be COWed, which remain after
244961d5048fSChristoph Hellwig 		 * truncate_inode_pages finishes, hence the second
245061d5048fSChristoph Hellwig 		 * unmap_mapping_range call must be made for correctness.
245161d5048fSChristoph Hellwig 		 */
245261d5048fSChristoph Hellwig 		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
245361d5048fSChristoph Hellwig 		truncate_inode_pages(mapping, offset);
245461d5048fSChristoph Hellwig 		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
245561d5048fSChristoph Hellwig 	}
245661d5048fSChristoph Hellwig 
2457acfa4380SAl Viro 	if (inode->i_op->truncate)
24581da177e4SLinus Torvalds 		inode->i_op->truncate(inode);
24591da177e4SLinus Torvalds 	return 0;
246061d5048fSChristoph Hellwig 
24611da177e4SLinus Torvalds out_sig:
24621da177e4SLinus Torvalds 	send_sig(SIGXFSZ, current, 0);
24631da177e4SLinus Torvalds out_big:
24641da177e4SLinus Torvalds 	return -EFBIG;
24651da177e4SLinus Torvalds }
24661da177e4SLinus Torvalds EXPORT_SYMBOL(vmtruncate);
24671da177e4SLinus Torvalds 
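/*
 * Editor's illustrative sketch, not part of memory.c: how a simple
 * filesystem's ->setattr path might use vmtruncate() when i_size
 * changes.  The helper name is hypothetical.
 */
static int example_truncate_setattr(struct inode *inode, loff_t newsize)
{
	int error;

	error = vmtruncate(inode, newsize);
	if (error)
		return error;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}
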
2468f6b3ec23SBadari Pulavarty int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
2469f6b3ec23SBadari Pulavarty {
2470f6b3ec23SBadari Pulavarty 	struct address_space *mapping = inode->i_mapping;
2471f6b3ec23SBadari Pulavarty 
2472f6b3ec23SBadari Pulavarty 	/*
2473f6b3ec23SBadari Pulavarty 	 * If the underlying filesystem is not going to provide
2474f6b3ec23SBadari Pulavarty 	 * a way to truncate a range of blocks (punch a hole) -
2475f6b3ec23SBadari Pulavarty 	 * we should return failure right now.
2476f6b3ec23SBadari Pulavarty 	 */
2477acfa4380SAl Viro 	if (!inode->i_op->truncate_range)
2478f6b3ec23SBadari Pulavarty 		return -ENOSYS;
2479f6b3ec23SBadari Pulavarty 
24801b1dcc1bSJes Sorensen 	mutex_lock(&inode->i_mutex);
2481f6b3ec23SBadari Pulavarty 	down_write(&inode->i_alloc_sem);
2482f6b3ec23SBadari Pulavarty 	unmap_mapping_range(mapping, offset, (end - offset), 1);
2483f6b3ec23SBadari Pulavarty 	truncate_inode_pages_range(mapping, offset, end);
2484d00806b1SNick Piggin 	unmap_mapping_range(mapping, offset, (end - offset), 1);
2485f6b3ec23SBadari Pulavarty 	inode->i_op->truncate_range(inode, offset, end);
2486f6b3ec23SBadari Pulavarty 	up_write(&inode->i_alloc_sem);
24871b1dcc1bSJes Sorensen 	mutex_unlock(&inode->i_mutex);
2488f6b3ec23SBadari Pulavarty 
2489f6b3ec23SBadari Pulavarty 	return 0;
2490f6b3ec23SBadari Pulavarty }
2491f6b3ec23SBadari Pulavarty 
24921da177e4SLinus Torvalds /*
24938f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
24948f4e2101SHugh Dickins  * but allow concurrent faults), and pte mapped but not yet locked.
24958f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
24961da177e4SLinus Torvalds  */
249765500d23SHugh Dickins static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
249865500d23SHugh Dickins 		unsigned long address, pte_t *page_table, pmd_t *pmd,
249965500d23SHugh Dickins 		int write_access, pte_t orig_pte)
25001da177e4SLinus Torvalds {
25018f4e2101SHugh Dickins 	spinlock_t *ptl;
25021da177e4SLinus Torvalds 	struct page *page;
250365500d23SHugh Dickins 	swp_entry_t entry;
25041da177e4SLinus Torvalds 	pte_t pte;
25057a81b88cSKAMEZAWA Hiroyuki 	struct mem_cgroup *ptr = NULL;
250683c54070SNick Piggin 	int ret = 0;
25071da177e4SLinus Torvalds 
25084c21e2f2SHugh Dickins 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
25098f4e2101SHugh Dickins 		goto out;
251065500d23SHugh Dickins 
251165500d23SHugh Dickins 	entry = pte_to_swp_entry(orig_pte);
25120697212aSChristoph Lameter 	if (is_migration_entry(entry)) {
25130697212aSChristoph Lameter 		migration_entry_wait(mm, pmd, address);
25140697212aSChristoph Lameter 		goto out;
25150697212aSChristoph Lameter 	}
25160ff92245SShailabh Nagar 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
25171da177e4SLinus Torvalds 	page = lookup_swap_cache(entry);
25181da177e4SLinus Torvalds 	if (!page) {
2519098fe651SAshwin Chaugule 		grab_swap_token(); /* Contend for token _before_ read-in */
252002098feaSHugh Dickins 		page = swapin_readahead(entry,
252102098feaSHugh Dickins 					GFP_HIGHUSER_MOVABLE, vma, address);
25221da177e4SLinus Torvalds 		if (!page) {
25231da177e4SLinus Torvalds 			/*
25248f4e2101SHugh Dickins 			 * Back out if somebody else faulted in this pte
25258f4e2101SHugh Dickins 			 * while we released the pte lock.
25261da177e4SLinus Torvalds 			 */
25278f4e2101SHugh Dickins 			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
25281da177e4SLinus Torvalds 			if (likely(pte_same(*page_table, orig_pte)))
25291da177e4SLinus Torvalds 				ret = VM_FAULT_OOM;
25300ff92245SShailabh Nagar 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
253165500d23SHugh Dickins 			goto unlock;
25321da177e4SLinus Torvalds 		}
25331da177e4SLinus Torvalds 
25341da177e4SLinus Torvalds 		/* Had to read the page from swap area: Major fault */
25351da177e4SLinus Torvalds 		ret = VM_FAULT_MAJOR;
2536f8891e5eSChristoph Lameter 		count_vm_event(PGMAJFAULT);
25371da177e4SLinus Torvalds 	}
25381da177e4SLinus Torvalds 
25391da177e4SLinus Torvalds 	lock_page(page);
254020a1022dSBalbir Singh 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
25411da177e4SLinus Torvalds 
25422c26fdd7SKAMEZAWA Hiroyuki 	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
2543073e587eSKAMEZAWA Hiroyuki 		ret = VM_FAULT_OOM;
2544bc43f75cSJohannes Weiner 		goto out_page;
2545073e587eSKAMEZAWA Hiroyuki 	}
2546073e587eSKAMEZAWA Hiroyuki 
25471da177e4SLinus Torvalds 	/*
25488f4e2101SHugh Dickins 	 * Back out if somebody else already faulted in this pte.
25491da177e4SLinus Torvalds 	 */
25508f4e2101SHugh Dickins 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
25519e9bef07SHugh Dickins 	if (unlikely(!pte_same(*page_table, orig_pte)))
2552b8107480SKirill Korotaev 		goto out_nomap;
2553b8107480SKirill Korotaev 
2554b8107480SKirill Korotaev 	if (unlikely(!PageUptodate(page))) {
2555b8107480SKirill Korotaev 		ret = VM_FAULT_SIGBUS;
2556b8107480SKirill Korotaev 		goto out_nomap;
25571da177e4SLinus Torvalds 	}
25581da177e4SLinus Torvalds 
25598c7c6e34SKAMEZAWA Hiroyuki 	/*
25608c7c6e34SKAMEZAWA Hiroyuki 	 * The page isn't present yet, go ahead with the fault.
25618c7c6e34SKAMEZAWA Hiroyuki 	 *
25628c7c6e34SKAMEZAWA Hiroyuki 	 * Be careful about the sequence of operations here.
25638c7c6e34SKAMEZAWA Hiroyuki 	 * To get its accounting right, reuse_swap_page() must be called
25648c7c6e34SKAMEZAWA Hiroyuki 	 * while the page is counted on swap but not yet in mapcount i.e.
25658c7c6e34SKAMEZAWA Hiroyuki 	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
25668c7c6e34SKAMEZAWA Hiroyuki 	 * must be called after the swap_free(), or it will never succeed.
256703f3c433SKAMEZAWA Hiroyuki 	 * Because delete_from_swap_cache() may be called by reuse_swap_page(),
256803f3c433SKAMEZAWA Hiroyuki 	 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
256903f3c433SKAMEZAWA Hiroyuki 	 * in page->private. In this case, a record in swap_cgroup is silently
257003f3c433SKAMEZAWA Hiroyuki 	 * discarded at swap_free().
25718c7c6e34SKAMEZAWA Hiroyuki 	 */
25721da177e4SLinus Torvalds 
25734294621fSHugh Dickins 	inc_mm_counter(mm, anon_rss);
25741da177e4SLinus Torvalds 	pte = mk_pte(page, vma->vm_page_prot);
25757b1fe597SHugh Dickins 	if (write_access && reuse_swap_page(page)) {
25761da177e4SLinus Torvalds 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
25771da177e4SLinus Torvalds 		write_access = 0;
25781da177e4SLinus Torvalds 	}
25791da177e4SLinus Torvalds 	flush_icache_page(vma, page);
25801da177e4SLinus Torvalds 	set_pte_at(mm, address, page_table, pte);
25811da177e4SLinus Torvalds 	page_add_anon_rmap(page, vma, address);
258203f3c433SKAMEZAWA Hiroyuki 	/* It's better to call commit-charge after rmap is established */
258303f3c433SKAMEZAWA Hiroyuki 	mem_cgroup_commit_charge_swapin(page, ptr);
25841da177e4SLinus Torvalds 
2585c475a8abSHugh Dickins 	swap_free(entry);
2586b291f000SNick Piggin 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
2587a2c43eedSHugh Dickins 		try_to_free_swap(page);
2588c475a8abSHugh Dickins 	unlock_page(page);
2589c475a8abSHugh Dickins 
25901da177e4SLinus Torvalds 	if (write_access) {
259161469f1dSHugh Dickins 		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
259261469f1dSHugh Dickins 		if (ret & VM_FAULT_ERROR)
259361469f1dSHugh Dickins 			ret &= VM_FAULT_ERROR;
25941da177e4SLinus Torvalds 		goto out;
25951da177e4SLinus Torvalds 	}
25961da177e4SLinus Torvalds 
25971da177e4SLinus Torvalds 	/* No need to invalidate - it was non-present before */
25981da177e4SLinus Torvalds 	update_mmu_cache(vma, address, pte);
259965500d23SHugh Dickins unlock:
26008f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
26011da177e4SLinus Torvalds out:
26021da177e4SLinus Torvalds 	return ret;
2603b8107480SKirill Korotaev out_nomap:
26047a81b88cSKAMEZAWA Hiroyuki 	mem_cgroup_cancel_charge_swapin(ptr);
26058f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
2606bc43f75cSJohannes Weiner out_page:
2607b8107480SKirill Korotaev 	unlock_page(page);
2608b8107480SKirill Korotaev 	page_cache_release(page);
260965500d23SHugh Dickins 	return ret;
26101da177e4SLinus Torvalds }
26111da177e4SLinus Torvalds 
26121da177e4SLinus Torvalds /*
26138f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
26148f4e2101SHugh Dickins  * but allow concurrent faults), and pte mapped but not yet locked.
26158f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
26161da177e4SLinus Torvalds  */
261765500d23SHugh Dickins static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
261865500d23SHugh Dickins 		unsigned long address, pte_t *page_table, pmd_t *pmd,
261965500d23SHugh Dickins 		int write_access)
26201da177e4SLinus Torvalds {
26218f4e2101SHugh Dickins 	struct page *page;
26228f4e2101SHugh Dickins 	spinlock_t *ptl;
26231da177e4SLinus Torvalds 	pte_t entry;
26241da177e4SLinus Torvalds 
26251da177e4SLinus Torvalds 	/* Allocate our own private page. */
26261da177e4SLinus Torvalds 	pte_unmap(page_table);
26271da177e4SLinus Torvalds 
26281da177e4SLinus Torvalds 	if (unlikely(anon_vma_prepare(vma)))
262965500d23SHugh Dickins 		goto oom;
2630769848c0SMel Gorman 	page = alloc_zeroed_user_highpage_movable(vma, address);
26311da177e4SLinus Torvalds 	if (!page)
263265500d23SHugh Dickins 		goto oom;
26330ed361deSNick Piggin 	__SetPageUptodate(page);
26341da177e4SLinus Torvalds 
26352c26fdd7SKAMEZAWA Hiroyuki 	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
26368a9f3ccdSBalbir Singh 		goto oom_free_page;
26378a9f3ccdSBalbir Singh 
263865500d23SHugh Dickins 	entry = mk_pte(page, vma->vm_page_prot);
263965500d23SHugh Dickins 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
26408f4e2101SHugh Dickins 
26418f4e2101SHugh Dickins 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
26428f4e2101SHugh Dickins 	if (!pte_none(*page_table))
26438f4e2101SHugh Dickins 		goto release;
26448f4e2101SHugh Dickins 	inc_mm_counter(mm, anon_rss);
26459617d95eSNick Piggin 	page_add_new_anon_rmap(page, vma, address);
264665500d23SHugh Dickins 	set_pte_at(mm, address, page_table, entry);
26471da177e4SLinus Torvalds 
26481da177e4SLinus Torvalds 	/* No need to invalidate - it was non-present before */
264965500d23SHugh Dickins 	update_mmu_cache(vma, address, entry);
265065500d23SHugh Dickins unlock:
26518f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
265283c54070SNick Piggin 	return 0;
26538f4e2101SHugh Dickins release:
26548a9f3ccdSBalbir Singh 	mem_cgroup_uncharge_page(page);
26558f4e2101SHugh Dickins 	page_cache_release(page);
26568f4e2101SHugh Dickins 	goto unlock;
26578a9f3ccdSBalbir Singh oom_free_page:
26586dbf6d3bSHugh Dickins 	page_cache_release(page);
265965500d23SHugh Dickins oom:
26601da177e4SLinus Torvalds 	return VM_FAULT_OOM;
26611da177e4SLinus Torvalds }
26621da177e4SLinus Torvalds 
26631da177e4SLinus Torvalds /*
266454cb8821SNick Piggin  * __do_fault() tries to create a new page mapping. It aggressively
26651da177e4SLinus Torvalds  * tries to share with existing pages, but makes a separate copy if
266654cb8821SNick Piggin  * FAULT_FLAG_WRITE is set in the flags parameter, in order to avoid
266754cb8821SNick Piggin  * the next page fault.
26681da177e4SLinus Torvalds  *
26691da177e4SLinus Torvalds  * As this is called only for pages that do not currently exist, we
26701da177e4SLinus Torvalds  * do not need to flush old virtual caches or the TLB.
26711da177e4SLinus Torvalds  *
26728f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
267316abfa08SHugh Dickins  * but allow concurrent faults), and pte neither mapped nor locked.
26748f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
26751da177e4SLinus Torvalds  */
267654cb8821SNick Piggin static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
267716abfa08SHugh Dickins 		unsigned long address, pmd_t *pmd,
267854cb8821SNick Piggin 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
26791da177e4SLinus Torvalds {
268016abfa08SHugh Dickins 	pte_t *page_table;
26818f4e2101SHugh Dickins 	spinlock_t *ptl;
2682d0217ac0SNick Piggin 	struct page *page;
26831da177e4SLinus Torvalds 	pte_t entry;
26841da177e4SLinus Torvalds 	int anon = 0;
26855b4e655eSKAMEZAWA Hiroyuki 	int charged = 0;
2686d08b3851SPeter Zijlstra 	struct page *dirty_page = NULL;
2687d0217ac0SNick Piggin 	struct vm_fault vmf;
2688d0217ac0SNick Piggin 	int ret;
2689a200ee18SPeter Zijlstra 	int page_mkwrite = 0;
269054cb8821SNick Piggin 
2691d0217ac0SNick Piggin 	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
2692d0217ac0SNick Piggin 	vmf.pgoff = pgoff;
2693d0217ac0SNick Piggin 	vmf.flags = flags;
2694d0217ac0SNick Piggin 	vmf.page = NULL;
26951da177e4SLinus Torvalds 
2696d0217ac0SNick Piggin 	ret = vma->vm_ops->fault(vma, &vmf);
269783c54070SNick Piggin 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
269883c54070SNick Piggin 		return ret;
26991da177e4SLinus Torvalds 
2700d00806b1SNick Piggin 	/*
2701d0217ac0SNick Piggin 	 * For consistency in subsequent calls, make sure the faulted page
2702d00806b1SNick Piggin 	 * is always locked.
2703d00806b1SNick Piggin 	 */
270483c54070SNick Piggin 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
2705d0217ac0SNick Piggin 		lock_page(vmf.page);
270654cb8821SNick Piggin 	else
2707d0217ac0SNick Piggin 		VM_BUG_ON(!PageLocked(vmf.page));
2708d00806b1SNick Piggin 
27091da177e4SLinus Torvalds 	/*
27101da177e4SLinus Torvalds 	 * Should we do an early C-O-W break?
27111da177e4SLinus Torvalds 	 */
2712d0217ac0SNick Piggin 	page = vmf.page;
271354cb8821SNick Piggin 	if (flags & FAULT_FLAG_WRITE) {
27149637a5efSDavid Howells 		if (!(vma->vm_flags & VM_SHARED)) {
271554cb8821SNick Piggin 			anon = 1;
2716d00806b1SNick Piggin 			if (unlikely(anon_vma_prepare(vma))) {
2717d0217ac0SNick Piggin 				ret = VM_FAULT_OOM;
271854cb8821SNick Piggin 				goto out;
2719d00806b1SNick Piggin 			}
272083c54070SNick Piggin 			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
272183c54070SNick Piggin 						vma, address);
2722d00806b1SNick Piggin 			if (!page) {
2723d0217ac0SNick Piggin 				ret = VM_FAULT_OOM;
272454cb8821SNick Piggin 				goto out;
2725d00806b1SNick Piggin 			}
27262c26fdd7SKAMEZAWA Hiroyuki 			if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
27275b4e655eSKAMEZAWA Hiroyuki 				ret = VM_FAULT_OOM;
27285b4e655eSKAMEZAWA Hiroyuki 				page_cache_release(page);
27295b4e655eSKAMEZAWA Hiroyuki 				goto out;
27305b4e655eSKAMEZAWA Hiroyuki 			}
27315b4e655eSKAMEZAWA Hiroyuki 			charged = 1;
2732b291f000SNick Piggin 			/*
2733b291f000SNick Piggin 			 * Don't let another task, with possibly unlocked vma,
2734b291f000SNick Piggin 			 * keep the mlocked page.
2735b291f000SNick Piggin 			 */
2736b291f000SNick Piggin 			if (vma->vm_flags & VM_LOCKED)
2737b291f000SNick Piggin 				clear_page_mlock(vmf.page);
2738d0217ac0SNick Piggin 			copy_user_highpage(page, vmf.page, address, vma);
27390ed361deSNick Piggin 			__SetPageUptodate(page);
27409637a5efSDavid Howells 		} else {
274154cb8821SNick Piggin 			/*
274254cb8821SNick Piggin 			 * If the page will be shareable, see if the backing
27439637a5efSDavid Howells 			 * address space wants to know that the page is about
274454cb8821SNick Piggin 			 * to become writable
274554cb8821SNick Piggin 			 */
274669676147SMark Fasheh 			if (vma->vm_ops->page_mkwrite) {
2747c2ec175cSNick Piggin 				int tmp;
2748c2ec175cSNick Piggin 
274969676147SMark Fasheh 				unlock_page(page);
2750b827e496SNick Piggin 				vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2751c2ec175cSNick Piggin 				tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
2752c2ec175cSNick Piggin 				if (unlikely(tmp &
2753c2ec175cSNick Piggin 					  (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
2754c2ec175cSNick Piggin 					ret = tmp;
2755b827e496SNick Piggin 					goto unwritable_page;
275669676147SMark Fasheh 				}
2757b827e496SNick Piggin 				if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
275869676147SMark Fasheh 					lock_page(page);
2759d0217ac0SNick Piggin 					if (!page->mapping) {
2760b827e496SNick Piggin 						ret = 0; /* retry the fault */
2761b827e496SNick Piggin 						unlock_page(page);
2762b827e496SNick Piggin 						goto unwritable_page;
2763d0217ac0SNick Piggin 					}
2764b827e496SNick Piggin 				} else
2765b827e496SNick Piggin 					VM_BUG_ON(!PageLocked(page));
2766a200ee18SPeter Zijlstra 				page_mkwrite = 1;
27679637a5efSDavid Howells 			}
27689637a5efSDavid Howells 		}
276954cb8821SNick Piggin 
27701da177e4SLinus Torvalds 	}
27711da177e4SLinus Torvalds 
27728f4e2101SHugh Dickins 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
27731da177e4SLinus Torvalds 
27741da177e4SLinus Torvalds 	/*
27751da177e4SLinus Torvalds 	 * This silly early PAGE_DIRTY setting removes a race
27761da177e4SLinus Torvalds 	 * due to the bad i386 page protection. But it's valid
27771da177e4SLinus Torvalds 	 * for other architectures too.
27781da177e4SLinus Torvalds 	 *
27791da177e4SLinus Torvalds 	 * Note that if write_access is true, we either now have
27801da177e4SLinus Torvalds 	 * an exclusive copy of the page, or this is a shared mapping,
27811da177e4SLinus Torvalds 	 * so we can make it writable and dirty to avoid having to
27821da177e4SLinus Torvalds 	 * handle that later.
27831da177e4SLinus Torvalds 	 */
27841da177e4SLinus Torvalds 	/* Only go through if we didn't race with anybody else... */
278554cb8821SNick Piggin 	if (likely(pte_same(*page_table, orig_pte))) {
2786d00806b1SNick Piggin 		flush_icache_page(vma, page);
2787d00806b1SNick Piggin 		entry = mk_pte(page, vma->vm_page_prot);
278854cb8821SNick Piggin 		if (flags & FAULT_FLAG_WRITE)
27891da177e4SLinus Torvalds 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
27901da177e4SLinus Torvalds 		if (anon) {
27914294621fSHugh Dickins 			inc_mm_counter(mm, anon_rss);
2792d00806b1SNick Piggin 			page_add_new_anon_rmap(page, vma, address);
2793f57e88a8SHugh Dickins 		} else {
27944294621fSHugh Dickins 			inc_mm_counter(mm, file_rss);
2795d00806b1SNick Piggin 			page_add_file_rmap(page);
279654cb8821SNick Piggin 			if (flags & FAULT_FLAG_WRITE) {
2797d00806b1SNick Piggin 				dirty_page = page;
2798d08b3851SPeter Zijlstra 				get_page(dirty_page);
2799d08b3851SPeter Zijlstra 			}
28004294621fSHugh Dickins 		}
280164d6519dSLee Schermerhorn 		set_pte_at(mm, address, page_table, entry);
28021da177e4SLinus Torvalds 
2803d00806b1SNick Piggin 		/* no need to invalidate: a not-present page won't be cached */
28041da177e4SLinus Torvalds 		update_mmu_cache(vma, address, entry);
2805d00806b1SNick Piggin 	} else {
28065b4e655eSKAMEZAWA Hiroyuki 		if (charged)
28078a9f3ccdSBalbir Singh 			mem_cgroup_uncharge_page(page);
2808d00806b1SNick Piggin 		if (anon)
2809d00806b1SNick Piggin 			page_cache_release(page);
2810d00806b1SNick Piggin 		else
281154cb8821SNick Piggin 			anon = 1; /* no anon but release faulted_page */
2812d00806b1SNick Piggin 	}
2813d00806b1SNick Piggin 
28148f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
2815d00806b1SNick Piggin 
2816d00806b1SNick Piggin out:
2817b827e496SNick Piggin 	if (dirty_page) {
2818b827e496SNick Piggin 		struct address_space *mapping = page->mapping;
28198f7b3d15SAnton Salikhmetov 
2820b827e496SNick Piggin 		if (set_page_dirty(dirty_page))
2821b827e496SNick Piggin 			page_mkwrite = 1;
2822b827e496SNick Piggin 		unlock_page(dirty_page);
2823d08b3851SPeter Zijlstra 		put_page(dirty_page);
2824b827e496SNick Piggin 		if (page_mkwrite && mapping) {
2825b827e496SNick Piggin 			/*
2826b827e496SNick Piggin 			 * Some device drivers do not set page.mapping but still
2827b827e496SNick Piggin 			 * dirty their pages
2828b827e496SNick Piggin 			 */
2829b827e496SNick Piggin 			balance_dirty_pages_ratelimited(mapping);
2830d08b3851SPeter Zijlstra 		}
2831d00806b1SNick Piggin 
2832b827e496SNick Piggin 		/* file_update_time outside page_lock */
2833b827e496SNick Piggin 		if (vma->vm_file)
2834b827e496SNick Piggin 			file_update_time(vma->vm_file);
2835b827e496SNick Piggin 	} else {
2836b827e496SNick Piggin 		unlock_page(vmf.page);
2837b827e496SNick Piggin 		if (anon)
2838b827e496SNick Piggin 			page_cache_release(vmf.page);
2839b827e496SNick Piggin 	}
2840b827e496SNick Piggin 
2841b827e496SNick Piggin 	return ret;
2842b827e496SNick Piggin 
2843b827e496SNick Piggin unwritable_page:
2844b827e496SNick Piggin 	page_cache_release(page);
284583c54070SNick Piggin 	return ret;
284654cb8821SNick Piggin }
2847d00806b1SNick Piggin 
284854cb8821SNick Piggin static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
284954cb8821SNick Piggin 		unsigned long address, pte_t *page_table, pmd_t *pmd,
285054cb8821SNick Piggin 		int write_access, pte_t orig_pte)
285154cb8821SNick Piggin {
285254cb8821SNick Piggin 	pgoff_t pgoff = (((address & PAGE_MASK)
28530da7e01fSDean Nelson 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
285454cb8821SNick Piggin 	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
285554cb8821SNick Piggin 
285616abfa08SHugh Dickins 	pte_unmap(page_table);
285716abfa08SHugh Dickins 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
285854cb8821SNick Piggin }
285954cb8821SNick Piggin 
2860f4b81804SJes Sorensen /*
28611da177e4SLinus Torvalds  * Fault of a previously existing named mapping. Repopulate the pte
28621da177e4SLinus Torvalds  * from the encoded file_pte if possible. This enables swappable
28631da177e4SLinus Torvalds  * nonlinear vmas.
28648f4e2101SHugh Dickins  *
28658f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
28668f4e2101SHugh Dickins  * but allow concurrent faults), and pte mapped but not yet locked.
28678f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
28681da177e4SLinus Torvalds  */
2869d0217ac0SNick Piggin static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
287065500d23SHugh Dickins 		unsigned long address, pte_t *page_table, pmd_t *pmd,
287165500d23SHugh Dickins 		int write_access, pte_t orig_pte)
28721da177e4SLinus Torvalds {
2873d0217ac0SNick Piggin 	unsigned int flags = FAULT_FLAG_NONLINEAR |
2874d0217ac0SNick Piggin 				(write_access ? FAULT_FLAG_WRITE : 0);
287565500d23SHugh Dickins 	pgoff_t pgoff;
28761da177e4SLinus Torvalds 
28774c21e2f2SHugh Dickins 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
287883c54070SNick Piggin 		return 0;
28791da177e4SLinus Torvalds 
28802509ef26SHugh Dickins 	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
288165500d23SHugh Dickins 		/*
288265500d23SHugh Dickins 		 * Page table corrupted: show pte and kill process.
288365500d23SHugh Dickins 		 */
28843dc14741SHugh Dickins 		print_bad_pte(vma, address, orig_pte, NULL);
288565500d23SHugh Dickins 		return VM_FAULT_OOM;
288665500d23SHugh Dickins 	}
288765500d23SHugh Dickins 
288865500d23SHugh Dickins 	pgoff = pte_to_pgoff(orig_pte);
288916abfa08SHugh Dickins 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
28901da177e4SLinus Torvalds }
28911da177e4SLinus Torvalds 
28921da177e4SLinus Torvalds /*
28931da177e4SLinus Torvalds  * These routines also need to handle stuff like marking pages dirty
28941da177e4SLinus Torvalds  * and/or accessed for architectures that don't do it in hardware (most
28951da177e4SLinus Torvalds  * RISC architectures).  The early dirtying is also good on the i386.
28961da177e4SLinus Torvalds  *
28971da177e4SLinus Torvalds  * There is also a hook called "update_mmu_cache()" that architectures
28981da177e4SLinus Torvalds  * with external mmu caches can use to update those (ie the Sparc or
28991da177e4SLinus Torvalds  * PowerPC hashed page tables that act as extended TLBs).
29001da177e4SLinus Torvalds  *
2901c74df32cSHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2902c74df32cSHugh Dickins  * but allow concurrent faults), and pte mapped but not yet locked.
2903c74df32cSHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
29041da177e4SLinus Torvalds  */
29051da177e4SLinus Torvalds static inline int handle_pte_fault(struct mm_struct *mm,
29061da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long address,
290765500d23SHugh Dickins 		pte_t *pte, pmd_t *pmd, int write_access)
29081da177e4SLinus Torvalds {
29091da177e4SLinus Torvalds 	pte_t entry;
29108f4e2101SHugh Dickins 	spinlock_t *ptl;
29111da177e4SLinus Torvalds 
29128dab5241SBenjamin Herrenschmidt 	entry = *pte;
29131da177e4SLinus Torvalds 	if (!pte_present(entry)) {
291465500d23SHugh Dickins 		if (pte_none(entry)) {
2915f4b81804SJes Sorensen 			if (vma->vm_ops) {
29163c18ddd1SNick Piggin 				if (likely(vma->vm_ops->fault))
291754cb8821SNick Piggin 					return do_linear_fault(mm, vma, address,
291854cb8821SNick Piggin 						pte, pmd, write_access, entry);
2919f4b81804SJes Sorensen 			}
2920f4b81804SJes Sorensen 			return do_anonymous_page(mm, vma, address,
292165500d23SHugh Dickins 						 pte, pmd, write_access);
292265500d23SHugh Dickins 		}
29231da177e4SLinus Torvalds 		if (pte_file(entry))
2924d0217ac0SNick Piggin 			return do_nonlinear_fault(mm, vma, address,
292565500d23SHugh Dickins 					pte, pmd, write_access, entry);
292665500d23SHugh Dickins 		return do_swap_page(mm, vma, address,
292765500d23SHugh Dickins 					pte, pmd, write_access, entry);
29281da177e4SLinus Torvalds 	}
29291da177e4SLinus Torvalds 
29304c21e2f2SHugh Dickins 	ptl = pte_lockptr(mm, pmd);
29318f4e2101SHugh Dickins 	spin_lock(ptl);
29328f4e2101SHugh Dickins 	if (unlikely(!pte_same(*pte, entry)))
29338f4e2101SHugh Dickins 		goto unlock;
29341da177e4SLinus Torvalds 	if (write_access) {
29351da177e4SLinus Torvalds 		if (!pte_write(entry))
29368f4e2101SHugh Dickins 			return do_wp_page(mm, vma, address,
29378f4e2101SHugh Dickins 					pte, pmd, ptl, entry);
29381da177e4SLinus Torvalds 		entry = pte_mkdirty(entry);
29391da177e4SLinus Torvalds 	}
29401da177e4SLinus Torvalds 	entry = pte_mkyoung(entry);
29418dab5241SBenjamin Herrenschmidt 	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
29421da177e4SLinus Torvalds 		update_mmu_cache(vma, address, entry);
29431a44e149SAndrea Arcangeli 	} else {
29441a44e149SAndrea Arcangeli 		/*
29451a44e149SAndrea Arcangeli 		 * This is needed only for protection faults but the arch code
29461a44e149SAndrea Arcangeli 		 * is not yet telling us if this is a protection fault or not.
29471a44e149SAndrea Arcangeli 		 * This still avoids useless tlb flushes for .text page faults
29481a44e149SAndrea Arcangeli 		 * with threads.
29491a44e149SAndrea Arcangeli 		 */
29501a44e149SAndrea Arcangeli 		if (write_access)
29511a44e149SAndrea Arcangeli 			flush_tlb_page(vma, address);
29521a44e149SAndrea Arcangeli 	}
29538f4e2101SHugh Dickins unlock:
29548f4e2101SHugh Dickins 	pte_unmap_unlock(pte, ptl);
295583c54070SNick Piggin 	return 0;
29561da177e4SLinus Torvalds }
29571da177e4SLinus Torvalds 
29581da177e4SLinus Torvalds /*
29591da177e4SLinus Torvalds  * By the time we get here, we already hold the mm semaphore
29601da177e4SLinus Torvalds  */
296183c54070SNick Piggin int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
29621da177e4SLinus Torvalds 		unsigned long address, int write_access)
29631da177e4SLinus Torvalds {
29641da177e4SLinus Torvalds 	pgd_t *pgd;
29651da177e4SLinus Torvalds 	pud_t *pud;
29661da177e4SLinus Torvalds 	pmd_t *pmd;
29671da177e4SLinus Torvalds 	pte_t *pte;
29681da177e4SLinus Torvalds 
29691da177e4SLinus Torvalds 	__set_current_state(TASK_RUNNING);
29701da177e4SLinus Torvalds 
2971f8891e5eSChristoph Lameter 	count_vm_event(PGFAULT);
29721da177e4SLinus Torvalds 
2973ac9b9c66SHugh Dickins 	if (unlikely(is_vm_hugetlb_page(vma)))
2974ac9b9c66SHugh Dickins 		return hugetlb_fault(mm, vma, address, write_access);
29751da177e4SLinus Torvalds 
29761da177e4SLinus Torvalds 	pgd = pgd_offset(mm, address);
29771da177e4SLinus Torvalds 	pud = pud_alloc(mm, pgd, address);
29781da177e4SLinus Torvalds 	if (!pud)
2979c74df32cSHugh Dickins 		return VM_FAULT_OOM;
29801da177e4SLinus Torvalds 	pmd = pmd_alloc(mm, pud, address);
29811da177e4SLinus Torvalds 	if (!pmd)
2982c74df32cSHugh Dickins 		return VM_FAULT_OOM;
29831da177e4SLinus Torvalds 	pte = pte_alloc_map(mm, pmd, address);
29841da177e4SLinus Torvalds 	if (!pte)
2985c74df32cSHugh Dickins 		return VM_FAULT_OOM;
29861da177e4SLinus Torvalds 
298765500d23SHugh Dickins 	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
29881da177e4SLinus Torvalds }
29891da177e4SLinus Torvalds 
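/*
 * Editor's illustrative sketch, not part of memory.c: the shape of an
 * architecture fault handler's call into handle_mm_fault().  The
 * function name is hypothetical and error handling is elided; real
 * handlers also deal with VM_FAULT_SIGBUS, stack expansion, OOM, etc.
 */
static void example_arch_page_fault(struct mm_struct *mm,
				    unsigned long address, int write_access)
{
	struct vm_area_struct *vma;
	int fault;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (vma && vma->vm_start <= address) {
		fault = handle_mm_fault(mm, vma, address, write_access);
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
	}
	up_read(&mm->mmap_sem);
}
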
29901da177e4SLinus Torvalds #ifndef __PAGETABLE_PUD_FOLDED
29911da177e4SLinus Torvalds /*
29921da177e4SLinus Torvalds  * Allocate page upper directory.
2993872fec16SHugh Dickins  * We've already handled the fast-path in-line.
29941da177e4SLinus Torvalds  */
29951bb3630eSHugh Dickins int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
29961da177e4SLinus Torvalds {
2997c74df32cSHugh Dickins 	pud_t *new = pud_alloc_one(mm, address);
2998c74df32cSHugh Dickins 	if (!new)
29991bb3630eSHugh Dickins 		return -ENOMEM;
30001da177e4SLinus Torvalds 
3001362a61adSNick Piggin 	smp_wmb(); /* See comment in __pte_alloc */
3002362a61adSNick Piggin 
3003872fec16SHugh Dickins 	spin_lock(&mm->page_table_lock);
30041bb3630eSHugh Dickins 	if (pgd_present(*pgd))		/* Another has populated it */
30055e541973SBenjamin Herrenschmidt 		pud_free(mm, new);
30061bb3630eSHugh Dickins 	else
30071da177e4SLinus Torvalds 		pgd_populate(mm, pgd, new);
3008872fec16SHugh Dickins 	spin_unlock(&mm->page_table_lock);
30091bb3630eSHugh Dickins 	return 0;
30101da177e4SLinus Torvalds }
30111da177e4SLinus Torvalds #endif /* __PAGETABLE_PUD_FOLDED */
30121da177e4SLinus Torvalds 
30131da177e4SLinus Torvalds #ifndef __PAGETABLE_PMD_FOLDED
30141da177e4SLinus Torvalds /*
30151da177e4SLinus Torvalds  * Allocate page middle directory.
3016872fec16SHugh Dickins  * We've already handled the fast-path in-line.
30171da177e4SLinus Torvalds  */
30181bb3630eSHugh Dickins int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
30191da177e4SLinus Torvalds {
3020c74df32cSHugh Dickins 	pmd_t *new = pmd_alloc_one(mm, address);
3021c74df32cSHugh Dickins 	if (!new)
30221bb3630eSHugh Dickins 		return -ENOMEM;
30231da177e4SLinus Torvalds 
3024362a61adSNick Piggin 	smp_wmb(); /* See comment in __pte_alloc */
3025362a61adSNick Piggin 
3026872fec16SHugh Dickins 	spin_lock(&mm->page_table_lock);
30271da177e4SLinus Torvalds #ifndef __ARCH_HAS_4LEVEL_HACK
30281bb3630eSHugh Dickins 	if (pud_present(*pud))		/* Another has populated it */
30295e541973SBenjamin Herrenschmidt 		pmd_free(mm, new);
30301bb3630eSHugh Dickins 	else
30311da177e4SLinus Torvalds 		pud_populate(mm, pud, new);
30321da177e4SLinus Torvalds #else
30331bb3630eSHugh Dickins 	if (pgd_present(*pud))		/* Another has populated it */
30345e541973SBenjamin Herrenschmidt 		pmd_free(mm, new);
30351bb3630eSHugh Dickins 	else
30361da177e4SLinus Torvalds 		pgd_populate(mm, pud, new);
30371da177e4SLinus Torvalds #endif /* __ARCH_HAS_4LEVEL_HACK */
3038872fec16SHugh Dickins 	spin_unlock(&mm->page_table_lock);
30391bb3630eSHugh Dickins 	return 0;
30401da177e4SLinus Torvalds }
30411da177e4SLinus Torvalds #endif /* __PAGETABLE_PMD_FOLDED */
30421da177e4SLinus Torvalds 
30431da177e4SLinus Torvalds int make_pages_present(unsigned long addr, unsigned long end)
30441da177e4SLinus Torvalds {
30451da177e4SLinus Torvalds 	int ret, len, write;
30461da177e4SLinus Torvalds 	struct vm_area_struct * vma;
30471da177e4SLinus Torvalds 
30481da177e4SLinus Torvalds 	vma = find_vma(current->mm, addr);
30491da177e4SLinus Torvalds 	if (!vma)
3050a477097dSKOSAKI Motohiro 		return -ENOMEM;
30511da177e4SLinus Torvalds 	write = (vma->vm_flags & VM_WRITE) != 0;
30525bcb28b1SEric Sesterhenn 	BUG_ON(addr >= end);
30535bcb28b1SEric Sesterhenn 	BUG_ON(end > vma->vm_end);
305468e116a3SRolf Eike Beer 	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
30551da177e4SLinus Torvalds 	ret = get_user_pages(current, current->mm, addr,
30561da177e4SLinus Torvalds 			len, write, 0, NULL, NULL);
3057c11d69d8SLee Schermerhorn 	if (ret < 0)
30581da177e4SLinus Torvalds 		return ret;
30599978ad58SLee Schermerhorn 	return ret == len ? 0 : -EFAULT;
30601da177e4SLinus Torvalds }
30611da177e4SLinus Torvalds 
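/*
 * Editor's illustrative sketch, not part of memory.c: pre-faulting a
 * whole vma with make_pages_present(), as an mlock-style caller might.
 * The helper name is hypothetical; the caller must hold mmap_sem and
 * the vma must belong to current->mm.
 */
static int example_populate_vma(struct vm_area_struct *vma)
{
	return make_pages_present(vma->vm_start, vma->vm_end);
}
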
30621da177e4SLinus Torvalds #if !defined(__HAVE_ARCH_GATE_AREA)
30631da177e4SLinus Torvalds 
30641da177e4SLinus Torvalds #if defined(AT_SYSINFO_EHDR)
30655ce7852cSAdrian Bunk static struct vm_area_struct gate_vma;
30661da177e4SLinus Torvalds 
30671da177e4SLinus Torvalds static int __init gate_vma_init(void)
30681da177e4SLinus Torvalds {
30691da177e4SLinus Torvalds 	gate_vma.vm_mm = NULL;
30701da177e4SLinus Torvalds 	gate_vma.vm_start = FIXADDR_USER_START;
30711da177e4SLinus Torvalds 	gate_vma.vm_end = FIXADDR_USER_END;
3072b6558c4aSRoland McGrath 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
3073b6558c4aSRoland McGrath 	gate_vma.vm_page_prot = __P101;
3074f47aef55SRoland McGrath 	/*
3075f47aef55SRoland McGrath 	 * Make sure the vDSO gets into every core dump.
3076f47aef55SRoland McGrath 	 * Dumping its contents makes post-mortem fully interpretable later
3077f47aef55SRoland McGrath 	 * without matching up the same kernel and hardware config to see
3078f47aef55SRoland McGrath 	 * what PC values meant.
3079f47aef55SRoland McGrath 	 */
3080f47aef55SRoland McGrath 	gate_vma.vm_flags |= VM_ALWAYSDUMP;
30811da177e4SLinus Torvalds 	return 0;
30821da177e4SLinus Torvalds }
30831da177e4SLinus Torvalds __initcall(gate_vma_init);
30841da177e4SLinus Torvalds #endif
30851da177e4SLinus Torvalds 
30861da177e4SLinus Torvalds struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
30871da177e4SLinus Torvalds {
30881da177e4SLinus Torvalds #ifdef AT_SYSINFO_EHDR
30891da177e4SLinus Torvalds 	return &gate_vma;
30901da177e4SLinus Torvalds #else
30911da177e4SLinus Torvalds 	return NULL;
30921da177e4SLinus Torvalds #endif
30931da177e4SLinus Torvalds }
30941da177e4SLinus Torvalds 
30951da177e4SLinus Torvalds int in_gate_area_no_task(unsigned long addr)
30961da177e4SLinus Torvalds {
30971da177e4SLinus Torvalds #ifdef AT_SYSINFO_EHDR
30981da177e4SLinus Torvalds 	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
30991da177e4SLinus Torvalds 		return 1;
31001da177e4SLinus Torvalds #endif
31011da177e4SLinus Torvalds 	return 0;
31021da177e4SLinus Torvalds }
31031da177e4SLinus Torvalds 
31041da177e4SLinus Torvalds #endif	/* __HAVE_ARCH_GATE_AREA */
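
/*
 * Illustrative sketch (editor's addition): the gate VMA is not linked into
 * the mm's VMA list, so lookups that miss in find_vma() fall back to
 * in_gate_area()/get_gate_vma(); this is how get_user_pages() keeps the
 * fixed-address vsyscall/vDSO page reachable.  The helper name below is
 * hypothetical.
 */
#if 0	/* illustrative only, kept out of the build */
static struct vm_area_struct *example_lookup_vma(struct task_struct *tsk,
						 unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(tsk->mm, addr);

	if (vma && vma->vm_start <= addr)
		return vma;
	if (in_gate_area(tsk, addr))
		return get_gate_vma(tsk);
	return NULL;
}
#endif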
31050ec76a11SDavid Howells 
3106f8ad0f49SJohannes Weiner static int follow_pte(struct mm_struct *mm, unsigned long address,
3107f8ad0f49SJohannes Weiner 		pte_t **ptepp, spinlock_t **ptlp)
3108f8ad0f49SJohannes Weiner {
3109f8ad0f49SJohannes Weiner 	pgd_t *pgd;
3110f8ad0f49SJohannes Weiner 	pud_t *pud;
3111f8ad0f49SJohannes Weiner 	pmd_t *pmd;
3112f8ad0f49SJohannes Weiner 	pte_t *ptep;
3113f8ad0f49SJohannes Weiner 
3114f8ad0f49SJohannes Weiner 	pgd = pgd_offset(mm, address);
3115f8ad0f49SJohannes Weiner 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
3116f8ad0f49SJohannes Weiner 		goto out;
3117f8ad0f49SJohannes Weiner 
3118f8ad0f49SJohannes Weiner 	pud = pud_offset(pgd, address);
3119f8ad0f49SJohannes Weiner 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
3120f8ad0f49SJohannes Weiner 		goto out;
3121f8ad0f49SJohannes Weiner 
3122f8ad0f49SJohannes Weiner 	pmd = pmd_offset(pud, address);
3123f8ad0f49SJohannes Weiner 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
3124f8ad0f49SJohannes Weiner 		goto out;
3125f8ad0f49SJohannes Weiner 
3126f8ad0f49SJohannes Weiner 	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
3127f8ad0f49SJohannes Weiner 	if (pmd_huge(*pmd))
3128f8ad0f49SJohannes Weiner 		goto out;
3129f8ad0f49SJohannes Weiner 
3130f8ad0f49SJohannes Weiner 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
3131f8ad0f49SJohannes Weiner 	if (!ptep)
3132f8ad0f49SJohannes Weiner 		goto out;
3133f8ad0f49SJohannes Weiner 	if (!pte_present(*ptep))
3134f8ad0f49SJohannes Weiner 		goto unlock;
3135f8ad0f49SJohannes Weiner 	*ptepp = ptep;
3136f8ad0f49SJohannes Weiner 	return 0;
3137f8ad0f49SJohannes Weiner unlock:
3138f8ad0f49SJohannes Weiner 	pte_unmap_unlock(ptep, *ptlp);
3139f8ad0f49SJohannes Weiner out:
3140f8ad0f49SJohannes Weiner 	return -EINVAL;
3141f8ad0f49SJohannes Weiner }
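
/*
 * Illustrative sketch (editor's addition): follow_pte() returns with the
 * pte lock held, so the mapping is only stable while *ptlp is held.  A
 * hypothetical helper translating a user virtual address to a page frame
 * number could use it like this.
 */
#if 0	/* illustrative only, kept out of the build */
static int example_addr_to_pfn(struct mm_struct *mm, unsigned long address,
			       unsigned long *pfn)
{
	pte_t *ptep;
	spinlock_t *ptl;
	int ret;

	ret = follow_pte(mm, address, &ptep, &ptl);
	if (ret)
		return ret;
	*pfn = pte_pfn(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
#endif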
3142f8ad0f49SJohannes Weiner 
314328b2ee20SRik van Riel #ifdef CONFIG_HAVE_IOREMAP_PROT
3144d87fe660Svenkatesh.pallipadi@intel.com int follow_phys(struct vm_area_struct *vma,
314528b2ee20SRik van Riel 		unsigned long address, unsigned int flags,
3146d87fe660Svenkatesh.pallipadi@intel.com 		unsigned long *prot, resource_size_t *phys)
314728b2ee20SRik van Riel {
3148*03668a4dSJohannes Weiner 	int ret = -EINVAL;
314928b2ee20SRik van Riel 	pte_t *ptep, pte;
315028b2ee20SRik van Riel 	spinlock_t *ptl;
315128b2ee20SRik van Riel 
3152d87fe660Svenkatesh.pallipadi@intel.com 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3153d87fe660Svenkatesh.pallipadi@intel.com 		goto out;
315428b2ee20SRik van Riel 
3155*03668a4dSJohannes Weiner 	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
3156d87fe660Svenkatesh.pallipadi@intel.com 		goto out;
315728b2ee20SRik van Riel 	pte = *ptep;
3158*03668a4dSJohannes Weiner 
315928b2ee20SRik van Riel 	if ((flags & FOLL_WRITE) && !pte_write(pte))
316028b2ee20SRik van Riel 		goto unlock;
316128b2ee20SRik van Riel 
316228b2ee20SRik van Riel 	*prot = pgprot_val(pte_pgprot(pte));
3163*03668a4dSJohannes Weiner 	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
316428b2ee20SRik van Riel 
3165*03668a4dSJohannes Weiner 	ret = 0;
316628b2ee20SRik van Riel unlock:
316728b2ee20SRik van Riel 	pte_unmap_unlock(ptep, ptl);
316828b2ee20SRik van Riel out:
3169d87fe660Svenkatesh.pallipadi@intel.com 	return ret;
317028b2ee20SRik van Riel }
317128b2ee20SRik van Riel 
317228b2ee20SRik van Riel int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
317328b2ee20SRik van Riel 			void *buf, int len, int write)
317428b2ee20SRik van Riel {
317528b2ee20SRik van Riel 	resource_size_t phys_addr;
317628b2ee20SRik van Riel 	unsigned long prot = 0;
31772bc7273bSKOSAKI Motohiro 	void __iomem *maddr;
317828b2ee20SRik van Riel 	int offset = addr & (PAGE_SIZE-1);
317928b2ee20SRik van Riel 
3180d87fe660Svenkatesh.pallipadi@intel.com 	if (follow_phys(vma, addr, write, &prot, &phys_addr))
318128b2ee20SRik van Riel 		return -EINVAL;
318228b2ee20SRik van Riel 
318328b2ee20SRik van Riel 	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
318428b2ee20SRik van Riel 	if (write)
318528b2ee20SRik van Riel 		memcpy_toio(maddr + offset, buf, len);
318628b2ee20SRik van Riel 	else
318728b2ee20SRik van Riel 		memcpy_fromio(buf, maddr + offset, len);
318828b2ee20SRik van Riel 	iounmap(maddr);
318928b2ee20SRik van Riel 
319028b2ee20SRik van Riel 	return len;
319128b2ee20SRik van Riel }
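
/*
 * Illustrative sketch (editor's addition): a driver that PFN-maps MMIO can
 * make its mapping visible to access_process_vm()/ptrace by routing its
 * ->access method to generic_access_phys(), as the /dev/mem driver does.
 * The mmap helper and vm_ops names below are hypothetical.
 */
#if 0	/* illustrative only, kept out of the build */
static struct vm_operations_struct example_mmio_vm_ops = {
	.access = generic_access_phys,
};

static int example_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &example_mmio_vm_ops;
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
#endif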
319228b2ee20SRik van Riel #endif
319328b2ee20SRik van Riel 
31940ec76a11SDavid Howells /*
31950ec76a11SDavid Howells  * Access another process' address space.
31960ec76a11SDavid Howells  * The source/target buffer must be in kernel space.
31970ec76a11SDavid Howells  * Do not walk the page tables directly; use get_user_pages().
31980ec76a11SDavid Howells  */
31990ec76a11SDavid Howells int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
32000ec76a11SDavid Howells {
32010ec76a11SDavid Howells 	struct mm_struct *mm;
32020ec76a11SDavid Howells 	struct vm_area_struct *vma;
32030ec76a11SDavid Howells 	void *old_buf = buf;
32040ec76a11SDavid Howells 
32050ec76a11SDavid Howells 	mm = get_task_mm(tsk);
32060ec76a11SDavid Howells 	if (!mm)
32070ec76a11SDavid Howells 		return 0;
32080ec76a11SDavid Howells 
32090ec76a11SDavid Howells 	down_read(&mm->mmap_sem);
3210183ff22bSSimon Arlott 	/* ignore errors, just check how much was successfully transferred */
32110ec76a11SDavid Howells 	while (len) {
32120ec76a11SDavid Howells 		int bytes, ret, offset;
32130ec76a11SDavid Howells 		void *maddr;
321428b2ee20SRik van Riel 		struct page *page = NULL;
32150ec76a11SDavid Howells 
32160ec76a11SDavid Howells 		ret = get_user_pages(tsk, mm, addr, 1,
32170ec76a11SDavid Howells 				write, 1, &page, &vma);
321828b2ee20SRik van Riel 		if (ret <= 0) {
321928b2ee20SRik van Riel 			/*
322028b2ee20SRik van Riel 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
322128b2ee20SRik van Riel 			 * we can access using slightly different code.
322228b2ee20SRik van Riel 			 */
322328b2ee20SRik van Riel #ifdef CONFIG_HAVE_IOREMAP_PROT
322428b2ee20SRik van Riel 			vma = find_vma(mm, addr);
322528b2ee20SRik van Riel 			if (!vma)
32260ec76a11SDavid Howells 				break;
322728b2ee20SRik van Riel 			if (vma->vm_ops && vma->vm_ops->access)
322828b2ee20SRik van Riel 				ret = vma->vm_ops->access(vma, addr, buf,
322928b2ee20SRik van Riel 							  len, write);
323028b2ee20SRik van Riel 			if (ret <= 0)
323128b2ee20SRik van Riel #endif
323228b2ee20SRik van Riel 				break;
323328b2ee20SRik van Riel 			bytes = ret;
323428b2ee20SRik van Riel 		} else {
32350ec76a11SDavid Howells 			bytes = len;
32360ec76a11SDavid Howells 			offset = addr & (PAGE_SIZE-1);
32370ec76a11SDavid Howells 			if (bytes > PAGE_SIZE-offset)
32380ec76a11SDavid Howells 				bytes = PAGE_SIZE-offset;
32390ec76a11SDavid Howells 
32400ec76a11SDavid Howells 			maddr = kmap(page);
32410ec76a11SDavid Howells 			if (write) {
32420ec76a11SDavid Howells 				copy_to_user_page(vma, page, addr,
32430ec76a11SDavid Howells 						  maddr + offset, buf, bytes);
32440ec76a11SDavid Howells 				set_page_dirty_lock(page);
32450ec76a11SDavid Howells 			} else {
32460ec76a11SDavid Howells 				copy_from_user_page(vma, page, addr,
32470ec76a11SDavid Howells 						    buf, maddr + offset, bytes);
32480ec76a11SDavid Howells 			}
32490ec76a11SDavid Howells 			kunmap(page);
32500ec76a11SDavid Howells 			page_cache_release(page);
325128b2ee20SRik van Riel 		}
32520ec76a11SDavid Howells 		len -= bytes;
32530ec76a11SDavid Howells 		buf += bytes;
32540ec76a11SDavid Howells 		addr += bytes;
32550ec76a11SDavid Howells 	}
32560ec76a11SDavid Howells 	up_read(&mm->mmap_sem);
32570ec76a11SDavid Howells 	mmput(mm);
32580ec76a11SDavid Howells 
32590ec76a11SDavid Howells 	return buf - old_buf;
32600ec76a11SDavid Howells }
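
/*
 * Illustrative sketch (editor's addition): ptrace-style peeking is the
 * classic user of access_process_vm().  A hypothetical helper reading a
 * single word from another task would look like this.
 */
#if 0	/* illustrative only, kept out of the build */
static int example_peek_user_word(struct task_struct *child,
				  unsigned long addr, unsigned long *val)
{
	int copied;

	copied = access_process_vm(child, addr, val, sizeof(*val), 0);
	return copied == sizeof(*val) ? 0 : -EIO;
}
#endif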
326103252919SAndi Kleen 
326203252919SAndi Kleen /*
326303252919SAndi Kleen  * Print the name of a VMA.
326403252919SAndi Kleen  */
326503252919SAndi Kleen void print_vma_addr(char *prefix, unsigned long ip)
326603252919SAndi Kleen {
326703252919SAndi Kleen 	struct mm_struct *mm = current->mm;
326803252919SAndi Kleen 	struct vm_area_struct *vma;
326903252919SAndi Kleen 
3270e8bff74aSIngo Molnar 	/*
3271e8bff74aSIngo Molnar 	 * Do not print if we are in an atomic
3272e8bff74aSIngo Molnar 	 * context (on exception stacks, etc.):
3273e8bff74aSIngo Molnar 	 */
3274e8bff74aSIngo Molnar 	if (preempt_count())
3275e8bff74aSIngo Molnar 		return;
3276e8bff74aSIngo Molnar 
327703252919SAndi Kleen 	down_read(&mm->mmap_sem);
327803252919SAndi Kleen 	vma = find_vma(mm, ip);
327903252919SAndi Kleen 	if (vma && vma->vm_file) {
328003252919SAndi Kleen 		struct file *f = vma->vm_file;
328103252919SAndi Kleen 		char *buf = (char *)__get_free_page(GFP_KERNEL);
328203252919SAndi Kleen 		if (buf) {
328303252919SAndi Kleen 			char *p, *s;
328403252919SAndi Kleen 
3285cf28b486SJan Blunck 			p = d_path(&f->f_path, buf, PAGE_SIZE);
328603252919SAndi Kleen 			if (IS_ERR(p))
328703252919SAndi Kleen 				p = "?";
328803252919SAndi Kleen 			s = strrchr(p, '/');
328903252919SAndi Kleen 			if (s)
329003252919SAndi Kleen 				p = s+1;
329103252919SAndi Kleen 			printk("%s%s[%lx+%lx]", prefix, p,
329203252919SAndi Kleen 					vma->vm_start,
329303252919SAndi Kleen 					vma->vm_end - vma->vm_start);
329403252919SAndi Kleen 			free_page((unsigned long)buf);
329503252919SAndi Kleen 		}
329603252919SAndi Kleen 	}
329703252919SAndi Kleen 	up_read(&mm->mmap_sem);
329803252919SAndi Kleen }
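
/*
 * Illustrative sketch (editor's addition): architecture fault handlers use
 * print_vma_addr() to append the "name[start+size]" of the faulting mapping
 * to their diagnostics (see the x86 segfault message).  The helper below is
 * hypothetical.
 */
#if 0	/* illustrative only, kept out of the build */
static void example_report_fault(unsigned long address, unsigned long ip)
{
	printk(KERN_INFO "%s[%d]: fault at %lx ip %lx",
	       current->comm, task_pid_nr(current), address, ip);
	print_vma_addr(" in ", ip);
	printk("\n");
}
#endif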
32993ee1afa3SNick Piggin 
33003ee1afa3SNick Piggin #ifdef CONFIG_PROVE_LOCKING
33013ee1afa3SNick Piggin void might_fault(void)
33023ee1afa3SNick Piggin {
330395156f00SPeter Zijlstra 	/*
330495156f00SPeter Zijlstra 	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
330595156f00SPeter Zijlstra 	 * holding the mmap_sem.  This is safe because kernel memory never
330695156f00SPeter Zijlstra 	 * gets paged out, so we will never actually fault, and the
330795156f00SPeter Zijlstra 	 * annotations below would only generate false positives.
330895156f00SPeter Zijlstra 	 */
330995156f00SPeter Zijlstra 	if (segment_eq(get_fs(), KERNEL_DS))
331095156f00SPeter Zijlstra 		return;
331195156f00SPeter Zijlstra 
33123ee1afa3SNick Piggin 	might_sleep();
33133ee1afa3SNick Piggin 	/*
33143ee1afa3SNick Piggin 	 * It would be nicer to annotate only the paths which are not under
33153ee1afa3SNick Piggin 	 * pagefault_disable(); however, that requires a larger audit and
33163ee1afa3SNick Piggin 	 * providing helpers like get_user_atomic.
33173ee1afa3SNick Piggin 	 */
33183ee1afa3SNick Piggin 	if (!in_atomic() && current->mm)
33193ee1afa3SNick Piggin 		might_lock_read(&current->mm->mmap_sem);
33203ee1afa3SNick Piggin }
33213ee1afa3SNick Piggin EXPORT_SYMBOL(might_fault);
33223ee1afa3SNick Piggin #endif
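
/*
 * Illustrative sketch (editor's addition): uaccess helpers are expected to
 * call might_fault() before touching user memory, so lockdep can catch
 * mmap_sem inversions even on runs where no fault actually occurs.  The
 * wrapper below is hypothetical.
 */
#if 0	/* illustrative only, kept out of the build */
static inline unsigned long example_copy_from_user(void *to,
						   const void __user *from,
						   unsigned long n)
{
	might_fault();
	return copy_from_user(to, from, n);
}
#endif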
3323