/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
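/*
 * (For instance, i386 setup code effectively sets high_memory to
 * __va(max_low_pfn << PAGE_SHIFT), the first byte past the direct
 * mapping; illustrative only, the exact assignment lives in the
 * per-arch initialization code.)
 */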
void * high_memory;
unsigned long vmalloc_earlyreserve;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
EXPORT_SYMBOL(vmalloc_earlyreserve);

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	pmd_clear(pmd);
	pte_lock_deinit(page);
	pte_free_tlb(tlb, page);
	dec_page_state(nr_page_table_pages);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? No, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
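
	/*
	 * Worked example (illustrative; 4KB pages, 2MB PMD_SIZE):
	 * freeing [0x200000, 0x400000) with floor 0 and ceiling 0
	 * (i.e. up to the top of the address space): addr stays
	 * 0x200000 (PMD-aligned, not below floor); ceiling 0 skips
	 * the masking block; "end - 1 > ceiling - 1" compares
	 * 0x3fffff against ULONG_MAX, so end is not pulled down;
	 * addr <= end - 1, so the walk below proceeds.
	 */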

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);

	if (!(*tlb)->fullmm)
		flush_tlb_pgtables((*tlb)->mm, start, end);
}

void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and vmtruncate before freeing pgtables
		 */
		anon_vma_unlink(vma);
		unlink_file_vma(vma);

		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
							HPAGE_SIZE)) {
				vma = next;
				next = vma->vm_next;
				anon_vma_unlink(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

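/*
 * Allocate a new pte page without holding mm->page_table_lock, then
 * recheck the pmd under the lock: if another thread populated it
 * meanwhile, discard our page and use theirs.  This keeps the lock
 * hold time down to the pmd_populate itself.
 */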
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	struct page *new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	pte_lock_init(new);
	spin_lock(&mm->page_table_lock);
	if (pmd_present(*pmd)) {	/* Another has populated it */
		pte_lock_deinit(new);
		pte_free(new);
	} else {
		mm->nr_ptes++;
		inc_page_state(nr_page_table_pages);
		pmd_populate(mm, pmd, new);
	}
	spin_unlock(&mm->page_table_lock);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (pmd_present(*pmd))		/* Another has populated it */
		pte_free_kernel(new);
	else
		pmd_populate_kernel(&init_mm, pmd, new);
	spin_unlock(&init_mm.page_table_lock);
	return 0;
}

static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
	if (file_rss)
		add_mm_counter(mm, file_rss, file_rss);
	if (anon_rss)
		add_mm_counter(mm, anon_rss, anon_rss);
}

/*
 * This function is called to print an error when a pte in a
 * !VM_RESERVED region is found pointing to an invalid pfn.
 *
 * The calling function must still handle the error.
 */
void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
{
	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
			"vm_flags = %lx, vaddr = %lx\n",
		(long long)pte_val(pte),
		(vma->vm_mm == current->mm ? current->comm : "???"),
		vma->vm_flags, vaddr);
	dump_stack();
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;
	unsigned long pfn;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swap_duplicate(pte_to_swp_entry(pte));
			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
		}
		goto out_set_pte;
	}

	/* If the region is VM_RESERVED, the mapping is not
	 * mapped via rmap - duplicate the pte as is.
	 */
	if (vm_flags & VM_RESERVED)
		goto out_set_pte;

	pfn = pte_pfn(pte);
	/* If the pte points outside of valid memory but
	 * the region is not VM_RESERVED, we have a problem.
	 */
	if (unlikely(!pfn_valid(pfn))) {
		print_bad_pte(vma, pte, addr);
		goto out_set_pte; /* try to do something sane */
	}

	page = pfn_to_page(pfn);

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = *src_pte;
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);
	get_page(page);
	page_dup_rmap(page);
	rss[!!PageAnon(page)]++;

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}

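/*
 * rss[0] accumulates file-backed pages copied, rss[1] anonymous ones
 * (hence the rss[!!PageAnon(page)]++ above); the totals are applied
 * in one add_mm_rss() call per batch rather than once per pte.  The
 * progress counter bounds how long both pte locks are held: roughly
 * every 32 units of work we drop out to reschedule or break the
 * locks, and the "again" loop resumes where we left off.
 */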
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[2];

again:
	rss[1] = rss[0] = 0;
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock(src_ptl);

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    need_lockbreak(src_ptl) ||
			    need_lockbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	spin_unlock(src_ptl);
	pte_unmap_nested(src_pte - 1);
	add_mm_rss(dst_mm, rss[0], rss[1]);
	pte_unmap_unlock(dst_pte - 1, dst_ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

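/*
 * Called from dup_mmap() at fork() time: duplicate the ptes covering
 * vma from src_mm into dst_mm, one page-table level at a time.
 */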
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
	return 0;
}

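/*
 * file_rss and anon_rss count downwards here: each pte torn down
 * decrements the matching counter, and the accumulated (negative)
 * deltas are applied with a single add_mm_rss() call at the end,
 * under the same pte lock.
 */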
static void zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int file_rss = 0;
	int anon_rss = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent))
			continue;
		if (pte_present(ptent)) {
			struct page *page = NULL;
			if (!(vma->vm_flags & VM_RESERVED)) {
				unsigned long pfn = pte_pfn(ptent);
				if (unlikely(!pfn_valid(pfn)))
					print_bad_pte(vma, ptent, addr);
				else
					page = pfn_to_page(pfn);
			}
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				anon_rss--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent))
					mark_page_accessed(page);
				file_rss--;
			}
			page_remove_rmap(page);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (!pte_file(ptent))
			free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, addr != end);

	add_mm_rss(mm, file_rss, anon_rss);
	pte_unmap_unlock(pte - 1, ptl);
}

static inline void zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		zap_pte_range(tlb, vma, pmd, addr, next, details);
	} while (pmd++, addr = next, addr != end);
}

static inline void zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		zap_pmd_range(tlb, vma, pud, addr, next, details);
	} while (pud++, addr = next, addr != end);
}

static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		zap_pud_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif
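
/*
 * E.g. with 4KB pages: 32KB per zap block under CONFIG_PREEMPT (low
 * latency, frequent lock breaks), 4MB otherwise (fewer trips through
 * tlb_finish_mmu/tlb_gather_mmu).
 */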

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;

	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		while (start != end) {
			unsigned long block;

			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (is_vm_hugetlb_page(vma)) {
				block = end - start;
				unmap_hugepage_range(vma, start, end);
			} else {
				block = min(zap_bytes, end - start);
				unmap_page_range(*tlbp, vma, start,
						start + block, details);
			}

			start += block;
			zap_bytes -= block;
			if ((long)zap_bytes > 0)
				continue;

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_bytes = ZAP_BLOCK_SIZE;
		}
	}
out:
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}

/*
 * Do a quick page-table lookup for a single page.
 */
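/*
 * Flags, as handled below: FOLL_WRITE requires a writable pte,
 * FOLL_GET takes a reference on the page, FOLL_TOUCH marks it
 * accessed (and dirty for writes), and FOLL_ANON resolves a lookup
 * in an empty area to ZERO_PAGE instead of allocating page tables.
 */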
struct page *follow_page(struct mm_struct *mm, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	unsigned long pfn;
	struct page *page;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		goto out;

	pte = *ptep;
	if (!pte_present(pte))
		goto unlock;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;
	pfn = pte_pfn(pte);
	if (!pfn_valid(pfn))
		goto unlock;

	page = pfn_to_page(pfn);
	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		mark_page_accessed(page);
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate page tables.
	 */
	if (flags & FOLL_ANON) {
		page = ZERO_PAGE(address);
		if (flags & FOLL_GET)
			get_page(page);
		BUG_ON(flags & FOLL_WRITE);
	}
	return page;
}

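/*
 * get_user_pages() faults in up to len pages of the given process's
 * address space, taking a reference on each when a pages[] array is
 * supplied; it returns the number of pages handled, or a negative
 * errno if none were.  The caller holds mm's mmap_sem.
 */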
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned int vm_flags;

	/*
	 * Require read or write permissions.
	 * If 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;
		unsigned int foll_flags;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;
			if (write) /* user gate pages are read-only */
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				pages[i] = pte_page(*pte);
				get_page(pages[i]);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			len--;
			continue;
		}

		if (!vma || (vma->vm_flags & (VM_IO | VM_RESERVED))
				|| !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &len, i);
			continue;
		}

		foll_flags = FOLL_TOUCH;
		if (pages)
			foll_flags |= FOLL_GET;
		if (!write && !(vma->vm_flags & VM_LOCKED) &&
		    (!vma->vm_ops || !vma->vm_ops->nopage))
			foll_flags |= FOLL_ANON;

		do {
			struct page *page;

			if (write)
				foll_flags |= FOLL_WRITE;

			cond_resched();
			while (!(page = follow_page(mm, start, foll_flags))) {
				int ret;
				ret = __handle_mm_fault(mm, vma, start,
						foll_flags & FOLL_WRITE);
				/*
				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
				 * broken COW when necessary, even if maybe_mkwrite
				 * decided not to set pte_write. We can thus safely do
				 * subsequent page lookups as if they were reads.
				 */
				if (ret & VM_FAULT_WRITE)
					foll_flags &= ~FOLL_WRITE;

				switch (ret & ~VM_FAULT_WRITE) {
				case VM_FAULT_MINOR:
					tsk->min_flt++;
					break;
				case VM_FAULT_MAJOR:
					tsk->maj_flt++;
					break;
				case VM_FAULT_SIGBUS:
					return i ? i : -EFAULT;
				case VM_FAULT_OOM:
					return i ? i : -ENOMEM;
				default:
					BUG();
				}
			}
			if (pages) {
				pages[i] = page;
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
	} while (len);
	return i;
}
EXPORT_SYMBOL(get_user_pages);

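/*
 * The zeromap family installs write-protected ZERO_PAGE ptes across
 * a range: reads return zeros without allocating real pages, while a
 * write faults and takes the normal copy-on-write path.
 */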
static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = ZERO_PAGE(addr);
		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
		page_cache_get(page);
		page_add_file_rmap(page);
		inc_mm_counter(mm, file_rss);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, zero_pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (zeromap_pte_range(mm, pmd, addr, next, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (zeromap_pmd_range(mm, pud, addr, next, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int zeromap_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	struct mm_struct *mm = vma->vm_mm;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = zeromap_pud_range(mm, pgd, addr, next, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}

/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
10991da177e4SLinus Torvalds static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
11001da177e4SLinus Torvalds 			unsigned long addr, unsigned long end,
11011da177e4SLinus Torvalds 			unsigned long pfn, pgprot_t prot)
11021da177e4SLinus Torvalds {
11031da177e4SLinus Torvalds 	pte_t *pte;
1104c74df32cSHugh Dickins 	spinlock_t *ptl;
11051da177e4SLinus Torvalds 
1106c74df32cSHugh Dickins 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
11071da177e4SLinus Torvalds 	if (!pte)
11081da177e4SLinus Torvalds 		return -ENOMEM;
11091da177e4SLinus Torvalds 	do {
11101da177e4SLinus Torvalds 		BUG_ON(!pte_none(*pte));
11111da177e4SLinus Torvalds 		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
11121da177e4SLinus Torvalds 		pfn++;
11131da177e4SLinus Torvalds 	} while (pte++, addr += PAGE_SIZE, addr != end);
1114c74df32cSHugh Dickins 	pte_unmap_unlock(pte - 1, ptl);
11151da177e4SLinus Torvalds 	return 0;
11161da177e4SLinus Torvalds }
11171da177e4SLinus Torvalds 
11181da177e4SLinus Torvalds static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
11191da177e4SLinus Torvalds 			unsigned long addr, unsigned long end,
11201da177e4SLinus Torvalds 			unsigned long pfn, pgprot_t prot)
11211da177e4SLinus Torvalds {
11221da177e4SLinus Torvalds 	pmd_t *pmd;
11231da177e4SLinus Torvalds 	unsigned long next;
11241da177e4SLinus Torvalds 
11251da177e4SLinus Torvalds 	pfn -= addr >> PAGE_SHIFT;
11261da177e4SLinus Torvalds 	pmd = pmd_alloc(mm, pud, addr);
11271da177e4SLinus Torvalds 	if (!pmd)
11281da177e4SLinus Torvalds 		return -ENOMEM;
11291da177e4SLinus Torvalds 	do {
11301da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
11311da177e4SLinus Torvalds 		if (remap_pte_range(mm, pmd, addr, next,
11321da177e4SLinus Torvalds 				pfn + (addr >> PAGE_SHIFT), prot))
11331da177e4SLinus Torvalds 			return -ENOMEM;
11341da177e4SLinus Torvalds 	} while (pmd++, addr = next, addr != end);
11351da177e4SLinus Torvalds 	return 0;
11361da177e4SLinus Torvalds }
11371da177e4SLinus Torvalds 
11381da177e4SLinus Torvalds static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
11391da177e4SLinus Torvalds 			unsigned long addr, unsigned long end,
11401da177e4SLinus Torvalds 			unsigned long pfn, pgprot_t prot)
11411da177e4SLinus Torvalds {
11421da177e4SLinus Torvalds 	pud_t *pud;
11431da177e4SLinus Torvalds 	unsigned long next;
11441da177e4SLinus Torvalds 
11451da177e4SLinus Torvalds 	pfn -= addr >> PAGE_SHIFT;
11461da177e4SLinus Torvalds 	pud = pud_alloc(mm, pgd, addr);
11471da177e4SLinus Torvalds 	if (!pud)
11481da177e4SLinus Torvalds 		return -ENOMEM;
11491da177e4SLinus Torvalds 	do {
11501da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
11511da177e4SLinus Torvalds 		if (remap_pmd_range(mm, pud, addr, next,
11521da177e4SLinus Torvalds 				pfn + (addr >> PAGE_SHIFT), prot))
11531da177e4SLinus Torvalds 			return -ENOMEM;
11541da177e4SLinus Torvalds 	} while (pud++, addr = next, addr != end);
11551da177e4SLinus Torvalds 	return 0;
11561da177e4SLinus Torvalds }
11571da177e4SLinus Torvalds 
11581da177e4SLinus Torvalds /*  Note: this is only safe if the mm semaphore is held when called. */
11591da177e4SLinus Torvalds int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
11601da177e4SLinus Torvalds 		    unsigned long pfn, unsigned long size, pgprot_t prot)
11611da177e4SLinus Torvalds {
11621da177e4SLinus Torvalds 	pgd_t *pgd;
11631da177e4SLinus Torvalds 	unsigned long next;
11642d15cab8SHugh Dickins 	unsigned long end = addr + PAGE_ALIGN(size);
11651da177e4SLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
11661da177e4SLinus Torvalds 	int err;
11671da177e4SLinus Torvalds 
11681da177e4SLinus Torvalds 	/*
11691da177e4SLinus Torvalds 	 * Physically remapped pages are special. Tell the
11701da177e4SLinus Torvalds 	 * rest of the world about it:
11711da177e4SLinus Torvalds 	 *   VM_IO tells people not to look at these pages
11721da177e4SLinus Torvalds 	 *	(accesses can have side effects).
1173b5810039SNick Piggin 	 *   VM_RESERVED tells the core MM not to "manage" these pages
1174b5810039SNick Piggin 	 *	(e.g. refcount, mapcount, try to swap them out).
11751da177e4SLinus Torvalds 	 */
11761da177e4SLinus Torvalds 	vma->vm_flags |= VM_IO | VM_RESERVED;
11771da177e4SLinus Torvalds 
11781da177e4SLinus Torvalds 	BUG_ON(addr >= end);
11791da177e4SLinus Torvalds 	pfn -= addr >> PAGE_SHIFT;
11801da177e4SLinus Torvalds 	pgd = pgd_offset(mm, addr);
11811da177e4SLinus Torvalds 	flush_cache_range(vma, addr, end);
11821da177e4SLinus Torvalds 	do {
11831da177e4SLinus Torvalds 		next = pgd_addr_end(addr, end);
11841da177e4SLinus Torvalds 		err = remap_pud_range(mm, pgd, addr, next,
11851da177e4SLinus Torvalds 				pfn + (addr >> PAGE_SHIFT), prot);
11861da177e4SLinus Torvalds 		if (err)
11871da177e4SLinus Torvalds 			break;
11881da177e4SLinus Torvalds 	} while (pgd++, addr = next, addr != end);
11891da177e4SLinus Torvalds 	return err;
11901da177e4SLinus Torvalds }
11911da177e4SLinus Torvalds EXPORT_SYMBOL(remap_pfn_range);
11921da177e4SLinus Torvalds 
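/*
 * Example (an illustrative sketch): the classic user of
 * remap_pfn_range() is a driver's mmap file operation mapping device
 * memory into userspace.  mydev_mmap and dev_phys_base below are
 * hypothetical names.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       dev_phys_base >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 *
 * remap_pfn_range() marks the vma VM_IO | VM_RESERVED itself, so the
 * caller need not set those flags in advance.
 */
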
11931da177e4SLinus Torvalds /*
11948f4e2101SHugh Dickins  * handle_pte_fault chooses page fault handler according to an entry
11958f4e2101SHugh Dickins  * which was read non-atomically.  Before making any commitment, on
11968f4e2101SHugh Dickins  * those architectures or configurations (e.g. i386 with PAE) which
11978f4e2101SHugh Dickins  * might give a mix of unmatched parts, do_swap_page and do_file_page
11988f4e2101SHugh Dickins  * must check under lock before unmapping the pte and proceeding
11998f4e2101SHugh Dickins  * (but do_wp_page is only called after already making such a check;
12008f4e2101SHugh Dickins  * and do_anonymous_page and do_no_page can safely check later on).
12018f4e2101SHugh Dickins  */
12024c21e2f2SHugh Dickins static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
12038f4e2101SHugh Dickins 				pte_t *page_table, pte_t orig_pte)
12048f4e2101SHugh Dickins {
12058f4e2101SHugh Dickins 	int same = 1;
12068f4e2101SHugh Dickins #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
12078f4e2101SHugh Dickins 	if (sizeof(pte_t) > sizeof(unsigned long)) {
12084c21e2f2SHugh Dickins 		spinlock_t *ptl = pte_lockptr(mm, pmd);
12094c21e2f2SHugh Dickins 		spin_lock(ptl);
12108f4e2101SHugh Dickins 		same = pte_same(*page_table, orig_pte);
12114c21e2f2SHugh Dickins 		spin_unlock(ptl);
12128f4e2101SHugh Dickins 	}
12138f4e2101SHugh Dickins #endif
12148f4e2101SHugh Dickins 	pte_unmap(page_table);
12158f4e2101SHugh Dickins 	return same;
12168f4e2101SHugh Dickins }
12178f4e2101SHugh Dickins 
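/*
 * Worked example of the race pte_unmap_same() guards against, assuming
 * i386 with PAE where pte_t is 64 bits but loads are 32-bit:
 *
 *	pte_t snap = *page_table;	(compiles to two 32-bit loads)
 *
 * Another CPU can update the pte under the lock between those two
 * loads, so snap may pair the low half of one pte with the high half
 * of another, i.e. a swap entry or pfn that was never actually mapped.
 * Re-reading under pte_lockptr() and comparing with pte_same() rejects
 * such a mix before any handler commits to it.
 */
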
12188f4e2101SHugh Dickins /*
12191da177e4SLinus Torvalds  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
12201da177e4SLinus Torvalds  * servicing faults for write access.  In the normal case, we always want
12211da177e4SLinus Torvalds  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
12221da177e4SLinus Torvalds  * that do not have writing enabled, when used by access_process_vm.
12231da177e4SLinus Torvalds  */
12241da177e4SLinus Torvalds static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
12251da177e4SLinus Torvalds {
12261da177e4SLinus Torvalds 	if (likely(vma->vm_flags & VM_WRITE))
12271da177e4SLinus Torvalds 		pte = pte_mkwrite(pte);
12281da177e4SLinus Torvalds 	return pte;
12291da177e4SLinus Torvalds }
12301da177e4SLinus Torvalds 
12311da177e4SLinus Torvalds /*
12321da177e4SLinus Torvalds  * This routine handles present pages, when users try to write
12331da177e4SLinus Torvalds  * to a shared page. It is done by copying the page to a new address
12341da177e4SLinus Torvalds  * and decrementing the shared-page counter for the old page.
12351da177e4SLinus Torvalds  *
12361da177e4SLinus Torvalds  * Note that this routine assumes that the protection checks have been
12371da177e4SLinus Torvalds  * done by the caller (the low-level page fault routine in most cases).
12381da177e4SLinus Torvalds  * Thus we can safely just mark it writable once we've done any necessary
12391da177e4SLinus Torvalds  * COW.
12401da177e4SLinus Torvalds  *
12411da177e4SLinus Torvalds  * We also mark the page dirty at this point even though the page will
12421da177e4SLinus Torvalds  * change only once the write actually happens. This avoids a few races,
12431da177e4SLinus Torvalds  * and potentially makes it more efficient.
12441da177e4SLinus Torvalds  *
12458f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
12468f4e2101SHugh Dickins  * but allow concurrent faults), with pte both mapped and locked.
12478f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
12481da177e4SLinus Torvalds  */
12491da177e4SLinus Torvalds static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
125065500d23SHugh Dickins 		unsigned long address, pte_t *page_table, pmd_t *pmd,
12518f4e2101SHugh Dickins 		spinlock_t *ptl, pte_t orig_pte)
12521da177e4SLinus Torvalds {
12531da177e4SLinus Torvalds 	struct page *old_page, *new_page;
125465500d23SHugh Dickins 	unsigned long pfn = pte_pfn(orig_pte);
12551da177e4SLinus Torvalds 	pte_t entry;
125665500d23SHugh Dickins 	int ret = VM_FAULT_MINOR;
12571da177e4SLinus Torvalds 
1258b5810039SNick Piggin 	BUG_ON(vma->vm_flags & VM_RESERVED);
1259b5810039SNick Piggin 
12601da177e4SLinus Torvalds 	if (unlikely(!pfn_valid(pfn))) {
12611da177e4SLinus Torvalds 		/*
126265500d23SHugh Dickins 		 * Page table corrupted: show pte and kill process.
12631da177e4SLinus Torvalds 		 */
1264b5810039SNick Piggin 		print_bad_pte(vma, orig_pte, address);
126565500d23SHugh Dickins 		ret = VM_FAULT_OOM;
126665500d23SHugh Dickins 		goto unlock;
12671da177e4SLinus Torvalds 	}
12681da177e4SLinus Torvalds 	old_page = pfn_to_page(pfn);
12691da177e4SLinus Torvalds 
1270d296e9cdSHugh Dickins 	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
12711da177e4SLinus Torvalds 		int reuse = can_share_swap_page(old_page);
12721da177e4SLinus Torvalds 		unlock_page(old_page);
12731da177e4SLinus Torvalds 		if (reuse) {
12741da177e4SLinus Torvalds 			flush_cache_page(vma, address, pfn);
127565500d23SHugh Dickins 			entry = pte_mkyoung(orig_pte);
127665500d23SHugh Dickins 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
12771da177e4SLinus Torvalds 			ptep_set_access_flags(vma, address, page_table, entry, 1);
12781da177e4SLinus Torvalds 			update_mmu_cache(vma, address, entry);
12791da177e4SLinus Torvalds 			lazy_mmu_prot_update(entry);
128065500d23SHugh Dickins 			ret |= VM_FAULT_WRITE;
128165500d23SHugh Dickins 			goto unlock;
12821da177e4SLinus Torvalds 		}
12831da177e4SLinus Torvalds 	}
12841da177e4SLinus Torvalds 
12851da177e4SLinus Torvalds 	/*
12861da177e4SLinus Torvalds 	 * Ok, we need to copy. Oh, well..
12871da177e4SLinus Torvalds 	 */
12881da177e4SLinus Torvalds 	page_cache_get(old_page);
12898f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
12901da177e4SLinus Torvalds 
12911da177e4SLinus Torvalds 	if (unlikely(anon_vma_prepare(vma)))
129265500d23SHugh Dickins 		goto oom;
12931da177e4SLinus Torvalds 	if (old_page == ZERO_PAGE(address)) {
12941da177e4SLinus Torvalds 		new_page = alloc_zeroed_user_highpage(vma, address);
12951da177e4SLinus Torvalds 		if (!new_page)
129665500d23SHugh Dickins 			goto oom;
12971da177e4SLinus Torvalds 	} else {
12981da177e4SLinus Torvalds 		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
12991da177e4SLinus Torvalds 		if (!new_page)
130065500d23SHugh Dickins 			goto oom;
13011da177e4SLinus Torvalds 		copy_user_highpage(new_page, old_page, address);
13021da177e4SLinus Torvalds 	}
130365500d23SHugh Dickins 
13041da177e4SLinus Torvalds 	/*
13051da177e4SLinus Torvalds 	 * Re-check the pte - we dropped the lock
13061da177e4SLinus Torvalds 	 */
13078f4e2101SHugh Dickins 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
130865500d23SHugh Dickins 	if (likely(pte_same(*page_table, orig_pte))) {
13091da177e4SLinus Torvalds 		page_remove_rmap(old_page);
13104294621fSHugh Dickins 		if (!PageAnon(old_page)) {
13114294621fSHugh Dickins 			inc_mm_counter(mm, anon_rss);
13124294621fSHugh Dickins 			dec_mm_counter(mm, file_rss);
13134294621fSHugh Dickins 		}
13141da177e4SLinus Torvalds 		flush_cache_page(vma, address, pfn);
131565500d23SHugh Dickins 		entry = mk_pte(new_page, vma->vm_page_prot);
131665500d23SHugh Dickins 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
131765500d23SHugh Dickins 		ptep_establish(vma, address, page_table, entry);
131865500d23SHugh Dickins 		update_mmu_cache(vma, address, entry);
131965500d23SHugh Dickins 		lazy_mmu_prot_update(entry);
13201da177e4SLinus Torvalds 		lru_cache_add_active(new_page);
13211da177e4SLinus Torvalds 		page_add_anon_rmap(new_page, vma, address);
13221da177e4SLinus Torvalds 
13231da177e4SLinus Torvalds 		/* Free the old page.. */
13241da177e4SLinus Torvalds 		new_page = old_page;
1325f33ea7f4SNick Piggin 		ret |= VM_FAULT_WRITE;
13261da177e4SLinus Torvalds 	}
13271da177e4SLinus Torvalds 	page_cache_release(new_page);
13281da177e4SLinus Torvalds 	page_cache_release(old_page);
132965500d23SHugh Dickins unlock:
13308f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
1331f33ea7f4SNick Piggin 	return ret;
133265500d23SHugh Dickins oom:
13331da177e4SLinus Torvalds 	page_cache_release(old_page);
13341da177e4SLinus Torvalds 	return VM_FAULT_OOM;
13351da177e4SLinus Torvalds }
13361da177e4SLinus Torvalds 
13371da177e4SLinus Torvalds /*
13381da177e4SLinus Torvalds  * Helper functions for unmap_mapping_range().
13391da177e4SLinus Torvalds  *
13401da177e4SLinus Torvalds  * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
13411da177e4SLinus Torvalds  *
13421da177e4SLinus Torvalds  * We have to restart searching the prio_tree whenever we drop the lock,
13431da177e4SLinus Torvalds  * since the iterator is only valid while the lock is held, and anyway
13441da177e4SLinus Torvalds  * a later vma might be split and reinserted earlier while lock dropped.
13451da177e4SLinus Torvalds  *
13461da177e4SLinus Torvalds  * The list of nonlinear vmas could be handled more efficiently, using
13471da177e4SLinus Torvalds  * a placeholder, but handle it in the same way until a need is shown.
13481da177e4SLinus Torvalds  * It is important to search the prio_tree before the nonlinear list: a
13491da177e4SLinus Torvalds  * vma may become nonlinear and be shifted from the prio_tree to the
13501da177e4SLinus Torvalds  * nonlinear list while the lock is dropped, but never shifted back.
13511da177e4SLinus Torvalds  *
13521da177e4SLinus Torvalds  * In order to make forward progress despite restarting the search,
13531da177e4SLinus Torvalds  * vm_truncate_count is used to mark a vma as now dealt with, so we can
13541da177e4SLinus Torvalds  * quickly skip it next time around.  Since the prio_tree search only
13551da177e4SLinus Torvalds  * shows us those vmas affected by unmapping the range in question, we
13561da177e4SLinus Torvalds  * can't efficiently keep all vmas in step with mapping->truncate_count:
13571da177e4SLinus Torvalds  * so instead reset them all whenever it wraps back to 0 (then go to 1).
13581da177e4SLinus Torvalds  * mapping->truncate_count and vma->vm_truncate_count are protected by
13591da177e4SLinus Torvalds  * i_mmap_lock.
13601da177e4SLinus Torvalds  *
13611da177e4SLinus Torvalds  * In order to make forward progress despite repeatedly restarting some
1362ee39b37bSHugh Dickins  * large vma, note the restart_addr from unmap_vmas when it breaks out:
13631da177e4SLinus Torvalds  * and restart from that address when we reach that vma again.  It might
13641da177e4SLinus Torvalds  * have been split or merged, shrunk or extended, but never shifted: so
13651da177e4SLinus Torvalds  * restart_addr remains valid so long as it remains in the vma's range.
13661da177e4SLinus Torvalds  * unmap_mapping_range forces truncate_count to leap over page-aligned
13671da177e4SLinus Torvalds  * values so we can save vma's restart_addr in its truncate_count field.
13681da177e4SLinus Torvalds  */
13691da177e4SLinus Torvalds #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
13701da177e4SLinus Torvalds 
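/*
 * Worked example of the encoding above: restart addresses are
 * page-aligned, i.e. their low PAGE_SHIFT bits are zero, so with 4K
 * pages is_restart_addr() is true for 0xb7400000 but false for a
 * counter value such as 0x3001.  unmap_mapping_range() below bumps
 * truncate_count past any page-aligned value, so vm_truncate_count is
 * unambiguous: either it equals mapping->truncate_count (vma fully
 * dealt with) or it is a restart address inside the vma.
 */
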
13711da177e4SLinus Torvalds static void reset_vma_truncate_counts(struct address_space *mapping)
13721da177e4SLinus Torvalds {
13731da177e4SLinus Torvalds 	struct vm_area_struct *vma;
13741da177e4SLinus Torvalds 	struct prio_tree_iter iter;
13751da177e4SLinus Torvalds 
13761da177e4SLinus Torvalds 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
13771da177e4SLinus Torvalds 		vma->vm_truncate_count = 0;
13781da177e4SLinus Torvalds 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
13791da177e4SLinus Torvalds 		vma->vm_truncate_count = 0;
13801da177e4SLinus Torvalds }
13811da177e4SLinus Torvalds 
13821da177e4SLinus Torvalds static int unmap_mapping_range_vma(struct vm_area_struct *vma,
13831da177e4SLinus Torvalds 		unsigned long start_addr, unsigned long end_addr,
13841da177e4SLinus Torvalds 		struct zap_details *details)
13851da177e4SLinus Torvalds {
13861da177e4SLinus Torvalds 	unsigned long restart_addr;
13871da177e4SLinus Torvalds 	int need_break;
13881da177e4SLinus Torvalds 
13891da177e4SLinus Torvalds again:
13901da177e4SLinus Torvalds 	restart_addr = vma->vm_truncate_count;
13911da177e4SLinus Torvalds 	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
13921da177e4SLinus Torvalds 		start_addr = restart_addr;
13931da177e4SLinus Torvalds 		if (start_addr >= end_addr) {
13941da177e4SLinus Torvalds 			/* Top of vma has been split off since last time */
13951da177e4SLinus Torvalds 			vma->vm_truncate_count = details->truncate_count;
13961da177e4SLinus Torvalds 			return 0;
13971da177e4SLinus Torvalds 		}
13981da177e4SLinus Torvalds 	}
13991da177e4SLinus Torvalds 
1400ee39b37bSHugh Dickins 	restart_addr = zap_page_range(vma, start_addr,
1401ee39b37bSHugh Dickins 					end_addr - start_addr, details);
14021da177e4SLinus Torvalds 	need_break = need_resched() ||
14031da177e4SLinus Torvalds 			need_lockbreak(details->i_mmap_lock);
14041da177e4SLinus Torvalds 
1405ee39b37bSHugh Dickins 	if (restart_addr >= end_addr) {
14061da177e4SLinus Torvalds 		/* We have now completed this vma: mark it so */
14071da177e4SLinus Torvalds 		vma->vm_truncate_count = details->truncate_count;
14081da177e4SLinus Torvalds 		if (!need_break)
14091da177e4SLinus Torvalds 			return 0;
14101da177e4SLinus Torvalds 	} else {
14111da177e4SLinus Torvalds 		/* Note restart_addr in vma's truncate_count field */
1412ee39b37bSHugh Dickins 		vma->vm_truncate_count = restart_addr;
14131da177e4SLinus Torvalds 		if (!need_break)
14141da177e4SLinus Torvalds 			goto again;
14151da177e4SLinus Torvalds 	}
14161da177e4SLinus Torvalds 
14171da177e4SLinus Torvalds 	spin_unlock(details->i_mmap_lock);
14181da177e4SLinus Torvalds 	cond_resched();
14191da177e4SLinus Torvalds 	spin_lock(details->i_mmap_lock);
14201da177e4SLinus Torvalds 	return -EINTR;
14211da177e4SLinus Torvalds }
14221da177e4SLinus Torvalds 
14231da177e4SLinus Torvalds static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
14241da177e4SLinus Torvalds 					    struct zap_details *details)
14251da177e4SLinus Torvalds {
14261da177e4SLinus Torvalds 	struct vm_area_struct *vma;
14271da177e4SLinus Torvalds 	struct prio_tree_iter iter;
14281da177e4SLinus Torvalds 	pgoff_t vba, vea, zba, zea;
14291da177e4SLinus Torvalds 
14301da177e4SLinus Torvalds restart:
14311da177e4SLinus Torvalds 	vma_prio_tree_foreach(vma, &iter, root,
14321da177e4SLinus Torvalds 			details->first_index, details->last_index) {
14331da177e4SLinus Torvalds 		/* Skip quickly over those we have already dealt with */
14341da177e4SLinus Torvalds 		if (vma->vm_truncate_count == details->truncate_count)
14351da177e4SLinus Torvalds 			continue;
14361da177e4SLinus Torvalds 
14371da177e4SLinus Torvalds 		vba = vma->vm_pgoff;
14381da177e4SLinus Torvalds 		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
14391da177e4SLinus Torvalds 		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
14401da177e4SLinus Torvalds 		zba = details->first_index;
14411da177e4SLinus Torvalds 		if (zba < vba)
14421da177e4SLinus Torvalds 			zba = vba;
14431da177e4SLinus Torvalds 		zea = details->last_index;
14441da177e4SLinus Torvalds 		if (zea > vea)
14451da177e4SLinus Torvalds 			zea = vea;
14461da177e4SLinus Torvalds 
14471da177e4SLinus Torvalds 		if (unmap_mapping_range_vma(vma,
14481da177e4SLinus Torvalds 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
14491da177e4SLinus Torvalds 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
14501da177e4SLinus Torvalds 				details) < 0)
14511da177e4SLinus Torvalds 			goto restart;
14521da177e4SLinus Torvalds 	}
14531da177e4SLinus Torvalds }
14541da177e4SLinus Torvalds 
14551da177e4SLinus Torvalds static inline void unmap_mapping_range_list(struct list_head *head,
14561da177e4SLinus Torvalds 					    struct zap_details *details)
14571da177e4SLinus Torvalds {
14581da177e4SLinus Torvalds 	struct vm_area_struct *vma;
14591da177e4SLinus Torvalds 
14601da177e4SLinus Torvalds 	/*
14611da177e4SLinus Torvalds 	 * In nonlinear VMAs there is no correspondence between virtual address
14621da177e4SLinus Torvalds 	 * offset and file offset.  So we must perform an exhaustive search
14631da177e4SLinus Torvalds 	 * across *all* the pages in each nonlinear VMA, not just the pages
14641da177e4SLinus Torvalds 	 * whose virtual address lies outside the file truncation point.
14651da177e4SLinus Torvalds 	 */
14661da177e4SLinus Torvalds restart:
14671da177e4SLinus Torvalds 	list_for_each_entry(vma, head, shared.vm_set.list) {
14681da177e4SLinus Torvalds 		/* Skip quickly over those we have already dealt with */
14691da177e4SLinus Torvalds 		if (vma->vm_truncate_count == details->truncate_count)
14701da177e4SLinus Torvalds 			continue;
14711da177e4SLinus Torvalds 		details->nonlinear_vma = vma;
14721da177e4SLinus Torvalds 		if (unmap_mapping_range_vma(vma, vma->vm_start,
14731da177e4SLinus Torvalds 					vma->vm_end, details) < 0)
14741da177e4SLinus Torvalds 			goto restart;
14751da177e4SLinus Torvalds 	}
14761da177e4SLinus Torvalds }
14771da177e4SLinus Torvalds 
14781da177e4SLinus Torvalds /**
14791da177e4SLinus Torvalds  * unmap_mapping_range - unmap the portion of all mmaps
14801da177e4SLinus Torvalds  * in the specified address_space corresponding to the specified
14811da177e4SLinus Torvalds  * page range in the underlying file.
14823d41088fSMartin Waitz  * @mapping: the address space containing mmaps to be unmapped.
14831da177e4SLinus Torvalds  * @holebegin: byte in first page to unmap, relative to the start of
14841da177e4SLinus Torvalds  * the underlying file.  This will be rounded down to a PAGE_SIZE
14851da177e4SLinus Torvalds  * boundary.  Note that this is different from vmtruncate(), which
14861da177e4SLinus Torvalds  * must keep the partial page.  In contrast, we must get rid of
14871da177e4SLinus Torvalds  * partial pages.
14881da177e4SLinus Torvalds  * @holelen: size of prospective hole in bytes.  This will be rounded
14891da177e4SLinus Torvalds  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
14901da177e4SLinus Torvalds  * end of the file.
14911da177e4SLinus Torvalds  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
14921da177e4SLinus Torvalds  * 0 when invalidating pagecache, do not throw away private data.
14931da177e4SLinus Torvalds  */
14941da177e4SLinus Torvalds void unmap_mapping_range(struct address_space *mapping,
14951da177e4SLinus Torvalds 		loff_t const holebegin, loff_t const holelen, int even_cows)
14961da177e4SLinus Torvalds {
14971da177e4SLinus Torvalds 	struct zap_details details;
14981da177e4SLinus Torvalds 	pgoff_t hba = holebegin >> PAGE_SHIFT;
14991da177e4SLinus Torvalds 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
15001da177e4SLinus Torvalds 
15011da177e4SLinus Torvalds 	/* Check for overflow. */
15021da177e4SLinus Torvalds 	if (sizeof(holelen) > sizeof(hlen)) {
15031da177e4SLinus Torvalds 		long long holeend =
15041da177e4SLinus Torvalds 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
15051da177e4SLinus Torvalds 		if (holeend & ~(long long)ULONG_MAX)
15061da177e4SLinus Torvalds 			hlen = ULONG_MAX - hba + 1;
15071da177e4SLinus Torvalds 	}
15081da177e4SLinus Torvalds 
15091da177e4SLinus Torvalds 	details.check_mapping = even_cows ? NULL : mapping;
15101da177e4SLinus Torvalds 	details.nonlinear_vma = NULL;
15111da177e4SLinus Torvalds 	details.first_index = hba;
15121da177e4SLinus Torvalds 	details.last_index = hba + hlen - 1;
15131da177e4SLinus Torvalds 	if (details.last_index < details.first_index)
15141da177e4SLinus Torvalds 		details.last_index = ULONG_MAX;
15151da177e4SLinus Torvalds 	details.i_mmap_lock = &mapping->i_mmap_lock;
15161da177e4SLinus Torvalds 
15171da177e4SLinus Torvalds 	spin_lock(&mapping->i_mmap_lock);
15181da177e4SLinus Torvalds 
15191da177e4SLinus Torvalds 	/* serialize i_size write against truncate_count write */
15201da177e4SLinus Torvalds 	smp_wmb();
15211da177e4SLinus Torvalds 	/* Protect against page faults, and endless unmapping loops */
15221da177e4SLinus Torvalds 	mapping->truncate_count++;
15231da177e4SLinus Torvalds 	/*
15241da177e4SLinus Torvalds 	 * On archs where spin_lock has inclusive (acquire-only) semantics,
15251da177e4SLinus Torvalds 	 * such as ia64, this smp_mb() prevents the page table contents
15261da177e4SLinus Torvalds 	 * from being read before the truncate_count increment is visible
15271da177e4SLinus Torvalds 	 * to other CPUs.
15281da177e4SLinus Torvalds 	 */
15291da177e4SLinus Torvalds 	smp_mb();
15301da177e4SLinus Torvalds 	if (unlikely(is_restart_addr(mapping->truncate_count))) {
15311da177e4SLinus Torvalds 		if (mapping->truncate_count == 0)
15321da177e4SLinus Torvalds 			reset_vma_truncate_counts(mapping);
15331da177e4SLinus Torvalds 		mapping->truncate_count++;
15341da177e4SLinus Torvalds 	}
15351da177e4SLinus Torvalds 	details.truncate_count = mapping->truncate_count;
15361da177e4SLinus Torvalds 
15371da177e4SLinus Torvalds 	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
15381da177e4SLinus Torvalds 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
15391da177e4SLinus Torvalds 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
15401da177e4SLinus Torvalds 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
15411da177e4SLinus Torvalds 	spin_unlock(&mapping->i_mmap_lock);
15421da177e4SLinus Torvalds }
15431da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_mapping_range);
15441da177e4SLinus Torvalds 
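/*
 * Worked example of the rounding rules above, with 4K pages: to
 * truncate a file to offset 0x1234 while keeping the partial final
 * page, vmtruncate() below passes holebegin = 0x1234 + PAGE_SIZE - 1 =
 * 0x2233, which rounds down to 0x2000, so unmapping starts at the
 * first page lying wholly beyond the new i_size; holelen is 0, meaning
 * unmap to the end of the file.
 */
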
15451da177e4SLinus Torvalds /*
15461da177e4SLinus Torvalds  * Handle all mappings that got truncated by a "truncate()"
15471da177e4SLinus Torvalds  * system call.
15481da177e4SLinus Torvalds  *
15491da177e4SLinus Torvalds  * NOTE! We have to be ready to update the memory sharing
15501da177e4SLinus Torvalds  * between the file and the memory map for a potential last
15511da177e4SLinus Torvalds  * incomplete page.  Ugly, but necessary.
15521da177e4SLinus Torvalds  */
15531da177e4SLinus Torvalds int vmtruncate(struct inode * inode, loff_t offset)
15541da177e4SLinus Torvalds {
15551da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
15561da177e4SLinus Torvalds 	unsigned long limit;
15571da177e4SLinus Torvalds 
15581da177e4SLinus Torvalds 	if (inode->i_size < offset)
15591da177e4SLinus Torvalds 		goto do_expand;
15601da177e4SLinus Torvalds 	/*
15611da177e4SLinus Torvalds 	 * truncation of in-use swapfiles is disallowed - it would cause
15621da177e4SLinus Torvalds 	 * subsequent swapout to scribble on the now-freed blocks.
15631da177e4SLinus Torvalds 	 */
15641da177e4SLinus Torvalds 	if (IS_SWAPFILE(inode))
15651da177e4SLinus Torvalds 		goto out_busy;
15661da177e4SLinus Torvalds 	i_size_write(inode, offset);
15671da177e4SLinus Torvalds 	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
15681da177e4SLinus Torvalds 	truncate_inode_pages(mapping, offset);
15691da177e4SLinus Torvalds 	goto out_truncate;
15701da177e4SLinus Torvalds 
15711da177e4SLinus Torvalds do_expand:
15721da177e4SLinus Torvalds 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
15731da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && offset > limit)
15741da177e4SLinus Torvalds 		goto out_sig;
15751da177e4SLinus Torvalds 	if (offset > inode->i_sb->s_maxbytes)
15761da177e4SLinus Torvalds 		goto out_big;
15771da177e4SLinus Torvalds 	i_size_write(inode, offset);
15781da177e4SLinus Torvalds 
15791da177e4SLinus Torvalds out_truncate:
15801da177e4SLinus Torvalds 	if (inode->i_op && inode->i_op->truncate)
15811da177e4SLinus Torvalds 		inode->i_op->truncate(inode);
15821da177e4SLinus Torvalds 	return 0;
15831da177e4SLinus Torvalds out_sig:
15841da177e4SLinus Torvalds 	send_sig(SIGXFSZ, current, 0);
15851da177e4SLinus Torvalds out_big:
15861da177e4SLinus Torvalds 	return -EFBIG;
15871da177e4SLinus Torvalds out_busy:
15881da177e4SLinus Torvalds 	return -ETXTBSY;
15891da177e4SLinus Torvalds }
15901da177e4SLinus Torvalds 
15911da177e4SLinus Torvalds EXPORT_SYMBOL(vmtruncate);
15921da177e4SLinus Torvalds 
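/*
 * Example (an illustrative sketch): filesystems normally reach
 * vmtruncate() from their setattr path when ATTR_SIZE is set,
 * typically via inode_setattr(), roughly:
 *
 *	if (attr->ia_valid & ATTR_SIZE) {
 *		error = vmtruncate(inode, attr->ia_size);
 *		if (error)
 *			return error;
 *	}
 */
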
15931da177e4SLinus Torvalds /*
15941da177e4SLinus Torvalds  * Primitive swap readahead code. We simply read an aligned block of
15951da177e4SLinus Torvalds  * (1 << page_cluster) entries in the swap area. This method is chosen
15961da177e4SLinus Torvalds  * because it doesn't cost us any seek time.  We also make sure to queue
15971da177e4SLinus Torvalds  * the 'original' request together with the readahead ones...
15981da177e4SLinus Torvalds  *
15991da177e4SLinus Torvalds  * This has been extended to use the NUMA policies from the mm triggering
16001da177e4SLinus Torvalds  * the readahead.
16011da177e4SLinus Torvalds  *
16021da177e4SLinus Torvalds  * Caller must hold down_read on vma->vm_mm->mmap_sem if vma is not NULL.
16031da177e4SLinus Torvalds  */
16041da177e4SLinus Torvalds void swapin_readahead(swp_entry_t entry, unsigned long addr, struct vm_area_struct *vma)
16051da177e4SLinus Torvalds {
16061da177e4SLinus Torvalds #ifdef CONFIG_NUMA
16071da177e4SLinus Torvalds 	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
16081da177e4SLinus Torvalds #endif
16091da177e4SLinus Torvalds 	int i, num;
16101da177e4SLinus Torvalds 	struct page *new_page;
16111da177e4SLinus Torvalds 	unsigned long offset;
16121da177e4SLinus Torvalds 
16131da177e4SLinus Torvalds 	/*
16141da177e4SLinus Torvalds 	 * Get the number of swap handles we should do readahead I/O on.
16151da177e4SLinus Torvalds 	 */
16161da177e4SLinus Torvalds 	num = valid_swaphandles(entry, &offset);
16171da177e4SLinus Torvalds 	for (i = 0; i < num; offset++, i++) {
16181da177e4SLinus Torvalds 		/* Ok, do the async read-ahead now */
16191da177e4SLinus Torvalds 		new_page = read_swap_cache_async(swp_entry(swp_type(entry),
16201da177e4SLinus Torvalds 							   offset), vma, addr);
16211da177e4SLinus Torvalds 		if (!new_page)
16221da177e4SLinus Torvalds 			break;
16231da177e4SLinus Torvalds 		page_cache_release(new_page);
16241da177e4SLinus Torvalds #ifdef CONFIG_NUMA
16251da177e4SLinus Torvalds 		/*
16261da177e4SLinus Torvalds 		 * Find the next applicable VMA for the NUMA policy.
16271da177e4SLinus Torvalds 		 */
16281da177e4SLinus Torvalds 		addr += PAGE_SIZE;
16291da177e4SLinus Torvalds 		if (addr == 0)
16301da177e4SLinus Torvalds 			vma = NULL;
16311da177e4SLinus Torvalds 		if (vma) {
16321da177e4SLinus Torvalds 			if (addr >= vma->vm_end) {
16331da177e4SLinus Torvalds 				vma = next_vma;
16341da177e4SLinus Torvalds 				next_vma = vma ? vma->vm_next : NULL;
16351da177e4SLinus Torvalds 			}
16361da177e4SLinus Torvalds 			if (vma && addr < vma->vm_start)
16371da177e4SLinus Torvalds 				vma = NULL;
16381da177e4SLinus Torvalds 		} else {
16391da177e4SLinus Torvalds 			if (next_vma && addr >= next_vma->vm_start) {
16401da177e4SLinus Torvalds 				vma = next_vma;
16411da177e4SLinus Torvalds 				next_vma = vma->vm_next;
16421da177e4SLinus Torvalds 			}
16431da177e4SLinus Torvalds 		}
16441da177e4SLinus Torvalds #endif
16451da177e4SLinus Torvalds 	}
16461da177e4SLinus Torvalds 	lru_add_drain();	/* Push any new pages onto the LRU now */
16471da177e4SLinus Torvalds }
16481da177e4SLinus Torvalds 
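/*
 * Worked example for the window above: with page_cluster = 3 (a common
 * default), valid_swaphandles() offers at most 1 << 3 = 8 entries in an
 * aligned block around the faulting entry, and one
 * read_swap_cache_async() is queued per valid entry, so the whole
 * cluster costs roughly one seek on rotational swap.
 */
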
16491da177e4SLinus Torvalds /*
16508f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
16518f4e2101SHugh Dickins  * but allow concurrent faults), and pte mapped but not yet locked.
16528f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
16531da177e4SLinus Torvalds  */
165465500d23SHugh Dickins static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
165565500d23SHugh Dickins 		unsigned long address, pte_t *page_table, pmd_t *pmd,
165665500d23SHugh Dickins 		int write_access, pte_t orig_pte)
16571da177e4SLinus Torvalds {
16588f4e2101SHugh Dickins 	spinlock_t *ptl;
16591da177e4SLinus Torvalds 	struct page *page;
166065500d23SHugh Dickins 	swp_entry_t entry;
16611da177e4SLinus Torvalds 	pte_t pte;
16621da177e4SLinus Torvalds 	int ret = VM_FAULT_MINOR;
16631da177e4SLinus Torvalds 
16644c21e2f2SHugh Dickins 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
16658f4e2101SHugh Dickins 		goto out;
166665500d23SHugh Dickins 
166765500d23SHugh Dickins 	entry = pte_to_swp_entry(orig_pte);
16681da177e4SLinus Torvalds 	page = lookup_swap_cache(entry);
16691da177e4SLinus Torvalds 	if (!page) {
16701da177e4SLinus Torvalds 		swapin_readahead(entry, address, vma);
16711da177e4SLinus Torvalds 		page = read_swap_cache_async(entry, vma, address);
16721da177e4SLinus Torvalds 		if (!page) {
16731da177e4SLinus Torvalds 			/*
16748f4e2101SHugh Dickins 			 * Back out if somebody else faulted in this pte
16758f4e2101SHugh Dickins 			 * while we released the pte lock.
16761da177e4SLinus Torvalds 			 */
16778f4e2101SHugh Dickins 			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
16781da177e4SLinus Torvalds 			if (likely(pte_same(*page_table, orig_pte)))
16791da177e4SLinus Torvalds 				ret = VM_FAULT_OOM;
168065500d23SHugh Dickins 			goto unlock;
16811da177e4SLinus Torvalds 		}
16821da177e4SLinus Torvalds 
16831da177e4SLinus Torvalds 		/* Had to read the page from swap area: Major fault */
16841da177e4SLinus Torvalds 		ret = VM_FAULT_MAJOR;
16851da177e4SLinus Torvalds 		inc_page_state(pgmajfault);
16861da177e4SLinus Torvalds 		grab_swap_token();
16871da177e4SLinus Torvalds 	}
16881da177e4SLinus Torvalds 
16891da177e4SLinus Torvalds 	mark_page_accessed(page);
16901da177e4SLinus Torvalds 	lock_page(page);
16911da177e4SLinus Torvalds 
16921da177e4SLinus Torvalds 	/*
16938f4e2101SHugh Dickins 	 * Back out if somebody else already faulted in this pte.
16941da177e4SLinus Torvalds 	 */
16958f4e2101SHugh Dickins 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
16969e9bef07SHugh Dickins 	if (unlikely(!pte_same(*page_table, orig_pte)))
1697b8107480SKirill Korotaev 		goto out_nomap;
1698b8107480SKirill Korotaev 
1699b8107480SKirill Korotaev 	if (unlikely(!PageUptodate(page))) {
1700b8107480SKirill Korotaev 		ret = VM_FAULT_SIGBUS;
1701b8107480SKirill Korotaev 		goto out_nomap;
17021da177e4SLinus Torvalds 	}
17031da177e4SLinus Torvalds 
17041da177e4SLinus Torvalds 	/* The page isn't present yet, go ahead with the fault. */
17051da177e4SLinus Torvalds 
17064294621fSHugh Dickins 	inc_mm_counter(mm, anon_rss);
17071da177e4SLinus Torvalds 	pte = mk_pte(page, vma->vm_page_prot);
17081da177e4SLinus Torvalds 	if (write_access && can_share_swap_page(page)) {
17091da177e4SLinus Torvalds 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
17101da177e4SLinus Torvalds 		write_access = 0;
17111da177e4SLinus Torvalds 	}
17121da177e4SLinus Torvalds 
17131da177e4SLinus Torvalds 	flush_icache_page(vma, page);
17141da177e4SLinus Torvalds 	set_pte_at(mm, address, page_table, pte);
17151da177e4SLinus Torvalds 	page_add_anon_rmap(page, vma, address);
17161da177e4SLinus Torvalds 
1717c475a8abSHugh Dickins 	swap_free(entry);
1718c475a8abSHugh Dickins 	if (vm_swap_full())
1719c475a8abSHugh Dickins 		remove_exclusive_swap_page(page);
1720c475a8abSHugh Dickins 	unlock_page(page);
1721c475a8abSHugh Dickins 
17221da177e4SLinus Torvalds 	if (write_access) {
17231da177e4SLinus Torvalds 		if (do_wp_page(mm, vma, address,
17248f4e2101SHugh Dickins 				page_table, pmd, ptl, pte) == VM_FAULT_OOM)
17251da177e4SLinus Torvalds 			ret = VM_FAULT_OOM;
17261da177e4SLinus Torvalds 		goto out;
17271da177e4SLinus Torvalds 	}
17281da177e4SLinus Torvalds 
17291da177e4SLinus Torvalds 	/* No need to invalidate - it was non-present before */
17301da177e4SLinus Torvalds 	update_mmu_cache(vma, address, pte);
17311da177e4SLinus Torvalds 	lazy_mmu_prot_update(pte);
173265500d23SHugh Dickins unlock:
17338f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
17341da177e4SLinus Torvalds out:
17351da177e4SLinus Torvalds 	return ret;
1736b8107480SKirill Korotaev out_nomap:
17378f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
1738b8107480SKirill Korotaev 	unlock_page(page);
1739b8107480SKirill Korotaev 	page_cache_release(page);
174065500d23SHugh Dickins 	return ret;
17411da177e4SLinus Torvalds }
17421da177e4SLinus Torvalds 
17431da177e4SLinus Torvalds /*
17448f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
17458f4e2101SHugh Dickins  * but allow concurrent faults), and pte mapped but not yet locked.
17468f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
17471da177e4SLinus Torvalds  */
174865500d23SHugh Dickins static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
174965500d23SHugh Dickins 		unsigned long address, pte_t *page_table, pmd_t *pmd,
175065500d23SHugh Dickins 		int write_access)
17511da177e4SLinus Torvalds {
17528f4e2101SHugh Dickins 	struct page *page;
17538f4e2101SHugh Dickins 	spinlock_t *ptl;
17541da177e4SLinus Torvalds 	pte_t entry;
17551da177e4SLinus Torvalds 
17561da177e4SLinus Torvalds 	if (write_access) {
17571da177e4SLinus Torvalds 		/* Allocate our own private page. */
17581da177e4SLinus Torvalds 		pte_unmap(page_table);
17591da177e4SLinus Torvalds 
17601da177e4SLinus Torvalds 		if (unlikely(anon_vma_prepare(vma)))
176165500d23SHugh Dickins 			goto oom;
176265500d23SHugh Dickins 		page = alloc_zeroed_user_highpage(vma, address);
17631da177e4SLinus Torvalds 		if (!page)
176465500d23SHugh Dickins 			goto oom;
17651da177e4SLinus Torvalds 
176665500d23SHugh Dickins 		entry = mk_pte(page, vma->vm_page_prot);
176765500d23SHugh Dickins 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
17688f4e2101SHugh Dickins 
17698f4e2101SHugh Dickins 		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
17708f4e2101SHugh Dickins 		if (!pte_none(*page_table))
17718f4e2101SHugh Dickins 			goto release;
17728f4e2101SHugh Dickins 		inc_mm_counter(mm, anon_rss);
17731da177e4SLinus Torvalds 		lru_cache_add_active(page);
17741da177e4SLinus Torvalds 		SetPageReferenced(page);
177565500d23SHugh Dickins 		page_add_anon_rmap(page, vma, address);
1776b5810039SNick Piggin 	} else {
17778f4e2101SHugh Dickins 		/* Map the ZERO_PAGE - vm_page_prot is readonly */
17788f4e2101SHugh Dickins 		page = ZERO_PAGE(address);
17798f4e2101SHugh Dickins 		page_cache_get(page);
17808f4e2101SHugh Dickins 		entry = mk_pte(page, vma->vm_page_prot);
17818f4e2101SHugh Dickins 
17824c21e2f2SHugh Dickins 		ptl = pte_lockptr(mm, pmd);
17838f4e2101SHugh Dickins 		spin_lock(ptl);
17848f4e2101SHugh Dickins 		if (!pte_none(*page_table))
17858f4e2101SHugh Dickins 			goto release;
1786b5810039SNick Piggin 		inc_mm_counter(mm, file_rss);
1787b5810039SNick Piggin 		page_add_file_rmap(page);
17881da177e4SLinus Torvalds 	}
17891da177e4SLinus Torvalds 
179065500d23SHugh Dickins 	set_pte_at(mm, address, page_table, entry);
17911da177e4SLinus Torvalds 
17921da177e4SLinus Torvalds 	/* No need to invalidate - it was non-present before */
179365500d23SHugh Dickins 	update_mmu_cache(vma, address, entry);
17941da177e4SLinus Torvalds 	lazy_mmu_prot_update(entry);
179565500d23SHugh Dickins unlock:
17968f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
17971da177e4SLinus Torvalds 	return VM_FAULT_MINOR;
17988f4e2101SHugh Dickins release:
17998f4e2101SHugh Dickins 	page_cache_release(page);
18008f4e2101SHugh Dickins 	goto unlock;
180165500d23SHugh Dickins oom:
18021da177e4SLinus Torvalds 	return VM_FAULT_OOM;
18031da177e4SLinus Torvalds }
18041da177e4SLinus Torvalds 
18051da177e4SLinus Torvalds /*
18061da177e4SLinus Torvalds  * do_no_page() tries to create a new page mapping. It aggressively
18071da177e4SLinus Torvalds  * tries to share with existing pages, but makes a separate copy if
18081da177e4SLinus Torvalds  * the "write_access" parameter is true in order to avoid the next
18091da177e4SLinus Torvalds  * page fault.
18101da177e4SLinus Torvalds  *
18111da177e4SLinus Torvalds  * As this is called only for pages that do not currently exist, we
18121da177e4SLinus Torvalds  * do not need to flush old virtual caches or the TLB.
18131da177e4SLinus Torvalds  *
18148f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
18158f4e2101SHugh Dickins  * but allow concurrent faults), and pte mapped but not yet locked.
18168f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
18171da177e4SLinus Torvalds  */
181865500d23SHugh Dickins static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
181965500d23SHugh Dickins 		unsigned long address, pte_t *page_table, pmd_t *pmd,
182065500d23SHugh Dickins 		int write_access)
18211da177e4SLinus Torvalds {
18228f4e2101SHugh Dickins 	spinlock_t *ptl;
18231da177e4SLinus Torvalds 	struct page *new_page;
18241da177e4SLinus Torvalds 	struct address_space *mapping = NULL;
18251da177e4SLinus Torvalds 	pte_t entry;
18261da177e4SLinus Torvalds 	unsigned int sequence = 0;
18271da177e4SLinus Torvalds 	int ret = VM_FAULT_MINOR;
18281da177e4SLinus Torvalds 	int anon = 0;
18291da177e4SLinus Torvalds 
18301da177e4SLinus Torvalds 	pte_unmap(page_table);
18311da177e4SLinus Torvalds 
18321da177e4SLinus Torvalds 	if (vma->vm_file) {
18331da177e4SLinus Torvalds 		mapping = vma->vm_file->f_mapping;
18341da177e4SLinus Torvalds 		sequence = mapping->truncate_count;
18351da177e4SLinus Torvalds 		smp_rmb(); /* serializes i_size against truncate_count */
18361da177e4SLinus Torvalds 	}
18371da177e4SLinus Torvalds retry:
18381da177e4SLinus Torvalds 	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
18391da177e4SLinus Torvalds 	/*
18401da177e4SLinus Torvalds 	 * No smp_rmb is needed here as long as there's a full
18411da177e4SLinus Torvalds 	 * spin_lock/unlock sequence inside the ->nopage callback
18421da177e4SLinus Torvalds 	 * (for the pagecache lookup) that acts as an implicit
18431da177e4SLinus Torvalds 	 * smp_mb() and prevents the i_size read from happening
18441da177e4SLinus Torvalds 	 * after the next truncate_count read.
18451da177e4SLinus Torvalds 	 */
18461da177e4SLinus Torvalds 
18471da177e4SLinus Torvalds 	/* no page was available -- either SIGBUS or OOM */
18481da177e4SLinus Torvalds 	if (new_page == NOPAGE_SIGBUS)
18491da177e4SLinus Torvalds 		return VM_FAULT_SIGBUS;
18501da177e4SLinus Torvalds 	if (new_page == NOPAGE_OOM)
18511da177e4SLinus Torvalds 		return VM_FAULT_OOM;
18521da177e4SLinus Torvalds 
18531da177e4SLinus Torvalds 	/*
18541da177e4SLinus Torvalds 	 * Should we do an early C-O-W break?
18551da177e4SLinus Torvalds 	 */
18561da177e4SLinus Torvalds 	if (write_access && !(vma->vm_flags & VM_SHARED)) {
18571da177e4SLinus Torvalds 		struct page *page;
18581da177e4SLinus Torvalds 
18591da177e4SLinus Torvalds 		if (unlikely(anon_vma_prepare(vma)))
18601da177e4SLinus Torvalds 			goto oom;
18611da177e4SLinus Torvalds 		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
18621da177e4SLinus Torvalds 		if (!page)
18631da177e4SLinus Torvalds 			goto oom;
18641da177e4SLinus Torvalds 		copy_user_highpage(page, new_page, address);
18651da177e4SLinus Torvalds 		page_cache_release(new_page);
18661da177e4SLinus Torvalds 		new_page = page;
18671da177e4SLinus Torvalds 		anon = 1;
18681da177e4SLinus Torvalds 	}
18691da177e4SLinus Torvalds 
18708f4e2101SHugh Dickins 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
18711da177e4SLinus Torvalds 	/*
18721da177e4SLinus Torvalds 	 * For a file-backed vma, someone could have truncated or otherwise
18731da177e4SLinus Torvalds 	 * invalidated this page.  If unmap_mapping_range got called,
18741da177e4SLinus Torvalds 	 * retry getting the page.
18751da177e4SLinus Torvalds 	 */
18761da177e4SLinus Torvalds 	if (mapping && unlikely(sequence != mapping->truncate_count)) {
18778f4e2101SHugh Dickins 		pte_unmap_unlock(page_table, ptl);
18781da177e4SLinus Torvalds 		page_cache_release(new_page);
187965500d23SHugh Dickins 		cond_resched();
188065500d23SHugh Dickins 		sequence = mapping->truncate_count;
188165500d23SHugh Dickins 		smp_rmb();
18821da177e4SLinus Torvalds 		goto retry;
18831da177e4SLinus Torvalds 	}
18841da177e4SLinus Torvalds 
18851da177e4SLinus Torvalds 	/*
18861da177e4SLinus Torvalds 	 * This silly early PAGE_DIRTY setting removes a race
18871da177e4SLinus Torvalds 	 * due to the bad i386 page protection. But it's valid
18881da177e4SLinus Torvalds 	 * for other architectures too.
18891da177e4SLinus Torvalds 	 *
18901da177e4SLinus Torvalds 	 * Note that if write_access is true, we either now have
18911da177e4SLinus Torvalds 	 * an exclusive copy of the page, or this is a shared mapping,
18921da177e4SLinus Torvalds 	 * so we can make it writable and dirty to avoid having to
18931da177e4SLinus Torvalds 	 * handle that later.
18941da177e4SLinus Torvalds 	 */
18951da177e4SLinus Torvalds 	/* Only go through if we didn't race with anybody else... */
18961da177e4SLinus Torvalds 	if (pte_none(*page_table)) {
18971da177e4SLinus Torvalds 		flush_icache_page(vma, new_page);
18981da177e4SLinus Torvalds 		entry = mk_pte(new_page, vma->vm_page_prot);
18991da177e4SLinus Torvalds 		if (write_access)
19001da177e4SLinus Torvalds 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
19011da177e4SLinus Torvalds 		set_pte_at(mm, address, page_table, entry);
19021da177e4SLinus Torvalds 		if (anon) {
19034294621fSHugh Dickins 			inc_mm_counter(mm, anon_rss);
19041da177e4SLinus Torvalds 			lru_cache_add_active(new_page);
19051da177e4SLinus Torvalds 			page_add_anon_rmap(new_page, vma, address);
1906b5810039SNick Piggin 		} else if (!(vma->vm_flags & VM_RESERVED)) {
19074294621fSHugh Dickins 			inc_mm_counter(mm, file_rss);
19081da177e4SLinus Torvalds 			page_add_file_rmap(new_page);
19094294621fSHugh Dickins 		}
19101da177e4SLinus Torvalds 	} else {
19111da177e4SLinus Torvalds 		/* One of our sibling threads was faster, back out. */
19121da177e4SLinus Torvalds 		page_cache_release(new_page);
191365500d23SHugh Dickins 		goto unlock;
19141da177e4SLinus Torvalds 	}
19151da177e4SLinus Torvalds 
19161da177e4SLinus Torvalds 	/* no need to invalidate: a not-present page shouldn't be cached */
19171da177e4SLinus Torvalds 	update_mmu_cache(vma, address, entry);
19181da177e4SLinus Torvalds 	lazy_mmu_prot_update(entry);
191965500d23SHugh Dickins unlock:
19208f4e2101SHugh Dickins 	pte_unmap_unlock(page_table, ptl);
19211da177e4SLinus Torvalds 	return ret;
19221da177e4SLinus Torvalds oom:
19231da177e4SLinus Torvalds 	page_cache_release(new_page);
192465500d23SHugh Dickins 	return VM_FAULT_OOM;
19251da177e4SLinus Torvalds }
19261da177e4SLinus Torvalds 
19271da177e4SLinus Torvalds /*
19281da177e4SLinus Torvalds  * Fault of a previously existing named mapping. Repopulate the pte
19291da177e4SLinus Torvalds  * from the encoded file_pte if possible. This enables swappable
19301da177e4SLinus Torvalds  * nonlinear vmas.
19318f4e2101SHugh Dickins  *
19328f4e2101SHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
19338f4e2101SHugh Dickins  * but allow concurrent faults), and pte mapped but not yet locked.
19348f4e2101SHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
19351da177e4SLinus Torvalds  */
19361da177e4SLinus Torvalds static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
193765500d23SHugh Dickins 		unsigned long address, pte_t *page_table, pmd_t *pmd,
193865500d23SHugh Dickins 		int write_access, pte_t orig_pte)
19391da177e4SLinus Torvalds {
194065500d23SHugh Dickins 	pgoff_t pgoff;
19411da177e4SLinus Torvalds 	int err;
19421da177e4SLinus Torvalds 
19434c21e2f2SHugh Dickins 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
19448f4e2101SHugh Dickins 		return VM_FAULT_MINOR;
19451da177e4SLinus Torvalds 
194665500d23SHugh Dickins 	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
194765500d23SHugh Dickins 		/*
194865500d23SHugh Dickins 		 * Page table corrupted: show pte and kill process.
194965500d23SHugh Dickins 		 */
1950b5810039SNick Piggin 		print_bad_pte(vma, orig_pte, address);
195165500d23SHugh Dickins 		return VM_FAULT_OOM;
195265500d23SHugh Dickins 	}
195365500d23SHugh Dickins 	/* We can then assume vma->vm_ops && vma->vm_ops->populate */
195465500d23SHugh Dickins 
195565500d23SHugh Dickins 	pgoff = pte_to_pgoff(orig_pte);
195665500d23SHugh Dickins 	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
195765500d23SHugh Dickins 					vma->vm_page_prot, pgoff, 0);
19581da177e4SLinus Torvalds 	if (err == -ENOMEM)
19591da177e4SLinus Torvalds 		return VM_FAULT_OOM;
19601da177e4SLinus Torvalds 	if (err)
19611da177e4SLinus Torvalds 		return VM_FAULT_SIGBUS;
19621da177e4SLinus Torvalds 	return VM_FAULT_MAJOR;
19631da177e4SLinus Torvalds }
19641da177e4SLinus Torvalds 
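/*
 * Example (an illustrative sketch): the nonlinear ptes handled above
 * are created by the remap_file_pages() syscall, e.g. rewiring the
 * second page of a shared file mapping to file offset 10 pages in:
 *
 *	char *base = mmap(NULL, 4 * page_size, PROT_READ,
 *			  MAP_SHARED, fd, 0);
 *	remap_file_pages(base + page_size, page_size, 0, 10, 0);
 *
 * If that page is later unmapped under memory pressure, the pgoff
 * stays encoded in the pte, and the next touch faults in here.
 */
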
19651da177e4SLinus Torvalds /*
19661da177e4SLinus Torvalds  * These routines also need to handle stuff like marking pages dirty
19671da177e4SLinus Torvalds  * and/or accessed for architectures that don't do it in hardware (most
19681da177e4SLinus Torvalds  * RISC architectures).  The early dirtying is also good on the i386.
19691da177e4SLinus Torvalds  *
19701da177e4SLinus Torvalds  * There is also a hook called "update_mmu_cache()" that architectures
19711da177e4SLinus Torvalds  * with external mmu caches can use to update those (ie the Sparc or
19721da177e4SLinus Torvalds  * PowerPC hashed page tables that act as extended TLBs).
19731da177e4SLinus Torvalds  *
1974c74df32cSHugh Dickins  * We enter with non-exclusive mmap_sem (to exclude vma changes,
1975c74df32cSHugh Dickins  * but allow concurrent faults), and pte mapped but not yet locked.
1976c74df32cSHugh Dickins  * We return with mmap_sem still held, but pte unmapped and unlocked.
19771da177e4SLinus Torvalds  */
19781da177e4SLinus Torvalds static inline int handle_pte_fault(struct mm_struct *mm,
19791da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long address,
198065500d23SHugh Dickins 		pte_t *pte, pmd_t *pmd, int write_access)
19811da177e4SLinus Torvalds {
19821da177e4SLinus Torvalds 	pte_t entry;
1983*1a44e149SAndrea Arcangeli 	pte_t old_entry;
19848f4e2101SHugh Dickins 	spinlock_t *ptl;
19851da177e4SLinus Torvalds 
1986*1a44e149SAndrea Arcangeli 	old_entry = entry = *pte;
19871da177e4SLinus Torvalds 	if (!pte_present(entry)) {
198865500d23SHugh Dickins 		if (pte_none(entry)) {
198965500d23SHugh Dickins 			if (!vma->vm_ops || !vma->vm_ops->nopage)
199065500d23SHugh Dickins 				return do_anonymous_page(mm, vma, address,
199165500d23SHugh Dickins 					pte, pmd, write_access);
199265500d23SHugh Dickins 			return do_no_page(mm, vma, address,
199365500d23SHugh Dickins 					pte, pmd, write_access);
199465500d23SHugh Dickins 		}
19951da177e4SLinus Torvalds 		if (pte_file(entry))
199665500d23SHugh Dickins 			return do_file_page(mm, vma, address,
199765500d23SHugh Dickins 					pte, pmd, write_access, entry);
199865500d23SHugh Dickins 		return do_swap_page(mm, vma, address,
199965500d23SHugh Dickins 					pte, pmd, write_access, entry);
20001da177e4SLinus Torvalds 	}
20011da177e4SLinus Torvalds 
20024c21e2f2SHugh Dickins 	ptl = pte_lockptr(mm, pmd);
20038f4e2101SHugh Dickins 	spin_lock(ptl);
20048f4e2101SHugh Dickins 	if (unlikely(!pte_same(*pte, entry)))
20058f4e2101SHugh Dickins 		goto unlock;
20061da177e4SLinus Torvalds 	if (write_access) {
20071da177e4SLinus Torvalds 		if (!pte_write(entry))
20088f4e2101SHugh Dickins 			return do_wp_page(mm, vma, address,
20098f4e2101SHugh Dickins 					pte, pmd, ptl, entry);
20101da177e4SLinus Torvalds 		entry = pte_mkdirty(entry);
20111da177e4SLinus Torvalds 	}
20121da177e4SLinus Torvalds 	entry = pte_mkyoung(entry);
2013*1a44e149SAndrea Arcangeli 	if (!pte_same(old_entry, entry)) {
20141da177e4SLinus Torvalds 		ptep_set_access_flags(vma, address, pte, entry, write_access);
20151da177e4SLinus Torvalds 		update_mmu_cache(vma, address, entry);
20161da177e4SLinus Torvalds 		lazy_mmu_prot_update(entry);
2017*1a44e149SAndrea Arcangeli 	} else {
2018*1a44e149SAndrea Arcangeli 		/*
2019*1a44e149SAndrea Arcangeli 		 * This is needed only for protection faults but the arch code
2020*1a44e149SAndrea Arcangeli 		 * is not yet telling us if this is a protection fault or not.
2021*1a44e149SAndrea Arcangeli 		 * This still avoids useless tlb flushes for .text page faults
2022*1a44e149SAndrea Arcangeli 		 * with threads.
2023*1a44e149SAndrea Arcangeli 		 */
2024*1a44e149SAndrea Arcangeli 		if (write_access)
2025*1a44e149SAndrea Arcangeli 			flush_tlb_page(vma, address);
2026*1a44e149SAndrea Arcangeli 	}
20278f4e2101SHugh Dickins unlock:
20288f4e2101SHugh Dickins 	pte_unmap_unlock(pte, ptl);
20291da177e4SLinus Torvalds 	return VM_FAULT_MINOR;
20301da177e4SLinus Torvalds }
20311da177e4SLinus Torvalds 
20321da177e4SLinus Torvalds /*
20331da177e4SLinus Torvalds  * By the time we get here, we already hold the mm semaphore
20341da177e4SLinus Torvalds  */
2035f33ea7f4SNick Piggin int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
20361da177e4SLinus Torvalds 		unsigned long address, int write_access)
20371da177e4SLinus Torvalds {
20381da177e4SLinus Torvalds 	pgd_t *pgd;
20391da177e4SLinus Torvalds 	pud_t *pud;
20401da177e4SLinus Torvalds 	pmd_t *pmd;
20411da177e4SLinus Torvalds 	pte_t *pte;
20421da177e4SLinus Torvalds 
20431da177e4SLinus Torvalds 	__set_current_state(TASK_RUNNING);
20441da177e4SLinus Torvalds 
20451da177e4SLinus Torvalds 	inc_page_state(pgfault);
20461da177e4SLinus Torvalds 
2047ac9b9c66SHugh Dickins 	if (unlikely(is_vm_hugetlb_page(vma)))
2048ac9b9c66SHugh Dickins 		return hugetlb_fault(mm, vma, address, write_access);
20491da177e4SLinus Torvalds 
20501da177e4SLinus Torvalds 	pgd = pgd_offset(mm, address);
20511da177e4SLinus Torvalds 	pud = pud_alloc(mm, pgd, address);
20521da177e4SLinus Torvalds 	if (!pud)
2053c74df32cSHugh Dickins 		return VM_FAULT_OOM;
20541da177e4SLinus Torvalds 	pmd = pmd_alloc(mm, pud, address);
20551da177e4SLinus Torvalds 	if (!pmd)
2056c74df32cSHugh Dickins 		return VM_FAULT_OOM;
20571da177e4SLinus Torvalds 	pte = pte_alloc_map(mm, pmd, address);
20581da177e4SLinus Torvalds 	if (!pte)
2059c74df32cSHugh Dickins 		return VM_FAULT_OOM;
20601da177e4SLinus Torvalds 
206165500d23SHugh Dickins 	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
20621da177e4SLinus Torvalds }
20631da177e4SLinus Torvalds 
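/*
 * Example (an illustrative sketch): an architecture's page fault
 * handler calls in here with mmap_sem held for read, roughly:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, address);
 *	if (vma && vma->vm_start <= address)
 *		ret = handle_mm_fault(mm, vma, address, write);
 *	up_read(&mm->mmap_sem);
 *
 * where handle_mm_fault() is the wrapper around __handle_mm_fault()
 * provided by <linux/mm.h>.
 */
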
20641da177e4SLinus Torvalds #ifndef __PAGETABLE_PUD_FOLDED
20651da177e4SLinus Torvalds /*
20661da177e4SLinus Torvalds  * Allocate page upper directory.
2067872fec16SHugh Dickins  * We've already handled the fast-path in-line.
20681da177e4SLinus Torvalds  */
20691bb3630eSHugh Dickins int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
20701da177e4SLinus Torvalds {
2071c74df32cSHugh Dickins 	pud_t *new = pud_alloc_one(mm, address);
2072c74df32cSHugh Dickins 	if (!new)
20731bb3630eSHugh Dickins 		return -ENOMEM;
20741da177e4SLinus Torvalds 
2075872fec16SHugh Dickins 	spin_lock(&mm->page_table_lock);
20761bb3630eSHugh Dickins 	if (pgd_present(*pgd))		/* Another has populated it */
20771da177e4SLinus Torvalds 		pud_free(new);
20781bb3630eSHugh Dickins 	else
20791da177e4SLinus Torvalds 		pgd_populate(mm, pgd, new);
2080872fec16SHugh Dickins 	spin_unlock(&mm->page_table_lock);
20811bb3630eSHugh Dickins 	return 0;
20821da177e4SLinus Torvalds }
20831da177e4SLinus Torvalds #endif /* __PAGETABLE_PUD_FOLDED */
20841da177e4SLinus Torvalds 
20851da177e4SLinus Torvalds #ifndef __PAGETABLE_PMD_FOLDED
20861da177e4SLinus Torvalds /*
20871da177e4SLinus Torvalds  * Allocate page middle directory.
2088872fec16SHugh Dickins  * We've already handled the fast-path in-line.
20891da177e4SLinus Torvalds  */
20901bb3630eSHugh Dickins int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
20911da177e4SLinus Torvalds {
2092c74df32cSHugh Dickins 	pmd_t *new = pmd_alloc_one(mm, address);
2093c74df32cSHugh Dickins 	if (!new)
20941bb3630eSHugh Dickins 		return -ENOMEM;
20951da177e4SLinus Torvalds 
2096872fec16SHugh Dickins 	spin_lock(&mm->page_table_lock);
20971da177e4SLinus Torvalds #ifndef __ARCH_HAS_4LEVEL_HACK
20981bb3630eSHugh Dickins 	if (pud_present(*pud))		/* Another thread has populated it */
20991da177e4SLinus Torvalds 		pmd_free(new);
21001bb3630eSHugh Dickins 	else
21011da177e4SLinus Torvalds 		pud_populate(mm, pud, new);
21021da177e4SLinus Torvalds #else
21031bb3630eSHugh Dickins 	if (pgd_present(*pud))		/* Another thread has populated it */
21041da177e4SLinus Torvalds 		pmd_free(new);
21051bb3630eSHugh Dickins 	else
21061da177e4SLinus Torvalds 		pgd_populate(mm, pud, new);
21071da177e4SLinus Torvalds #endif /* __ARCH_HAS_4LEVEL_HACK */
2108872fec16SHugh Dickins 	spin_unlock(&mm->page_table_lock);
21091bb3630eSHugh Dickins 	return 0;
21101da177e4SLinus Torvalds }
21111da177e4SLinus Torvalds #endif /* __PAGETABLE_PMD_FOLDED */
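
/*
 * Note on __ARCH_HAS_4LEVEL_HACK above: architectures using that hack
 * fold the pud level into the pgd, so the "pud" pointer passed in is
 * really a pgd entry and must be tested and populated with the pgd_*
 * helpers, as the #else branch does.
 */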
21121da177e4SLinus Torvalds 
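/*
 * Fault in all pages of the range [addr, end), which must lie within a
 * single vma of current->mm, by driving get_user_pages() over it.
 * Returns 0 when every page was made present, -1 or a negative errno
 * otherwise.  The caller must already hold current->mm->mmap_sem.
 */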
21131da177e4SLinus Torvalds int make_pages_present(unsigned long addr, unsigned long end)
21141da177e4SLinus Torvalds {
21151da177e4SLinus Torvalds 	int ret, len, write;
21161da177e4SLinus Torvalds 	struct vm_area_struct * vma;
21171da177e4SLinus Torvalds 
21181da177e4SLinus Torvalds 	vma = find_vma(current->mm, addr);
21191da177e4SLinus Torvalds 	if (!vma)
21201da177e4SLinus Torvalds 		return -1;
21211da177e4SLinus Torvalds 	write = (vma->vm_flags & VM_WRITE) != 0;
21221da177e4SLinus Torvalds 	BUG_ON(addr >= end);
21241da177e4SLinus Torvalds 	BUG_ON(end > vma->vm_end);
21261da177e4SLinus Torvalds 	len = (end + PAGE_SIZE - 1)/PAGE_SIZE - addr/PAGE_SIZE;
21271da177e4SLinus Torvalds 	ret = get_user_pages(current, current->mm, addr,
21281da177e4SLinus Torvalds 			len, write, 0, NULL, NULL);
21291da177e4SLinus Torvalds 	if (ret < 0)
21301da177e4SLinus Torvalds 		return ret;
21311da177e4SLinus Torvalds 	return ret == len ? 0 : -1;
21321da177e4SLinus Torvalds }
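
/*
 * Typical caller, sketched from the mlock() path (illustrative, not the
 * verbatim mm/mlock.c code; the -EAGAIN is an illustrative choice):
 * once a vma has been marked VM_LOCKED, the pages backing it are
 * faulted in up front.
 */
#if 0	/* sketch only, not compiled */
	vma->vm_flags |= VM_LOCKED;
	if (make_pages_present(start, end))
		return -EAGAIN;	/* part of the range could not be faulted in */
#endif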
21331da177e4SLinus Torvalds 
21341da177e4SLinus Torvalds /*
21351da177e4SLinus Torvalds  * Map a vmalloc()-space virtual address to the physical page.
21361da177e4SLinus Torvalds  */
21371da177e4SLinus Torvalds struct page * vmalloc_to_page(void * vmalloc_addr)
21381da177e4SLinus Torvalds {
21391da177e4SLinus Torvalds 	unsigned long addr = (unsigned long) vmalloc_addr;
21401da177e4SLinus Torvalds 	struct page *page = NULL;
21411da177e4SLinus Torvalds 	pgd_t *pgd = pgd_offset_k(addr);
21421da177e4SLinus Torvalds 	pud_t *pud;
21431da177e4SLinus Torvalds 	pmd_t *pmd;
21441da177e4SLinus Torvalds 	pte_t *ptep, pte;
21451da177e4SLinus Torvalds 
21461da177e4SLinus Torvalds 	if (!pgd_none(*pgd)) {
21471da177e4SLinus Torvalds 		pud = pud_offset(pgd, addr);
21481da177e4SLinus Torvalds 		if (!pud_none(*pud)) {
21491da177e4SLinus Torvalds 			pmd = pmd_offset(pud, addr);
21501da177e4SLinus Torvalds 			if (!pmd_none(*pmd)) {
21511da177e4SLinus Torvalds 				ptep = pte_offset_map(pmd, addr);
21521da177e4SLinus Torvalds 				pte = *ptep;
21531da177e4SLinus Torvalds 				if (pte_present(pte))
21541da177e4SLinus Torvalds 					page = pte_page(pte);
21551da177e4SLinus Torvalds 				pte_unmap(ptep);
21561da177e4SLinus Torvalds 			}
21571da177e4SLinus Torvalds 		}
21581da177e4SLinus Torvalds 	}
21591da177e4SLinus Torvalds 	return page;
21601da177e4SLinus Torvalds }
21611da177e4SLinus Torvalds 
21621da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_to_page);
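
/*
 * A classic consumer (sketch): a driver that mmap()s a vmalloc()ed
 * buffer into userspace resolves each faulting page in its nopage
 * handler.  "my_buf" and "my_vma_nopage" are hypothetical driver
 * names, not anything defined in this file.
 */
#if 0	/* sketch only, not compiled */
extern char *my_buf;	/* hypothetical: vmalloc()ed at driver init */

static struct page *my_vma_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	unsigned long offset = address - vma->vm_start;
	struct page *page = vmalloc_to_page(my_buf + offset);

	get_page(page);		/* nopage must return a referenced page */
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}
#endif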
21631da177e4SLinus Torvalds 
21641da177e4SLinus Torvalds /*
21651da177e4SLinus Torvalds  * Map a vmalloc()-space virtual address to the physical page frame number.
21661da177e4SLinus Torvalds  */
21671da177e4SLinus Torvalds unsigned long vmalloc_to_pfn(void * vmalloc_addr)
21681da177e4SLinus Torvalds {
21691da177e4SLinus Torvalds 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
21701da177e4SLinus Torvalds }
21711da177e4SLinus Torvalds 
21721da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_to_pfn);
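
/*
 * A vmalloc() area is virtually contiguous but physically scattered, so
 * the pfn is only meaningful one page at a time: a range cannot be
 * handed to remap_pfn_range() in a single call.  Sketch below; "buf",
 * "size", "pos" and "vma" are hypothetical locals of a driver's mmap()
 * method.
 */
#if 0	/* sketch only, not compiled */
	for (pos = 0; pos < size; pos += PAGE_SIZE)
		if (remap_pfn_range(vma, vma->vm_start + pos,
				    vmalloc_to_pfn(buf + pos),
				    PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
#endif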
21731da177e4SLinus Torvalds 
21741da177e4SLinus Torvalds #if !defined(__HAVE_ARCH_GATE_AREA)
21751da177e4SLinus Torvalds 
21761da177e4SLinus Torvalds #if defined(AT_SYSINFO_EHDR)
21775ce7852cSAdrian Bunk static struct vm_area_struct gate_vma;
21781da177e4SLinus Torvalds 
21791da177e4SLinus Torvalds static int __init gate_vma_init(void)
21801da177e4SLinus Torvalds {
21811da177e4SLinus Torvalds 	gate_vma.vm_mm = NULL;
21821da177e4SLinus Torvalds 	gate_vma.vm_start = FIXADDR_USER_START;
21831da177e4SLinus Torvalds 	gate_vma.vm_end = FIXADDR_USER_END;
21841da177e4SLinus Torvalds 	gate_vma.vm_page_prot = PAGE_READONLY;
2185b5810039SNick Piggin 	gate_vma.vm_flags = VM_RESERVED;
21861da177e4SLinus Torvalds 	return 0;
21871da177e4SLinus Torvalds }
21881da177e4SLinus Torvalds __initcall(gate_vma_init);
21891da177e4SLinus Torvalds #endif
21901da177e4SLinus Torvalds 
21911da177e4SLinus Torvalds struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21921da177e4SLinus Torvalds {
21931da177e4SLinus Torvalds #if defined(AT_SYSINFO_EHDR)
21941da177e4SLinus Torvalds 	return &gate_vma;
21951da177e4SLinus Torvalds #else
21961da177e4SLinus Torvalds 	return NULL;
21971da177e4SLinus Torvalds #endif
21981da177e4SLinus Torvalds }
21991da177e4SLinus Torvalds 
22001da177e4SLinus Torvalds int in_gate_area_no_task(unsigned long addr)
22011da177e4SLinus Torvalds {
22021da177e4SLinus Torvalds #if defined(AT_SYSINFO_EHDR)
22031da177e4SLinus Torvalds 	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
22041da177e4SLinus Torvalds 		return 1;
22051da177e4SLinus Torvalds #endif
22061da177e4SLinus Torvalds 	return 0;
22071da177e4SLinus Torvalds }
22081da177e4SLinus Torvalds 
22091da177e4SLinus Torvalds #endif	/* __HAVE_ARCH_GATE_AREA */
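
/*
 * Background on the gate area: some architectures map a kernel page into
 * every user address space without a vma in the mm itself, such as the
 * i386 vsyscall/vDSO page at its fixed address between FIXADDR_USER_START
 * and FIXADDR_USER_END.  Its location is advertised to userspace through
 * the AT_SYSINFO_EHDR auxiliary-vector entry, which is why that symbol
 * guards the generic implementation above.
 */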
2210