/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef track_pfn_vma_new
/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
			unsigned long pfn, unsigned long size)
{
	return 0;
}
#endif
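
/*
 * Illustrative only: one plausible way an architecture could take over
 * these hooks.  The #ifndef guards above let an arch header supply its
 * own definition and #define the name to itself; the helper called in
 * this sketch is hypothetical, standing in for whatever memory-type
 * bookkeeping (e.g. x86 PAT) the arch actually does:
 *
 *	int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 *				unsigned long pfn, unsigned long size)
 *	{
 *		return arch_reserve_pfn_memtype(pfn, size, prot);
 *	}
 *	#define track_pfn_vma_new track_pfn_vma_new
 */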

#ifndef track_pfn_vma_copy
/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * track_pfn_vma_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	return 0;
}
#endif

#ifndef untrack_pfn_vma
/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);
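
/*
 * Usage note: the same knob is exposed at runtime as
 * /proc/sys/kernel/randomize_va_space, so booting with "norandmaps"
 * is equivalent to
 *
 *	echo 0 > /proc/sys/kernel/randomize_va_space
 */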

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
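
	/*
	 * Worked example of the "- 1" trick: on 32-bit, a range that
	 * runs to the very top of the address space has end == 0 after
	 * wrap-around.  "end - 1" is then 0xffffffff, which correctly
	 * compares as the highest address, whereas a raw end == 0
	 * would have compared as the very bottom.
	 */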

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and vmtruncate before freeing pgtables
		 */
		anon_vma_unlink(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				anon_vma_unlink(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}
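
/*
 * For illustration, the usual callers are munmap's unmap_region() and
 * exit_mmap(), the latter along the lines of
 *
 *	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
 *
 * where floor FIRST_USER_ADDRESS and ceiling 0 ("the top") say that no
 * neighbouring vma constrains how far the page tables may be freed.
 */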

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	spin_lock(&mm->page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	if (new)
		pte_free(mm, new);
	return 0;
}
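
/*
 * The ordering above, sketched as a two-CPU interleaving (illustrative):
 *
 *	CPU 0 (__pte_alloc)		CPU 1 (lock-free walker)
 *	initialize pte page
 *	smp_wmb()
 *	pmd_populate()			load pmd entry
 *					load from pte page
 *
 * CPU 1's second load is data-dependent on the first, which every
 * architecture except alpha orders for free; alpha inserts
 * smp_read_barrier_depends() in its page table accessors.
 */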

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
	if (file_rss)
		add_mm_counter(mm, file_rss, file_rss);
	if (anon_rss)
		add_mm_counter(mm, anon_rss, anon_rss);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
			  unsigned long vaddr)
{
	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
			"vm_flags = %lx, vaddr = %lx\n",
		(long long)pte_val(pte),
		(vma->vm_mm == current->mm ? current->comm : "???"),
		vma->vm_flags, vaddr);
	dump_stack();
}

static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
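
/*
 * In flag terms: a MAP_PRIVATE mapping keeps VM_MAYWRITE but never has
 * VM_SHARED, so is_cow_mapping() is true for it even while it is
 * mapped read-only; a MAP_SHARED mapping has VM_SHARED set and is
 * never a COW mapping.
 */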

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				pte_t pte)
{
	unsigned long pfn;

	if (HAVE_PTE_SPECIAL) {
		if (likely(!pte_special(pte))) {
			VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
			return pte_page(pte);
		}
		VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
		return NULL;
	}

	/* !HAVE_PTE_SPECIAL case follows: */

	pfn = pte_pfn(pte);

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	VM_BUG_ON(!pfn_valid(pfn));

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 *
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
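
/*
 * Numeric example of the linearity rule above: after
 *
 *	remap_pfn_range(vma, vma->vm_start, 0x1000, size, prot);
 *
 * vm_pgoff is 0x1000, so the pte at vm_start + 2 * PAGE_SIZE carries
 * pfn 0x1002 == vm_pgoff + 2 and is treated as special; a COWed
 * replacement page lives at some unrelated pfn, fails the test, and
 * is treated as a normal page.
 */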

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			swap_duplicate(entry);
			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both parent
				 * and child to be set to read.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page, vma, addr);
		rss[!!PageAnon(page)]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}
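
/*
 * Note the rss[] convention: rss[0] accumulates file pages and rss[1]
 * anonymous pages, indexed by rss[!!PageAnon(page)] above; the totals
 * are folded into the mm counters in one add_mm_rss() call per batch
 * by copy_pte_range() below.
 */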

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[2];

again:
	rss[1] = rss[0] = 0;
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap_nested(src_pte - 1);
	add_mm_rss(dst_mm, rss[0], rss[1]);
	pte_unmap_unlock(dst_pte - 1, dst_ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return 0;
}
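
/*
 * The progress counter is a cheap latency bound: an empty pte costs 1,
 * a copied pte costs 8, and every 32 units we offer to drop both ptl
 * locks if anyone is waiting, then resume the unfinished remainder
 * from the again: label.
 */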

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	if (is_pfn_mapping(vma)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_vma_copy(vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_start(src_mm, addr, end);

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
					    vma, addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_end(src_mm,
						  vma->vm_start, end);
	return ret;
}
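
/*
 * For context (illustrative): copy_page_range() is driven from fork's
 * dup_mmap(), roughly
 *
 *	for each vma of the parent mm:
 *		set up the child's copy of the vma
 *		retval = copy_page_range(mm, oldmm, vma);
 *
 * so an -ENOMEM here unwinds through the whole-mm teardown path rather
 * than being handled locally.
 */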

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int file_rss = 0;
	int anon_rss = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent)) {
			(*zap_work)--;
			continue;
		}

		(*zap_work) -= PAGE_SIZE;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						 addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				anon_rss--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent))
					SetPageReferenced(page);
				file_rss--;
			}
			page_remove_rmap(page, vma);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (!pte_file(ptent))
			free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

	add_mm_rss(mm, file_rss, anon_rss);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return addr;
}
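
/*
 * zap_work accounting above: a populated pte costs PAGE_SIZE, an empty
 * slot costs only 1, so sparse ranges are scanned almost for free
 * while dense ranges exhaust the ZAP_BLOCK_SIZE budget after
 * ZAP_BLOCK_SIZE/PAGE_SIZE real pages and return early.
 */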

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pte_range(tlb, vma, pmd, addr, next,
						zap_work, details);
	} while (pmd++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pud_range(tlb, vma, pgd, addr, next,
						zap_work, details);
	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
	tlb_end_vma(tlb, vma);

	return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif
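
/*
 * With 4K pages that works out to 32KB zapped per lock-hold under
 * CONFIG_PREEMPT and 4MB otherwise; see the scheduling-latency
 * discussion in the unmap_vmas() comment below.
 */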

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	long zap_work = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;
	struct mm_struct *mm = vma->vm_mm;

	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		if (is_pfn_mapping(vma))
			untrack_pfn_vma(vma, 0, 0);

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				/*
				 * It is undesirable to test vma->vm_file as
				 * it should be non-null for a valid hugetlb
				 * area. However, vm_file will be NULL in the
				 * error cleanup path of do_mmap_pgoff. When
				 * hugetlbfs ->mmap method fails,
				 * do_mmap_pgoff() nullifies vma->vm_file
				 * before calling this function to clean up.
				 * Since no pte has actually been setup, it is
				 * safe to do nothing in this case.
				 */
				if (vma->vm_file) {
					unmap_hugepage_range(vma, start, end, NULL);
					zap_work -= (end - start) /
					pages_per_huge_page(hstate_vma(vma));
				}

				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 * Returns 0 if successful.
 */
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end ||
			!(vma->vm_flags & VM_PFNMAP))
		return -1;
	zap_page_range(vma, address, size, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
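
/*
 * Illustrative driver-side use of the export above: a driver that must
 * revoke a user's pfn mapping can do
 *
 *	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *
 * and later re-establish the range, e.g. by faulting it back in with
 * vm_insert_pfn() (the sgi-gru driver is the intended user).
 */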
1134deceb6cdSHugh Dickins 1135deceb6cdSHugh Dickins if (flags & FOLL_GET) 1136deceb6cdSHugh Dickins get_page(page); 1137deceb6cdSHugh Dickins if (flags & FOLL_TOUCH) { 1138deceb6cdSHugh Dickins if ((flags & FOLL_WRITE) && 1139deceb6cdSHugh Dickins !pte_dirty(pte) && !PageDirty(page)) 1140f33ea7f4SNick Piggin set_page_dirty(page); 11411da177e4SLinus Torvalds mark_page_accessed(page); 11421da177e4SLinus Torvalds } 1143deceb6cdSHugh Dickins unlock: 1144deceb6cdSHugh Dickins pte_unmap_unlock(ptep, ptl); 11451da177e4SLinus Torvalds out: 1146deceb6cdSHugh Dickins return page; 1147deceb6cdSHugh Dickins 114889f5b7daSLinus Torvalds bad_page: 114989f5b7daSLinus Torvalds pte_unmap_unlock(ptep, ptl); 115089f5b7daSLinus Torvalds return ERR_PTR(-EFAULT); 115189f5b7daSLinus Torvalds 115289f5b7daSLinus Torvalds no_page: 115389f5b7daSLinus Torvalds pte_unmap_unlock(ptep, ptl); 115489f5b7daSLinus Torvalds if (!pte_none(pte)) 115589f5b7daSLinus Torvalds return page; 115689f5b7daSLinus Torvalds /* Fall through to ZERO_PAGE handling */ 1157deceb6cdSHugh Dickins no_page_table: 1158deceb6cdSHugh Dickins /* 1159deceb6cdSHugh Dickins * When core dumping an enormous anonymous area that nobody 1160deceb6cdSHugh Dickins * has touched so far, we don't want to allocate page tables. 1161deceb6cdSHugh Dickins */ 1162deceb6cdSHugh Dickins if (flags & FOLL_ANON) { 1163557ed1faSNick Piggin page = ZERO_PAGE(0); 1164deceb6cdSHugh Dickins if (flags & FOLL_GET) 1165deceb6cdSHugh Dickins get_page(page); 1166deceb6cdSHugh Dickins BUG_ON(flags & FOLL_WRITE); 11671da177e4SLinus Torvalds } 1168deceb6cdSHugh Dickins return page; 11691da177e4SLinus Torvalds } 11701da177e4SLinus Torvalds 1171e121e418Svenkatesh.pallipadi@intel.com int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address, 1172e121e418Svenkatesh.pallipadi@intel.com pte_t *ret_ptep) 1173e121e418Svenkatesh.pallipadi@intel.com { 1174e121e418Svenkatesh.pallipadi@intel.com pgd_t *pgd; 1175e121e418Svenkatesh.pallipadi@intel.com pud_t *pud; 1176e121e418Svenkatesh.pallipadi@intel.com pmd_t *pmd; 1177e121e418Svenkatesh.pallipadi@intel.com pte_t *ptep, pte; 1178e121e418Svenkatesh.pallipadi@intel.com spinlock_t *ptl; 1179e121e418Svenkatesh.pallipadi@intel.com struct page *page; 1180e121e418Svenkatesh.pallipadi@intel.com struct mm_struct *mm = vma->vm_mm; 1181e121e418Svenkatesh.pallipadi@intel.com 1182e121e418Svenkatesh.pallipadi@intel.com if (!is_pfn_mapping(vma)) 1183e121e418Svenkatesh.pallipadi@intel.com goto err; 1184e121e418Svenkatesh.pallipadi@intel.com 1185e121e418Svenkatesh.pallipadi@intel.com page = NULL; 1186e121e418Svenkatesh.pallipadi@intel.com pgd = pgd_offset(mm, address); 1187e121e418Svenkatesh.pallipadi@intel.com if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 1188e121e418Svenkatesh.pallipadi@intel.com goto err; 1189e121e418Svenkatesh.pallipadi@intel.com 1190e121e418Svenkatesh.pallipadi@intel.com pud = pud_offset(pgd, address); 1191e121e418Svenkatesh.pallipadi@intel.com if (pud_none(*pud) || unlikely(pud_bad(*pud))) 1192e121e418Svenkatesh.pallipadi@intel.com goto err; 1193e121e418Svenkatesh.pallipadi@intel.com 1194e121e418Svenkatesh.pallipadi@intel.com pmd = pmd_offset(pud, address); 1195e121e418Svenkatesh.pallipadi@intel.com if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 1196e121e418Svenkatesh.pallipadi@intel.com goto err; 1197e121e418Svenkatesh.pallipadi@intel.com 1198e121e418Svenkatesh.pallipadi@intel.com ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 1199e121e418Svenkatesh.pallipadi@intel.com 1200e121e418Svenkatesh.pallipadi@intel.com pte = 
*ptep; 1201e121e418Svenkatesh.pallipadi@intel.com if (!pte_present(pte)) 1202e121e418Svenkatesh.pallipadi@intel.com goto err_unlock; 1203e121e418Svenkatesh.pallipadi@intel.com 1204e121e418Svenkatesh.pallipadi@intel.com *ret_ptep = pte; 1205e121e418Svenkatesh.pallipadi@intel.com pte_unmap_unlock(ptep, ptl); 1206e121e418Svenkatesh.pallipadi@intel.com return 0; 1207e121e418Svenkatesh.pallipadi@intel.com 1208e121e418Svenkatesh.pallipadi@intel.com err_unlock: 1209e121e418Svenkatesh.pallipadi@intel.com pte_unmap_unlock(ptep, ptl); 1210e121e418Svenkatesh.pallipadi@intel.com err: 1211e121e418Svenkatesh.pallipadi@intel.com return -EINVAL; 1212e121e418Svenkatesh.pallipadi@intel.com } 1213e121e418Svenkatesh.pallipadi@intel.com 1214672ca28eSLinus Torvalds /* Can we do the FOLL_ANON optimization? */ 1215672ca28eSLinus Torvalds static inline int use_zero_page(struct vm_area_struct *vma) 1216672ca28eSLinus Torvalds { 1217672ca28eSLinus Torvalds /* 1218672ca28eSLinus Torvalds * We don't want to optimize FOLL_ANON for make_pages_present() 1219672ca28eSLinus Torvalds * when it tries to page in a VM_LOCKED region. As to VM_SHARED, 1220672ca28eSLinus Torvalds * we want to get the page from the page tables to make sure 1221672ca28eSLinus Torvalds * that we serialize and update with any other user of that 1222672ca28eSLinus Torvalds * mapping. 1223672ca28eSLinus Torvalds */ 1224672ca28eSLinus Torvalds if (vma->vm_flags & (VM_LOCKED | VM_SHARED)) 1225672ca28eSLinus Torvalds return 0; 1226672ca28eSLinus Torvalds /* 12270d71d10aSNick Piggin * And if we have a fault routine, it's not an anonymous region. 1228672ca28eSLinus Torvalds */ 12290d71d10aSNick Piggin return !vma->vm_ops || !vma->vm_ops->fault; 1230672ca28eSLinus Torvalds } 1231672ca28eSLinus Torvalds 1232b291f000SNick Piggin 1233b291f000SNick Piggin 1234b291f000SNick Piggin int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1235b291f000SNick Piggin unsigned long start, int len, int flags, 12361da177e4SLinus Torvalds struct page **pages, struct vm_area_struct **vmas) 12371da177e4SLinus Torvalds { 12381da177e4SLinus Torvalds int i; 1239b291f000SNick Piggin unsigned int vm_flags = 0; 1240b291f000SNick Piggin int write = !!(flags & GUP_FLAGS_WRITE); 1241b291f000SNick Piggin int force = !!(flags & GUP_FLAGS_FORCE); 1242b291f000SNick Piggin int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS); 12431da177e4SLinus Torvalds 1244900cf086SJonathan Corbet if (len <= 0) 1245900cf086SJonathan Corbet return 0; 12461da177e4SLinus Torvalds /* 12471da177e4SLinus Torvalds * Require read or write permissions. 12481da177e4SLinus Torvalds * If 'force' is set, we only require the "MAY" flags. 12491da177e4SLinus Torvalds */ 1250deceb6cdSHugh Dickins vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); 1251deceb6cdSHugh Dickins vm_flags &= force ? 
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); 12521da177e4SLinus Torvalds i = 0; 12531da177e4SLinus Torvalds 12541da177e4SLinus Torvalds do { 12551da177e4SLinus Torvalds struct vm_area_struct *vma; 1256deceb6cdSHugh Dickins unsigned int foll_flags; 12571da177e4SLinus Torvalds 12581da177e4SLinus Torvalds vma = find_extend_vma(mm, start); 12591da177e4SLinus Torvalds if (!vma && in_gate_area(tsk, start)) { 12601da177e4SLinus Torvalds unsigned long pg = start & PAGE_MASK; 12611da177e4SLinus Torvalds struct vm_area_struct *gate_vma = get_gate_vma(tsk); 12621da177e4SLinus Torvalds pgd_t *pgd; 12631da177e4SLinus Torvalds pud_t *pud; 12641da177e4SLinus Torvalds pmd_t *pmd; 12651da177e4SLinus Torvalds pte_t *pte; 1266b291f000SNick Piggin 1267b291f000SNick Piggin /* user gate pages are read-only */ 1268b291f000SNick Piggin if (!ignore && write) 12691da177e4SLinus Torvalds return i ? : -EFAULT; 12701da177e4SLinus Torvalds if (pg > TASK_SIZE) 12711da177e4SLinus Torvalds pgd = pgd_offset_k(pg); 12721da177e4SLinus Torvalds else 12731da177e4SLinus Torvalds pgd = pgd_offset_gate(mm, pg); 12741da177e4SLinus Torvalds BUG_ON(pgd_none(*pgd)); 12751da177e4SLinus Torvalds pud = pud_offset(pgd, pg); 12761da177e4SLinus Torvalds BUG_ON(pud_none(*pud)); 12771da177e4SLinus Torvalds pmd = pmd_offset(pud, pg); 1278690dbe1cSHugh Dickins if (pmd_none(*pmd)) 1279690dbe1cSHugh Dickins return i ? : -EFAULT; 12801da177e4SLinus Torvalds pte = pte_offset_map(pmd, pg); 1281690dbe1cSHugh Dickins if (pte_none(*pte)) { 1282690dbe1cSHugh Dickins pte_unmap(pte); 1283690dbe1cSHugh Dickins return i ? : -EFAULT; 1284690dbe1cSHugh Dickins } 12851da177e4SLinus Torvalds if (pages) { 1286fa2a455bSNick Piggin struct page *page = vm_normal_page(gate_vma, start, *pte); 12876aab341eSLinus Torvalds pages[i] = page; 12886aab341eSLinus Torvalds if (page) 12896aab341eSLinus Torvalds get_page(page); 12901da177e4SLinus Torvalds } 12911da177e4SLinus Torvalds pte_unmap(pte); 12921da177e4SLinus Torvalds if (vmas) 12931da177e4SLinus Torvalds vmas[i] = gate_vma; 12941da177e4SLinus Torvalds i++; 12951da177e4SLinus Torvalds start += PAGE_SIZE; 12961da177e4SLinus Torvalds len--; 12971da177e4SLinus Torvalds continue; 12981da177e4SLinus Torvalds } 12991da177e4SLinus Torvalds 1300b291f000SNick Piggin if (!vma || 1301b291f000SNick Piggin (vma->vm_flags & (VM_IO | VM_PFNMAP)) || 1302b291f000SNick Piggin (!ignore && !(vm_flags & vma->vm_flags))) 13031da177e4SLinus Torvalds return i ? : -EFAULT; 13041da177e4SLinus Torvalds 13051da177e4SLinus Torvalds if (is_vm_hugetlb_page(vma)) { 13061da177e4SLinus Torvalds i = follow_hugetlb_page(mm, vma, pages, vmas, 13075b23dbe8SAdam Litke &start, &len, i, write); 13081da177e4SLinus Torvalds continue; 13091da177e4SLinus Torvalds } 1310deceb6cdSHugh Dickins 1311deceb6cdSHugh Dickins foll_flags = FOLL_TOUCH; 1312deceb6cdSHugh Dickins if (pages) 1313deceb6cdSHugh Dickins foll_flags |= FOLL_GET; 1314672ca28eSLinus Torvalds if (!write && use_zero_page(vma)) 1315deceb6cdSHugh Dickins foll_flags |= FOLL_ANON; 1316deceb6cdSHugh Dickins 13171da177e4SLinus Torvalds do { 131808ef4729SHugh Dickins struct page *page; 13191da177e4SLinus Torvalds 1320462e00ccSEthan Solomita /* 1321462e00ccSEthan Solomita * If tsk is ooming, cut off its access to large memory 1322462e00ccSEthan Solomita * allocations. It has a pending SIGKILL, but it can't 1323462e00ccSEthan Solomita * be processed until returning to user space. 
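			 * (TIF_MEMDIE marks the task chosen by the OOM
			 * killer; the check below bails out with -ENOMEM.)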
1324462e00ccSEthan Solomita */ 1325462e00ccSEthan Solomita if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE))) 13267a36a752SOleg Nesterov return i ? i : -ENOMEM; 1327462e00ccSEthan Solomita 1328deceb6cdSHugh Dickins if (write) 1329deceb6cdSHugh Dickins foll_flags |= FOLL_WRITE; 1330deceb6cdSHugh Dickins 1331deceb6cdSHugh Dickins cond_resched(); 13326aab341eSLinus Torvalds while (!(page = follow_page(vma, start, foll_flags))) { 1333a68d2ebcSLinus Torvalds int ret; 133483c54070SNick Piggin ret = handle_mm_fault(mm, vma, start, 1335deceb6cdSHugh Dickins foll_flags & FOLL_WRITE); 133683c54070SNick Piggin if (ret & VM_FAULT_ERROR) { 133783c54070SNick Piggin if (ret & VM_FAULT_OOM) 133883c54070SNick Piggin return i ? i : -ENOMEM; 133983c54070SNick Piggin else if (ret & VM_FAULT_SIGBUS) 134083c54070SNick Piggin return i ? i : -EFAULT; 134183c54070SNick Piggin BUG(); 134283c54070SNick Piggin } 134383c54070SNick Piggin if (ret & VM_FAULT_MAJOR) 134483c54070SNick Piggin tsk->maj_flt++; 134583c54070SNick Piggin else 134683c54070SNick Piggin tsk->min_flt++; 134783c54070SNick Piggin 1348f33ea7f4SNick Piggin /* 134983c54070SNick Piggin * The VM_FAULT_WRITE bit tells us that 135083c54070SNick Piggin * do_wp_page has broken COW when necessary, 135183c54070SNick Piggin * even if maybe_mkwrite decided not to set 135283c54070SNick Piggin * pte_write. We can thus safely do subsequent 135383c54070SNick Piggin * page lookups as if they were reads. 1354f33ea7f4SNick Piggin */ 1355a68d2ebcSLinus Torvalds if (ret & VM_FAULT_WRITE) 1356deceb6cdSHugh Dickins foll_flags &= ~FOLL_WRITE; 1357a68d2ebcSLinus Torvalds 13587f7bbbe5SBenjamin Herrenschmidt cond_resched(); 13591da177e4SLinus Torvalds } 136089f5b7daSLinus Torvalds if (IS_ERR(page)) 136189f5b7daSLinus Torvalds return i ? 
i : PTR_ERR(page); 13621da177e4SLinus Torvalds if (pages) { 136308ef4729SHugh Dickins pages[i] = page; 136403beb076SJames Bottomley 1365a6f36be3SRussell King flush_anon_page(vma, page, start); 136608ef4729SHugh Dickins flush_dcache_page(page); 13671da177e4SLinus Torvalds } 13681da177e4SLinus Torvalds if (vmas) 13691da177e4SLinus Torvalds vmas[i] = vma; 13701da177e4SLinus Torvalds i++; 13711da177e4SLinus Torvalds start += PAGE_SIZE; 13721da177e4SLinus Torvalds len--; 13731da177e4SLinus Torvalds } while (len && start < vma->vm_end); 13741da177e4SLinus Torvalds } while (len); 13751da177e4SLinus Torvalds return i; 13761da177e4SLinus Torvalds } 1377b291f000SNick Piggin 1378b291f000SNick Piggin int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1379b291f000SNick Piggin unsigned long start, int len, int write, int force, 1380b291f000SNick Piggin struct page **pages, struct vm_area_struct **vmas) 1381b291f000SNick Piggin { 1382b291f000SNick Piggin int flags = 0; 1383b291f000SNick Piggin 1384b291f000SNick Piggin if (write) 1385b291f000SNick Piggin flags |= GUP_FLAGS_WRITE; 1386b291f000SNick Piggin if (force) 1387b291f000SNick Piggin flags |= GUP_FLAGS_FORCE; 1388b291f000SNick Piggin 1389b291f000SNick Piggin return __get_user_pages(tsk, mm, 1390b291f000SNick Piggin start, len, flags, 1391b291f000SNick Piggin pages, vmas); 1392b291f000SNick Piggin } 1393b291f000SNick Piggin 13941da177e4SLinus Torvalds EXPORT_SYMBOL(get_user_pages); 13951da177e4SLinus Torvalds 1396920c7a5dSHarvey Harrison pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, 1397920c7a5dSHarvey Harrison spinlock_t **ptl) 1398c9cfcddfSLinus Torvalds { 1399c9cfcddfSLinus Torvalds pgd_t * pgd = pgd_offset(mm, addr); 1400c9cfcddfSLinus Torvalds pud_t * pud = pud_alloc(mm, pgd, addr); 1401c9cfcddfSLinus Torvalds if (pud) { 140249c91fb0STrond Myklebust pmd_t * pmd = pmd_alloc(mm, pud, addr); 1403c9cfcddfSLinus Torvalds if (pmd) 1404c9cfcddfSLinus Torvalds return pte_alloc_map_lock(mm, pmd, addr, ptl); 1405c9cfcddfSLinus Torvalds } 1406c9cfcddfSLinus Torvalds return NULL; 1407c9cfcddfSLinus Torvalds } 1408c9cfcddfSLinus Torvalds 14091da177e4SLinus Torvalds /* 1410238f58d8SLinus Torvalds * This is the old fallback for page remapping. 1411238f58d8SLinus Torvalds * 1412238f58d8SLinus Torvalds * For historical reasons, it only allows reserved pages. Only 1413238f58d8SLinus Torvalds * old drivers should use this, and they needed to mark their 1414238f58d8SLinus Torvalds * pages reserved for the old functions anyway. 
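 *
 * (vm_insert_page() below is the modern entry point: it validates the
 * page and the address before handing off to this helper with the
 * vma's own vm_page_prot.)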
1415238f58d8SLinus Torvalds */ 1416423bad60SNick Piggin static int insert_page(struct vm_area_struct *vma, unsigned long addr, 1417423bad60SNick Piggin struct page *page, pgprot_t prot) 1418238f58d8SLinus Torvalds { 1419423bad60SNick Piggin struct mm_struct *mm = vma->vm_mm; 1420238f58d8SLinus Torvalds int retval; 1421238f58d8SLinus Torvalds pte_t *pte; 1422238f58d8SLinus Torvalds spinlock_t *ptl; 1423238f58d8SLinus Torvalds 1424238f58d8SLinus Torvalds retval = -EINVAL; 1425a145dd41SLinus Torvalds if (PageAnon(page)) 14265b4e655eSKAMEZAWA Hiroyuki goto out; 1427238f58d8SLinus Torvalds retval = -ENOMEM; 1428238f58d8SLinus Torvalds flush_dcache_page(page); 1429c9cfcddfSLinus Torvalds pte = get_locked_pte(mm, addr, &ptl); 1430238f58d8SLinus Torvalds if (!pte) 14315b4e655eSKAMEZAWA Hiroyuki goto out; 1432238f58d8SLinus Torvalds retval = -EBUSY; 1433238f58d8SLinus Torvalds if (!pte_none(*pte)) 1434238f58d8SLinus Torvalds goto out_unlock; 1435238f58d8SLinus Torvalds 1436238f58d8SLinus Torvalds /* Ok, finally just insert the thing.. */ 1437238f58d8SLinus Torvalds get_page(page); 1438238f58d8SLinus Torvalds inc_mm_counter(mm, file_rss); 1439238f58d8SLinus Torvalds page_add_file_rmap(page); 1440238f58d8SLinus Torvalds set_pte_at(mm, addr, pte, mk_pte(page, prot)); 1441238f58d8SLinus Torvalds 1442238f58d8SLinus Torvalds retval = 0; 14438a9f3ccdSBalbir Singh pte_unmap_unlock(pte, ptl); 14448a9f3ccdSBalbir Singh return retval; 1445238f58d8SLinus Torvalds out_unlock: 1446238f58d8SLinus Torvalds pte_unmap_unlock(pte, ptl); 1447238f58d8SLinus Torvalds out: 1448238f58d8SLinus Torvalds return retval; 1449238f58d8SLinus Torvalds } 1450238f58d8SLinus Torvalds 1451bfa5bf6dSRolf Eike Beer /** 1452bfa5bf6dSRolf Eike Beer * vm_insert_page - insert single page into user vma 1453bfa5bf6dSRolf Eike Beer * @vma: user vma to map to 1454bfa5bf6dSRolf Eike Beer * @addr: target user address of this page 1455bfa5bf6dSRolf Eike Beer * @page: source kernel page 1456bfa5bf6dSRolf Eike Beer * 1457a145dd41SLinus Torvalds * This allows drivers to insert individual pages they've allocated 1458a145dd41SLinus Torvalds * into a user vma. 1459a145dd41SLinus Torvalds * 1460a145dd41SLinus Torvalds * The page has to be a nice clean _individual_ kernel allocation. 1461a145dd41SLinus Torvalds * If you allocate a compound page, you need to have marked it as 1462a145dd41SLinus Torvalds * such (__GFP_COMP), or manually just split the page up yourself 14638dfcc9baSNick Piggin * (see split_page()). 1464a145dd41SLinus Torvalds * 1465a145dd41SLinus Torvalds * NOTE! Traditionally this was done with "remap_pfn_range()" which 1466a145dd41SLinus Torvalds * took an arbitrary page protection parameter. This doesn't allow 1467a145dd41SLinus Torvalds * that. Your vma protection will have to be set up correctly, which 1468a145dd41SLinus Torvalds * means that if you want a shared writable mapping, you'd better 1469a145dd41SLinus Torvalds * ask for a shared writable mapping! 1470a145dd41SLinus Torvalds * 1471a145dd41SLinus Torvalds * The page does not need to be reserved. 
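 *
 * (Illustrative use, editor's sketch only; "my_page" stands for a
 * page the driver allocated itself, e.g. with alloc_page(): from an
 * ->mmap handler one might do
 *
 *	err = vm_insert_page(vma, vma->vm_start, my_page);
 *
 * and propagate any nonzero error to the caller.)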
1472a145dd41SLinus Torvalds  */
1473423bad60SNick Piggin int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1474423bad60SNick Piggin 			struct page *page)
1475a145dd41SLinus Torvalds {
1476a145dd41SLinus Torvalds 	if (addr < vma->vm_start || addr >= vma->vm_end)
1477a145dd41SLinus Torvalds 		return -EFAULT;
1478a145dd41SLinus Torvalds 	if (!page_count(page))
1479a145dd41SLinus Torvalds 		return -EINVAL;
14804d7672b4SLinus Torvalds 	vma->vm_flags |= VM_INSERTPAGE;
1481423bad60SNick Piggin 	return insert_page(vma, addr, page, vma->vm_page_prot);
1482a145dd41SLinus Torvalds }
1483e3c3374fSLinus Torvalds EXPORT_SYMBOL(vm_insert_page);
1484a145dd41SLinus Torvalds 
1485423bad60SNick Piggin static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1486423bad60SNick Piggin 			unsigned long pfn, pgprot_t prot)
1487423bad60SNick Piggin {
1488423bad60SNick Piggin 	struct mm_struct *mm = vma->vm_mm;
1489423bad60SNick Piggin 	int retval;
1490423bad60SNick Piggin 	pte_t *pte, entry;
1491423bad60SNick Piggin 	spinlock_t *ptl;
1492423bad60SNick Piggin 
1493423bad60SNick Piggin 	retval = -ENOMEM;
1494423bad60SNick Piggin 	pte = get_locked_pte(mm, addr, &ptl);
1495423bad60SNick Piggin 	if (!pte)
1496423bad60SNick Piggin 		goto out;
1497423bad60SNick Piggin 	retval = -EBUSY;
1498423bad60SNick Piggin 	if (!pte_none(*pte))
1499423bad60SNick Piggin 		goto out_unlock;
1500423bad60SNick Piggin 
1501423bad60SNick Piggin 	/* Ok, finally just insert the thing.. */
1502423bad60SNick Piggin 	entry = pte_mkspecial(pfn_pte(pfn, prot));
1503423bad60SNick Piggin 	set_pte_at(mm, addr, pte, entry);
1504423bad60SNick Piggin 	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
1505423bad60SNick Piggin 
1506423bad60SNick Piggin 	retval = 0;
1507423bad60SNick Piggin out_unlock:
1508423bad60SNick Piggin 	pte_unmap_unlock(pte, ptl);
1509423bad60SNick Piggin out:
1510423bad60SNick Piggin 	return retval;
1511423bad60SNick Piggin }
1512423bad60SNick Piggin 
1513e0dc0d8fSNick Piggin /**
1514e0dc0d8fSNick Piggin  * vm_insert_pfn - insert single pfn into user vma
1515e0dc0d8fSNick Piggin  * @vma: user vma to map to
1516e0dc0d8fSNick Piggin  * @addr: target user address of this page
1517e0dc0d8fSNick Piggin  * @pfn: source kernel pfn
1518e0dc0d8fSNick Piggin  *
1519e0dc0d8fSNick Piggin  * Similar to vm_insert_page, this allows drivers to insert individual pages
1520e0dc0d8fSNick Piggin  * they've allocated into a user vma. Same comments apply.
1521e0dc0d8fSNick Piggin  *
1522e0dc0d8fSNick Piggin  * This function should only be called from a vm_ops->fault handler, and
1523e0dc0d8fSNick Piggin  * in that case the handler should return NULL.
15240d71d10aSNick Piggin  *
15250d71d10aSNick Piggin  * vma cannot be a COW mapping.
15260d71d10aSNick Piggin  *
15270d71d10aSNick Piggin  * As this is called only for pages that do not currently exist, we
15280d71d10aSNick Piggin  * do not need to flush old virtual caches or the TLB.
1529e0dc0d8fSNick Piggin  */
1530e0dc0d8fSNick Piggin int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1531e0dc0d8fSNick Piggin 			unsigned long pfn)
1532e0dc0d8fSNick Piggin {
1533*2ab64037Svenkatesh.pallipadi@intel.com 	int ret;
15347e675137SNick Piggin 	/*
15357e675137SNick Piggin 	 * Technically, architectures with pte_special can avoid all these
15367e675137SNick Piggin 	 * restrictions (same for remap_pfn_range). However we would like
15377e675137SNick Piggin 	 * consistency in testing and feature parity among all, so we should
15387e675137SNick Piggin 	 * try to keep these invariants in place for everybody.
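	 * (Concretely, the BUG_ON()s below require that exactly one of
	 * VM_PFNMAP and VM_MIXEDMAP is set, that a VM_PFNMAP vma is not
	 * copy-on-write, and that no pfn_valid() pfn reaches a
	 * VM_MIXEDMAP vma through this function.)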
15397e675137SNick Piggin */ 1540b379d790SJared Hulbert BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 1541b379d790SJared Hulbert BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 1542b379d790SJared Hulbert (VM_PFNMAP|VM_MIXEDMAP)); 1543b379d790SJared Hulbert BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 1544b379d790SJared Hulbert BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 1545e0dc0d8fSNick Piggin 1546423bad60SNick Piggin if (addr < vma->vm_start || addr >= vma->vm_end) 1547423bad60SNick Piggin return -EFAULT; 1548*2ab64037Svenkatesh.pallipadi@intel.com if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE)) 1549*2ab64037Svenkatesh.pallipadi@intel.com return -EINVAL; 1550*2ab64037Svenkatesh.pallipadi@intel.com 1551*2ab64037Svenkatesh.pallipadi@intel.com ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot); 1552*2ab64037Svenkatesh.pallipadi@intel.com 1553*2ab64037Svenkatesh.pallipadi@intel.com if (ret) 1554*2ab64037Svenkatesh.pallipadi@intel.com untrack_pfn_vma(vma, pfn, PAGE_SIZE); 1555*2ab64037Svenkatesh.pallipadi@intel.com 1556*2ab64037Svenkatesh.pallipadi@intel.com return ret; 1557e0dc0d8fSNick Piggin } 1558e0dc0d8fSNick Piggin EXPORT_SYMBOL(vm_insert_pfn); 1559e0dc0d8fSNick Piggin 1560423bad60SNick Piggin int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 1561423bad60SNick Piggin unsigned long pfn) 1562423bad60SNick Piggin { 1563423bad60SNick Piggin BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); 1564423bad60SNick Piggin 1565423bad60SNick Piggin if (addr < vma->vm_start || addr >= vma->vm_end) 1566423bad60SNick Piggin return -EFAULT; 1567423bad60SNick Piggin 1568423bad60SNick Piggin /* 1569423bad60SNick Piggin * If we don't have pte special, then we have to use the pfn_valid() 1570423bad60SNick Piggin * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* 1571423bad60SNick Piggin * refcount the page if pfn_valid is true (hence insert_page rather 1572423bad60SNick Piggin * than insert_pfn). 1573423bad60SNick Piggin */ 1574423bad60SNick Piggin if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) { 1575423bad60SNick Piggin struct page *page; 1576423bad60SNick Piggin 1577423bad60SNick Piggin page = pfn_to_page(pfn); 1578423bad60SNick Piggin return insert_page(vma, addr, page, vma->vm_page_prot); 1579423bad60SNick Piggin } 1580423bad60SNick Piggin return insert_pfn(vma, addr, pfn, vma->vm_page_prot); 1581423bad60SNick Piggin } 1582423bad60SNick Piggin EXPORT_SYMBOL(vm_insert_mixed); 1583423bad60SNick Piggin 1584a145dd41SLinus Torvalds /* 15851da177e4SLinus Torvalds * maps a range of physical memory into the requested pages. the old 15861da177e4SLinus Torvalds * mappings are removed. 
any references to nonexistent pages result
15871da177e4SLinus Torvalds  * in null mappings (currently treated as "copy-on-access")
15881da177e4SLinus Torvalds  */
15891da177e4SLinus Torvalds static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
15901da177e4SLinus Torvalds 			unsigned long addr, unsigned long end,
15911da177e4SLinus Torvalds 			unsigned long pfn, pgprot_t prot)
15921da177e4SLinus Torvalds {
15931da177e4SLinus Torvalds 	pte_t *pte;
1594c74df32cSHugh Dickins 	spinlock_t *ptl;
15951da177e4SLinus Torvalds 
1596c74df32cSHugh Dickins 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
15971da177e4SLinus Torvalds 	if (!pte)
15981da177e4SLinus Torvalds 		return -ENOMEM;
15996606c3e0SZachary Amsden 	arch_enter_lazy_mmu_mode();
16001da177e4SLinus Torvalds 	do {
16011da177e4SLinus Torvalds 		BUG_ON(!pte_none(*pte));
16027e675137SNick Piggin 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
16031da177e4SLinus Torvalds 		pfn++;
16041da177e4SLinus Torvalds 	} while (pte++, addr += PAGE_SIZE, addr != end);
16056606c3e0SZachary Amsden 	arch_leave_lazy_mmu_mode();
1606c74df32cSHugh Dickins 	pte_unmap_unlock(pte - 1, ptl);
16071da177e4SLinus Torvalds 	return 0;
16081da177e4SLinus Torvalds }
16091da177e4SLinus Torvalds 
16101da177e4SLinus Torvalds static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
16111da177e4SLinus Torvalds 			unsigned long addr, unsigned long end,
16121da177e4SLinus Torvalds 			unsigned long pfn, pgprot_t prot)
16131da177e4SLinus Torvalds {
16141da177e4SLinus Torvalds 	pmd_t *pmd;
16151da177e4SLinus Torvalds 	unsigned long next;
16161da177e4SLinus Torvalds 
16171da177e4SLinus Torvalds 	pfn -= addr >> PAGE_SHIFT;
16181da177e4SLinus Torvalds 	pmd = pmd_alloc(mm, pud, addr);
16191da177e4SLinus Torvalds 	if (!pmd)
16201da177e4SLinus Torvalds 		return -ENOMEM;
16211da177e4SLinus Torvalds 	do {
16221da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
16231da177e4SLinus Torvalds 		if (remap_pte_range(mm, pmd, addr, next,
16241da177e4SLinus Torvalds 				pfn + (addr >> PAGE_SHIFT), prot))
16251da177e4SLinus Torvalds 			return -ENOMEM;
16261da177e4SLinus Torvalds 	} while (pmd++, addr = next, addr != end);
16271da177e4SLinus Torvalds 	return 0;
16281da177e4SLinus Torvalds }
16291da177e4SLinus Torvalds 
16301da177e4SLinus Torvalds static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
16311da177e4SLinus Torvalds 			unsigned long addr, unsigned long end,
16321da177e4SLinus Torvalds 			unsigned long pfn, pgprot_t prot)
16331da177e4SLinus Torvalds {
16341da177e4SLinus Torvalds 	pud_t *pud;
16351da177e4SLinus Torvalds 	unsigned long next;
16361da177e4SLinus Torvalds 
16371da177e4SLinus Torvalds 	pfn -= addr >> PAGE_SHIFT;
16381da177e4SLinus Torvalds 	pud = pud_alloc(mm, pgd, addr);
16391da177e4SLinus Torvalds 	if (!pud)
16401da177e4SLinus Torvalds 		return -ENOMEM;
16411da177e4SLinus Torvalds 	do {
16421da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
16431da177e4SLinus Torvalds 		if (remap_pmd_range(mm, pud, addr, next,
16441da177e4SLinus Torvalds 				pfn + (addr >> PAGE_SHIFT), prot))
16451da177e4SLinus Torvalds 			return -ENOMEM;
16461da177e4SLinus Torvalds 	} while (pud++, addr = next, addr != end);
16471da177e4SLinus Torvalds 	return 0;
16481da177e4SLinus Torvalds }
16491da177e4SLinus Torvalds 
1650bfa5bf6dSRolf Eike Beer /**
1651bfa5bf6dSRolf Eike Beer  * remap_pfn_range - remap kernel memory to userspace
1652bfa5bf6dSRolf Eike Beer  * @vma: user vma to map to
1653bfa5bf6dSRolf Eike Beer  * @addr: target user address to start at
1654bfa5bf6dSRolf Eike Beer  * @pfn: page frame number of the kernel physical memory to map
1655bfa5bf6dSRolf Eike Beer * @size: size of map area 1656bfa5bf6dSRolf Eike Beer * @prot: page protection flags for this mapping 1657bfa5bf6dSRolf Eike Beer * 1658bfa5bf6dSRolf Eike Beer * Note: this is only safe if the mm semaphore is held when called. 1659bfa5bf6dSRolf Eike Beer */ 16601da177e4SLinus Torvalds int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 16611da177e4SLinus Torvalds unsigned long pfn, unsigned long size, pgprot_t prot) 16621da177e4SLinus Torvalds { 16631da177e4SLinus Torvalds pgd_t *pgd; 16641da177e4SLinus Torvalds unsigned long next; 16652d15cab8SHugh Dickins unsigned long end = addr + PAGE_ALIGN(size); 16661da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 16671da177e4SLinus Torvalds int err; 16681da177e4SLinus Torvalds 16691da177e4SLinus Torvalds /* 16701da177e4SLinus Torvalds * Physically remapped pages are special. Tell the 16711da177e4SLinus Torvalds * rest of the world about it: 16721da177e4SLinus Torvalds * VM_IO tells people not to look at these pages 16731da177e4SLinus Torvalds * (accesses can have side effects). 16740b14c179SHugh Dickins * VM_RESERVED is specified all over the place, because 16750b14c179SHugh Dickins * in 2.4 it kept swapout's vma scan off this vma; but 16760b14c179SHugh Dickins * in 2.6 the LRU scan won't even find its pages, so this 16770b14c179SHugh Dickins * flag means no more than count its pages in reserved_vm, 16780b14c179SHugh Dickins * and omit it from core dump, even when VM_IO turned off. 16796aab341eSLinus Torvalds * VM_PFNMAP tells the core MM that the base pages are just 16806aab341eSLinus Torvalds * raw PFN mappings, and do not have a "struct page" associated 16816aab341eSLinus Torvalds * with them. 1682fb155c16SLinus Torvalds * 1683fb155c16SLinus Torvalds * There's a horrible special case to handle copy-on-write 1684fb155c16SLinus Torvalds * behaviour that some programs depend on. We mark the "original" 1685fb155c16SLinus Torvalds * un-COW'ed pages by matching them up with "vma->vm_pgoff". 
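	 *
	 * A typical caller (editor's sketch only; "pbase" is a
	 * hypothetical physical base address) is a driver's ->mmap
	 * handler mapping a device region:
	 *
	 *	remap_pfn_range(vma, vma->vm_start, pbase >> PAGE_SHIFT,
	 *			vma->vm_end - vma->vm_start,
	 *			vma->vm_page_prot);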
16861da177e4SLinus Torvalds */ 16873c8bb73aSvenkatesh.pallipadi@intel.com if (addr == vma->vm_start && end == vma->vm_end) 16886aab341eSLinus Torvalds vma->vm_pgoff = pfn; 16893c8bb73aSvenkatesh.pallipadi@intel.com else if (is_cow_mapping(vma->vm_flags)) 16903c8bb73aSvenkatesh.pallipadi@intel.com return -EINVAL; 1691fb155c16SLinus Torvalds 1692fb155c16SLinus Torvalds vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 16931da177e4SLinus Torvalds 1694*2ab64037Svenkatesh.pallipadi@intel.com err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size)); 1695*2ab64037Svenkatesh.pallipadi@intel.com if (err) 1696*2ab64037Svenkatesh.pallipadi@intel.com return -EINVAL; 1697*2ab64037Svenkatesh.pallipadi@intel.com 16981da177e4SLinus Torvalds BUG_ON(addr >= end); 16991da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT; 17001da177e4SLinus Torvalds pgd = pgd_offset(mm, addr); 17011da177e4SLinus Torvalds flush_cache_range(vma, addr, end); 17021da177e4SLinus Torvalds do { 17031da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 17041da177e4SLinus Torvalds err = remap_pud_range(mm, pgd, addr, next, 17051da177e4SLinus Torvalds pfn + (addr >> PAGE_SHIFT), prot); 17061da177e4SLinus Torvalds if (err) 17071da177e4SLinus Torvalds break; 17081da177e4SLinus Torvalds } while (pgd++, addr = next, addr != end); 1709*2ab64037Svenkatesh.pallipadi@intel.com 1710*2ab64037Svenkatesh.pallipadi@intel.com if (err) 1711*2ab64037Svenkatesh.pallipadi@intel.com untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size)); 1712*2ab64037Svenkatesh.pallipadi@intel.com 17131da177e4SLinus Torvalds return err; 17141da177e4SLinus Torvalds } 17151da177e4SLinus Torvalds EXPORT_SYMBOL(remap_pfn_range); 17161da177e4SLinus Torvalds 1717aee16b3cSJeremy Fitzhardinge static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 1718aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 1719aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 1720aee16b3cSJeremy Fitzhardinge { 1721aee16b3cSJeremy Fitzhardinge pte_t *pte; 1722aee16b3cSJeremy Fitzhardinge int err; 17232f569afdSMartin Schwidefsky pgtable_t token; 172494909914SBorislav Petkov spinlock_t *uninitialized_var(ptl); 1725aee16b3cSJeremy Fitzhardinge 1726aee16b3cSJeremy Fitzhardinge pte = (mm == &init_mm) ? 
1727aee16b3cSJeremy Fitzhardinge pte_alloc_kernel(pmd, addr) : 1728aee16b3cSJeremy Fitzhardinge pte_alloc_map_lock(mm, pmd, addr, &ptl); 1729aee16b3cSJeremy Fitzhardinge if (!pte) 1730aee16b3cSJeremy Fitzhardinge return -ENOMEM; 1731aee16b3cSJeremy Fitzhardinge 1732aee16b3cSJeremy Fitzhardinge BUG_ON(pmd_huge(*pmd)); 1733aee16b3cSJeremy Fitzhardinge 17342f569afdSMartin Schwidefsky token = pmd_pgtable(*pmd); 1735aee16b3cSJeremy Fitzhardinge 1736aee16b3cSJeremy Fitzhardinge do { 17372f569afdSMartin Schwidefsky err = fn(pte, token, addr, data); 1738aee16b3cSJeremy Fitzhardinge if (err) 1739aee16b3cSJeremy Fitzhardinge break; 1740aee16b3cSJeremy Fitzhardinge } while (pte++, addr += PAGE_SIZE, addr != end); 1741aee16b3cSJeremy Fitzhardinge 1742aee16b3cSJeremy Fitzhardinge if (mm != &init_mm) 1743aee16b3cSJeremy Fitzhardinge pte_unmap_unlock(pte-1, ptl); 1744aee16b3cSJeremy Fitzhardinge return err; 1745aee16b3cSJeremy Fitzhardinge } 1746aee16b3cSJeremy Fitzhardinge 1747aee16b3cSJeremy Fitzhardinge static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 1748aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 1749aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 1750aee16b3cSJeremy Fitzhardinge { 1751aee16b3cSJeremy Fitzhardinge pmd_t *pmd; 1752aee16b3cSJeremy Fitzhardinge unsigned long next; 1753aee16b3cSJeremy Fitzhardinge int err; 1754aee16b3cSJeremy Fitzhardinge 1755ceb86879SAndi Kleen BUG_ON(pud_huge(*pud)); 1756ceb86879SAndi Kleen 1757aee16b3cSJeremy Fitzhardinge pmd = pmd_alloc(mm, pud, addr); 1758aee16b3cSJeremy Fitzhardinge if (!pmd) 1759aee16b3cSJeremy Fitzhardinge return -ENOMEM; 1760aee16b3cSJeremy Fitzhardinge do { 1761aee16b3cSJeremy Fitzhardinge next = pmd_addr_end(addr, end); 1762aee16b3cSJeremy Fitzhardinge err = apply_to_pte_range(mm, pmd, addr, next, fn, data); 1763aee16b3cSJeremy Fitzhardinge if (err) 1764aee16b3cSJeremy Fitzhardinge break; 1765aee16b3cSJeremy Fitzhardinge } while (pmd++, addr = next, addr != end); 1766aee16b3cSJeremy Fitzhardinge return err; 1767aee16b3cSJeremy Fitzhardinge } 1768aee16b3cSJeremy Fitzhardinge 1769aee16b3cSJeremy Fitzhardinge static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, 1770aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 1771aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 1772aee16b3cSJeremy Fitzhardinge { 1773aee16b3cSJeremy Fitzhardinge pud_t *pud; 1774aee16b3cSJeremy Fitzhardinge unsigned long next; 1775aee16b3cSJeremy Fitzhardinge int err; 1776aee16b3cSJeremy Fitzhardinge 1777aee16b3cSJeremy Fitzhardinge pud = pud_alloc(mm, pgd, addr); 1778aee16b3cSJeremy Fitzhardinge if (!pud) 1779aee16b3cSJeremy Fitzhardinge return -ENOMEM; 1780aee16b3cSJeremy Fitzhardinge do { 1781aee16b3cSJeremy Fitzhardinge next = pud_addr_end(addr, end); 1782aee16b3cSJeremy Fitzhardinge err = apply_to_pmd_range(mm, pud, addr, next, fn, data); 1783aee16b3cSJeremy Fitzhardinge if (err) 1784aee16b3cSJeremy Fitzhardinge break; 1785aee16b3cSJeremy Fitzhardinge } while (pud++, addr = next, addr != end); 1786aee16b3cSJeremy Fitzhardinge return err; 1787aee16b3cSJeremy Fitzhardinge } 1788aee16b3cSJeremy Fitzhardinge 1789aee16b3cSJeremy Fitzhardinge /* 1790aee16b3cSJeremy Fitzhardinge * Scan a region of virtual memory, filling in page tables as necessary 1791aee16b3cSJeremy Fitzhardinge * and calling a provided function on each leaf page table. 
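 *
 * An illustrative callback (editor's sketch only; the name and body
 * are hypothetical, but the signature matches the pte_fn_t calls
 * made above):
 *
 *	static int zap_one_pte(pte_t *pte, pgtable_t token,
 *				unsigned long addr, void *data)
 *	{
 *		pte_clear(&init_mm, addr, pte);
 *		return 0;
 *	}
 *
 * which a caller could run over a kernel range with
 *
 *	apply_to_page_range(&init_mm, addr, size, zap_one_pte, NULL);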
1792aee16b3cSJeremy Fitzhardinge  */
1793aee16b3cSJeremy Fitzhardinge int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
1794aee16b3cSJeremy Fitzhardinge 			unsigned long size, pte_fn_t fn, void *data)
1795aee16b3cSJeremy Fitzhardinge {
1796aee16b3cSJeremy Fitzhardinge 	pgd_t *pgd;
1797aee16b3cSJeremy Fitzhardinge 	unsigned long next;
1798cddb8a5cSAndrea Arcangeli 	unsigned long start = addr, end = addr + size;
1799aee16b3cSJeremy Fitzhardinge 	int err;
1800aee16b3cSJeremy Fitzhardinge 
1801aee16b3cSJeremy Fitzhardinge 	BUG_ON(addr >= end);
1802cddb8a5cSAndrea Arcangeli 	mmu_notifier_invalidate_range_start(mm, start, end);
1803aee16b3cSJeremy Fitzhardinge 	pgd = pgd_offset(mm, addr);
1804aee16b3cSJeremy Fitzhardinge 	do {
1805aee16b3cSJeremy Fitzhardinge 		next = pgd_addr_end(addr, end);
1806aee16b3cSJeremy Fitzhardinge 		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
1807aee16b3cSJeremy Fitzhardinge 		if (err)
1808aee16b3cSJeremy Fitzhardinge 			break;
1809aee16b3cSJeremy Fitzhardinge 	} while (pgd++, addr = next, addr != end);
1810cddb8a5cSAndrea Arcangeli 	mmu_notifier_invalidate_range_end(mm, start, end);
1811aee16b3cSJeremy Fitzhardinge 	return err;
1812aee16b3cSJeremy Fitzhardinge }
1813aee16b3cSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(apply_to_page_range);
1814aee16b3cSJeremy Fitzhardinge 
18151da177e4SLinus Torvalds /*
18168f4e2101SHugh Dickins  * handle_pte_fault chooses page fault handler according to an entry
18178f4e2101SHugh Dickins  * which was read non-atomically. Before making any commitment, on
18188f4e2101SHugh Dickins  * those architectures or configurations (e.g. i386 with PAE) which
18198f4e2101SHugh Dickins  * might give a mix of unmatched parts, do_swap_page and do_file_page
18208f4e2101SHugh Dickins  * must check under lock before unmapping the pte and proceeding
18218f4e2101SHugh Dickins  * (but do_wp_page is only called after already making such a check;
18228f4e2101SHugh Dickins  * and do_anonymous_page and do_no_page can safely check later on).
18238f4e2101SHugh Dickins  */
18244c21e2f2SHugh Dickins static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
18258f4e2101SHugh Dickins 				pte_t *page_table, pte_t orig_pte)
18268f4e2101SHugh Dickins {
18278f4e2101SHugh Dickins 	int same = 1;
18288f4e2101SHugh Dickins #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
18298f4e2101SHugh Dickins 	if (sizeof(pte_t) > sizeof(unsigned long)) {
18304c21e2f2SHugh Dickins 		spinlock_t *ptl = pte_lockptr(mm, pmd);
18314c21e2f2SHugh Dickins 		spin_lock(ptl);
18328f4e2101SHugh Dickins 		same = pte_same(*page_table, orig_pte);
18334c21e2f2SHugh Dickins 		spin_unlock(ptl);
18348f4e2101SHugh Dickins 	}
18358f4e2101SHugh Dickins #endif
18368f4e2101SHugh Dickins 	pte_unmap(page_table);
18378f4e2101SHugh Dickins 	return same;
18388f4e2101SHugh Dickins }
18398f4e2101SHugh Dickins 
18408f4e2101SHugh Dickins /*
18411da177e4SLinus Torvalds  * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
18421da177e4SLinus Torvalds  * servicing faults for write access. In the normal case we always want
18431da177e4SLinus Torvalds  * pte_mkwrite. But get_user_pages can cause write faults for mappings
18441da177e4SLinus Torvalds  * that do not have writing enabled, when used by access_process_vm.
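 * (For example, ptrace poking through access_process_vm into a
 * read-only private mapping: the write fault is serviced by COW,
 * but the resulting pte must not become writable.)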
18451da177e4SLinus Torvalds */ 18461da177e4SLinus Torvalds static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) 18471da177e4SLinus Torvalds { 18481da177e4SLinus Torvalds if (likely(vma->vm_flags & VM_WRITE)) 18491da177e4SLinus Torvalds pte = pte_mkwrite(pte); 18501da177e4SLinus Torvalds return pte; 18511da177e4SLinus Torvalds } 18521da177e4SLinus Torvalds 18539de455b2SAtsushi Nemoto static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) 18546aab341eSLinus Torvalds { 18556aab341eSLinus Torvalds /* 18566aab341eSLinus Torvalds * If the source page was a PFN mapping, we don't have 18576aab341eSLinus Torvalds * a "struct page" for it. We do a best-effort copy by 18586aab341eSLinus Torvalds * just copying from the original user address. If that 18596aab341eSLinus Torvalds * fails, we just zero-fill it. Live with it. 18606aab341eSLinus Torvalds */ 18616aab341eSLinus Torvalds if (unlikely(!src)) { 18626aab341eSLinus Torvalds void *kaddr = kmap_atomic(dst, KM_USER0); 18635d2a2dbbSLinus Torvalds void __user *uaddr = (void __user *)(va & PAGE_MASK); 18645d2a2dbbSLinus Torvalds 18655d2a2dbbSLinus Torvalds /* 18665d2a2dbbSLinus Torvalds * This really shouldn't fail, because the page is there 18675d2a2dbbSLinus Torvalds * in the page tables. But it might just be unreadable, 18685d2a2dbbSLinus Torvalds * in which case we just give up and fill the result with 18695d2a2dbbSLinus Torvalds * zeroes. 18705d2a2dbbSLinus Torvalds */ 18715d2a2dbbSLinus Torvalds if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) 18726aab341eSLinus Torvalds memset(kaddr, 0, PAGE_SIZE); 18736aab341eSLinus Torvalds kunmap_atomic(kaddr, KM_USER0); 1874c4ec7b0dSDmitriy Monakhov flush_dcache_page(dst); 18750ed361deSNick Piggin } else 18769de455b2SAtsushi Nemoto copy_user_highpage(dst, src, va, vma); 18776aab341eSLinus Torvalds } 18786aab341eSLinus Torvalds 18791da177e4SLinus Torvalds /* 18801da177e4SLinus Torvalds * This routine handles present pages, when users try to write 18811da177e4SLinus Torvalds * to a shared page. It is done by copying the page to a new address 18821da177e4SLinus Torvalds * and decrementing the shared-page counter for the old page. 18831da177e4SLinus Torvalds * 18841da177e4SLinus Torvalds * Note that this routine assumes that the protection checks have been 18851da177e4SLinus Torvalds * done by the caller (the low-level page fault routine in most cases). 18861da177e4SLinus Torvalds * Thus we can safely just mark it writable once we've done any necessary 18871da177e4SLinus Torvalds * COW. 18881da177e4SLinus Torvalds * 18891da177e4SLinus Torvalds * We also mark the page dirty at this point even though the page will 18901da177e4SLinus Torvalds * change only once the write actually happens. This avoids a few races, 18911da177e4SLinus Torvalds * and potentially makes it more efficient. 18921da177e4SLinus Torvalds * 18938f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 18948f4e2101SHugh Dickins * but allow concurrent faults), with pte both mapped and locked. 18958f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
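 *
 * In short: reuse the existing page when it is safely ours (an
 * anonymous page with no other swap references, or a page in a
 * shared writable mapping once ->page_mkwrite, where provided, has
 * had its say); otherwise copy to a freshly allocated page.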
18961da177e4SLinus Torvalds */ 18971da177e4SLinus Torvalds static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 189865500d23SHugh Dickins unsigned long address, pte_t *page_table, pmd_t *pmd, 18998f4e2101SHugh Dickins spinlock_t *ptl, pte_t orig_pte) 19001da177e4SLinus Torvalds { 1901e5bbe4dfSHugh Dickins struct page *old_page, *new_page; 19021da177e4SLinus Torvalds pte_t entry; 190383c54070SNick Piggin int reuse = 0, ret = 0; 1904a200ee18SPeter Zijlstra int page_mkwrite = 0; 1905d08b3851SPeter Zijlstra struct page *dirty_page = NULL; 19061da177e4SLinus Torvalds 19076aab341eSLinus Torvalds old_page = vm_normal_page(vma, address, orig_pte); 1908251b97f5SPeter Zijlstra if (!old_page) { 1909251b97f5SPeter Zijlstra /* 1910251b97f5SPeter Zijlstra * VM_MIXEDMAP !pfn_valid() case 1911251b97f5SPeter Zijlstra * 1912251b97f5SPeter Zijlstra * We should not cow pages in a shared writeable mapping. 1913251b97f5SPeter Zijlstra * Just mark the pages writable as we can't do any dirty 1914251b97f5SPeter Zijlstra * accounting on raw pfn maps. 1915251b97f5SPeter Zijlstra */ 1916251b97f5SPeter Zijlstra if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 1917251b97f5SPeter Zijlstra (VM_WRITE|VM_SHARED)) 1918251b97f5SPeter Zijlstra goto reuse; 1919920fc356SHugh Dickins goto gotten; 1920251b97f5SPeter Zijlstra } 19211da177e4SLinus Torvalds 1922d08b3851SPeter Zijlstra /* 1923ee6a6457SPeter Zijlstra * Take out anonymous pages first, anonymous shared vmas are 1924ee6a6457SPeter Zijlstra * not dirty accountable. 1925d08b3851SPeter Zijlstra */ 1926ee6a6457SPeter Zijlstra if (PageAnon(old_page)) { 1927529ae9aaSNick Piggin if (trylock_page(old_page)) { 1928ee6a6457SPeter Zijlstra reuse = can_share_swap_page(old_page); 1929ee6a6457SPeter Zijlstra unlock_page(old_page); 1930ee6a6457SPeter Zijlstra } 1931ee6a6457SPeter Zijlstra } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 1932d08b3851SPeter Zijlstra (VM_WRITE|VM_SHARED))) { 1933ee6a6457SPeter Zijlstra /* 1934ee6a6457SPeter Zijlstra * Only catch write-faults on shared writable pages, 1935ee6a6457SPeter Zijlstra * read-only shared pages can get COWed by 1936ee6a6457SPeter Zijlstra * get_user_pages(.write=1, .force=1). 1937ee6a6457SPeter Zijlstra */ 19389637a5efSDavid Howells if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 19399637a5efSDavid Howells /* 19409637a5efSDavid Howells * Notify the address space that the page is about to 19419637a5efSDavid Howells * become writable so that it can prohibit this or wait 19429637a5efSDavid Howells * for the page to get into an appropriate state. 19439637a5efSDavid Howells * 19449637a5efSDavid Howells * We do this without the lock held, so that it can 19459637a5efSDavid Howells * sleep if it needs to. 19469637a5efSDavid Howells */ 19479637a5efSDavid Howells page_cache_get(old_page); 19489637a5efSDavid Howells pte_unmap_unlock(page_table, ptl); 19499637a5efSDavid Howells 19509637a5efSDavid Howells if (vma->vm_ops->page_mkwrite(vma, old_page) < 0) 19519637a5efSDavid Howells goto unwritable_page; 19529637a5efSDavid Howells 19539637a5efSDavid Howells /* 19549637a5efSDavid Howells * Since we dropped the lock we need to revalidate 19559637a5efSDavid Howells * the PTE as someone else may have changed it. If 19569637a5efSDavid Howells * they did, we just return, as we can count on the 19579637a5efSDavid Howells * MMU to tell us if they didn't also make it writable. 
19589637a5efSDavid Howells */ 19599637a5efSDavid Howells page_table = pte_offset_map_lock(mm, pmd, address, 19609637a5efSDavid Howells &ptl); 1961c3704cebSHugh Dickins page_cache_release(old_page); 19629637a5efSDavid Howells if (!pte_same(*page_table, orig_pte)) 19639637a5efSDavid Howells goto unlock; 1964a200ee18SPeter Zijlstra 1965a200ee18SPeter Zijlstra page_mkwrite = 1; 19669637a5efSDavid Howells } 1967d08b3851SPeter Zijlstra dirty_page = old_page; 1968d08b3851SPeter Zijlstra get_page(dirty_page); 19699637a5efSDavid Howells reuse = 1; 19709637a5efSDavid Howells } 19719637a5efSDavid Howells 19721da177e4SLinus Torvalds if (reuse) { 1973251b97f5SPeter Zijlstra reuse: 1974eca35133SBen Collins flush_cache_page(vma, address, pte_pfn(orig_pte)); 197565500d23SHugh Dickins entry = pte_mkyoung(orig_pte); 197665500d23SHugh Dickins entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1977954ffcb3SKAMEZAWA Hiroyuki if (ptep_set_access_flags(vma, address, page_table, entry,1)) 19781da177e4SLinus Torvalds update_mmu_cache(vma, address, entry); 197965500d23SHugh Dickins ret |= VM_FAULT_WRITE; 198065500d23SHugh Dickins goto unlock; 19811da177e4SLinus Torvalds } 19821da177e4SLinus Torvalds 19831da177e4SLinus Torvalds /* 19841da177e4SLinus Torvalds * Ok, we need to copy. Oh, well.. 19851da177e4SLinus Torvalds */ 19861da177e4SLinus Torvalds page_cache_get(old_page); 1987920fc356SHugh Dickins gotten: 19888f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 19891da177e4SLinus Torvalds 19901da177e4SLinus Torvalds if (unlikely(anon_vma_prepare(vma))) 199165500d23SHugh Dickins goto oom; 1992557ed1faSNick Piggin VM_BUG_ON(old_page == ZERO_PAGE(0)); 1993769848c0SMel Gorman new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 19941da177e4SLinus Torvalds if (!new_page) 199565500d23SHugh Dickins goto oom; 1996b291f000SNick Piggin /* 1997b291f000SNick Piggin * Don't let another task, with possibly unlocked vma, 1998b291f000SNick Piggin * keep the mlocked page. 
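	 * (clear_page_mlock() below lets the old page be reclaimed
	 * normally; the new copy takes over the mlocked role via
	 * lru_cache_add_active_or_unevictable().)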
1999b291f000SNick Piggin 	 */
2000b291f000SNick Piggin 	if (vma->vm_flags & VM_LOCKED) {
2001b291f000SNick Piggin 		lock_page(old_page);	/* for LRU manipulation */
2002b291f000SNick Piggin 		clear_page_mlock(old_page);
2003b291f000SNick Piggin 		unlock_page(old_page);
2004b291f000SNick Piggin 	}
20059de455b2SAtsushi Nemoto 	cow_user_page(new_page, old_page, address, vma);
20060ed361deSNick Piggin 	__SetPageUptodate(new_page);
200765500d23SHugh Dickins 
2008e1a1cd59SBalbir Singh 	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
20098a9f3ccdSBalbir Singh 		goto oom_free_new;
20108a9f3ccdSBalbir Singh 
20111da177e4SLinus Torvalds 	/*
20121da177e4SLinus Torvalds 	 * Re-check the pte - we dropped the lock
20131da177e4SLinus Torvalds 	 */
20148f4e2101SHugh Dickins 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
201565500d23SHugh Dickins 	if (likely(pte_same(*page_table, orig_pte))) {
2016920fc356SHugh Dickins 		if (old_page) {
20174294621fSHugh Dickins 			if (!PageAnon(old_page)) {
20184294621fSHugh Dickins 				dec_mm_counter(mm, file_rss);
2019920fc356SHugh Dickins 				inc_mm_counter(mm, anon_rss);
20204294621fSHugh Dickins 			}
2021920fc356SHugh Dickins 		} else
2022920fc356SHugh Dickins 			inc_mm_counter(mm, anon_rss);
2023eca35133SBen Collins 		flush_cache_page(vma, address, pte_pfn(orig_pte));
202465500d23SHugh Dickins 		entry = mk_pte(new_page, vma->vm_page_prot);
202565500d23SHugh Dickins 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
20264ce072f1SSiddha, Suresh B 		/*
20274ce072f1SSiddha, Suresh B 		 * Clear the pte entry and flush it first, before updating the
20284ce072f1SSiddha, Suresh B 		 * pte with the new entry. This will avoid a race condition
20294ce072f1SSiddha, Suresh B 		 * seen in the presence of one thread doing SMC and another
20304ce072f1SSiddha, Suresh B 		 * thread doing COW.
20314ce072f1SSiddha, Suresh B 		 */
2032cddb8a5cSAndrea Arcangeli 		ptep_clear_flush_notify(vma, address, page_table);
2033b2e18538SRik van Riel 		SetPageSwapBacked(new_page);
203464d6519dSLee Schermerhorn 		lru_cache_add_active_or_unevictable(new_page, vma);
20359617d95eSNick Piggin 		page_add_new_anon_rmap(new_page, vma, address);
20361da177e4SLinus Torvalds 
203764d6519dSLee Schermerhorn 		/* TODO: is this safe? do_anonymous_page() does it this way. */
203864d6519dSLee Schermerhorn 		set_pte_at(mm, address, page_table, entry);
203964d6519dSLee Schermerhorn 		update_mmu_cache(vma, address, entry);
2040945754a1SNick Piggin 		if (old_page) {
2041945754a1SNick Piggin 			/*
2042945754a1SNick Piggin 			 * Only after switching the pte to the new page may
2043945754a1SNick Piggin 			 * we remove the mapcount here. Otherwise another
2044945754a1SNick Piggin 			 * process may come and find the rmap count decremented
2045945754a1SNick Piggin 			 * before the pte is switched to the new page, and
2046945754a1SNick Piggin 			 * "reuse" the old page writing into it while our pte
2047945754a1SNick Piggin 			 * here still points into it and can be read by other
2048945754a1SNick Piggin 			 * threads.
2049945754a1SNick Piggin 			 *
2050945754a1SNick Piggin 			 * The critical issue is to order this
2051945754a1SNick Piggin 			 * page_remove_rmap with the ptep_clear_flush above.
2052945754a1SNick Piggin 			 * Those stores are ordered by (if nothing else,)
2053945754a1SNick Piggin 			 * the barrier present in the atomic_add_negative
2054945754a1SNick Piggin 			 * in page_remove_rmap.
2055945754a1SNick Piggin 			 *
2056945754a1SNick Piggin 			 * Then the TLB flush in ptep_clear_flush ensures that
2057945754a1SNick Piggin 			 * no process can access the old page before the
2058945754a1SNick Piggin 			 * decremented mapcount is visible.
And the old page 2059945754a1SNick Piggin * cannot be reused until after the decremented 2060945754a1SNick Piggin * mapcount is visible. So transitively, TLBs to 2061945754a1SNick Piggin * old page will be flushed before it can be reused. 2062945754a1SNick Piggin */ 2063945754a1SNick Piggin page_remove_rmap(old_page, vma); 2064945754a1SNick Piggin } 2065945754a1SNick Piggin 20661da177e4SLinus Torvalds /* Free the old page.. */ 20671da177e4SLinus Torvalds new_page = old_page; 2068f33ea7f4SNick Piggin ret |= VM_FAULT_WRITE; 20698a9f3ccdSBalbir Singh } else 20708a9f3ccdSBalbir Singh mem_cgroup_uncharge_page(new_page); 20718a9f3ccdSBalbir Singh 2072920fc356SHugh Dickins if (new_page) 20731da177e4SLinus Torvalds page_cache_release(new_page); 2074920fc356SHugh Dickins if (old_page) 20751da177e4SLinus Torvalds page_cache_release(old_page); 207665500d23SHugh Dickins unlock: 20778f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 2078d08b3851SPeter Zijlstra if (dirty_page) { 20798f7b3d15SAnton Salikhmetov if (vma->vm_file) 20808f7b3d15SAnton Salikhmetov file_update_time(vma->vm_file); 20818f7b3d15SAnton Salikhmetov 208279352894SNick Piggin /* 208379352894SNick Piggin * Yes, Virginia, this is actually required to prevent a race 208479352894SNick Piggin * with clear_page_dirty_for_io() from clearing the page dirty 208579352894SNick Piggin * bit after it clear all dirty ptes, but before a racing 208679352894SNick Piggin * do_wp_page installs a dirty pte. 208779352894SNick Piggin * 208879352894SNick Piggin * do_no_page is protected similarly. 208979352894SNick Piggin */ 209079352894SNick Piggin wait_on_page_locked(dirty_page); 2091a200ee18SPeter Zijlstra set_page_dirty_balance(dirty_page, page_mkwrite); 2092d08b3851SPeter Zijlstra put_page(dirty_page); 2093d08b3851SPeter Zijlstra } 2094f33ea7f4SNick Piggin return ret; 20958a9f3ccdSBalbir Singh oom_free_new: 20966dbf6d3bSHugh Dickins page_cache_release(new_page); 209765500d23SHugh Dickins oom: 2098920fc356SHugh Dickins if (old_page) 20991da177e4SLinus Torvalds page_cache_release(old_page); 21001da177e4SLinus Torvalds return VM_FAULT_OOM; 21019637a5efSDavid Howells 21029637a5efSDavid Howells unwritable_page: 21039637a5efSDavid Howells page_cache_release(old_page); 21049637a5efSDavid Howells return VM_FAULT_SIGBUS; 21051da177e4SLinus Torvalds } 21061da177e4SLinus Torvalds 21071da177e4SLinus Torvalds /* 21081da177e4SLinus Torvalds * Helper functions for unmap_mapping_range(). 21091da177e4SLinus Torvalds * 21101da177e4SLinus Torvalds * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __ 21111da177e4SLinus Torvalds * 21121da177e4SLinus Torvalds * We have to restart searching the prio_tree whenever we drop the lock, 21131da177e4SLinus Torvalds * since the iterator is only valid while the lock is held, and anyway 21141da177e4SLinus Torvalds * a later vma might be split and reinserted earlier while lock dropped. 21151da177e4SLinus Torvalds * 21161da177e4SLinus Torvalds * The list of nonlinear vmas could be handled more efficiently, using 21171da177e4SLinus Torvalds * a placeholder, but handle it in the same way until a need is shown. 21181da177e4SLinus Torvalds * It is important to search the prio_tree before nonlinear list: a vma 21191da177e4SLinus Torvalds * may become nonlinear and be shifted from prio_tree to nonlinear list 21201da177e4SLinus Torvalds * while the lock is dropped; but never shifted from list to prio_tree. 
21211da177e4SLinus Torvalds * 21221da177e4SLinus Torvalds * In order to make forward progress despite restarting the search, 21231da177e4SLinus Torvalds * vm_truncate_count is used to mark a vma as now dealt with, so we can 21241da177e4SLinus Torvalds * quickly skip it next time around. Since the prio_tree search only 21251da177e4SLinus Torvalds * shows us those vmas affected by unmapping the range in question, we 21261da177e4SLinus Torvalds * can't efficiently keep all vmas in step with mapping->truncate_count: 21271da177e4SLinus Torvalds * so instead reset them all whenever it wraps back to 0 (then go to 1). 21281da177e4SLinus Torvalds * mapping->truncate_count and vma->vm_truncate_count are protected by 21291da177e4SLinus Torvalds * i_mmap_lock. 21301da177e4SLinus Torvalds * 21311da177e4SLinus Torvalds * In order to make forward progress despite repeatedly restarting some 2132ee39b37bSHugh Dickins * large vma, note the restart_addr from unmap_vmas when it breaks out: 21331da177e4SLinus Torvalds * and restart from that address when we reach that vma again. It might 21341da177e4SLinus Torvalds * have been split or merged, shrunk or extended, but never shifted: so 21351da177e4SLinus Torvalds * restart_addr remains valid so long as it remains in the vma's range. 21361da177e4SLinus Torvalds * unmap_mapping_range forces truncate_count to leap over page-aligned 21371da177e4SLinus Torvalds * values so we can save vma's restart_addr in its truncate_count field. 21381da177e4SLinus Torvalds */ 21391da177e4SLinus Torvalds #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK)) 21401da177e4SLinus Torvalds 21411da177e4SLinus Torvalds static void reset_vma_truncate_counts(struct address_space *mapping) 21421da177e4SLinus Torvalds { 21431da177e4SLinus Torvalds struct vm_area_struct *vma; 21441da177e4SLinus Torvalds struct prio_tree_iter iter; 21451da177e4SLinus Torvalds 21461da177e4SLinus Torvalds vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX) 21471da177e4SLinus Torvalds vma->vm_truncate_count = 0; 21481da177e4SLinus Torvalds list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 21491da177e4SLinus Torvalds vma->vm_truncate_count = 0; 21501da177e4SLinus Torvalds } 21511da177e4SLinus Torvalds 21521da177e4SLinus Torvalds static int unmap_mapping_range_vma(struct vm_area_struct *vma, 21531da177e4SLinus Torvalds unsigned long start_addr, unsigned long end_addr, 21541da177e4SLinus Torvalds struct zap_details *details) 21551da177e4SLinus Torvalds { 21561da177e4SLinus Torvalds unsigned long restart_addr; 21571da177e4SLinus Torvalds int need_break; 21581da177e4SLinus Torvalds 2159d00806b1SNick Piggin /* 2160d00806b1SNick Piggin * files that support invalidating or truncating portions of the 2161d0217ac0SNick Piggin * file from under mmaped areas must have their ->fault function 216283c54070SNick Piggin * return a locked page (and set VM_FAULT_LOCKED in the return). 216383c54070SNick Piggin * This provides synchronisation against concurrent unmapping here. 
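	 * (In other words it is the page lock that orders a racing
	 * ->fault against the zap_page_range() call issued below.)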
2164d00806b1SNick Piggin */ 2165d00806b1SNick Piggin 21661da177e4SLinus Torvalds again: 21671da177e4SLinus Torvalds restart_addr = vma->vm_truncate_count; 21681da177e4SLinus Torvalds if (is_restart_addr(restart_addr) && start_addr < restart_addr) { 21691da177e4SLinus Torvalds start_addr = restart_addr; 21701da177e4SLinus Torvalds if (start_addr >= end_addr) { 21711da177e4SLinus Torvalds /* Top of vma has been split off since last time */ 21721da177e4SLinus Torvalds vma->vm_truncate_count = details->truncate_count; 21731da177e4SLinus Torvalds return 0; 21741da177e4SLinus Torvalds } 21751da177e4SLinus Torvalds } 21761da177e4SLinus Torvalds 2177ee39b37bSHugh Dickins restart_addr = zap_page_range(vma, start_addr, 2178ee39b37bSHugh Dickins end_addr - start_addr, details); 217995c354feSNick Piggin need_break = need_resched() || spin_needbreak(details->i_mmap_lock); 21801da177e4SLinus Torvalds 2181ee39b37bSHugh Dickins if (restart_addr >= end_addr) { 21821da177e4SLinus Torvalds /* We have now completed this vma: mark it so */ 21831da177e4SLinus Torvalds vma->vm_truncate_count = details->truncate_count; 21841da177e4SLinus Torvalds if (!need_break) 21851da177e4SLinus Torvalds return 0; 21861da177e4SLinus Torvalds } else { 21871da177e4SLinus Torvalds /* Note restart_addr in vma's truncate_count field */ 2188ee39b37bSHugh Dickins vma->vm_truncate_count = restart_addr; 21891da177e4SLinus Torvalds if (!need_break) 21901da177e4SLinus Torvalds goto again; 21911da177e4SLinus Torvalds } 21921da177e4SLinus Torvalds 21931da177e4SLinus Torvalds spin_unlock(details->i_mmap_lock); 21941da177e4SLinus Torvalds cond_resched(); 21951da177e4SLinus Torvalds spin_lock(details->i_mmap_lock); 21961da177e4SLinus Torvalds return -EINTR; 21971da177e4SLinus Torvalds } 21981da177e4SLinus Torvalds 21991da177e4SLinus Torvalds static inline void unmap_mapping_range_tree(struct prio_tree_root *root, 22001da177e4SLinus Torvalds struct zap_details *details) 22011da177e4SLinus Torvalds { 22021da177e4SLinus Torvalds struct vm_area_struct *vma; 22031da177e4SLinus Torvalds struct prio_tree_iter iter; 22041da177e4SLinus Torvalds pgoff_t vba, vea, zba, zea; 22051da177e4SLinus Torvalds 22061da177e4SLinus Torvalds restart: 22071da177e4SLinus Torvalds vma_prio_tree_foreach(vma, &iter, root, 22081da177e4SLinus Torvalds details->first_index, details->last_index) { 22091da177e4SLinus Torvalds /* Skip quickly over those we have already dealt with */ 22101da177e4SLinus Torvalds if (vma->vm_truncate_count == details->truncate_count) 22111da177e4SLinus Torvalds continue; 22121da177e4SLinus Torvalds 22131da177e4SLinus Torvalds vba = vma->vm_pgoff; 22141da177e4SLinus Torvalds vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; 22151da177e4SLinus Torvalds /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ 22161da177e4SLinus Torvalds zba = details->first_index; 22171da177e4SLinus Torvalds if (zba < vba) 22181da177e4SLinus Torvalds zba = vba; 22191da177e4SLinus Torvalds zea = details->last_index; 22201da177e4SLinus Torvalds if (zea > vea) 22211da177e4SLinus Torvalds zea = vea; 22221da177e4SLinus Torvalds 22231da177e4SLinus Torvalds if (unmap_mapping_range_vma(vma, 22241da177e4SLinus Torvalds ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 22251da177e4SLinus Torvalds ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 22261da177e4SLinus Torvalds details) < 0) 22271da177e4SLinus Torvalds goto restart; 22281da177e4SLinus Torvalds } 22291da177e4SLinus Torvalds } 22301da177e4SLinus Torvalds 22311da177e4SLinus Torvalds static inline void 
unmap_mapping_range_list(struct list_head *head, 22321da177e4SLinus Torvalds struct zap_details *details) 22331da177e4SLinus Torvalds { 22341da177e4SLinus Torvalds struct vm_area_struct *vma; 22351da177e4SLinus Torvalds 22361da177e4SLinus Torvalds /* 22371da177e4SLinus Torvalds * In nonlinear VMAs there is no correspondence between virtual address 22381da177e4SLinus Torvalds * offset and file offset. So we must perform an exhaustive search 22391da177e4SLinus Torvalds * across *all* the pages in each nonlinear VMA, not just the pages 22401da177e4SLinus Torvalds * whose virtual address lies outside the file truncation point. 22411da177e4SLinus Torvalds */ 22421da177e4SLinus Torvalds restart: 22431da177e4SLinus Torvalds list_for_each_entry(vma, head, shared.vm_set.list) { 22441da177e4SLinus Torvalds /* Skip quickly over those we have already dealt with */ 22451da177e4SLinus Torvalds if (vma->vm_truncate_count == details->truncate_count) 22461da177e4SLinus Torvalds continue; 22471da177e4SLinus Torvalds details->nonlinear_vma = vma; 22481da177e4SLinus Torvalds if (unmap_mapping_range_vma(vma, vma->vm_start, 22491da177e4SLinus Torvalds vma->vm_end, details) < 0) 22501da177e4SLinus Torvalds goto restart; 22511da177e4SLinus Torvalds } 22521da177e4SLinus Torvalds } 22531da177e4SLinus Torvalds 22541da177e4SLinus Torvalds /** 225572fd4a35SRobert P. J. Day * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file. 22563d41088fSMartin Waitz * @mapping: the address space containing mmaps to be unmapped. 22571da177e4SLinus Torvalds * @holebegin: byte in first page to unmap, relative to the start of 22581da177e4SLinus Torvalds * the underlying file. This will be rounded down to a PAGE_SIZE 22591da177e4SLinus Torvalds * boundary. Note that this is different from vmtruncate(), which 22601da177e4SLinus Torvalds * must keep the partial page. In contrast, we must get rid of 22611da177e4SLinus Torvalds * partial pages. 22621da177e4SLinus Torvalds * @holelen: size of prospective hole in bytes. This will be rounded 22631da177e4SLinus Torvalds * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 22641da177e4SLinus Torvalds * end of the file. 22651da177e4SLinus Torvalds * @even_cows: 1 when truncating a file, unmap even private COWed pages; 22661da177e4SLinus Torvalds * but 0 when invalidating pagecache, don't throw away private data. 22671da177e4SLinus Torvalds */ 22681da177e4SLinus Torvalds void unmap_mapping_range(struct address_space *mapping, 22691da177e4SLinus Torvalds loff_t const holebegin, loff_t const holelen, int even_cows) 22701da177e4SLinus Torvalds { 22711da177e4SLinus Torvalds struct zap_details details; 22721da177e4SLinus Torvalds pgoff_t hba = holebegin >> PAGE_SHIFT; 22731da177e4SLinus Torvalds pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 22741da177e4SLinus Torvalds 22751da177e4SLinus Torvalds /* Check for overflow. */ 22761da177e4SLinus Torvalds if (sizeof(holelen) > sizeof(hlen)) { 22771da177e4SLinus Torvalds long long holeend = 22781da177e4SLinus Torvalds (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 22791da177e4SLinus Torvalds if (holeend & ~(long long)ULONG_MAX) 22801da177e4SLinus Torvalds hlen = ULONG_MAX - hba + 1; 22811da177e4SLinus Torvalds } 22821da177e4SLinus Torvalds 22831da177e4SLinus Torvalds details.check_mapping = even_cows? 
NULL: mapping; 22841da177e4SLinus Torvalds details.nonlinear_vma = NULL; 22851da177e4SLinus Torvalds details.first_index = hba; 22861da177e4SLinus Torvalds details.last_index = hba + hlen - 1; 22871da177e4SLinus Torvalds if (details.last_index < details.first_index) 22881da177e4SLinus Torvalds details.last_index = ULONG_MAX; 22891da177e4SLinus Torvalds details.i_mmap_lock = &mapping->i_mmap_lock; 22901da177e4SLinus Torvalds 22911da177e4SLinus Torvalds spin_lock(&mapping->i_mmap_lock); 22921da177e4SLinus Torvalds 2293d00806b1SNick Piggin /* Protect against endless unmapping loops */ 22941da177e4SLinus Torvalds mapping->truncate_count++; 22951da177e4SLinus Torvalds if (unlikely(is_restart_addr(mapping->truncate_count))) { 22961da177e4SLinus Torvalds if (mapping->truncate_count == 0) 22971da177e4SLinus Torvalds reset_vma_truncate_counts(mapping); 22981da177e4SLinus Torvalds mapping->truncate_count++; 22991da177e4SLinus Torvalds } 23001da177e4SLinus Torvalds details.truncate_count = mapping->truncate_count; 23011da177e4SLinus Torvalds 23021da177e4SLinus Torvalds if (unlikely(!prio_tree_empty(&mapping->i_mmap))) 23031da177e4SLinus Torvalds unmap_mapping_range_tree(&mapping->i_mmap, &details); 23041da177e4SLinus Torvalds if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 23051da177e4SLinus Torvalds unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 23061da177e4SLinus Torvalds spin_unlock(&mapping->i_mmap_lock); 23071da177e4SLinus Torvalds } 23081da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_mapping_range); 23091da177e4SLinus Torvalds 2310bfa5bf6dSRolf Eike Beer /** 2311bfa5bf6dSRolf Eike Beer * vmtruncate - unmap mappings "freed" by truncate() syscall 2312bfa5bf6dSRolf Eike Beer * @inode: inode of the file used 2313bfa5bf6dSRolf Eike Beer * @offset: file offset to start truncating 23141da177e4SLinus Torvalds * 23151da177e4SLinus Torvalds * NOTE! We have to be ready to update the memory sharing 23161da177e4SLinus Torvalds * between the file and the memory map for a potential last 23171da177e4SLinus Torvalds * incomplete page. Ugly, but necessary. 23181da177e4SLinus Torvalds */ 23191da177e4SLinus Torvalds int vmtruncate(struct inode * inode, loff_t offset) 23201da177e4SLinus Torvalds { 232161d5048fSChristoph Hellwig if (inode->i_size < offset) { 23221da177e4SLinus Torvalds unsigned long limit; 23231da177e4SLinus Torvalds 23241da177e4SLinus Torvalds limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 23251da177e4SLinus Torvalds if (limit != RLIM_INFINITY && offset > limit) 23261da177e4SLinus Torvalds goto out_sig; 23271da177e4SLinus Torvalds if (offset > inode->i_sb->s_maxbytes) 23281da177e4SLinus Torvalds goto out_big; 23291da177e4SLinus Torvalds i_size_write(inode, offset); 233061d5048fSChristoph Hellwig } else { 233161d5048fSChristoph Hellwig struct address_space *mapping = inode->i_mapping; 23321da177e4SLinus Torvalds 233361d5048fSChristoph Hellwig /* 233461d5048fSChristoph Hellwig * truncation of in-use swapfiles is disallowed - it would 233561d5048fSChristoph Hellwig * cause subsequent swapout to scribble on the now-freed 233661d5048fSChristoph Hellwig * blocks. 
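 * (Background: swapout writes to a swapfile's blocks directly, using
 * the block mapping set up at swapon time, so freeing those blocks
 * underneath it would let the filesystem hand them out again.)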
233761d5048fSChristoph Hellwig */ 233861d5048fSChristoph Hellwig if (IS_SWAPFILE(inode)) 233961d5048fSChristoph Hellwig return -ETXTBSY; 234061d5048fSChristoph Hellwig i_size_write(inode, offset); 234161d5048fSChristoph Hellwig 234261d5048fSChristoph Hellwig /* 234361d5048fSChristoph Hellwig * unmap_mapping_range is called twice, first simply for 234461d5048fSChristoph Hellwig * efficiency so that truncate_inode_pages does fewer 234561d5048fSChristoph Hellwig * single-page unmaps. However after this first call, and 234661d5048fSChristoph Hellwig * before truncate_inode_pages finishes, it is possible for 234761d5048fSChristoph Hellwig * private pages to be COWed, which remain after 234861d5048fSChristoph Hellwig * truncate_inode_pages finishes, hence the second 234961d5048fSChristoph Hellwig * unmap_mapping_range call must be made for correctness. 235061d5048fSChristoph Hellwig */ 235161d5048fSChristoph Hellwig unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); 235261d5048fSChristoph Hellwig truncate_inode_pages(mapping, offset); 235361d5048fSChristoph Hellwig unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); 235461d5048fSChristoph Hellwig } 235561d5048fSChristoph Hellwig 23561da177e4SLinus Torvalds if (inode->i_op && inode->i_op->truncate) 23571da177e4SLinus Torvalds inode->i_op->truncate(inode); 23581da177e4SLinus Torvalds return 0; 235961d5048fSChristoph Hellwig 23601da177e4SLinus Torvalds out_sig: 23611da177e4SLinus Torvalds send_sig(SIGXFSZ, current, 0); 23621da177e4SLinus Torvalds out_big: 23631da177e4SLinus Torvalds return -EFBIG; 23641da177e4SLinus Torvalds } 23651da177e4SLinus Torvalds EXPORT_SYMBOL(vmtruncate); 23661da177e4SLinus Torvalds 2367f6b3ec23SBadari Pulavarty int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) 2368f6b3ec23SBadari Pulavarty { 2369f6b3ec23SBadari Pulavarty struct address_space *mapping = inode->i_mapping; 2370f6b3ec23SBadari Pulavarty 2371f6b3ec23SBadari Pulavarty /* 2372f6b3ec23SBadari Pulavarty * If the underlying filesystem is not going to provide 2373f6b3ec23SBadari Pulavarty * a way to truncate a range of blocks (punch a hole) - 2374f6b3ec23SBadari Pulavarty * we should return failure right now. 2375f6b3ec23SBadari Pulavarty */ 2376f6b3ec23SBadari Pulavarty if (!inode->i_op || !inode->i_op->truncate_range) 2377f6b3ec23SBadari Pulavarty return -ENOSYS; 2378f6b3ec23SBadari Pulavarty 23791b1dcc1bSJes Sorensen mutex_lock(&inode->i_mutex); 2380f6b3ec23SBadari Pulavarty down_write(&inode->i_alloc_sem); 2381f6b3ec23SBadari Pulavarty unmap_mapping_range(mapping, offset, (end - offset), 1); 2382f6b3ec23SBadari Pulavarty truncate_inode_pages_range(mapping, offset, end); 2383d00806b1SNick Piggin unmap_mapping_range(mapping, offset, (end - offset), 1); 2384f6b3ec23SBadari Pulavarty inode->i_op->truncate_range(inode, offset, end); 2385f6b3ec23SBadari Pulavarty up_write(&inode->i_alloc_sem); 23861b1dcc1bSJes Sorensen mutex_unlock(&inode->i_mutex); 2387f6b3ec23SBadari Pulavarty 2388f6b3ec23SBadari Pulavarty return 0; 2389f6b3ec23SBadari Pulavarty } 2390f6b3ec23SBadari Pulavarty 23911da177e4SLinus Torvalds /* 23928f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 23938f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 23948f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
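 *
 * (Because the pte lock is dropped while swap-in does I/O, every
 * re-take below re-checks pte_same(*page_table, orig_pte) and backs
 * out if another thread serviced the fault in the meantime.)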
23951da177e4SLinus Torvalds */ 239665500d23SHugh Dickins static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, 239765500d23SHugh Dickins unsigned long address, pte_t *page_table, pmd_t *pmd, 239865500d23SHugh Dickins int write_access, pte_t orig_pte) 23991da177e4SLinus Torvalds { 24008f4e2101SHugh Dickins spinlock_t *ptl; 24011da177e4SLinus Torvalds struct page *page; 240265500d23SHugh Dickins swp_entry_t entry; 24031da177e4SLinus Torvalds pte_t pte; 240483c54070SNick Piggin int ret = 0; 24051da177e4SLinus Torvalds 24064c21e2f2SHugh Dickins if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 24078f4e2101SHugh Dickins goto out; 240865500d23SHugh Dickins 240965500d23SHugh Dickins entry = pte_to_swp_entry(orig_pte); 24100697212aSChristoph Lameter if (is_migration_entry(entry)) { 24110697212aSChristoph Lameter migration_entry_wait(mm, pmd, address); 24120697212aSChristoph Lameter goto out; 24130697212aSChristoph Lameter } 24140ff92245SShailabh Nagar delayacct_set_flag(DELAYACCT_PF_SWAPIN); 24151da177e4SLinus Torvalds page = lookup_swap_cache(entry); 24161da177e4SLinus Torvalds if (!page) { 2417098fe651SAshwin Chaugule grab_swap_token(); /* Contend for token _before_ read-in */ 241802098feaSHugh Dickins page = swapin_readahead(entry, 241902098feaSHugh Dickins GFP_HIGHUSER_MOVABLE, vma, address); 24201da177e4SLinus Torvalds if (!page) { 24211da177e4SLinus Torvalds /* 24228f4e2101SHugh Dickins * Back out if somebody else faulted in this pte 24238f4e2101SHugh Dickins * while we released the pte lock. 24241da177e4SLinus Torvalds */ 24258f4e2101SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 24261da177e4SLinus Torvalds if (likely(pte_same(*page_table, orig_pte))) 24271da177e4SLinus Torvalds ret = VM_FAULT_OOM; 24280ff92245SShailabh Nagar delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 242965500d23SHugh Dickins goto unlock; 24301da177e4SLinus Torvalds } 24311da177e4SLinus Torvalds 24321da177e4SLinus Torvalds /* Had to read the page from swap area: Major fault */ 24331da177e4SLinus Torvalds ret = VM_FAULT_MAJOR; 2434f8891e5eSChristoph Lameter count_vm_event(PGMAJFAULT); 24351da177e4SLinus Torvalds } 24361da177e4SLinus Torvalds 24371da177e4SLinus Torvalds mark_page_accessed(page); 2438073e587eSKAMEZAWA Hiroyuki 24391da177e4SLinus Torvalds lock_page(page); 244020a1022dSBalbir Singh delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 24411da177e4SLinus Torvalds 2442073e587eSKAMEZAWA Hiroyuki if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2443073e587eSKAMEZAWA Hiroyuki ret = VM_FAULT_OOM; 2444073e587eSKAMEZAWA Hiroyuki unlock_page(page); 2445073e587eSKAMEZAWA Hiroyuki goto out; 2446073e587eSKAMEZAWA Hiroyuki } 2447073e587eSKAMEZAWA Hiroyuki 24481da177e4SLinus Torvalds /* 24498f4e2101SHugh Dickins * Back out if somebody else already faulted in this pte. 24501da177e4SLinus Torvalds */ 24518f4e2101SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 24529e9bef07SHugh Dickins if (unlikely(!pte_same(*page_table, orig_pte))) 2453b8107480SKirill Korotaev goto out_nomap; 2454b8107480SKirill Korotaev 2455b8107480SKirill Korotaev if (unlikely(!PageUptodate(page))) { 2456b8107480SKirill Korotaev ret = VM_FAULT_SIGBUS; 2457b8107480SKirill Korotaev goto out_nomap; 24581da177e4SLinus Torvalds } 24591da177e4SLinus Torvalds 24601da177e4SLinus Torvalds /* The page isn't present yet, go ahead with the fault. 
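 * Roughly: account the page in anon_rss, build the pte, and if this
 * write fault holds the only reference to the swap page
 * (can_share_swap_page), map it writable and dirty right away so no
 * immediate do_wp_page() pass is needed.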
*/ 24611da177e4SLinus Torvalds 24624294621fSHugh Dickins inc_mm_counter(mm, anon_rss); 24631da177e4SLinus Torvalds pte = mk_pte(page, vma->vm_page_prot); 24641da177e4SLinus Torvalds if (write_access && can_share_swap_page(page)) { 24651da177e4SLinus Torvalds pte = maybe_mkwrite(pte_mkdirty(pte), vma); 24661da177e4SLinus Torvalds write_access = 0; 24671da177e4SLinus Torvalds } 24681da177e4SLinus Torvalds 24691da177e4SLinus Torvalds flush_icache_page(vma, page); 24701da177e4SLinus Torvalds set_pte_at(mm, address, page_table, pte); 24711da177e4SLinus Torvalds page_add_anon_rmap(page, vma, address); 24721da177e4SLinus Torvalds 2473c475a8abSHugh Dickins swap_free(entry); 2474b291f000SNick Piggin if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) 2475c475a8abSHugh Dickins remove_exclusive_swap_page(page); 2476c475a8abSHugh Dickins unlock_page(page); 2477c475a8abSHugh Dickins 24781da177e4SLinus Torvalds if (write_access) { 247961469f1dSHugh Dickins ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); 248061469f1dSHugh Dickins if (ret & VM_FAULT_ERROR) 248161469f1dSHugh Dickins ret &= VM_FAULT_ERROR; 24821da177e4SLinus Torvalds goto out; 24831da177e4SLinus Torvalds } 24841da177e4SLinus Torvalds 24851da177e4SLinus Torvalds /* No need to invalidate - it was non-present before */ 24861da177e4SLinus Torvalds update_mmu_cache(vma, address, pte); 248765500d23SHugh Dickins unlock: 24888f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 24891da177e4SLinus Torvalds out: 24901da177e4SLinus Torvalds return ret; 2491b8107480SKirill Korotaev out_nomap: 24928a9f3ccdSBalbir Singh mem_cgroup_uncharge_page(page); 24938f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 2494b8107480SKirill Korotaev unlock_page(page); 2495b8107480SKirill Korotaev page_cache_release(page); 249665500d23SHugh Dickins return ret; 24971da177e4SLinus Torvalds } 24981da177e4SLinus Torvalds 24991da177e4SLinus Torvalds /* 25008f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 25018f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 25028f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 25031da177e4SLinus Torvalds */ 250465500d23SHugh Dickins static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 250565500d23SHugh Dickins unsigned long address, pte_t *page_table, pmd_t *pmd, 250665500d23SHugh Dickins int write_access) 25071da177e4SLinus Torvalds { 25088f4e2101SHugh Dickins struct page *page; 25098f4e2101SHugh Dickins spinlock_t *ptl; 25101da177e4SLinus Torvalds pte_t entry; 25111da177e4SLinus Torvalds 25121da177e4SLinus Torvalds /* Allocate our own private page. 
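 * The pte must be unmapped first: the zeroed-page allocation below
 * can sleep. After re-taking the pte lock we only install the page
 * if the pte is still none; otherwise someone else already serviced
 * the fault.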
*/ 25131da177e4SLinus Torvalds pte_unmap(page_table); 25141da177e4SLinus Torvalds 25151da177e4SLinus Torvalds if (unlikely(anon_vma_prepare(vma))) 251665500d23SHugh Dickins goto oom; 2517769848c0SMel Gorman page = alloc_zeroed_user_highpage_movable(vma, address); 25181da177e4SLinus Torvalds if (!page) 251965500d23SHugh Dickins goto oom; 25200ed361deSNick Piggin __SetPageUptodate(page); 25211da177e4SLinus Torvalds 2522e1a1cd59SBalbir Singh if (mem_cgroup_charge(page, mm, GFP_KERNEL)) 25238a9f3ccdSBalbir Singh goto oom_free_page; 25248a9f3ccdSBalbir Singh 252565500d23SHugh Dickins entry = mk_pte(page, vma->vm_page_prot); 252665500d23SHugh Dickins entry = maybe_mkwrite(pte_mkdirty(entry), vma); 25278f4e2101SHugh Dickins 25288f4e2101SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 25298f4e2101SHugh Dickins if (!pte_none(*page_table)) 25308f4e2101SHugh Dickins goto release; 25318f4e2101SHugh Dickins inc_mm_counter(mm, anon_rss); 2532b2e18538SRik van Riel SetPageSwapBacked(page); 253364d6519dSLee Schermerhorn lru_cache_add_active_or_unevictable(page, vma); 25349617d95eSNick Piggin page_add_new_anon_rmap(page, vma, address); 253565500d23SHugh Dickins set_pte_at(mm, address, page_table, entry); 25361da177e4SLinus Torvalds 25371da177e4SLinus Torvalds /* No need to invalidate - it was non-present before */ 253865500d23SHugh Dickins update_mmu_cache(vma, address, entry); 253965500d23SHugh Dickins unlock: 25408f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 254183c54070SNick Piggin return 0; 25428f4e2101SHugh Dickins release: 25438a9f3ccdSBalbir Singh mem_cgroup_uncharge_page(page); 25448f4e2101SHugh Dickins page_cache_release(page); 25458f4e2101SHugh Dickins goto unlock; 25468a9f3ccdSBalbir Singh oom_free_page: 25476dbf6d3bSHugh Dickins page_cache_release(page); 254865500d23SHugh Dickins oom: 25491da177e4SLinus Torvalds return VM_FAULT_OOM; 25501da177e4SLinus Torvalds } 25511da177e4SLinus Torvalds 25521da177e4SLinus Torvalds /* 255354cb8821SNick Piggin * __do_fault() tries to create a new page mapping. It aggressively 25541da177e4SLinus Torvalds * tries to share with existing pages, but makes a separate copy if 255554cb8821SNick Piggin * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid 255654cb8821SNick Piggin * the next page fault. 25571da177e4SLinus Torvalds * 25581da177e4SLinus Torvalds * As this is called only for pages that do not currently exist, we 25591da177e4SLinus Torvalds * do not need to flush old virtual caches or the TLB. 25601da177e4SLinus Torvalds * 25618f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 256216abfa08SHugh Dickins * but allow concurrent faults), and pte neither mapped nor locked. 25638f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
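 *
 * (Flow, roughly: ask vma->vm_ops->fault() for a locked page; on a
 * write fault either COW it into a fresh anonymous page for a
 * private mapping, or offer it to the filesystem via ->page_mkwrite()
 * for a shared one; finally take the pte lock and install the pte
 * unless we raced with another fault.)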
25641da177e4SLinus Torvalds */ 256554cb8821SNick Piggin static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, 256616abfa08SHugh Dickins unsigned long address, pmd_t *pmd, 256754cb8821SNick Piggin pgoff_t pgoff, unsigned int flags, pte_t orig_pte) 25681da177e4SLinus Torvalds { 256916abfa08SHugh Dickins pte_t *page_table; 25708f4e2101SHugh Dickins spinlock_t *ptl; 2571d0217ac0SNick Piggin struct page *page; 25721da177e4SLinus Torvalds pte_t entry; 25731da177e4SLinus Torvalds int anon = 0; 25745b4e655eSKAMEZAWA Hiroyuki int charged = 0; 2575d08b3851SPeter Zijlstra struct page *dirty_page = NULL; 2576d0217ac0SNick Piggin struct vm_fault vmf; 2577d0217ac0SNick Piggin int ret; 2578a200ee18SPeter Zijlstra int page_mkwrite = 0; 257954cb8821SNick Piggin 2580d0217ac0SNick Piggin vmf.virtual_address = (void __user *)(address & PAGE_MASK); 2581d0217ac0SNick Piggin vmf.pgoff = pgoff; 2582d0217ac0SNick Piggin vmf.flags = flags; 2583d0217ac0SNick Piggin vmf.page = NULL; 25841da177e4SLinus Torvalds 2585d0217ac0SNick Piggin ret = vma->vm_ops->fault(vma, &vmf); 258683c54070SNick Piggin if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 258783c54070SNick Piggin return ret; 25881da177e4SLinus Torvalds 2589d00806b1SNick Piggin /* 2590d0217ac0SNick Piggin * For consistency in subsequent calls, make the faulted page always 2591d00806b1SNick Piggin * locked. 2592d00806b1SNick Piggin */ 259383c54070SNick Piggin if (unlikely(!(ret & VM_FAULT_LOCKED))) 2594d0217ac0SNick Piggin lock_page(vmf.page); 259554cb8821SNick Piggin else 2596d0217ac0SNick Piggin VM_BUG_ON(!PageLocked(vmf.page)); 2597d00806b1SNick Piggin 25981da177e4SLinus Torvalds /* 25991da177e4SLinus Torvalds * Should we do an early C-O-W break? 26001da177e4SLinus Torvalds */ 2601d0217ac0SNick Piggin page = vmf.page; 260254cb8821SNick Piggin if (flags & FAULT_FLAG_WRITE) { 26039637a5efSDavid Howells if (!(vma->vm_flags & VM_SHARED)) { 260454cb8821SNick Piggin anon = 1; 2605d00806b1SNick Piggin if (unlikely(anon_vma_prepare(vma))) { 2606d0217ac0SNick Piggin ret = VM_FAULT_OOM; 260754cb8821SNick Piggin goto out; 2608d00806b1SNick Piggin } 260983c54070SNick Piggin page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, 261083c54070SNick Piggin vma, address); 2611d00806b1SNick Piggin if (!page) { 2612d0217ac0SNick Piggin ret = VM_FAULT_OOM; 261354cb8821SNick Piggin goto out; 2614d00806b1SNick Piggin } 26155b4e655eSKAMEZAWA Hiroyuki if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 26165b4e655eSKAMEZAWA Hiroyuki ret = VM_FAULT_OOM; 26175b4e655eSKAMEZAWA Hiroyuki page_cache_release(page); 26185b4e655eSKAMEZAWA Hiroyuki goto out; 26195b4e655eSKAMEZAWA Hiroyuki } 26205b4e655eSKAMEZAWA Hiroyuki charged = 1; 2621b291f000SNick Piggin /* 2622b291f000SNick Piggin * Don't let another task, with possibly unlocked vma, 2623b291f000SNick Piggin * keep the mlocked page. 
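 * (clear_page_mlock() drops the file page back onto the normal LRU:
 * it is the private COW copy below, not the file page, that this
 * vma will map and may keep mlocked.)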
2624b291f000SNick Piggin */ 2625b291f000SNick Piggin if (vma->vm_flags & VM_LOCKED) 2626b291f000SNick Piggin clear_page_mlock(vmf.page); 2627d0217ac0SNick Piggin copy_user_highpage(page, vmf.page, address, vma); 26280ed361deSNick Piggin __SetPageUptodate(page); 26299637a5efSDavid Howells } else { 263054cb8821SNick Piggin /* 263154cb8821SNick Piggin * If the page will be shareable, see if the backing 26329637a5efSDavid Howells * address space wants to know that the page is about 263354cb8821SNick Piggin * to become writable 263454cb8821SNick Piggin */ 263569676147SMark Fasheh if (vma->vm_ops->page_mkwrite) { 263669676147SMark Fasheh unlock_page(page); 263769676147SMark Fasheh if (vma->vm_ops->page_mkwrite(vma, page) < 0) { 2638d0217ac0SNick Piggin ret = VM_FAULT_SIGBUS; 2639d0217ac0SNick Piggin anon = 1; /* no anon but release vmf.page */ 264069676147SMark Fasheh goto out_unlocked; 264169676147SMark Fasheh } 264269676147SMark Fasheh lock_page(page); 2643d0217ac0SNick Piggin /* 2644d0217ac0SNick Piggin * XXX: this is not quite right (racy vs 2645d0217ac0SNick Piggin * invalidate) to unlock and relock the page 2646d0217ac0SNick Piggin * like this, however a better fix requires 2647d0217ac0SNick Piggin * reworking page_mkwrite locking API, which 2648d0217ac0SNick Piggin * is better done later. 2649d0217ac0SNick Piggin */ 2650d0217ac0SNick Piggin if (!page->mapping) { 265183c54070SNick Piggin ret = 0; 2652d0217ac0SNick Piggin anon = 1; /* no anon but release vmf.page */ 2653d0217ac0SNick Piggin goto out; 2654d0217ac0SNick Piggin } 2655a200ee18SPeter Zijlstra page_mkwrite = 1; 26569637a5efSDavid Howells } 26579637a5efSDavid Howells } 265854cb8821SNick Piggin 26591da177e4SLinus Torvalds } 26601da177e4SLinus Torvalds 26618f4e2101SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 26621da177e4SLinus Torvalds 26631da177e4SLinus Torvalds /* 26641da177e4SLinus Torvalds * This silly early PAGE_DIRTY setting removes a race 26651da177e4SLinus Torvalds * due to the bad i386 page protection. But it's valid 26661da177e4SLinus Torvalds * for other architectures too. 26671da177e4SLinus Torvalds * 26681da177e4SLinus Torvalds * Note that if write_access is true, we either now have 26691da177e4SLinus Torvalds * an exclusive copy of the page, or this is a shared mapping, 26701da177e4SLinus Torvalds * so we can make it writable and dirty to avoid having to 26711da177e4SLinus Torvalds * handle that later. 26721da177e4SLinus Torvalds */ 26731da177e4SLinus Torvalds /* Only go through if we didn't race with anybody else... 
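 * (meaning: pte_same() against orig_pte under the pte lock; if we
 * did race, the else branch below rolls back the memcg charge and
 * the page references we took.)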
*/ 267454cb8821SNick Piggin if (likely(pte_same(*page_table, orig_pte))) { 2675d00806b1SNick Piggin flush_icache_page(vma, page); 2676d00806b1SNick Piggin entry = mk_pte(page, vma->vm_page_prot); 267754cb8821SNick Piggin if (flags & FAULT_FLAG_WRITE) 26781da177e4SLinus Torvalds entry = maybe_mkwrite(pte_mkdirty(entry), vma); 26791da177e4SLinus Torvalds if (anon) { 26804294621fSHugh Dickins inc_mm_counter(mm, anon_rss); 2681b2e18538SRik van Riel SetPageSwapBacked(page); 268264d6519dSLee Schermerhorn lru_cache_add_active_or_unevictable(page, vma); 2683d00806b1SNick Piggin page_add_new_anon_rmap(page, vma, address); 2684f57e88a8SHugh Dickins } else { 26854294621fSHugh Dickins inc_mm_counter(mm, file_rss); 2686d00806b1SNick Piggin page_add_file_rmap(page); 268754cb8821SNick Piggin if (flags & FAULT_FLAG_WRITE) { 2688d00806b1SNick Piggin dirty_page = page; 2689d08b3851SPeter Zijlstra get_page(dirty_page); 2690d08b3851SPeter Zijlstra } 26914294621fSHugh Dickins } 269264d6519dSLee Schermerhorn /* TODO: is this safe? do_anonymous_page() does it this way. */ 269364d6519dSLee Schermerhorn set_pte_at(mm, address, page_table, entry); 26941da177e4SLinus Torvalds 2695d00806b1SNick Piggin /* no need to invalidate: a not-present page won't be cached */ 26961da177e4SLinus Torvalds update_mmu_cache(vma, address, entry); 2697d00806b1SNick Piggin } else { 26985b4e655eSKAMEZAWA Hiroyuki if (charged) 26998a9f3ccdSBalbir Singh mem_cgroup_uncharge_page(page); 2700d00806b1SNick Piggin if (anon) 2701d00806b1SNick Piggin page_cache_release(page); 2702d00806b1SNick Piggin else 270354cb8821SNick Piggin anon = 1; /* no anon but release faulted_page */ 2704d00806b1SNick Piggin } 2705d00806b1SNick Piggin 27068f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 2707d00806b1SNick Piggin 2708d00806b1SNick Piggin out: 2709d0217ac0SNick Piggin unlock_page(vmf.page); 271069676147SMark Fasheh out_unlocked: 2711d00806b1SNick Piggin if (anon) 2712d0217ac0SNick Piggin page_cache_release(vmf.page); 2713d00806b1SNick Piggin else if (dirty_page) { 27148f7b3d15SAnton Salikhmetov if (vma->vm_file) 27158f7b3d15SAnton Salikhmetov file_update_time(vma->vm_file); 27168f7b3d15SAnton Salikhmetov 2717a200ee18SPeter Zijlstra set_page_dirty_balance(dirty_page, page_mkwrite); 2718d08b3851SPeter Zijlstra put_page(dirty_page); 2719d08b3851SPeter Zijlstra } 2720d00806b1SNick Piggin 272183c54070SNick Piggin return ret; 272254cb8821SNick Piggin } 2723d00806b1SNick Piggin 272454cb8821SNick Piggin static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 272554cb8821SNick Piggin unsigned long address, pte_t *page_table, pmd_t *pmd, 272654cb8821SNick Piggin int write_access, pte_t orig_pte) 272754cb8821SNick Piggin { 272854cb8821SNick Piggin pgoff_t pgoff = (((address & PAGE_MASK) 27290da7e01fSDean Nelson - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 273054cb8821SNick Piggin unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0); 273154cb8821SNick Piggin 273216abfa08SHugh Dickins pte_unmap(page_table); 273316abfa08SHugh Dickins return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 273454cb8821SNick Piggin } 273554cb8821SNick Piggin 2736f4b81804SJes Sorensen /* 27371da177e4SLinus Torvalds * Fault of a previously existing named mapping. Repopulate the pte 27381da177e4SLinus Torvalds * from the encoded file_pte if possible. This enables swappable 27391da177e4SLinus Torvalds * nonlinear vmas.
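 * (A nonlinear pte is a not-present pte that encodes the file pgoff
 * itself - recovered with pte_to_pgoff() below - so the page can be
 * repopulated without any linear address-to-offset arithmetic.)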
27408f4e2101SHugh Dickins * 27418f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 27428f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 27438f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 27441da177e4SLinus Torvalds */ 2745d0217ac0SNick Piggin static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 274665500d23SHugh Dickins unsigned long address, pte_t *page_table, pmd_t *pmd, 274765500d23SHugh Dickins int write_access, pte_t orig_pte) 27481da177e4SLinus Torvalds { 2749d0217ac0SNick Piggin unsigned int flags = FAULT_FLAG_NONLINEAR | 2750d0217ac0SNick Piggin (write_access ? FAULT_FLAG_WRITE : 0); 275165500d23SHugh Dickins pgoff_t pgoff; 27521da177e4SLinus Torvalds 27534c21e2f2SHugh Dickins if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 275483c54070SNick Piggin return 0; 27551da177e4SLinus Torvalds 2756d0217ac0SNick Piggin if (unlikely(!(vma->vm_flags & VM_NONLINEAR) || 2757d0217ac0SNick Piggin !(vma->vm_flags & VM_CAN_NONLINEAR))) { 275865500d23SHugh Dickins /* 275965500d23SHugh Dickins * Page table corrupted: show pte and kill process. 276065500d23SHugh Dickins */ 2761b5810039SNick Piggin print_bad_pte(vma, orig_pte, address); 276265500d23SHugh Dickins return VM_FAULT_OOM; 276365500d23SHugh Dickins } 276465500d23SHugh Dickins 276565500d23SHugh Dickins pgoff = pte_to_pgoff(orig_pte); 276616abfa08SHugh Dickins return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 27671da177e4SLinus Torvalds } 27681da177e4SLinus Torvalds 27691da177e4SLinus Torvalds /* 27701da177e4SLinus Torvalds * These routines also need to handle stuff like marking pages dirty 27711da177e4SLinus Torvalds * and/or accessed for architectures that don't do it in hardware (most 27721da177e4SLinus Torvalds * RISC architectures). The early dirtying is also good on the i386. 27731da177e4SLinus Torvalds * 27741da177e4SLinus Torvalds * There is also a hook called "update_mmu_cache()" that architectures 27751da177e4SLinus Torvalds * with external mmu caches can use to update those (ie the Sparc or 27761da177e4SLinus Torvalds * PowerPC hashed page tables that act as extended TLBs). 27771da177e4SLinus Torvalds * 2778c74df32cSHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 2779c74df32cSHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 2780c74df32cSHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
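 * Dispatch, roughly:
 *	pte_none	-> do_linear_fault() if the vma has ->fault,
 *			   else do_anonymous_page()
 *	pte_file	-> do_nonlinear_fault()
 *	other !present	-> do_swap_page()
 * A pte that is already present only needs the write-protect, dirty
 * and accessed-bit handling below.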
27811da177e4SLinus Torvalds */ 27821da177e4SLinus Torvalds static inline int handle_pte_fault(struct mm_struct *mm, 27831da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long address, 278465500d23SHugh Dickins pte_t *pte, pmd_t *pmd, int write_access) 27851da177e4SLinus Torvalds { 27861da177e4SLinus Torvalds pte_t entry; 27878f4e2101SHugh Dickins spinlock_t *ptl; 27881da177e4SLinus Torvalds 27898dab5241SBenjamin Herrenschmidt entry = *pte; 27901da177e4SLinus Torvalds if (!pte_present(entry)) { 279165500d23SHugh Dickins if (pte_none(entry)) { 2792f4b81804SJes Sorensen if (vma->vm_ops) { 27933c18ddd1SNick Piggin if (likely(vma->vm_ops->fault)) 279454cb8821SNick Piggin return do_linear_fault(mm, vma, address, 279554cb8821SNick Piggin pte, pmd, write_access, entry); 2796f4b81804SJes Sorensen } 2797f4b81804SJes Sorensen return do_anonymous_page(mm, vma, address, 279865500d23SHugh Dickins pte, pmd, write_access); 279965500d23SHugh Dickins } 28001da177e4SLinus Torvalds if (pte_file(entry)) 2801d0217ac0SNick Piggin return do_nonlinear_fault(mm, vma, address, 280265500d23SHugh Dickins pte, pmd, write_access, entry); 280365500d23SHugh Dickins return do_swap_page(mm, vma, address, 280465500d23SHugh Dickins pte, pmd, write_access, entry); 28051da177e4SLinus Torvalds } 28061da177e4SLinus Torvalds 28074c21e2f2SHugh Dickins ptl = pte_lockptr(mm, pmd); 28088f4e2101SHugh Dickins spin_lock(ptl); 28098f4e2101SHugh Dickins if (unlikely(!pte_same(*pte, entry))) 28108f4e2101SHugh Dickins goto unlock; 28111da177e4SLinus Torvalds if (write_access) { 28121da177e4SLinus Torvalds if (!pte_write(entry)) 28138f4e2101SHugh Dickins return do_wp_page(mm, vma, address, 28148f4e2101SHugh Dickins pte, pmd, ptl, entry); 28151da177e4SLinus Torvalds entry = pte_mkdirty(entry); 28161da177e4SLinus Torvalds } 28171da177e4SLinus Torvalds entry = pte_mkyoung(entry); 28188dab5241SBenjamin Herrenschmidt if (ptep_set_access_flags(vma, address, pte, entry, write_access)) { 28191da177e4SLinus Torvalds update_mmu_cache(vma, address, entry); 28201a44e149SAndrea Arcangeli } else { 28211a44e149SAndrea Arcangeli /* 28221a44e149SAndrea Arcangeli * This is needed only for protection faults but the arch code 28231a44e149SAndrea Arcangeli * is not yet telling us if this is a protection fault or not. 28241a44e149SAndrea Arcangeli * This still avoids useless tlb flushes for .text page faults 28251a44e149SAndrea Arcangeli * with threads. 
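 * Hence the flush below is done for write faults only; a stale
 * read-only TLB entry is assumed to just refault harmlessly.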
28261a44e149SAndrea Arcangeli */ 28271a44e149SAndrea Arcangeli if (write_access) 28281a44e149SAndrea Arcangeli flush_tlb_page(vma, address); 28291a44e149SAndrea Arcangeli } 28308f4e2101SHugh Dickins unlock: 28318f4e2101SHugh Dickins pte_unmap_unlock(pte, ptl); 283283c54070SNick Piggin return 0; 28331da177e4SLinus Torvalds } 28341da177e4SLinus Torvalds 28351da177e4SLinus Torvalds /* 28361da177e4SLinus Torvalds * By the time we get here, we already hold the mm semaphore 28371da177e4SLinus Torvalds */ 283883c54070SNick Piggin int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 28391da177e4SLinus Torvalds unsigned long address, int write_access) 28401da177e4SLinus Torvalds { 28411da177e4SLinus Torvalds pgd_t *pgd; 28421da177e4SLinus Torvalds pud_t *pud; 28431da177e4SLinus Torvalds pmd_t *pmd; 28441da177e4SLinus Torvalds pte_t *pte; 28451da177e4SLinus Torvalds 28461da177e4SLinus Torvalds __set_current_state(TASK_RUNNING); 28471da177e4SLinus Torvalds 2848f8891e5eSChristoph Lameter count_vm_event(PGFAULT); 28491da177e4SLinus Torvalds 2850ac9b9c66SHugh Dickins if (unlikely(is_vm_hugetlb_page(vma))) 2851ac9b9c66SHugh Dickins return hugetlb_fault(mm, vma, address, write_access); 28521da177e4SLinus Torvalds 28531da177e4SLinus Torvalds pgd = pgd_offset(mm, address); 28541da177e4SLinus Torvalds pud = pud_alloc(mm, pgd, address); 28551da177e4SLinus Torvalds if (!pud) 2856c74df32cSHugh Dickins return VM_FAULT_OOM; 28571da177e4SLinus Torvalds pmd = pmd_alloc(mm, pud, address); 28581da177e4SLinus Torvalds if (!pmd) 2859c74df32cSHugh Dickins return VM_FAULT_OOM; 28601da177e4SLinus Torvalds pte = pte_alloc_map(mm, pmd, address); 28611da177e4SLinus Torvalds if (!pte) 2862c74df32cSHugh Dickins return VM_FAULT_OOM; 28631da177e4SLinus Torvalds 286465500d23SHugh Dickins return handle_pte_fault(mm, vma, address, pte, pmd, write_access); 28651da177e4SLinus Torvalds } 28661da177e4SLinus Torvalds 28671da177e4SLinus Torvalds #ifndef __PAGETABLE_PUD_FOLDED 28681da177e4SLinus Torvalds /* 28691da177e4SLinus Torvalds * Allocate page upper directory. 2870872fec16SHugh Dickins * We've already handled the fast-path in-line. 28711da177e4SLinus Torvalds */ 28721bb3630eSHugh Dickins int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 28731da177e4SLinus Torvalds { 2874c74df32cSHugh Dickins pud_t *new = pud_alloc_one(mm, address); 2875c74df32cSHugh Dickins if (!new) 28761bb3630eSHugh Dickins return -ENOMEM; 28771da177e4SLinus Torvalds 2878362a61adSNick Piggin smp_wmb(); /* See comment in __pte_alloc */ 2879362a61adSNick Piggin 2880872fec16SHugh Dickins spin_lock(&mm->page_table_lock); 28811bb3630eSHugh Dickins if (pgd_present(*pgd)) /* Another has populated it */ 28825e541973SBenjamin Herrenschmidt pud_free(mm, new); 28831bb3630eSHugh Dickins else 28841da177e4SLinus Torvalds pgd_populate(mm, pgd, new); 2885872fec16SHugh Dickins spin_unlock(&mm->page_table_lock); 28861bb3630eSHugh Dickins return 0; 28871da177e4SLinus Torvalds } 28881da177e4SLinus Torvalds #endif /* __PAGETABLE_PUD_FOLDED */ 28891da177e4SLinus Torvalds 28901da177e4SLinus Torvalds #ifndef __PAGETABLE_PMD_FOLDED 28911da177e4SLinus Torvalds /* 28921da177e4SLinus Torvalds * Allocate page middle directory. 2893872fec16SHugh Dickins * We've already handled the fast-path in-line. 
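 * (Same double-checked pattern as __pud_alloc() above: allocate
 * outside the lock, then under page_table_lock either populate the
 * entry or free our copy if somebody else got there first.)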
28941da177e4SLinus Torvalds */ 28951bb3630eSHugh Dickins int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 28961da177e4SLinus Torvalds { 2897c74df32cSHugh Dickins pmd_t *new = pmd_alloc_one(mm, address); 2898c74df32cSHugh Dickins if (!new) 28991bb3630eSHugh Dickins return -ENOMEM; 29001da177e4SLinus Torvalds 2901362a61adSNick Piggin smp_wmb(); /* See comment in __pte_alloc */ 2902362a61adSNick Piggin 2903872fec16SHugh Dickins spin_lock(&mm->page_table_lock); 29041da177e4SLinus Torvalds #ifndef __ARCH_HAS_4LEVEL_HACK 29051bb3630eSHugh Dickins if (pud_present(*pud)) /* Another has populated it */ 29065e541973SBenjamin Herrenschmidt pmd_free(mm, new); 29071bb3630eSHugh Dickins else 29081da177e4SLinus Torvalds pud_populate(mm, pud, new); 29091da177e4SLinus Torvalds #else 29101bb3630eSHugh Dickins if (pgd_present(*pud)) /* Another has populated it */ 29115e541973SBenjamin Herrenschmidt pmd_free(mm, new); 29121bb3630eSHugh Dickins else 29131da177e4SLinus Torvalds pgd_populate(mm, pud, new); 29141da177e4SLinus Torvalds #endif /* __ARCH_HAS_4LEVEL_HACK */ 2915872fec16SHugh Dickins spin_unlock(&mm->page_table_lock); 29161bb3630eSHugh Dickins return 0; 29171da177e4SLinus Torvalds } 29181da177e4SLinus Torvalds #endif /* __PAGETABLE_PMD_FOLDED */ 29191da177e4SLinus Torvalds 29201da177e4SLinus Torvalds int make_pages_present(unsigned long addr, unsigned long end) 29211da177e4SLinus Torvalds { 29221da177e4SLinus Torvalds int ret, len, write; 29231da177e4SLinus Torvalds struct vm_area_struct * vma; 29241da177e4SLinus Torvalds 29251da177e4SLinus Torvalds vma = find_vma(current->mm, addr); 29261da177e4SLinus Torvalds if (!vma) 2927a477097dSKOSAKI Motohiro return -ENOMEM; 29281da177e4SLinus Torvalds write = (vma->vm_flags & VM_WRITE) != 0; 29295bcb28b1SEric Sesterhenn BUG_ON(addr >= end); 29305bcb28b1SEric Sesterhenn BUG_ON(end > vma->vm_end); 293168e116a3SRolf Eike Beer len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE; 29321da177e4SLinus Torvalds ret = get_user_pages(current, current->mm, addr, 29331da177e4SLinus Torvalds len, write, 0, NULL, NULL); 2934c11d69d8SLee Schermerhorn if (ret < 0) 29351da177e4SLinus Torvalds return ret; 29369978ad58SLee Schermerhorn return ret == len ? 0 : -EFAULT; 29371da177e4SLinus Torvalds } 29381da177e4SLinus Torvalds 29391da177e4SLinus Torvalds #if !defined(__HAVE_ARCH_GATE_AREA) 29401da177e4SLinus Torvalds 29411da177e4SLinus Torvalds #if defined(AT_SYSINFO_EHDR) 29425ce7852cSAdrian Bunk static struct vm_area_struct gate_vma; 29431da177e4SLinus Torvalds 29441da177e4SLinus Torvalds static int __init gate_vma_init(void) 29451da177e4SLinus Torvalds { 29461da177e4SLinus Torvalds gate_vma.vm_mm = NULL; 29471da177e4SLinus Torvalds gate_vma.vm_start = FIXADDR_USER_START; 29481da177e4SLinus Torvalds gate_vma.vm_end = FIXADDR_USER_END; 2949b6558c4aSRoland McGrath gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 2950b6558c4aSRoland McGrath gate_vma.vm_page_prot = __P101; 2951f47aef55SRoland McGrath /* 2952f47aef55SRoland McGrath * Make sure the vDSO gets into every core dump. 2953f47aef55SRoland McGrath * Dumping its contents makes post-mortem fully interpretable later 2954f47aef55SRoland McGrath * without matching up the same kernel and hardware config to see 2955f47aef55SRoland McGrath * what PC values meant. 
2956f47aef55SRoland McGrath */ 2957f47aef55SRoland McGrath gate_vma.vm_flags |= VM_ALWAYSDUMP; 29581da177e4SLinus Torvalds return 0; 29591da177e4SLinus Torvalds } 29601da177e4SLinus Torvalds __initcall(gate_vma_init); 29611da177e4SLinus Torvalds #endif 29621da177e4SLinus Torvalds 29631da177e4SLinus Torvalds struct vm_area_struct *get_gate_vma(struct task_struct *tsk) 29641da177e4SLinus Torvalds { 29651da177e4SLinus Torvalds #ifdef AT_SYSINFO_EHDR 29661da177e4SLinus Torvalds return &gate_vma; 29671da177e4SLinus Torvalds #else 29681da177e4SLinus Torvalds return NULL; 29691da177e4SLinus Torvalds #endif 29701da177e4SLinus Torvalds } 29711da177e4SLinus Torvalds 29721da177e4SLinus Torvalds int in_gate_area_no_task(unsigned long addr) 29731da177e4SLinus Torvalds { 29741da177e4SLinus Torvalds #ifdef AT_SYSINFO_EHDR 29751da177e4SLinus Torvalds if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) 29761da177e4SLinus Torvalds return 1; 29771da177e4SLinus Torvalds #endif 29781da177e4SLinus Torvalds return 0; 29791da177e4SLinus Torvalds } 29801da177e4SLinus Torvalds 29811da177e4SLinus Torvalds #endif /* __HAVE_ARCH_GATE_AREA */ 29820ec76a11SDavid Howells 298328b2ee20SRik van Riel #ifdef CONFIG_HAVE_IOREMAP_PROT 298428b2ee20SRik van Riel static resource_size_t follow_phys(struct vm_area_struct *vma, 298528b2ee20SRik van Riel unsigned long address, unsigned int flags, 298628b2ee20SRik van Riel unsigned long *prot) 298728b2ee20SRik van Riel { 298828b2ee20SRik van Riel pgd_t *pgd; 298928b2ee20SRik van Riel pud_t *pud; 299028b2ee20SRik van Riel pmd_t *pmd; 299128b2ee20SRik van Riel pte_t *ptep, pte; 299228b2ee20SRik van Riel spinlock_t *ptl; 299328b2ee20SRik van Riel resource_size_t phys_addr = 0; 299428b2ee20SRik van Riel struct mm_struct *mm = vma->vm_mm; 299528b2ee20SRik van Riel 299628b2ee20SRik van Riel VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP))); 299728b2ee20SRik van Riel 299828b2ee20SRik van Riel pgd = pgd_offset(mm, address); 299928b2ee20SRik van Riel if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 300028b2ee20SRik van Riel goto no_page_table; 300128b2ee20SRik van Riel 300228b2ee20SRik van Riel pud = pud_offset(pgd, address); 300328b2ee20SRik van Riel if (pud_none(*pud) || unlikely(pud_bad(*pud))) 300428b2ee20SRik van Riel goto no_page_table; 300528b2ee20SRik van Riel 300628b2ee20SRik van Riel pmd = pmd_offset(pud, address); 300728b2ee20SRik van Riel if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 300828b2ee20SRik van Riel goto no_page_table; 300928b2ee20SRik van Riel 301028b2ee20SRik van Riel /* We cannot handle huge page PFN maps. Luckily they don't exist. 
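 * (If a huge pmd ever did show up here we must not walk through it
 * as if it were a page-table pointer, so bail out instead.)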
*/ 301128b2ee20SRik van Riel if (pmd_huge(*pmd)) 301228b2ee20SRik van Riel goto no_page_table; 301328b2ee20SRik van Riel 301428b2ee20SRik van Riel ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 301528b2ee20SRik van Riel if (!ptep) 301628b2ee20SRik van Riel goto out; 301728b2ee20SRik van Riel 301828b2ee20SRik van Riel pte = *ptep; 301928b2ee20SRik van Riel if (!pte_present(pte)) 302028b2ee20SRik van Riel goto unlock; 302128b2ee20SRik van Riel if ((flags & FOLL_WRITE) && !pte_write(pte)) 302228b2ee20SRik van Riel goto unlock; 302328b2ee20SRik van Riel phys_addr = pte_pfn(pte); 302428b2ee20SRik van Riel phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */ 302528b2ee20SRik van Riel 302628b2ee20SRik van Riel *prot = pgprot_val(pte_pgprot(pte)); 302728b2ee20SRik van Riel 302828b2ee20SRik van Riel unlock: 302928b2ee20SRik van Riel pte_unmap_unlock(ptep, ptl); 303028b2ee20SRik van Riel out: 303128b2ee20SRik van Riel return phys_addr; 303228b2ee20SRik van Riel no_page_table: 303328b2ee20SRik van Riel return 0; 303428b2ee20SRik van Riel } 303528b2ee20SRik van Riel 303628b2ee20SRik van Riel int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 303728b2ee20SRik van Riel void *buf, int len, int write) 303828b2ee20SRik van Riel { 303928b2ee20SRik van Riel resource_size_t phys_addr; 304028b2ee20SRik van Riel unsigned long prot = 0; 304128b2ee20SRik van Riel void *maddr; 304228b2ee20SRik van Riel int offset = addr & (PAGE_SIZE-1); 304328b2ee20SRik van Riel 304428b2ee20SRik van Riel if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 304528b2ee20SRik van Riel return -EINVAL; 304628b2ee20SRik van Riel 304728b2ee20SRik van Riel phys_addr = follow_phys(vma, addr, write, &prot); 304828b2ee20SRik van Riel 304928b2ee20SRik van Riel if (!phys_addr) 305028b2ee20SRik van Riel return -EINVAL; 305128b2ee20SRik van Riel 305228b2ee20SRik van Riel maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); 305328b2ee20SRik van Riel if (write) 305428b2ee20SRik van Riel memcpy_toio(maddr + offset, buf, len); 305528b2ee20SRik van Riel else 305628b2ee20SRik van Riel memcpy_fromio(buf, maddr + offset, len); 305728b2ee20SRik van Riel iounmap(maddr); 305828b2ee20SRik van Riel 305928b2ee20SRik van Riel return len; 306028b2ee20SRik van Riel } 306128b2ee20SRik van Riel #endif 306228b2ee20SRik van Riel 30630ec76a11SDavid Howells /* 30640ec76a11SDavid Howells * Access another process' address space. 
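 * (This is what ptrace() peeks/pokes and /proc/<pid>/mem end up in,
 * hence the careful page-at-a-time copy loop below.)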
30650ec76a11SDavid Howells * Source/target buffer must be kernel space. 30660ec76a11SDavid Howells * Do not walk the page table directly; use get_user_pages. 30670ec76a11SDavid Howells */ 30680ec76a11SDavid Howells int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 30690ec76a11SDavid Howells { 30700ec76a11SDavid Howells struct mm_struct *mm; 30710ec76a11SDavid Howells struct vm_area_struct *vma; 30720ec76a11SDavid Howells void *old_buf = buf; 30730ec76a11SDavid Howells 30740ec76a11SDavid Howells mm = get_task_mm(tsk); 30750ec76a11SDavid Howells if (!mm) 30760ec76a11SDavid Howells return 0; 30770ec76a11SDavid Howells 30780ec76a11SDavid Howells down_read(&mm->mmap_sem); 3079183ff22bSSimon Arlott /* ignore errors, just check how much was successfully transferred */ 30800ec76a11SDavid Howells while (len) { 30810ec76a11SDavid Howells int bytes, ret, offset; 30820ec76a11SDavid Howells void *maddr; 308328b2ee20SRik van Riel struct page *page = NULL; 30840ec76a11SDavid Howells 30850ec76a11SDavid Howells ret = get_user_pages(tsk, mm, addr, 1, 30860ec76a11SDavid Howells write, 1, &page, &vma); 308728b2ee20SRik van Riel if (ret <= 0) { 308828b2ee20SRik van Riel /* 308928b2ee20SRik van Riel * Check if this is a VM_IO | VM_PFNMAP VMA, which 309028b2ee20SRik van Riel * we can access using slightly different code. 309128b2ee20SRik van Riel */ 309228b2ee20SRik van Riel #ifdef CONFIG_HAVE_IOREMAP_PROT 309328b2ee20SRik van Riel vma = find_vma(mm, addr); 309428b2ee20SRik van Riel if (!vma) 30950ec76a11SDavid Howells break; 309628b2ee20SRik van Riel if (vma->vm_ops && vma->vm_ops->access) 309728b2ee20SRik van Riel ret = vma->vm_ops->access(vma, addr, buf, 309828b2ee20SRik van Riel len, write); 309928b2ee20SRik van Riel if (ret <= 0) 310028b2ee20SRik van Riel #endif 310128b2ee20SRik van Riel break; 310228b2ee20SRik van Riel bytes = ret; 310328b2ee20SRik van Riel } else { 31040ec76a11SDavid Howells bytes = len; 31050ec76a11SDavid Howells offset = addr & (PAGE_SIZE-1); 31060ec76a11SDavid Howells if (bytes > PAGE_SIZE-offset) 31070ec76a11SDavid Howells bytes = PAGE_SIZE-offset; 31080ec76a11SDavid Howells 31090ec76a11SDavid Howells maddr = kmap(page); 31100ec76a11SDavid Howells if (write) { 31110ec76a11SDavid Howells copy_to_user_page(vma, page, addr, 31120ec76a11SDavid Howells maddr + offset, buf, bytes); 31130ec76a11SDavid Howells set_page_dirty_lock(page); 31140ec76a11SDavid Howells } else { 31150ec76a11SDavid Howells copy_from_user_page(vma, page, addr, 31160ec76a11SDavid Howells buf, maddr + offset, bytes); 31170ec76a11SDavid Howells } 31180ec76a11SDavid Howells kunmap(page); 31190ec76a11SDavid Howells page_cache_release(page); 312028b2ee20SRik van Riel } 31210ec76a11SDavid Howells len -= bytes; 31220ec76a11SDavid Howells buf += bytes; 31230ec76a11SDavid Howells addr += bytes; 31240ec76a11SDavid Howells } 31250ec76a11SDavid Howells up_read(&mm->mmap_sem); 31260ec76a11SDavid Howells mmput(mm); 31270ec76a11SDavid Howells 31280ec76a11SDavid Howells return buf - old_buf; 31290ec76a11SDavid Howells } 313003252919SAndi Kleen 313103252919SAndi Kleen /* 313203252919SAndi Kleen * Print the name of a VMA.
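 * Emits "prefix name[start+size]" for the mapping containing ip;
 * meant for fault/oops reporting, hence the atomic-context bail-out
 * below.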
313303252919SAndi Kleen */ 313403252919SAndi Kleen void print_vma_addr(char *prefix, unsigned long ip) 313503252919SAndi Kleen { 313603252919SAndi Kleen struct mm_struct *mm = current->mm; 313703252919SAndi Kleen struct vm_area_struct *vma; 313803252919SAndi Kleen 3139e8bff74aSIngo Molnar /* 3140e8bff74aSIngo Molnar * Do not print if we are in atomic 3141e8bff74aSIngo Molnar * contexts (in exception stacks, etc.): 3142e8bff74aSIngo Molnar */ 3143e8bff74aSIngo Molnar if (preempt_count()) 3144e8bff74aSIngo Molnar return; 3145e8bff74aSIngo Molnar 314603252919SAndi Kleen down_read(&mm->mmap_sem); 314703252919SAndi Kleen vma = find_vma(mm, ip); 314803252919SAndi Kleen if (vma && vma->vm_file) { 314903252919SAndi Kleen struct file *f = vma->vm_file; 315003252919SAndi Kleen char *buf = (char *)__get_free_page(GFP_KERNEL); 315103252919SAndi Kleen if (buf) { 315203252919SAndi Kleen char *p, *s; 315303252919SAndi Kleen 3154cf28b486SJan Blunck p = d_path(&f->f_path, buf, PAGE_SIZE); 315503252919SAndi Kleen if (IS_ERR(p)) 315603252919SAndi Kleen p = "?"; 315703252919SAndi Kleen s = strrchr(p, '/'); 315803252919SAndi Kleen if (s) 315903252919SAndi Kleen p = s+1; 316003252919SAndi Kleen printk("%s%s[%lx+%lx]", prefix, p, 316103252919SAndi Kleen vma->vm_start, 316203252919SAndi Kleen vma->vm_end - vma->vm_start); 316303252919SAndi Kleen free_page((unsigned long)buf); 316403252919SAndi Kleen } 316503252919SAndi Kleen } 316603252919SAndi Kleen up_read(&current->mm->mmap_sem); 316703252919SAndi Kleen } 3168