/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);


/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
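	/*
	 * For example: when ceiling is 0 (meaning "top of address space"),
	 * "ceiling - 1" wraps to ULONG_MAX in unsigned arithmetic, so the
	 * "end - 1 > ceiling - 1" test below can never fire and end is
	 * left alone - which is just what "no vma above us" requires.
	 */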
	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and vmtruncate before freeing pgtables
		 */
		anon_vma_unlink(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				anon_vma_unlink(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}
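/*
 * Allocate a new pte page table for this pmd and install it under
 * mm->page_table_lock, dropping the freshly allocated page again if
 * some other thread populated the pmd first.
 */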
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	spin_lock(&mm->page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
	if (file_rss)
		add_mm_counter(mm, file_rss, file_rss);
	if (anon_rss)
		add_mm_counter(mm, anon_rss, anon_rss);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
			  unsigned long vaddr)
{
	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
			"vm_flags = %lx, vaddr = %lx\n",
		(long long)pte_val(pte),
		(vma->vm_mm == current->mm ? current->comm : "???"),
		vma->vm_flags, vaddr);
	dump_stack();
}
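/*
 * A "COW mapping" here is a private mapping that may be written to:
 * VM_MAYWRITE is set but VM_SHARED is not, so writes have to be served
 * by copying the page rather than by sharing it.
 */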
static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
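 *
 * As a purely illustrative example of the rule above: with vm_pgoff == 0x800
 * and vm_start == 0x40000000, the pte at virtual address 0x40003000 is
 * "special" exactly when it holds pfn 0x803; within a COWed VM_PFNMAP
 * mapping, any other pfn there belongs to a normal page (a COWed copy).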
 *
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				pte_t pte)
{
	unsigned long pfn;

	if (HAVE_PTE_SPECIAL) {
		if (likely(!pte_special(pte))) {
			VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
			return pte_page(pte);
		}
		VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
		return NULL;
	}

	/* !HAVE_PTE_SPECIAL case follows: */

	pfn = pte_pfn(pte);

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	VM_BUG_ON(!pfn_valid(pfn));

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 *
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			swap_duplicate(entry);
			/* make sure dst_mm is on swapoff's mmlist. */
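			/*
			 * (swapoff scans that list to find every mm which may
			 * still hold swap ptes that need bringing back in)
			 */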
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both parent
				 * and child to be set to read.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page, vma, addr);
		rss[!!PageAnon(page)]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[2];

again:
	rss[1] = rss[0] = 0;
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
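		/*
		 * progress weights: a pte_none slot counts 1 and a copied
		 * pte counts 8, so the contention check below runs roughly
		 * once per 32 units of copying work.
		 */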
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap_nested(src_pte - 1);
	add_mm_rss(dst_mm, rss[0], rss[1]);
	pte_unmap_unlock(dst_pte - 1, dst_ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	if (unlikely(is_pfn_mapping(vma))) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_vma_copy(vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_start(src_mm, addr, end);

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
					    vma, addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_end(src_mm,
						  vma->vm_start, end);
	return ret;
}

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int file_rss = 0;
	int anon_rss = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent)) {
			(*zap_work)--;
			continue;
		}

		(*zap_work) -= PAGE_SIZE;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				anon_rss--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent) &&
				    likely(!VM_SequentialReadHint(vma)))
					mark_page_accessed(page);
				file_rss--;
			}
			page_remove_rmap(page, vma);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (!pte_file(ptent))
			free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

	add_mm_rss(mm, file_rss, anon_rss);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pte_range(tlb, vma, pmd, addr, next,
						zap_work, details);
	} while (pmd++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}
static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pud_range(tlb, vma, pgd, addr, next,
						zap_work, details);
	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
	tlb_end_vma(tlb, vma);

	return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	long zap_work = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;
	struct mm_struct *mm = vma->vm_mm;

	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		if (unlikely(is_pfn_mapping(vma)))
			untrack_pfn_vma(vma, 0, 0);

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				/*
				 * It is undesirable to test vma->vm_file as it
				 * should be non-null for a valid hugetlb area.
				 * However, vm_file will be NULL in the error
				 * cleanup path of do_mmap_pgoff. When
				 * hugetlbfs ->mmap method fails,
				 * do_mmap_pgoff() nullifies vma->vm_file
				 * before calling this function to clean up.
				 * Since no pte has actually been setup, it is
				 * safe to do nothing in this case.
				 */
				if (vma->vm_file) {
					unmap_hugepage_range(vma, start, end, NULL);
					zap_work -= (end - start) /
					pages_per_huge_page(hstate_vma(vma));
				}

				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 * Returns 0 if successful.
 */
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end ||
	    		!(vma->vm_flags & VM_PFNMAP))
		return -1;
	zap_page_range(vma, address, size, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);

/*
 * Do a quick page-table lookup for a single page.
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;
	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page))
		goto bad_page;
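	/*
	 * We have a normal page: take a reference if the caller asked for
	 * one (FOLL_GET), and with FOLL_TOUCH mark it accessed (and, for
	 * write lookups, dirty) much as a real fault would.
	 */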
	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		mark_page_accessed(page);
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;
	/* Fall through to ZERO_PAGE handling */
no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate page tables.
	 */
	if (flags & FOLL_ANON) {
		page = ZERO_PAGE(0);
		if (flags & FOLL_GET)
			get_page(page);
		BUG_ON(flags & FOLL_WRITE);
	}
	return page;
}

/* Can we do the FOLL_ANON optimization? */
static inline int use_zero_page(struct vm_area_struct *vma)
{
	/*
	 * We don't want to optimize FOLL_ANON for make_pages_present()
	 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
	 * we want to get the page from the page tables to make sure
	 * that we serialize and update with any other user of that
	 * mapping.
	 */
	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
		return 0;
	/*
	 * And if we have a fault routine, it's not an anonymous region.
	 */
	return !vma->vm_ops || !vma->vm_ops->fault;
}


int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned int vm_flags = 0;
	int write = !!(flags & GUP_FLAGS_WRITE);
	int force = !!(flags & GUP_FLAGS_FORCE);
	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);

	if (len <= 0)
		return 0;
	/*
	 * Require read or write permissions.
11621da177e4SLinus Torvalds * If 'force' is set, we only require the "MAY" flags. 11631da177e4SLinus Torvalds */ 1164deceb6cdSHugh Dickins vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); 1165deceb6cdSHugh Dickins vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); 11661da177e4SLinus Torvalds i = 0; 11671da177e4SLinus Torvalds 11681da177e4SLinus Torvalds do { 11691da177e4SLinus Torvalds struct vm_area_struct *vma; 1170deceb6cdSHugh Dickins unsigned int foll_flags; 11711da177e4SLinus Torvalds 11721da177e4SLinus Torvalds vma = find_extend_vma(mm, start); 11731da177e4SLinus Torvalds if (!vma && in_gate_area(tsk, start)) { 11741da177e4SLinus Torvalds unsigned long pg = start & PAGE_MASK; 11751da177e4SLinus Torvalds struct vm_area_struct *gate_vma = get_gate_vma(tsk); 11761da177e4SLinus Torvalds pgd_t *pgd; 11771da177e4SLinus Torvalds pud_t *pud; 11781da177e4SLinus Torvalds pmd_t *pmd; 11791da177e4SLinus Torvalds pte_t *pte; 1180b291f000SNick Piggin 1181b291f000SNick Piggin /* user gate pages are read-only */ 1182b291f000SNick Piggin if (!ignore && write) 11831da177e4SLinus Torvalds return i ? : -EFAULT; 11841da177e4SLinus Torvalds if (pg > TASK_SIZE) 11851da177e4SLinus Torvalds pgd = pgd_offset_k(pg); 11861da177e4SLinus Torvalds else 11871da177e4SLinus Torvalds pgd = pgd_offset_gate(mm, pg); 11881da177e4SLinus Torvalds BUG_ON(pgd_none(*pgd)); 11891da177e4SLinus Torvalds pud = pud_offset(pgd, pg); 11901da177e4SLinus Torvalds BUG_ON(pud_none(*pud)); 11911da177e4SLinus Torvalds pmd = pmd_offset(pud, pg); 1192690dbe1cSHugh Dickins if (pmd_none(*pmd)) 1193690dbe1cSHugh Dickins return i ? : -EFAULT; 11941da177e4SLinus Torvalds pte = pte_offset_map(pmd, pg); 1195690dbe1cSHugh Dickins if (pte_none(*pte)) { 1196690dbe1cSHugh Dickins pte_unmap(pte); 1197690dbe1cSHugh Dickins return i ? : -EFAULT; 1198690dbe1cSHugh Dickins } 11991da177e4SLinus Torvalds if (pages) { 1200fa2a455bSNick Piggin struct page *page = vm_normal_page(gate_vma, start, *pte); 12016aab341eSLinus Torvalds pages[i] = page; 12026aab341eSLinus Torvalds if (page) 12036aab341eSLinus Torvalds get_page(page); 12041da177e4SLinus Torvalds } 12051da177e4SLinus Torvalds pte_unmap(pte); 12061da177e4SLinus Torvalds if (vmas) 12071da177e4SLinus Torvalds vmas[i] = gate_vma; 12081da177e4SLinus Torvalds i++; 12091da177e4SLinus Torvalds start += PAGE_SIZE; 12101da177e4SLinus Torvalds len--; 12111da177e4SLinus Torvalds continue; 12121da177e4SLinus Torvalds } 12131da177e4SLinus Torvalds 1214b291f000SNick Piggin if (!vma || 1215b291f000SNick Piggin (vma->vm_flags & (VM_IO | VM_PFNMAP)) || 1216b291f000SNick Piggin (!ignore && !(vm_flags & vma->vm_flags))) 12171da177e4SLinus Torvalds return i ? 
: -EFAULT; 12181da177e4SLinus Torvalds 12191da177e4SLinus Torvalds if (is_vm_hugetlb_page(vma)) { 12201da177e4SLinus Torvalds i = follow_hugetlb_page(mm, vma, pages, vmas, 12215b23dbe8SAdam Litke &start, &len, i, write); 12221da177e4SLinus Torvalds continue; 12231da177e4SLinus Torvalds } 1224deceb6cdSHugh Dickins 1225deceb6cdSHugh Dickins foll_flags = FOLL_TOUCH; 1226deceb6cdSHugh Dickins if (pages) 1227deceb6cdSHugh Dickins foll_flags |= FOLL_GET; 1228672ca28eSLinus Torvalds if (!write && use_zero_page(vma)) 1229deceb6cdSHugh Dickins foll_flags |= FOLL_ANON; 1230deceb6cdSHugh Dickins 12311da177e4SLinus Torvalds do { 123208ef4729SHugh Dickins struct page *page; 12331da177e4SLinus Torvalds 1234462e00ccSEthan Solomita /* 1235462e00ccSEthan Solomita * If tsk is ooming, cut off its access to large memory 1236462e00ccSEthan Solomita * allocations. It has a pending SIGKILL, but it can't 1237462e00ccSEthan Solomita * be processed until returning to user space. 1238462e00ccSEthan Solomita */ 1239462e00ccSEthan Solomita if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE))) 12407a36a752SOleg Nesterov return i ? i : -ENOMEM; 1241462e00ccSEthan Solomita 1242deceb6cdSHugh Dickins if (write) 1243deceb6cdSHugh Dickins foll_flags |= FOLL_WRITE; 1244deceb6cdSHugh Dickins 1245deceb6cdSHugh Dickins cond_resched(); 12466aab341eSLinus Torvalds while (!(page = follow_page(vma, start, foll_flags))) { 1247a68d2ebcSLinus Torvalds int ret; 124883c54070SNick Piggin ret = handle_mm_fault(mm, vma, start, 1249deceb6cdSHugh Dickins foll_flags & FOLL_WRITE); 125083c54070SNick Piggin if (ret & VM_FAULT_ERROR) { 125183c54070SNick Piggin if (ret & VM_FAULT_OOM) 125283c54070SNick Piggin return i ? i : -ENOMEM; 125383c54070SNick Piggin else if (ret & VM_FAULT_SIGBUS) 125483c54070SNick Piggin return i ? i : -EFAULT; 125583c54070SNick Piggin BUG(); 125683c54070SNick Piggin } 125783c54070SNick Piggin if (ret & VM_FAULT_MAJOR) 125883c54070SNick Piggin tsk->maj_flt++; 125983c54070SNick Piggin else 126083c54070SNick Piggin tsk->min_flt++; 126183c54070SNick Piggin 1262f33ea7f4SNick Piggin /* 126383c54070SNick Piggin * The VM_FAULT_WRITE bit tells us that 126483c54070SNick Piggin * do_wp_page has broken COW when necessary, 126583c54070SNick Piggin * even if maybe_mkwrite decided not to set 126683c54070SNick Piggin * pte_write. We can thus safely do subsequent 1267878b63acSHugh Dickins * page lookups as if they were reads. But only 1268878b63acSHugh Dickins * do so when looping for pte_write is futile: 1269878b63acSHugh Dickins * in some cases userspace may also be wanting 1270878b63acSHugh Dickins * to write to the gotten user page, which a 1271878b63acSHugh Dickins * read fault here might prevent (a readonly 1272878b63acSHugh Dickins * page might get reCOWed by userspace write). 1273f33ea7f4SNick Piggin */ 1274878b63acSHugh Dickins if ((ret & VM_FAULT_WRITE) && 1275878b63acSHugh Dickins !(vma->vm_flags & VM_WRITE)) 1276deceb6cdSHugh Dickins foll_flags &= ~FOLL_WRITE; 1277a68d2ebcSLinus Torvalds 12787f7bbbe5SBenjamin Herrenschmidt cond_resched(); 12791da177e4SLinus Torvalds } 128089f5b7daSLinus Torvalds if (IS_ERR(page)) 128189f5b7daSLinus Torvalds return i ? 
i : PTR_ERR(page); 12821da177e4SLinus Torvalds if (pages) { 128308ef4729SHugh Dickins pages[i] = page; 128403beb076SJames Bottomley 1285a6f36be3SRussell King flush_anon_page(vma, page, start); 128608ef4729SHugh Dickins flush_dcache_page(page); 12871da177e4SLinus Torvalds } 12881da177e4SLinus Torvalds if (vmas) 12891da177e4SLinus Torvalds vmas[i] = vma; 12901da177e4SLinus Torvalds i++; 12911da177e4SLinus Torvalds start += PAGE_SIZE; 12921da177e4SLinus Torvalds len--; 12931da177e4SLinus Torvalds } while (len && start < vma->vm_end); 12941da177e4SLinus Torvalds } while (len); 12951da177e4SLinus Torvalds return i; 12961da177e4SLinus Torvalds } 1297b291f000SNick Piggin 1298b291f000SNick Piggin int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1299b291f000SNick Piggin unsigned long start, int len, int write, int force, 1300b291f000SNick Piggin struct page **pages, struct vm_area_struct **vmas) 1301b291f000SNick Piggin { 1302b291f000SNick Piggin int flags = 0; 1303b291f000SNick Piggin 1304b291f000SNick Piggin if (write) 1305b291f000SNick Piggin flags |= GUP_FLAGS_WRITE; 1306b291f000SNick Piggin if (force) 1307b291f000SNick Piggin flags |= GUP_FLAGS_FORCE; 1308b291f000SNick Piggin 1309b291f000SNick Piggin return __get_user_pages(tsk, mm, 1310b291f000SNick Piggin start, len, flags, 1311b291f000SNick Piggin pages, vmas); 1312b291f000SNick Piggin } 1313b291f000SNick Piggin 13141da177e4SLinus Torvalds EXPORT_SYMBOL(get_user_pages); 13151da177e4SLinus Torvalds 1316920c7a5dSHarvey Harrison pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, 1317920c7a5dSHarvey Harrison spinlock_t **ptl) 1318c9cfcddfSLinus Torvalds { 1319c9cfcddfSLinus Torvalds pgd_t * pgd = pgd_offset(mm, addr); 1320c9cfcddfSLinus Torvalds pud_t * pud = pud_alloc(mm, pgd, addr); 1321c9cfcddfSLinus Torvalds if (pud) { 132249c91fb0STrond Myklebust pmd_t * pmd = pmd_alloc(mm, pud, addr); 1323c9cfcddfSLinus Torvalds if (pmd) 1324c9cfcddfSLinus Torvalds return pte_alloc_map_lock(mm, pmd, addr, ptl); 1325c9cfcddfSLinus Torvalds } 1326c9cfcddfSLinus Torvalds return NULL; 1327c9cfcddfSLinus Torvalds } 1328c9cfcddfSLinus Torvalds 13291da177e4SLinus Torvalds /* 1330238f58d8SLinus Torvalds * This is the old fallback for page remapping. 1331238f58d8SLinus Torvalds * 1332238f58d8SLinus Torvalds * For historical reasons, it only allows reserved pages. Only 1333238f58d8SLinus Torvalds * old drivers should use this, and they needed to mark their 1334238f58d8SLinus Torvalds * pages reserved for the old functions anyway. 
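 *
 * vm_insert_page() below funnels into this helper, as does
 * vm_insert_mixed() whenever the pfn it is given has a valid
 * struct page behind it.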
1335238f58d8SLinus Torvalds */ 1336423bad60SNick Piggin static int insert_page(struct vm_area_struct *vma, unsigned long addr, 1337423bad60SNick Piggin struct page *page, pgprot_t prot) 1338238f58d8SLinus Torvalds { 1339423bad60SNick Piggin struct mm_struct *mm = vma->vm_mm; 1340238f58d8SLinus Torvalds int retval; 1341238f58d8SLinus Torvalds pte_t *pte; 1342238f58d8SLinus Torvalds spinlock_t *ptl; 1343238f58d8SLinus Torvalds 1344238f58d8SLinus Torvalds retval = -EINVAL; 1345a145dd41SLinus Torvalds if (PageAnon(page)) 13465b4e655eSKAMEZAWA Hiroyuki goto out; 1347238f58d8SLinus Torvalds retval = -ENOMEM; 1348238f58d8SLinus Torvalds flush_dcache_page(page); 1349c9cfcddfSLinus Torvalds pte = get_locked_pte(mm, addr, &ptl); 1350238f58d8SLinus Torvalds if (!pte) 13515b4e655eSKAMEZAWA Hiroyuki goto out; 1352238f58d8SLinus Torvalds retval = -EBUSY; 1353238f58d8SLinus Torvalds if (!pte_none(*pte)) 1354238f58d8SLinus Torvalds goto out_unlock; 1355238f58d8SLinus Torvalds 1356238f58d8SLinus Torvalds /* Ok, finally just insert the thing.. */ 1357238f58d8SLinus Torvalds get_page(page); 1358238f58d8SLinus Torvalds inc_mm_counter(mm, file_rss); 1359238f58d8SLinus Torvalds page_add_file_rmap(page); 1360238f58d8SLinus Torvalds set_pte_at(mm, addr, pte, mk_pte(page, prot)); 1361238f58d8SLinus Torvalds 1362238f58d8SLinus Torvalds retval = 0; 13638a9f3ccdSBalbir Singh pte_unmap_unlock(pte, ptl); 13648a9f3ccdSBalbir Singh return retval; 1365238f58d8SLinus Torvalds out_unlock: 1366238f58d8SLinus Torvalds pte_unmap_unlock(pte, ptl); 1367238f58d8SLinus Torvalds out: 1368238f58d8SLinus Torvalds return retval; 1369238f58d8SLinus Torvalds } 1370238f58d8SLinus Torvalds 1371bfa5bf6dSRolf Eike Beer /** 1372bfa5bf6dSRolf Eike Beer * vm_insert_page - insert single page into user vma 1373bfa5bf6dSRolf Eike Beer * @vma: user vma to map to 1374bfa5bf6dSRolf Eike Beer * @addr: target user address of this page 1375bfa5bf6dSRolf Eike Beer * @page: source kernel page 1376bfa5bf6dSRolf Eike Beer * 1377a145dd41SLinus Torvalds * This allows drivers to insert individual pages they've allocated 1378a145dd41SLinus Torvalds * into a user vma. 1379a145dd41SLinus Torvalds * 1380a145dd41SLinus Torvalds * The page has to be a nice clean _individual_ kernel allocation. 1381a145dd41SLinus Torvalds * If you allocate a compound page, you need to have marked it as 1382a145dd41SLinus Torvalds * such (__GFP_COMP), or manually just split the page up yourself 13838dfcc9baSNick Piggin * (see split_page()). 1384a145dd41SLinus Torvalds * 1385a145dd41SLinus Torvalds * NOTE! Traditionally this was done with "remap_pfn_range()" which 1386a145dd41SLinus Torvalds * took an arbitrary page protection parameter. This doesn't allow 1387a145dd41SLinus Torvalds * that. Your vma protection will have to be set up correctly, which 1388a145dd41SLinus Torvalds * means that if you want a shared writable mapping, you'd better 1389a145dd41SLinus Torvalds * ask for a shared writable mapping! 1390a145dd41SLinus Torvalds * 1391a145dd41SLinus Torvalds * The page does not need to be reserved. 
1392a145dd41SLinus Torvalds */
1393423bad60SNick Piggin int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1394423bad60SNick Piggin struct page *page)
1395a145dd41SLinus Torvalds {
1396a145dd41SLinus Torvalds if (addr < vma->vm_start || addr >= vma->vm_end)
1397a145dd41SLinus Torvalds return -EFAULT;
1398a145dd41SLinus Torvalds if (!page_count(page))
1399a145dd41SLinus Torvalds return -EINVAL;
14004d7672b4SLinus Torvalds vma->vm_flags |= VM_INSERTPAGE;
1401423bad60SNick Piggin return insert_page(vma, addr, page, vma->vm_page_prot);
1402a145dd41SLinus Torvalds }
1403e3c3374fSLinus Torvalds EXPORT_SYMBOL(vm_insert_page);
1404a145dd41SLinus Torvalds
1405423bad60SNick Piggin static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1406423bad60SNick Piggin unsigned long pfn, pgprot_t prot)
1407423bad60SNick Piggin {
1408423bad60SNick Piggin struct mm_struct *mm = vma->vm_mm;
1409423bad60SNick Piggin int retval;
1410423bad60SNick Piggin pte_t *pte, entry;
1411423bad60SNick Piggin spinlock_t *ptl;
1412423bad60SNick Piggin
1413423bad60SNick Piggin retval = -ENOMEM;
1414423bad60SNick Piggin pte = get_locked_pte(mm, addr, &ptl);
1415423bad60SNick Piggin if (!pte)
1416423bad60SNick Piggin goto out;
1417423bad60SNick Piggin retval = -EBUSY;
1418423bad60SNick Piggin if (!pte_none(*pte))
1419423bad60SNick Piggin goto out_unlock;
1420423bad60SNick Piggin
1421423bad60SNick Piggin /* Ok, finally just insert the thing.. */
1422423bad60SNick Piggin entry = pte_mkspecial(pfn_pte(pfn, prot));
1423423bad60SNick Piggin set_pte_at(mm, addr, pte, entry);
1424423bad60SNick Piggin update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
1425423bad60SNick Piggin
1426423bad60SNick Piggin retval = 0;
1427423bad60SNick Piggin out_unlock:
1428423bad60SNick Piggin pte_unmap_unlock(pte, ptl);
1429423bad60SNick Piggin out:
1430423bad60SNick Piggin return retval;
1431423bad60SNick Piggin }
1432423bad60SNick Piggin
1433e0dc0d8fSNick Piggin /**
1434e0dc0d8fSNick Piggin * vm_insert_pfn - insert single pfn into user vma
1435e0dc0d8fSNick Piggin * @vma: user vma to map to
1436e0dc0d8fSNick Piggin * @addr: target user address of this page
1437e0dc0d8fSNick Piggin * @pfn: source kernel pfn
1438e0dc0d8fSNick Piggin *
1439e0dc0d8fSNick Piggin * Similar to vm_insert_page, this allows drivers to insert individual pages
1440e0dc0d8fSNick Piggin * they've allocated into a user vma. Same comments apply.
1441e0dc0d8fSNick Piggin *
1442e0dc0d8fSNick Piggin * This function should only be called from a vm_ops->fault handler, and
1443e0dc0d8fSNick Piggin * in that case the handler should return NULL.
14440d71d10aSNick Piggin *
14450d71d10aSNick Piggin * vma cannot be a COW mapping.
14460d71d10aSNick Piggin *
14470d71d10aSNick Piggin * As this is called only for pages that do not currently exist, we
14480d71d10aSNick Piggin * do not need to flush old virtual caches or the TLB.
1449e0dc0d8fSNick Piggin */
1450e0dc0d8fSNick Piggin int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1451e0dc0d8fSNick Piggin unsigned long pfn)
1452e0dc0d8fSNick Piggin {
14532ab64037Svenkatesh.pallipadi@intel.com int ret;
14547e675137SNick Piggin /*
14557e675137SNick Piggin * Technically, architectures with pte_special can avoid all these
14567e675137SNick Piggin * restrictions (same for remap_pfn_range). However we would like
14577e675137SNick Piggin * consistency in testing and feature parity among all, so we should
14587e675137SNick Piggin * try to keep these invariants in place for everybody.
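 *
 * The BUG_ON()s below spell those invariants out: the vma must be
 * marked VM_PFNMAP or VM_MIXEDMAP but not both, a VM_PFNMAP vma must
 * not be a COW mapping, and a pfn inserted into a VM_MIXEDMAP vma this
 * way must not have a struct page behind it.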
14597e675137SNick Piggin */ 1460b379d790SJared Hulbert BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 1461b379d790SJared Hulbert BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 1462b379d790SJared Hulbert (VM_PFNMAP|VM_MIXEDMAP)); 1463b379d790SJared Hulbert BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 1464b379d790SJared Hulbert BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 1465e0dc0d8fSNick Piggin 1466423bad60SNick Piggin if (addr < vma->vm_start || addr >= vma->vm_end) 1467423bad60SNick Piggin return -EFAULT; 14682ab64037Svenkatesh.pallipadi@intel.com if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE)) 14692ab64037Svenkatesh.pallipadi@intel.com return -EINVAL; 14702ab64037Svenkatesh.pallipadi@intel.com 14712ab64037Svenkatesh.pallipadi@intel.com ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot); 14722ab64037Svenkatesh.pallipadi@intel.com 14732ab64037Svenkatesh.pallipadi@intel.com if (ret) 14742ab64037Svenkatesh.pallipadi@intel.com untrack_pfn_vma(vma, pfn, PAGE_SIZE); 14752ab64037Svenkatesh.pallipadi@intel.com 14762ab64037Svenkatesh.pallipadi@intel.com return ret; 1477e0dc0d8fSNick Piggin } 1478e0dc0d8fSNick Piggin EXPORT_SYMBOL(vm_insert_pfn); 1479e0dc0d8fSNick Piggin 1480423bad60SNick Piggin int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 1481423bad60SNick Piggin unsigned long pfn) 1482423bad60SNick Piggin { 1483423bad60SNick Piggin BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); 1484423bad60SNick Piggin 1485423bad60SNick Piggin if (addr < vma->vm_start || addr >= vma->vm_end) 1486423bad60SNick Piggin return -EFAULT; 1487423bad60SNick Piggin 1488423bad60SNick Piggin /* 1489423bad60SNick Piggin * If we don't have pte special, then we have to use the pfn_valid() 1490423bad60SNick Piggin * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* 1491423bad60SNick Piggin * refcount the page if pfn_valid is true (hence insert_page rather 1492423bad60SNick Piggin * than insert_pfn). 1493423bad60SNick Piggin */ 1494423bad60SNick Piggin if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) { 1495423bad60SNick Piggin struct page *page; 1496423bad60SNick Piggin 1497423bad60SNick Piggin page = pfn_to_page(pfn); 1498423bad60SNick Piggin return insert_page(vma, addr, page, vma->vm_page_prot); 1499423bad60SNick Piggin } 1500423bad60SNick Piggin return insert_pfn(vma, addr, pfn, vma->vm_page_prot); 1501423bad60SNick Piggin } 1502423bad60SNick Piggin EXPORT_SYMBOL(vm_insert_mixed); 1503423bad60SNick Piggin 1504a145dd41SLinus Torvalds /* 15051da177e4SLinus Torvalds * maps a range of physical memory into the requested pages. the old 15061da177e4SLinus Torvalds * mappings are removed. 
any references to nonexistent pages result
15071da177e4SLinus Torvalds * in null mappings (currently treated as "copy-on-access")
15081da177e4SLinus Torvalds */
15091da177e4SLinus Torvalds static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
15101da177e4SLinus Torvalds unsigned long addr, unsigned long end,
15111da177e4SLinus Torvalds unsigned long pfn, pgprot_t prot)
15121da177e4SLinus Torvalds {
15131da177e4SLinus Torvalds pte_t *pte;
1514c74df32cSHugh Dickins spinlock_t *ptl;
15151da177e4SLinus Torvalds
1516c74df32cSHugh Dickins pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
15171da177e4SLinus Torvalds if (!pte)
15181da177e4SLinus Torvalds return -ENOMEM;
15196606c3e0SZachary Amsden arch_enter_lazy_mmu_mode();
15201da177e4SLinus Torvalds do {
15211da177e4SLinus Torvalds BUG_ON(!pte_none(*pte));
15227e675137SNick Piggin set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
15231da177e4SLinus Torvalds pfn++;
15241da177e4SLinus Torvalds } while (pte++, addr += PAGE_SIZE, addr != end);
15256606c3e0SZachary Amsden arch_leave_lazy_mmu_mode();
1526c74df32cSHugh Dickins pte_unmap_unlock(pte - 1, ptl);
15271da177e4SLinus Torvalds return 0;
15281da177e4SLinus Torvalds }
15291da177e4SLinus Torvalds
15301da177e4SLinus Torvalds static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
15311da177e4SLinus Torvalds unsigned long addr, unsigned long end,
15321da177e4SLinus Torvalds unsigned long pfn, pgprot_t prot)
15331da177e4SLinus Torvalds {
15341da177e4SLinus Torvalds pmd_t *pmd;
15351da177e4SLinus Torvalds unsigned long next;
15361da177e4SLinus Torvalds
15371da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT;
15381da177e4SLinus Torvalds pmd = pmd_alloc(mm, pud, addr);
15391da177e4SLinus Torvalds if (!pmd)
15401da177e4SLinus Torvalds return -ENOMEM;
15411da177e4SLinus Torvalds do {
15421da177e4SLinus Torvalds next = pmd_addr_end(addr, end);
15431da177e4SLinus Torvalds if (remap_pte_range(mm, pmd, addr, next,
15441da177e4SLinus Torvalds pfn + (addr >> PAGE_SHIFT), prot))
15451da177e4SLinus Torvalds return -ENOMEM;
15461da177e4SLinus Torvalds } while (pmd++, addr = next, addr != end);
15471da177e4SLinus Torvalds return 0;
15481da177e4SLinus Torvalds }
15491da177e4SLinus Torvalds
15501da177e4SLinus Torvalds static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
15511da177e4SLinus Torvalds unsigned long addr, unsigned long end,
15521da177e4SLinus Torvalds unsigned long pfn, pgprot_t prot)
15531da177e4SLinus Torvalds {
15541da177e4SLinus Torvalds pud_t *pud;
15551da177e4SLinus Torvalds unsigned long next;
15561da177e4SLinus Torvalds
15571da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT;
15581da177e4SLinus Torvalds pud = pud_alloc(mm, pgd, addr);
15591da177e4SLinus Torvalds if (!pud)
15601da177e4SLinus Torvalds return -ENOMEM;
15611da177e4SLinus Torvalds do {
15621da177e4SLinus Torvalds next = pud_addr_end(addr, end);
15631da177e4SLinus Torvalds if (remap_pmd_range(mm, pud, addr, next,
15641da177e4SLinus Torvalds pfn + (addr >> PAGE_SHIFT), prot))
15651da177e4SLinus Torvalds return -ENOMEM;
15661da177e4SLinus Torvalds } while (pud++, addr = next, addr != end);
15671da177e4SLinus Torvalds return 0;
15681da177e4SLinus Torvalds }
15691da177e4SLinus Torvalds
1570bfa5bf6dSRolf Eike Beer /**
1571bfa5bf6dSRolf Eike Beer * remap_pfn_range - remap kernel memory to userspace
1572bfa5bf6dSRolf Eike Beer * @vma: user vma to map to
1573bfa5bf6dSRolf Eike Beer * @addr: target user address to start at
1574bfa5bf6dSRolf Eike Beer * @pfn: page frame number of the kernel memory to map
1575bfa5bf6dSRolf Eike Beer * @size: size of map area 1576bfa5bf6dSRolf Eike Beer * @prot: page protection flags for this mapping 1577bfa5bf6dSRolf Eike Beer * 1578bfa5bf6dSRolf Eike Beer * Note: this is only safe if the mm semaphore is held when called. 1579bfa5bf6dSRolf Eike Beer */ 15801da177e4SLinus Torvalds int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 15811da177e4SLinus Torvalds unsigned long pfn, unsigned long size, pgprot_t prot) 15821da177e4SLinus Torvalds { 15831da177e4SLinus Torvalds pgd_t *pgd; 15841da177e4SLinus Torvalds unsigned long next; 15852d15cab8SHugh Dickins unsigned long end = addr + PAGE_ALIGN(size); 15861da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 15871da177e4SLinus Torvalds int err; 15881da177e4SLinus Torvalds 15891da177e4SLinus Torvalds /* 15901da177e4SLinus Torvalds * Physically remapped pages are special. Tell the 15911da177e4SLinus Torvalds * rest of the world about it: 15921da177e4SLinus Torvalds * VM_IO tells people not to look at these pages 15931da177e4SLinus Torvalds * (accesses can have side effects). 15940b14c179SHugh Dickins * VM_RESERVED is specified all over the place, because 15950b14c179SHugh Dickins * in 2.4 it kept swapout's vma scan off this vma; but 15960b14c179SHugh Dickins * in 2.6 the LRU scan won't even find its pages, so this 15970b14c179SHugh Dickins * flag means no more than count its pages in reserved_vm, 15980b14c179SHugh Dickins * and omit it from core dump, even when VM_IO turned off. 15996aab341eSLinus Torvalds * VM_PFNMAP tells the core MM that the base pages are just 16006aab341eSLinus Torvalds * raw PFN mappings, and do not have a "struct page" associated 16016aab341eSLinus Torvalds * with them. 1602fb155c16SLinus Torvalds * 1603fb155c16SLinus Torvalds * There's a horrible special case to handle copy-on-write 1604fb155c16SLinus Torvalds * behaviour that some programs depend on. We mark the "original" 1605fb155c16SLinus Torvalds * un-COW'ed pages by matching them up with "vma->vm_pgoff". 
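 *
 * For illustration only (hypothetical names, a sketch rather than any
 * real driver): the classic caller is a character driver ->mmap method
 * that exposes a physical region starting at mydrv_phys_base:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       mydrv_phys_base >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}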
16061da177e4SLinus Torvalds */ 16073c8bb73aSvenkatesh.pallipadi@intel.com if (addr == vma->vm_start && end == vma->vm_end) 16086aab341eSLinus Torvalds vma->vm_pgoff = pfn; 16093c8bb73aSvenkatesh.pallipadi@intel.com else if (is_cow_mapping(vma->vm_flags)) 16103c8bb73aSvenkatesh.pallipadi@intel.com return -EINVAL; 1611fb155c16SLinus Torvalds 1612fb155c16SLinus Torvalds vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 16131da177e4SLinus Torvalds 16142ab64037Svenkatesh.pallipadi@intel.com err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size)); 16152ab64037Svenkatesh.pallipadi@intel.com if (err) 16162ab64037Svenkatesh.pallipadi@intel.com return -EINVAL; 16172ab64037Svenkatesh.pallipadi@intel.com 16181da177e4SLinus Torvalds BUG_ON(addr >= end); 16191da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT; 16201da177e4SLinus Torvalds pgd = pgd_offset(mm, addr); 16211da177e4SLinus Torvalds flush_cache_range(vma, addr, end); 16221da177e4SLinus Torvalds do { 16231da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 16241da177e4SLinus Torvalds err = remap_pud_range(mm, pgd, addr, next, 16251da177e4SLinus Torvalds pfn + (addr >> PAGE_SHIFT), prot); 16261da177e4SLinus Torvalds if (err) 16271da177e4SLinus Torvalds break; 16281da177e4SLinus Torvalds } while (pgd++, addr = next, addr != end); 16292ab64037Svenkatesh.pallipadi@intel.com 16302ab64037Svenkatesh.pallipadi@intel.com if (err) 16312ab64037Svenkatesh.pallipadi@intel.com untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size)); 16322ab64037Svenkatesh.pallipadi@intel.com 16331da177e4SLinus Torvalds return err; 16341da177e4SLinus Torvalds } 16351da177e4SLinus Torvalds EXPORT_SYMBOL(remap_pfn_range); 16361da177e4SLinus Torvalds 1637aee16b3cSJeremy Fitzhardinge static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 1638aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 1639aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 1640aee16b3cSJeremy Fitzhardinge { 1641aee16b3cSJeremy Fitzhardinge pte_t *pte; 1642aee16b3cSJeremy Fitzhardinge int err; 16432f569afdSMartin Schwidefsky pgtable_t token; 164494909914SBorislav Petkov spinlock_t *uninitialized_var(ptl); 1645aee16b3cSJeremy Fitzhardinge 1646aee16b3cSJeremy Fitzhardinge pte = (mm == &init_mm) ? 
1647aee16b3cSJeremy Fitzhardinge pte_alloc_kernel(pmd, addr) : 1648aee16b3cSJeremy Fitzhardinge pte_alloc_map_lock(mm, pmd, addr, &ptl); 1649aee16b3cSJeremy Fitzhardinge if (!pte) 1650aee16b3cSJeremy Fitzhardinge return -ENOMEM; 1651aee16b3cSJeremy Fitzhardinge 1652aee16b3cSJeremy Fitzhardinge BUG_ON(pmd_huge(*pmd)); 1653aee16b3cSJeremy Fitzhardinge 165438e0edb1SJeremy Fitzhardinge arch_enter_lazy_mmu_mode(); 165538e0edb1SJeremy Fitzhardinge 16562f569afdSMartin Schwidefsky token = pmd_pgtable(*pmd); 1657aee16b3cSJeremy Fitzhardinge 1658aee16b3cSJeremy Fitzhardinge do { 16592f569afdSMartin Schwidefsky err = fn(pte, token, addr, data); 1660aee16b3cSJeremy Fitzhardinge if (err) 1661aee16b3cSJeremy Fitzhardinge break; 1662aee16b3cSJeremy Fitzhardinge } while (pte++, addr += PAGE_SIZE, addr != end); 1663aee16b3cSJeremy Fitzhardinge 166438e0edb1SJeremy Fitzhardinge arch_leave_lazy_mmu_mode(); 166538e0edb1SJeremy Fitzhardinge 1666aee16b3cSJeremy Fitzhardinge if (mm != &init_mm) 1667aee16b3cSJeremy Fitzhardinge pte_unmap_unlock(pte-1, ptl); 1668aee16b3cSJeremy Fitzhardinge return err; 1669aee16b3cSJeremy Fitzhardinge } 1670aee16b3cSJeremy Fitzhardinge 1671aee16b3cSJeremy Fitzhardinge static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 1672aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 1673aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 1674aee16b3cSJeremy Fitzhardinge { 1675aee16b3cSJeremy Fitzhardinge pmd_t *pmd; 1676aee16b3cSJeremy Fitzhardinge unsigned long next; 1677aee16b3cSJeremy Fitzhardinge int err; 1678aee16b3cSJeremy Fitzhardinge 1679ceb86879SAndi Kleen BUG_ON(pud_huge(*pud)); 1680ceb86879SAndi Kleen 1681aee16b3cSJeremy Fitzhardinge pmd = pmd_alloc(mm, pud, addr); 1682aee16b3cSJeremy Fitzhardinge if (!pmd) 1683aee16b3cSJeremy Fitzhardinge return -ENOMEM; 1684aee16b3cSJeremy Fitzhardinge do { 1685aee16b3cSJeremy Fitzhardinge next = pmd_addr_end(addr, end); 1686aee16b3cSJeremy Fitzhardinge err = apply_to_pte_range(mm, pmd, addr, next, fn, data); 1687aee16b3cSJeremy Fitzhardinge if (err) 1688aee16b3cSJeremy Fitzhardinge break; 1689aee16b3cSJeremy Fitzhardinge } while (pmd++, addr = next, addr != end); 1690aee16b3cSJeremy Fitzhardinge return err; 1691aee16b3cSJeremy Fitzhardinge } 1692aee16b3cSJeremy Fitzhardinge 1693aee16b3cSJeremy Fitzhardinge static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, 1694aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 1695aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 1696aee16b3cSJeremy Fitzhardinge { 1697aee16b3cSJeremy Fitzhardinge pud_t *pud; 1698aee16b3cSJeremy Fitzhardinge unsigned long next; 1699aee16b3cSJeremy Fitzhardinge int err; 1700aee16b3cSJeremy Fitzhardinge 1701aee16b3cSJeremy Fitzhardinge pud = pud_alloc(mm, pgd, addr); 1702aee16b3cSJeremy Fitzhardinge if (!pud) 1703aee16b3cSJeremy Fitzhardinge return -ENOMEM; 1704aee16b3cSJeremy Fitzhardinge do { 1705aee16b3cSJeremy Fitzhardinge next = pud_addr_end(addr, end); 1706aee16b3cSJeremy Fitzhardinge err = apply_to_pmd_range(mm, pud, addr, next, fn, data); 1707aee16b3cSJeremy Fitzhardinge if (err) 1708aee16b3cSJeremy Fitzhardinge break; 1709aee16b3cSJeremy Fitzhardinge } while (pud++, addr = next, addr != end); 1710aee16b3cSJeremy Fitzhardinge return err; 1711aee16b3cSJeremy Fitzhardinge } 1712aee16b3cSJeremy Fitzhardinge 1713aee16b3cSJeremy Fitzhardinge /* 1714aee16b3cSJeremy Fitzhardinge * Scan a region of virtual memory, filling in page tables as necessary 1715aee16b3cSJeremy Fitzhardinge * and calling a provided function 
on each leaf page table.
1716aee16b3cSJeremy Fitzhardinge */
1717aee16b3cSJeremy Fitzhardinge int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
1718aee16b3cSJeremy Fitzhardinge unsigned long size, pte_fn_t fn, void *data)
1719aee16b3cSJeremy Fitzhardinge {
1720aee16b3cSJeremy Fitzhardinge pgd_t *pgd;
1721aee16b3cSJeremy Fitzhardinge unsigned long next;
1722cddb8a5cSAndrea Arcangeli unsigned long start = addr, end = addr + size;
1723aee16b3cSJeremy Fitzhardinge int err;
1724aee16b3cSJeremy Fitzhardinge
1725aee16b3cSJeremy Fitzhardinge BUG_ON(addr >= end);
1726cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_start(mm, start, end);
1727aee16b3cSJeremy Fitzhardinge pgd = pgd_offset(mm, addr);
1728aee16b3cSJeremy Fitzhardinge do {
1729aee16b3cSJeremy Fitzhardinge next = pgd_addr_end(addr, end);
1730aee16b3cSJeremy Fitzhardinge err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
1731aee16b3cSJeremy Fitzhardinge if (err)
1732aee16b3cSJeremy Fitzhardinge break;
1733aee16b3cSJeremy Fitzhardinge } while (pgd++, addr = next, addr != end);
1734cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_end(mm, start, end);
1735aee16b3cSJeremy Fitzhardinge return err;
1736aee16b3cSJeremy Fitzhardinge }
1737aee16b3cSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(apply_to_page_range);
1738aee16b3cSJeremy Fitzhardinge
17391da177e4SLinus Torvalds /*
17408f4e2101SHugh Dickins * handle_pte_fault chooses page fault handler according to an entry
17418f4e2101SHugh Dickins * which was read non-atomically. Before making any commitment, on
17428f4e2101SHugh Dickins * those architectures or configurations (e.g. i386 with PAE) which
17438f4e2101SHugh Dickins * might give a mix of unmatched parts, do_swap_page and do_file_page
17448f4e2101SHugh Dickins * must check under lock before unmapping the pte and proceeding
17458f4e2101SHugh Dickins * (but do_wp_page is only called after already making such a check;
17468f4e2101SHugh Dickins * and do_anonymous_page and do_no_page can safely check later on).
17478f4e2101SHugh Dickins */
17484c21e2f2SHugh Dickins static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
17498f4e2101SHugh Dickins pte_t *page_table, pte_t orig_pte)
17508f4e2101SHugh Dickins {
17518f4e2101SHugh Dickins int same = 1;
17528f4e2101SHugh Dickins #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
17538f4e2101SHugh Dickins if (sizeof(pte_t) > sizeof(unsigned long)) {
17544c21e2f2SHugh Dickins spinlock_t *ptl = pte_lockptr(mm, pmd);
17554c21e2f2SHugh Dickins spin_lock(ptl);
17568f4e2101SHugh Dickins same = pte_same(*page_table, orig_pte);
17574c21e2f2SHugh Dickins spin_unlock(ptl);
17588f4e2101SHugh Dickins }
17598f4e2101SHugh Dickins #endif
17608f4e2101SHugh Dickins pte_unmap(page_table);
17618f4e2101SHugh Dickins return same;
17628f4e2101SHugh Dickins }
17638f4e2101SHugh Dickins
17648f4e2101SHugh Dickins /*
17651da177e4SLinus Torvalds * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
17661da177e4SLinus Torvalds * servicing faults for write access. In the normal case, we always want
17671da177e4SLinus Torvalds * pte_mkwrite. But get_user_pages can cause write faults for mappings
17681da177e4SLinus Torvalds * that do not have writing enabled, when used by access_process_vm.
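 *
 * That is also why do_wp_page() below may legitimately leave the pte
 * read-only even though it has just serviced a write fault: returning
 * VM_FAULT_WRITE tells get_user_pages() that the COW break has been
 * done regardless.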
17691da177e4SLinus Torvalds */ 17701da177e4SLinus Torvalds static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) 17711da177e4SLinus Torvalds { 17721da177e4SLinus Torvalds if (likely(vma->vm_flags & VM_WRITE)) 17731da177e4SLinus Torvalds pte = pte_mkwrite(pte); 17741da177e4SLinus Torvalds return pte; 17751da177e4SLinus Torvalds } 17761da177e4SLinus Torvalds 17779de455b2SAtsushi Nemoto static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) 17786aab341eSLinus Torvalds { 17796aab341eSLinus Torvalds /* 17806aab341eSLinus Torvalds * If the source page was a PFN mapping, we don't have 17816aab341eSLinus Torvalds * a "struct page" for it. We do a best-effort copy by 17826aab341eSLinus Torvalds * just copying from the original user address. If that 17836aab341eSLinus Torvalds * fails, we just zero-fill it. Live with it. 17846aab341eSLinus Torvalds */ 17856aab341eSLinus Torvalds if (unlikely(!src)) { 17866aab341eSLinus Torvalds void *kaddr = kmap_atomic(dst, KM_USER0); 17875d2a2dbbSLinus Torvalds void __user *uaddr = (void __user *)(va & PAGE_MASK); 17885d2a2dbbSLinus Torvalds 17895d2a2dbbSLinus Torvalds /* 17905d2a2dbbSLinus Torvalds * This really shouldn't fail, because the page is there 17915d2a2dbbSLinus Torvalds * in the page tables. But it might just be unreadable, 17925d2a2dbbSLinus Torvalds * in which case we just give up and fill the result with 17935d2a2dbbSLinus Torvalds * zeroes. 17945d2a2dbbSLinus Torvalds */ 17955d2a2dbbSLinus Torvalds if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) 17966aab341eSLinus Torvalds memset(kaddr, 0, PAGE_SIZE); 17976aab341eSLinus Torvalds kunmap_atomic(kaddr, KM_USER0); 1798c4ec7b0dSDmitriy Monakhov flush_dcache_page(dst); 17990ed361deSNick Piggin } else 18009de455b2SAtsushi Nemoto copy_user_highpage(dst, src, va, vma); 18016aab341eSLinus Torvalds } 18026aab341eSLinus Torvalds 18031da177e4SLinus Torvalds /* 18041da177e4SLinus Torvalds * This routine handles present pages, when users try to write 18051da177e4SLinus Torvalds * to a shared page. It is done by copying the page to a new address 18061da177e4SLinus Torvalds * and decrementing the shared-page counter for the old page. 18071da177e4SLinus Torvalds * 18081da177e4SLinus Torvalds * Note that this routine assumes that the protection checks have been 18091da177e4SLinus Torvalds * done by the caller (the low-level page fault routine in most cases). 18101da177e4SLinus Torvalds * Thus we can safely just mark it writable once we've done any necessary 18111da177e4SLinus Torvalds * COW. 18121da177e4SLinus Torvalds * 18131da177e4SLinus Torvalds * We also mark the page dirty at this point even though the page will 18141da177e4SLinus Torvalds * change only once the write actually happens. This avoids a few races, 18151da177e4SLinus Torvalds * and potentially makes it more efficient. 18161da177e4SLinus Torvalds * 18178f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 18188f4e2101SHugh Dickins * but allow concurrent faults), with pte both mapped and locked. 18198f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
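 *
 * Roughly: if the existing page may safely be written in place (an
 * exclusive anonymous page, or a page in a shared writable mapping,
 * possibly after notifying ->page_mkwrite), we just make the pte
 * writable and dirty ("reuse"); otherwise we allocate a new page, copy
 * the old contents into it, and switch the pte over to the copy.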
18201da177e4SLinus Torvalds */ 18211da177e4SLinus Torvalds static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 182265500d23SHugh Dickins unsigned long address, pte_t *page_table, pmd_t *pmd, 18238f4e2101SHugh Dickins spinlock_t *ptl, pte_t orig_pte) 18241da177e4SLinus Torvalds { 1825e5bbe4dfSHugh Dickins struct page *old_page, *new_page; 18261da177e4SLinus Torvalds pte_t entry; 182783c54070SNick Piggin int reuse = 0, ret = 0; 1828a200ee18SPeter Zijlstra int page_mkwrite = 0; 1829d08b3851SPeter Zijlstra struct page *dirty_page = NULL; 18301da177e4SLinus Torvalds 18316aab341eSLinus Torvalds old_page = vm_normal_page(vma, address, orig_pte); 1832251b97f5SPeter Zijlstra if (!old_page) { 1833251b97f5SPeter Zijlstra /* 1834251b97f5SPeter Zijlstra * VM_MIXEDMAP !pfn_valid() case 1835251b97f5SPeter Zijlstra * 1836251b97f5SPeter Zijlstra * We should not cow pages in a shared writeable mapping. 1837251b97f5SPeter Zijlstra * Just mark the pages writable as we can't do any dirty 1838251b97f5SPeter Zijlstra * accounting on raw pfn maps. 1839251b97f5SPeter Zijlstra */ 1840251b97f5SPeter Zijlstra if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 1841251b97f5SPeter Zijlstra (VM_WRITE|VM_SHARED)) 1842251b97f5SPeter Zijlstra goto reuse; 1843920fc356SHugh Dickins goto gotten; 1844251b97f5SPeter Zijlstra } 18451da177e4SLinus Torvalds 1846d08b3851SPeter Zijlstra /* 1847ee6a6457SPeter Zijlstra * Take out anonymous pages first, anonymous shared vmas are 1848ee6a6457SPeter Zijlstra * not dirty accountable. 1849d08b3851SPeter Zijlstra */ 1850ee6a6457SPeter Zijlstra if (PageAnon(old_page)) { 1851ab967d86SHugh Dickins if (!trylock_page(old_page)) { 1852ab967d86SHugh Dickins page_cache_get(old_page); 1853ab967d86SHugh Dickins pte_unmap_unlock(page_table, ptl); 1854ab967d86SHugh Dickins lock_page(old_page); 1855ab967d86SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, 1856ab967d86SHugh Dickins &ptl); 1857ab967d86SHugh Dickins if (!pte_same(*page_table, orig_pte)) { 1858ab967d86SHugh Dickins unlock_page(old_page); 1859ab967d86SHugh Dickins page_cache_release(old_page); 1860ab967d86SHugh Dickins goto unlock; 1861ab967d86SHugh Dickins } 1862ab967d86SHugh Dickins page_cache_release(old_page); 1863ab967d86SHugh Dickins } 18647b1fe597SHugh Dickins reuse = reuse_swap_page(old_page); 1865ee6a6457SPeter Zijlstra unlock_page(old_page); 1866ee6a6457SPeter Zijlstra } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 1867d08b3851SPeter Zijlstra (VM_WRITE|VM_SHARED))) { 1868ee6a6457SPeter Zijlstra /* 1869ee6a6457SPeter Zijlstra * Only catch write-faults on shared writable pages, 1870ee6a6457SPeter Zijlstra * read-only shared pages can get COWed by 1871ee6a6457SPeter Zijlstra * get_user_pages(.write=1, .force=1). 1872ee6a6457SPeter Zijlstra */ 18739637a5efSDavid Howells if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 18749637a5efSDavid Howells /* 18759637a5efSDavid Howells * Notify the address space that the page is about to 18769637a5efSDavid Howells * become writable so that it can prohibit this or wait 18779637a5efSDavid Howells * for the page to get into an appropriate state. 18789637a5efSDavid Howells * 18799637a5efSDavid Howells * We do this without the lock held, so that it can 18809637a5efSDavid Howells * sleep if it needs to. 
18819637a5efSDavid Howells */ 18829637a5efSDavid Howells page_cache_get(old_page); 18839637a5efSDavid Howells pte_unmap_unlock(page_table, ptl); 18849637a5efSDavid Howells 18859637a5efSDavid Howells if (vma->vm_ops->page_mkwrite(vma, old_page) < 0) 18869637a5efSDavid Howells goto unwritable_page; 18879637a5efSDavid Howells 18889637a5efSDavid Howells /* 18899637a5efSDavid Howells * Since we dropped the lock we need to revalidate 18909637a5efSDavid Howells * the PTE as someone else may have changed it. If 18919637a5efSDavid Howells * they did, we just return, as we can count on the 18929637a5efSDavid Howells * MMU to tell us if they didn't also make it writable. 18939637a5efSDavid Howells */ 18949637a5efSDavid Howells page_table = pte_offset_map_lock(mm, pmd, address, 18959637a5efSDavid Howells &ptl); 1896c3704cebSHugh Dickins page_cache_release(old_page); 18979637a5efSDavid Howells if (!pte_same(*page_table, orig_pte)) 18989637a5efSDavid Howells goto unlock; 1899a200ee18SPeter Zijlstra 1900a200ee18SPeter Zijlstra page_mkwrite = 1; 19019637a5efSDavid Howells } 1902d08b3851SPeter Zijlstra dirty_page = old_page; 1903d08b3851SPeter Zijlstra get_page(dirty_page); 19049637a5efSDavid Howells reuse = 1; 19059637a5efSDavid Howells } 19069637a5efSDavid Howells 19071da177e4SLinus Torvalds if (reuse) { 1908251b97f5SPeter Zijlstra reuse: 1909eca35133SBen Collins flush_cache_page(vma, address, pte_pfn(orig_pte)); 191065500d23SHugh Dickins entry = pte_mkyoung(orig_pte); 191165500d23SHugh Dickins entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1912954ffcb3SKAMEZAWA Hiroyuki if (ptep_set_access_flags(vma, address, page_table, entry,1)) 19131da177e4SLinus Torvalds update_mmu_cache(vma, address, entry); 191465500d23SHugh Dickins ret |= VM_FAULT_WRITE; 191565500d23SHugh Dickins goto unlock; 19161da177e4SLinus Torvalds } 19171da177e4SLinus Torvalds 19181da177e4SLinus Torvalds /* 19191da177e4SLinus Torvalds * Ok, we need to copy. Oh, well.. 19201da177e4SLinus Torvalds */ 19211da177e4SLinus Torvalds page_cache_get(old_page); 1922920fc356SHugh Dickins gotten: 19238f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 19241da177e4SLinus Torvalds 19251da177e4SLinus Torvalds if (unlikely(anon_vma_prepare(vma))) 192665500d23SHugh Dickins goto oom; 1927557ed1faSNick Piggin VM_BUG_ON(old_page == ZERO_PAGE(0)); 1928769848c0SMel Gorman new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 19291da177e4SLinus Torvalds if (!new_page) 193065500d23SHugh Dickins goto oom; 1931b291f000SNick Piggin /* 1932b291f000SNick Piggin * Don't let another task, with possibly unlocked vma, 1933b291f000SNick Piggin * keep the mlocked page. 
1934b291f000SNick Piggin */
1935b291f000SNick Piggin if (vma->vm_flags & VM_LOCKED) {
1936b291f000SNick Piggin lock_page(old_page); /* for LRU manipulation */
1937b291f000SNick Piggin clear_page_mlock(old_page);
1938b291f000SNick Piggin unlock_page(old_page);
1939b291f000SNick Piggin }
19409de455b2SAtsushi Nemoto cow_user_page(new_page, old_page, address, vma);
19410ed361deSNick Piggin __SetPageUptodate(new_page);
194265500d23SHugh Dickins
1943e1a1cd59SBalbir Singh if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
19448a9f3ccdSBalbir Singh goto oom_free_new;
19458a9f3ccdSBalbir Singh
19461da177e4SLinus Torvalds /*
19471da177e4SLinus Torvalds * Re-check the pte - we dropped the lock
19481da177e4SLinus Torvalds */
19498f4e2101SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
195065500d23SHugh Dickins if (likely(pte_same(*page_table, orig_pte))) {
1951920fc356SHugh Dickins if (old_page) {
19524294621fSHugh Dickins if (!PageAnon(old_page)) {
19534294621fSHugh Dickins dec_mm_counter(mm, file_rss);
1954920fc356SHugh Dickins inc_mm_counter(mm, anon_rss);
19554294621fSHugh Dickins }
1956920fc356SHugh Dickins } else
1957920fc356SHugh Dickins inc_mm_counter(mm, anon_rss);
1958eca35133SBen Collins flush_cache_page(vma, address, pte_pfn(orig_pte));
195965500d23SHugh Dickins entry = mk_pte(new_page, vma->vm_page_prot);
196065500d23SHugh Dickins entry = maybe_mkwrite(pte_mkdirty(entry), vma);
19614ce072f1SSiddha, Suresh B /*
19624ce072f1SSiddha, Suresh B * Clear the pte entry and flush it first, before updating the
19634ce072f1SSiddha, Suresh B * pte with the new entry. This will avoid a race condition
19644ce072f1SSiddha, Suresh B * seen in the presence of one thread doing SMC and another
19654ce072f1SSiddha, Suresh B * thread doing COW.
19664ce072f1SSiddha, Suresh B */
1967cddb8a5cSAndrea Arcangeli ptep_clear_flush_notify(vma, address, page_table);
19689617d95eSNick Piggin page_add_new_anon_rmap(new_page, vma, address);
196964d6519dSLee Schermerhorn set_pte_at(mm, address, page_table, entry);
197064d6519dSLee Schermerhorn update_mmu_cache(vma, address, entry);
1971945754a1SNick Piggin if (old_page) {
1972945754a1SNick Piggin /*
1973945754a1SNick Piggin * Only after switching the pte to the new page may
1974945754a1SNick Piggin * we remove the mapcount here. Otherwise another
1975945754a1SNick Piggin * process may come and find the rmap count decremented
1976945754a1SNick Piggin * before the pte is switched to the new page, and
1977945754a1SNick Piggin * "reuse" the old page writing into it while our pte
1978945754a1SNick Piggin * here still points into it and can be read by other
1979945754a1SNick Piggin * threads.
1980945754a1SNick Piggin *
1981945754a1SNick Piggin * The critical issue is to order this
1982945754a1SNick Piggin * page_remove_rmap with the ptep_clear_flush above.
1983945754a1SNick Piggin * Those stores are ordered by (if nothing else,)
1984945754a1SNick Piggin * the barrier present in the atomic_add_negative
1985945754a1SNick Piggin * in page_remove_rmap.
1986945754a1SNick Piggin *
1987945754a1SNick Piggin * Then the TLB flush in ptep_clear_flush ensures that
1988945754a1SNick Piggin * no process can access the old page before the
1989945754a1SNick Piggin * decremented mapcount is visible. And the old page
1990945754a1SNick Piggin * cannot be reused until after the decremented
1991945754a1SNick Piggin * mapcount is visible. So transitively, TLBs to
1992945754a1SNick Piggin * old page will be flushed before it can be reused.
1993945754a1SNick Piggin */ 1994945754a1SNick Piggin page_remove_rmap(old_page, vma); 1995945754a1SNick Piggin } 1996945754a1SNick Piggin 19971da177e4SLinus Torvalds /* Free the old page.. */ 19981da177e4SLinus Torvalds new_page = old_page; 1999f33ea7f4SNick Piggin ret |= VM_FAULT_WRITE; 20008a9f3ccdSBalbir Singh } else 20018a9f3ccdSBalbir Singh mem_cgroup_uncharge_page(new_page); 20028a9f3ccdSBalbir Singh 2003920fc356SHugh Dickins if (new_page) 20041da177e4SLinus Torvalds page_cache_release(new_page); 2005920fc356SHugh Dickins if (old_page) 20061da177e4SLinus Torvalds page_cache_release(old_page); 200765500d23SHugh Dickins unlock: 20088f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 2009d08b3851SPeter Zijlstra if (dirty_page) { 20108f7b3d15SAnton Salikhmetov if (vma->vm_file) 20118f7b3d15SAnton Salikhmetov file_update_time(vma->vm_file); 20128f7b3d15SAnton Salikhmetov 201379352894SNick Piggin /* 201479352894SNick Piggin * Yes, Virginia, this is actually required to prevent a race 201579352894SNick Piggin * with clear_page_dirty_for_io() from clearing the page dirty 201679352894SNick Piggin * bit after it clear all dirty ptes, but before a racing 201779352894SNick Piggin * do_wp_page installs a dirty pte. 201879352894SNick Piggin * 201979352894SNick Piggin * do_no_page is protected similarly. 202079352894SNick Piggin */ 202179352894SNick Piggin wait_on_page_locked(dirty_page); 2022a200ee18SPeter Zijlstra set_page_dirty_balance(dirty_page, page_mkwrite); 2023d08b3851SPeter Zijlstra put_page(dirty_page); 2024d08b3851SPeter Zijlstra } 2025f33ea7f4SNick Piggin return ret; 20268a9f3ccdSBalbir Singh oom_free_new: 20276dbf6d3bSHugh Dickins page_cache_release(new_page); 202865500d23SHugh Dickins oom: 2029920fc356SHugh Dickins if (old_page) 20301da177e4SLinus Torvalds page_cache_release(old_page); 20311da177e4SLinus Torvalds return VM_FAULT_OOM; 20329637a5efSDavid Howells 20339637a5efSDavid Howells unwritable_page: 20349637a5efSDavid Howells page_cache_release(old_page); 20359637a5efSDavid Howells return VM_FAULT_SIGBUS; 20361da177e4SLinus Torvalds } 20371da177e4SLinus Torvalds 20381da177e4SLinus Torvalds /* 20391da177e4SLinus Torvalds * Helper functions for unmap_mapping_range(). 20401da177e4SLinus Torvalds * 20411da177e4SLinus Torvalds * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __ 20421da177e4SLinus Torvalds * 20431da177e4SLinus Torvalds * We have to restart searching the prio_tree whenever we drop the lock, 20441da177e4SLinus Torvalds * since the iterator is only valid while the lock is held, and anyway 20451da177e4SLinus Torvalds * a later vma might be split and reinserted earlier while lock dropped. 20461da177e4SLinus Torvalds * 20471da177e4SLinus Torvalds * The list of nonlinear vmas could be handled more efficiently, using 20481da177e4SLinus Torvalds * a placeholder, but handle it in the same way until a need is shown. 20491da177e4SLinus Torvalds * It is important to search the prio_tree before nonlinear list: a vma 20501da177e4SLinus Torvalds * may become nonlinear and be shifted from prio_tree to nonlinear list 20511da177e4SLinus Torvalds * while the lock is dropped; but never shifted from list to prio_tree. 20521da177e4SLinus Torvalds * 20531da177e4SLinus Torvalds * In order to make forward progress despite restarting the search, 20541da177e4SLinus Torvalds * vm_truncate_count is used to mark a vma as now dealt with, so we can 20551da177e4SLinus Torvalds * quickly skip it next time around. 
Since the prio_tree search only 20561da177e4SLinus Torvalds * shows us those vmas affected by unmapping the range in question, we 20571da177e4SLinus Torvalds * can't efficiently keep all vmas in step with mapping->truncate_count: 20581da177e4SLinus Torvalds * so instead reset them all whenever it wraps back to 0 (then go to 1). 20591da177e4SLinus Torvalds * mapping->truncate_count and vma->vm_truncate_count are protected by 20601da177e4SLinus Torvalds * i_mmap_lock. 20611da177e4SLinus Torvalds * 20621da177e4SLinus Torvalds * In order to make forward progress despite repeatedly restarting some 2063ee39b37bSHugh Dickins * large vma, note the restart_addr from unmap_vmas when it breaks out: 20641da177e4SLinus Torvalds * and restart from that address when we reach that vma again. It might 20651da177e4SLinus Torvalds * have been split or merged, shrunk or extended, but never shifted: so 20661da177e4SLinus Torvalds * restart_addr remains valid so long as it remains in the vma's range. 20671da177e4SLinus Torvalds * unmap_mapping_range forces truncate_count to leap over page-aligned 20681da177e4SLinus Torvalds * values so we can save vma's restart_addr in its truncate_count field. 20691da177e4SLinus Torvalds */ 20701da177e4SLinus Torvalds #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK)) 20711da177e4SLinus Torvalds 20721da177e4SLinus Torvalds static void reset_vma_truncate_counts(struct address_space *mapping) 20731da177e4SLinus Torvalds { 20741da177e4SLinus Torvalds struct vm_area_struct *vma; 20751da177e4SLinus Torvalds struct prio_tree_iter iter; 20761da177e4SLinus Torvalds 20771da177e4SLinus Torvalds vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX) 20781da177e4SLinus Torvalds vma->vm_truncate_count = 0; 20791da177e4SLinus Torvalds list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 20801da177e4SLinus Torvalds vma->vm_truncate_count = 0; 20811da177e4SLinus Torvalds } 20821da177e4SLinus Torvalds 20831da177e4SLinus Torvalds static int unmap_mapping_range_vma(struct vm_area_struct *vma, 20841da177e4SLinus Torvalds unsigned long start_addr, unsigned long end_addr, 20851da177e4SLinus Torvalds struct zap_details *details) 20861da177e4SLinus Torvalds { 20871da177e4SLinus Torvalds unsigned long restart_addr; 20881da177e4SLinus Torvalds int need_break; 20891da177e4SLinus Torvalds 2090d00806b1SNick Piggin /* 2091d00806b1SNick Piggin * files that support invalidating or truncating portions of the 2092d0217ac0SNick Piggin * file from under mmaped areas must have their ->fault function 209383c54070SNick Piggin * return a locked page (and set VM_FAULT_LOCKED in the return). 209483c54070SNick Piggin * This provides synchronisation against concurrent unmapping here. 
2095d00806b1SNick Piggin */ 2096d00806b1SNick Piggin 20971da177e4SLinus Torvalds again: 20981da177e4SLinus Torvalds restart_addr = vma->vm_truncate_count; 20991da177e4SLinus Torvalds if (is_restart_addr(restart_addr) && start_addr < restart_addr) { 21001da177e4SLinus Torvalds start_addr = restart_addr; 21011da177e4SLinus Torvalds if (start_addr >= end_addr) { 21021da177e4SLinus Torvalds /* Top of vma has been split off since last time */ 21031da177e4SLinus Torvalds vma->vm_truncate_count = details->truncate_count; 21041da177e4SLinus Torvalds return 0; 21051da177e4SLinus Torvalds } 21061da177e4SLinus Torvalds } 21071da177e4SLinus Torvalds 2108ee39b37bSHugh Dickins restart_addr = zap_page_range(vma, start_addr, 2109ee39b37bSHugh Dickins end_addr - start_addr, details); 211095c354feSNick Piggin need_break = need_resched() || spin_needbreak(details->i_mmap_lock); 21111da177e4SLinus Torvalds 2112ee39b37bSHugh Dickins if (restart_addr >= end_addr) { 21131da177e4SLinus Torvalds /* We have now completed this vma: mark it so */ 21141da177e4SLinus Torvalds vma->vm_truncate_count = details->truncate_count; 21151da177e4SLinus Torvalds if (!need_break) 21161da177e4SLinus Torvalds return 0; 21171da177e4SLinus Torvalds } else { 21181da177e4SLinus Torvalds /* Note restart_addr in vma's truncate_count field */ 2119ee39b37bSHugh Dickins vma->vm_truncate_count = restart_addr; 21201da177e4SLinus Torvalds if (!need_break) 21211da177e4SLinus Torvalds goto again; 21221da177e4SLinus Torvalds } 21231da177e4SLinus Torvalds 21241da177e4SLinus Torvalds spin_unlock(details->i_mmap_lock); 21251da177e4SLinus Torvalds cond_resched(); 21261da177e4SLinus Torvalds spin_lock(details->i_mmap_lock); 21271da177e4SLinus Torvalds return -EINTR; 21281da177e4SLinus Torvalds } 21291da177e4SLinus Torvalds 21301da177e4SLinus Torvalds static inline void unmap_mapping_range_tree(struct prio_tree_root *root, 21311da177e4SLinus Torvalds struct zap_details *details) 21321da177e4SLinus Torvalds { 21331da177e4SLinus Torvalds struct vm_area_struct *vma; 21341da177e4SLinus Torvalds struct prio_tree_iter iter; 21351da177e4SLinus Torvalds pgoff_t vba, vea, zba, zea; 21361da177e4SLinus Torvalds 21371da177e4SLinus Torvalds restart: 21381da177e4SLinus Torvalds vma_prio_tree_foreach(vma, &iter, root, 21391da177e4SLinus Torvalds details->first_index, details->last_index) { 21401da177e4SLinus Torvalds /* Skip quickly over those we have already dealt with */ 21411da177e4SLinus Torvalds if (vma->vm_truncate_count == details->truncate_count) 21421da177e4SLinus Torvalds continue; 21431da177e4SLinus Torvalds 21441da177e4SLinus Torvalds vba = vma->vm_pgoff; 21451da177e4SLinus Torvalds vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; 21461da177e4SLinus Torvalds /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ 21471da177e4SLinus Torvalds zba = details->first_index; 21481da177e4SLinus Torvalds if (zba < vba) 21491da177e4SLinus Torvalds zba = vba; 21501da177e4SLinus Torvalds zea = details->last_index; 21511da177e4SLinus Torvalds if (zea > vea) 21521da177e4SLinus Torvalds zea = vea; 21531da177e4SLinus Torvalds 21541da177e4SLinus Torvalds if (unmap_mapping_range_vma(vma, 21551da177e4SLinus Torvalds ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 21561da177e4SLinus Torvalds ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 21571da177e4SLinus Torvalds details) < 0) 21581da177e4SLinus Torvalds goto restart; 21591da177e4SLinus Torvalds } 21601da177e4SLinus Torvalds } 21611da177e4SLinus Torvalds 21621da177e4SLinus Torvalds static inline void 
unmap_mapping_range_list(struct list_head *head, 21631da177e4SLinus Torvalds struct zap_details *details) 21641da177e4SLinus Torvalds { 21651da177e4SLinus Torvalds struct vm_area_struct *vma; 21661da177e4SLinus Torvalds 21671da177e4SLinus Torvalds /* 21681da177e4SLinus Torvalds * In nonlinear VMAs there is no correspondence between virtual address 21691da177e4SLinus Torvalds * offset and file offset. So we must perform an exhaustive search 21701da177e4SLinus Torvalds * across *all* the pages in each nonlinear VMA, not just the pages 21711da177e4SLinus Torvalds * whose virtual address lies outside the file truncation point. 21721da177e4SLinus Torvalds */ 21731da177e4SLinus Torvalds restart: 21741da177e4SLinus Torvalds list_for_each_entry(vma, head, shared.vm_set.list) { 21751da177e4SLinus Torvalds /* Skip quickly over those we have already dealt with */ 21761da177e4SLinus Torvalds if (vma->vm_truncate_count == details->truncate_count) 21771da177e4SLinus Torvalds continue; 21781da177e4SLinus Torvalds details->nonlinear_vma = vma; 21791da177e4SLinus Torvalds if (unmap_mapping_range_vma(vma, vma->vm_start, 21801da177e4SLinus Torvalds vma->vm_end, details) < 0) 21811da177e4SLinus Torvalds goto restart; 21821da177e4SLinus Torvalds } 21831da177e4SLinus Torvalds } 21841da177e4SLinus Torvalds 21851da177e4SLinus Torvalds /** 218672fd4a35SRobert P. J. Day * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file. 21873d41088fSMartin Waitz * @mapping: the address space containing mmaps to be unmapped. 21881da177e4SLinus Torvalds * @holebegin: byte in first page to unmap, relative to the start of 21891da177e4SLinus Torvalds * the underlying file. This will be rounded down to a PAGE_SIZE 21901da177e4SLinus Torvalds * boundary. Note that this is different from vmtruncate(), which 21911da177e4SLinus Torvalds * must keep the partial page. In contrast, we must get rid of 21921da177e4SLinus Torvalds * partial pages. 21931da177e4SLinus Torvalds * @holelen: size of prospective hole in bytes. This will be rounded 21941da177e4SLinus Torvalds * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 21951da177e4SLinus Torvalds * end of the file. 21961da177e4SLinus Torvalds * @even_cows: 1 when truncating a file, unmap even private COWed pages; 21971da177e4SLinus Torvalds * but 0 when invalidating pagecache, don't throw away private data. 21981da177e4SLinus Torvalds */ 21991da177e4SLinus Torvalds void unmap_mapping_range(struct address_space *mapping, 22001da177e4SLinus Torvalds loff_t const holebegin, loff_t const holelen, int even_cows) 22011da177e4SLinus Torvalds { 22021da177e4SLinus Torvalds struct zap_details details; 22031da177e4SLinus Torvalds pgoff_t hba = holebegin >> PAGE_SHIFT; 22041da177e4SLinus Torvalds pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 22051da177e4SLinus Torvalds 22061da177e4SLinus Torvalds /* Check for overflow. */ 22071da177e4SLinus Torvalds if (sizeof(holelen) > sizeof(hlen)) { 22081da177e4SLinus Torvalds long long holeend = 22091da177e4SLinus Torvalds (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 22101da177e4SLinus Torvalds if (holeend & ~(long long)ULONG_MAX) 22111da177e4SLinus Torvalds hlen = ULONG_MAX - hba + 1; 22121da177e4SLinus Torvalds } 22131da177e4SLinus Torvalds 22141da177e4SLinus Torvalds details.check_mapping = even_cows? 
NULL: mapping; 22151da177e4SLinus Torvalds details.nonlinear_vma = NULL; 22161da177e4SLinus Torvalds details.first_index = hba; 22171da177e4SLinus Torvalds details.last_index = hba + hlen - 1; 22181da177e4SLinus Torvalds if (details.last_index < details.first_index) 22191da177e4SLinus Torvalds details.last_index = ULONG_MAX; 22201da177e4SLinus Torvalds details.i_mmap_lock = &mapping->i_mmap_lock; 22211da177e4SLinus Torvalds 22221da177e4SLinus Torvalds spin_lock(&mapping->i_mmap_lock); 22231da177e4SLinus Torvalds 2224d00806b1SNick Piggin /* Protect against endless unmapping loops */ 22251da177e4SLinus Torvalds mapping->truncate_count++; 22261da177e4SLinus Torvalds if (unlikely(is_restart_addr(mapping->truncate_count))) { 22271da177e4SLinus Torvalds if (mapping->truncate_count == 0) 22281da177e4SLinus Torvalds reset_vma_truncate_counts(mapping); 22291da177e4SLinus Torvalds mapping->truncate_count++; 22301da177e4SLinus Torvalds } 22311da177e4SLinus Torvalds details.truncate_count = mapping->truncate_count; 22321da177e4SLinus Torvalds 22331da177e4SLinus Torvalds if (unlikely(!prio_tree_empty(&mapping->i_mmap))) 22341da177e4SLinus Torvalds unmap_mapping_range_tree(&mapping->i_mmap, &details); 22351da177e4SLinus Torvalds if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 22361da177e4SLinus Torvalds unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 22371da177e4SLinus Torvalds spin_unlock(&mapping->i_mmap_lock); 22381da177e4SLinus Torvalds } 22391da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_mapping_range); 22401da177e4SLinus Torvalds 2241bfa5bf6dSRolf Eike Beer /** 2242bfa5bf6dSRolf Eike Beer * vmtruncate - unmap mappings "freed" by truncate() syscall 2243bfa5bf6dSRolf Eike Beer * @inode: inode of the file used 2244bfa5bf6dSRolf Eike Beer * @offset: file offset to start truncating 22451da177e4SLinus Torvalds * 22461da177e4SLinus Torvalds * NOTE! We have to be ready to update the memory sharing 22471da177e4SLinus Torvalds * between the file and the memory map for a potential last 22481da177e4SLinus Torvalds * incomplete page. Ugly, but necessary. 22491da177e4SLinus Torvalds */ 22501da177e4SLinus Torvalds int vmtruncate(struct inode * inode, loff_t offset) 22511da177e4SLinus Torvalds { 225261d5048fSChristoph Hellwig if (inode->i_size < offset) { 22531da177e4SLinus Torvalds unsigned long limit; 22541da177e4SLinus Torvalds 22551da177e4SLinus Torvalds limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 22561da177e4SLinus Torvalds if (limit != RLIM_INFINITY && offset > limit) 22571da177e4SLinus Torvalds goto out_sig; 22581da177e4SLinus Torvalds if (offset > inode->i_sb->s_maxbytes) 22591da177e4SLinus Torvalds goto out_big; 22601da177e4SLinus Torvalds i_size_write(inode, offset); 226161d5048fSChristoph Hellwig } else { 226261d5048fSChristoph Hellwig struct address_space *mapping = inode->i_mapping; 22631da177e4SLinus Torvalds 226461d5048fSChristoph Hellwig /* 226561d5048fSChristoph Hellwig * truncation of in-use swapfiles is disallowed - it would 226661d5048fSChristoph Hellwig * cause subsequent swapout to scribble on the now-freed 226761d5048fSChristoph Hellwig * blocks. 
226861d5048fSChristoph Hellwig */ 226961d5048fSChristoph Hellwig if (IS_SWAPFILE(inode)) 227061d5048fSChristoph Hellwig return -ETXTBSY; 227161d5048fSChristoph Hellwig i_size_write(inode, offset); 227261d5048fSChristoph Hellwig 227361d5048fSChristoph Hellwig /* 227461d5048fSChristoph Hellwig * unmap_mapping_range is called twice, first simply for 227561d5048fSChristoph Hellwig * efficiency so that truncate_inode_pages does fewer 227661d5048fSChristoph Hellwig * single-page unmaps. However after this first call, and 227761d5048fSChristoph Hellwig * before truncate_inode_pages finishes, it is possible for 227861d5048fSChristoph Hellwig * private pages to be COWed, which remain after 227961d5048fSChristoph Hellwig * truncate_inode_pages finishes, hence the second 228061d5048fSChristoph Hellwig * unmap_mapping_range call must be made for correctness. 228161d5048fSChristoph Hellwig */ 228261d5048fSChristoph Hellwig unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); 228361d5048fSChristoph Hellwig truncate_inode_pages(mapping, offset); 228461d5048fSChristoph Hellwig unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); 228561d5048fSChristoph Hellwig } 228661d5048fSChristoph Hellwig 2287acfa4380SAl Viro if (inode->i_op->truncate) 22881da177e4SLinus Torvalds inode->i_op->truncate(inode); 22891da177e4SLinus Torvalds return 0; 229061d5048fSChristoph Hellwig 22911da177e4SLinus Torvalds out_sig: 22921da177e4SLinus Torvalds send_sig(SIGXFSZ, current, 0); 22931da177e4SLinus Torvalds out_big: 22941da177e4SLinus Torvalds return -EFBIG; 22951da177e4SLinus Torvalds } 22961da177e4SLinus Torvalds EXPORT_SYMBOL(vmtruncate); 22971da177e4SLinus Torvalds 2298f6b3ec23SBadari Pulavarty int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) 2299f6b3ec23SBadari Pulavarty { 2300f6b3ec23SBadari Pulavarty struct address_space *mapping = inode->i_mapping; 2301f6b3ec23SBadari Pulavarty 2302f6b3ec23SBadari Pulavarty /* 2303f6b3ec23SBadari Pulavarty * If the underlying filesystem is not going to provide 2304f6b3ec23SBadari Pulavarty * a way to truncate a range of blocks (punch a hole) - 2305f6b3ec23SBadari Pulavarty * we should return failure right now. 2306f6b3ec23SBadari Pulavarty */ 2307acfa4380SAl Viro if (!inode->i_op->truncate_range) 2308f6b3ec23SBadari Pulavarty return -ENOSYS; 2309f6b3ec23SBadari Pulavarty 23101b1dcc1bSJes Sorensen mutex_lock(&inode->i_mutex); 2311f6b3ec23SBadari Pulavarty down_write(&inode->i_alloc_sem); 2312f6b3ec23SBadari Pulavarty unmap_mapping_range(mapping, offset, (end - offset), 1); 2313f6b3ec23SBadari Pulavarty truncate_inode_pages_range(mapping, offset, end); 2314d00806b1SNick Piggin unmap_mapping_range(mapping, offset, (end - offset), 1); 2315f6b3ec23SBadari Pulavarty inode->i_op->truncate_range(inode, offset, end); 2316f6b3ec23SBadari Pulavarty up_write(&inode->i_alloc_sem); 23171b1dcc1bSJes Sorensen mutex_unlock(&inode->i_mutex); 2318f6b3ec23SBadari Pulavarty 2319f6b3ec23SBadari Pulavarty return 0; 2320f6b3ec23SBadari Pulavarty } 2321f6b3ec23SBadari Pulavarty 23221da177e4SLinus Torvalds /* 23238f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 23248f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 23258f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
23261da177e4SLinus Torvalds */ 232765500d23SHugh Dickins static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, 232865500d23SHugh Dickins unsigned long address, pte_t *page_table, pmd_t *pmd, 232965500d23SHugh Dickins int write_access, pte_t orig_pte) 23301da177e4SLinus Torvalds { 23318f4e2101SHugh Dickins spinlock_t *ptl; 23321da177e4SLinus Torvalds struct page *page; 233365500d23SHugh Dickins swp_entry_t entry; 23341da177e4SLinus Torvalds pte_t pte; 233583c54070SNick Piggin int ret = 0; 23361da177e4SLinus Torvalds 23374c21e2f2SHugh Dickins if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 23388f4e2101SHugh Dickins goto out; 233965500d23SHugh Dickins 234065500d23SHugh Dickins entry = pte_to_swp_entry(orig_pte); 23410697212aSChristoph Lameter if (is_migration_entry(entry)) { 23420697212aSChristoph Lameter migration_entry_wait(mm, pmd, address); 23430697212aSChristoph Lameter goto out; 23440697212aSChristoph Lameter } 23450ff92245SShailabh Nagar delayacct_set_flag(DELAYACCT_PF_SWAPIN); 23461da177e4SLinus Torvalds page = lookup_swap_cache(entry); 23471da177e4SLinus Torvalds if (!page) { 2348098fe651SAshwin Chaugule grab_swap_token(); /* Contend for token _before_ read-in */ 234902098feaSHugh Dickins page = swapin_readahead(entry, 235002098feaSHugh Dickins GFP_HIGHUSER_MOVABLE, vma, address); 23511da177e4SLinus Torvalds if (!page) { 23521da177e4SLinus Torvalds /* 23538f4e2101SHugh Dickins * Back out if somebody else faulted in this pte 23548f4e2101SHugh Dickins * while we released the pte lock. 23551da177e4SLinus Torvalds */ 23568f4e2101SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 23571da177e4SLinus Torvalds if (likely(pte_same(*page_table, orig_pte))) 23581da177e4SLinus Torvalds ret = VM_FAULT_OOM; 23590ff92245SShailabh Nagar delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 236065500d23SHugh Dickins goto unlock; 23611da177e4SLinus Torvalds } 23621da177e4SLinus Torvalds 23631da177e4SLinus Torvalds /* Had to read the page from swap area: Major fault */ 23641da177e4SLinus Torvalds ret = VM_FAULT_MAJOR; 2365f8891e5eSChristoph Lameter count_vm_event(PGMAJFAULT); 23661da177e4SLinus Torvalds } 23671da177e4SLinus Torvalds 23681da177e4SLinus Torvalds mark_page_accessed(page); 2369073e587eSKAMEZAWA Hiroyuki 23701da177e4SLinus Torvalds lock_page(page); 237120a1022dSBalbir Singh delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 23721da177e4SLinus Torvalds 2373073e587eSKAMEZAWA Hiroyuki if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2374073e587eSKAMEZAWA Hiroyuki ret = VM_FAULT_OOM; 2375073e587eSKAMEZAWA Hiroyuki unlock_page(page); 2376073e587eSKAMEZAWA Hiroyuki goto out; 2377073e587eSKAMEZAWA Hiroyuki } 2378073e587eSKAMEZAWA Hiroyuki 23791da177e4SLinus Torvalds /* 23808f4e2101SHugh Dickins * Back out if somebody else already faulted in this pte. 23811da177e4SLinus Torvalds */ 23828f4e2101SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 23839e9bef07SHugh Dickins if (unlikely(!pte_same(*page_table, orig_pte))) 2384b8107480SKirill Korotaev goto out_nomap; 2385b8107480SKirill Korotaev 2386b8107480SKirill Korotaev if (unlikely(!PageUptodate(page))) { 2387b8107480SKirill Korotaev ret = VM_FAULT_SIGBUS; 2388b8107480SKirill Korotaev goto out_nomap; 23891da177e4SLinus Torvalds } 23901da177e4SLinus Torvalds 23911da177e4SLinus Torvalds /* The page isn't present yet, go ahead with the fault. 
*/ 23921da177e4SLinus Torvalds 23934294621fSHugh Dickins inc_mm_counter(mm, anon_rss); 23941da177e4SLinus Torvalds pte = mk_pte(page, vma->vm_page_prot); 23957b1fe597SHugh Dickins if (write_access && reuse_swap_page(page)) { 23961da177e4SLinus Torvalds pte = maybe_mkwrite(pte_mkdirty(pte), vma); 23971da177e4SLinus Torvalds write_access = 0; 23981da177e4SLinus Torvalds } 23991da177e4SLinus Torvalds 24001da177e4SLinus Torvalds flush_icache_page(vma, page); 24011da177e4SLinus Torvalds set_pte_at(mm, address, page_table, pte); 24021da177e4SLinus Torvalds page_add_anon_rmap(page, vma, address); 24031da177e4SLinus Torvalds 2404c475a8abSHugh Dickins swap_free(entry); 2405b291f000SNick Piggin if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) 2406a2c43eedSHugh Dickins try_to_free_swap(page); 2407c475a8abSHugh Dickins unlock_page(page); 2408c475a8abSHugh Dickins 24091da177e4SLinus Torvalds if (write_access) { 241061469f1dSHugh Dickins ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); 241161469f1dSHugh Dickins if (ret & VM_FAULT_ERROR) 241261469f1dSHugh Dickins ret &= VM_FAULT_ERROR; 24131da177e4SLinus Torvalds goto out; 24141da177e4SLinus Torvalds } 24151da177e4SLinus Torvalds 24161da177e4SLinus Torvalds /* No need to invalidate - it was non-present before */ 24171da177e4SLinus Torvalds update_mmu_cache(vma, address, pte); 241865500d23SHugh Dickins unlock: 24198f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 24201da177e4SLinus Torvalds out: 24211da177e4SLinus Torvalds return ret; 2422b8107480SKirill Korotaev out_nomap: 24238a9f3ccdSBalbir Singh mem_cgroup_uncharge_page(page); 24248f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 2425b8107480SKirill Korotaev unlock_page(page); 2426b8107480SKirill Korotaev page_cache_release(page); 242765500d23SHugh Dickins return ret; 24281da177e4SLinus Torvalds } 24291da177e4SLinus Torvalds 24301da177e4SLinus Torvalds /* 24318f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 24328f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 24338f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 24341da177e4SLinus Torvalds */ 243565500d23SHugh Dickins static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 243665500d23SHugh Dickins unsigned long address, pte_t *page_table, pmd_t *pmd, 243765500d23SHugh Dickins int write_access) 24381da177e4SLinus Torvalds { 24398f4e2101SHugh Dickins struct page *page; 24408f4e2101SHugh Dickins spinlock_t *ptl; 24411da177e4SLinus Torvalds pte_t entry; 24421da177e4SLinus Torvalds 24431da177e4SLinus Torvalds /* Allocate our own private page. 
*/ 24441da177e4SLinus Torvalds pte_unmap(page_table); 24451da177e4SLinus Torvalds 24461da177e4SLinus Torvalds if (unlikely(anon_vma_prepare(vma))) 244765500d23SHugh Dickins goto oom; 2448769848c0SMel Gorman page = alloc_zeroed_user_highpage_movable(vma, address); 24491da177e4SLinus Torvalds if (!page) 245065500d23SHugh Dickins goto oom; 24510ed361deSNick Piggin __SetPageUptodate(page); 24521da177e4SLinus Torvalds 2453e1a1cd59SBalbir Singh if (mem_cgroup_charge(page, mm, GFP_KERNEL)) 24548a9f3ccdSBalbir Singh goto oom_free_page; 24558a9f3ccdSBalbir Singh 245665500d23SHugh Dickins entry = mk_pte(page, vma->vm_page_prot); 245765500d23SHugh Dickins entry = maybe_mkwrite(pte_mkdirty(entry), vma); 24588f4e2101SHugh Dickins 24598f4e2101SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 24608f4e2101SHugh Dickins if (!pte_none(*page_table)) 24618f4e2101SHugh Dickins goto release; 24628f4e2101SHugh Dickins inc_mm_counter(mm, anon_rss); 24639617d95eSNick Piggin page_add_new_anon_rmap(page, vma, address); 246465500d23SHugh Dickins set_pte_at(mm, address, page_table, entry); 24651da177e4SLinus Torvalds 24661da177e4SLinus Torvalds /* No need to invalidate - it was non-present before */ 246765500d23SHugh Dickins update_mmu_cache(vma, address, entry); 246865500d23SHugh Dickins unlock: 24698f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 247083c54070SNick Piggin return 0; 24718f4e2101SHugh Dickins release: 24728a9f3ccdSBalbir Singh mem_cgroup_uncharge_page(page); 24738f4e2101SHugh Dickins page_cache_release(page); 24748f4e2101SHugh Dickins goto unlock; 24758a9f3ccdSBalbir Singh oom_free_page: 24766dbf6d3bSHugh Dickins page_cache_release(page); 247765500d23SHugh Dickins oom: 24781da177e4SLinus Torvalds return VM_FAULT_OOM; 24791da177e4SLinus Torvalds } 24801da177e4SLinus Torvalds 24811da177e4SLinus Torvalds /* 248254cb8821SNick Piggin * __do_fault() tries to create a new page mapping. It aggressively 24831da177e4SLinus Torvalds * tries to share with existing pages, but makes a separate copy if 248454cb8821SNick Piggin * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid 248554cb8821SNick Piggin * the next page fault. 24861da177e4SLinus Torvalds * 24871da177e4SLinus Torvalds * As this is called only for pages that do not currently exist, we 24881da177e4SLinus Torvalds * do not need to flush old virtual caches or the TLB. 24891da177e4SLinus Torvalds * 24908f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 249116abfa08SHugh Dickins * but allow concurrent faults), and pte neither mapped nor locked. 24928f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
24931da177e4SLinus Torvalds */ 249454cb8821SNick Piggin static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, 249516abfa08SHugh Dickins unsigned long address, pmd_t *pmd, 249654cb8821SNick Piggin pgoff_t pgoff, unsigned int flags, pte_t orig_pte) 24971da177e4SLinus Torvalds { 249816abfa08SHugh Dickins pte_t *page_table; 24998f4e2101SHugh Dickins spinlock_t *ptl; 2500d0217ac0SNick Piggin struct page *page; 25011da177e4SLinus Torvalds pte_t entry; 25021da177e4SLinus Torvalds int anon = 0; 25035b4e655eSKAMEZAWA Hiroyuki int charged = 0; 2504d08b3851SPeter Zijlstra struct page *dirty_page = NULL; 2505d0217ac0SNick Piggin struct vm_fault vmf; 2506d0217ac0SNick Piggin int ret; 2507a200ee18SPeter Zijlstra int page_mkwrite = 0; 250854cb8821SNick Piggin 2509d0217ac0SNick Piggin vmf.virtual_address = (void __user *)(address & PAGE_MASK); 2510d0217ac0SNick Piggin vmf.pgoff = pgoff; 2511d0217ac0SNick Piggin vmf.flags = flags; 2512d0217ac0SNick Piggin vmf.page = NULL; 25131da177e4SLinus Torvalds 2514d0217ac0SNick Piggin ret = vma->vm_ops->fault(vma, &vmf); 251583c54070SNick Piggin if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 251683c54070SNick Piggin return ret; 25171da177e4SLinus Torvalds 2518d00806b1SNick Piggin /* 2519d0217ac0SNick Piggin * For consistency in subsequent calls, make the faulted page always 2520d00806b1SNick Piggin * locked. 2521d00806b1SNick Piggin */ 252283c54070SNick Piggin if (unlikely(!(ret & VM_FAULT_LOCKED))) 2523d0217ac0SNick Piggin lock_page(vmf.page); 252454cb8821SNick Piggin else 2525d0217ac0SNick Piggin VM_BUG_ON(!PageLocked(vmf.page)); 2526d00806b1SNick Piggin 25271da177e4SLinus Torvalds /* 25281da177e4SLinus Torvalds * Should we do an early C-O-W break? 25291da177e4SLinus Torvalds */ 2530d0217ac0SNick Piggin page = vmf.page; 253154cb8821SNick Piggin if (flags & FAULT_FLAG_WRITE) { 25329637a5efSDavid Howells if (!(vma->vm_flags & VM_SHARED)) { 253354cb8821SNick Piggin anon = 1; 2534d00806b1SNick Piggin if (unlikely(anon_vma_prepare(vma))) { 2535d0217ac0SNick Piggin ret = VM_FAULT_OOM; 253654cb8821SNick Piggin goto out; 2537d00806b1SNick Piggin } 253883c54070SNick Piggin page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, 253983c54070SNick Piggin vma, address); 2540d00806b1SNick Piggin if (!page) { 2541d0217ac0SNick Piggin ret = VM_FAULT_OOM; 254254cb8821SNick Piggin goto out; 2543d00806b1SNick Piggin } 25445b4e655eSKAMEZAWA Hiroyuki if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 25455b4e655eSKAMEZAWA Hiroyuki ret = VM_FAULT_OOM; 25465b4e655eSKAMEZAWA Hiroyuki page_cache_release(page); 25475b4e655eSKAMEZAWA Hiroyuki goto out; 25485b4e655eSKAMEZAWA Hiroyuki } 25495b4e655eSKAMEZAWA Hiroyuki charged = 1; 2550b291f000SNick Piggin /* 2551b291f000SNick Piggin * Don't let another task, with possibly unlocked vma, 2552b291f000SNick Piggin * keep the mlocked page. 
2553b291f000SNick Piggin */ 2554b291f000SNick Piggin if (vma->vm_flags & VM_LOCKED) 2555b291f000SNick Piggin clear_page_mlock(vmf.page); 2556d0217ac0SNick Piggin copy_user_highpage(page, vmf.page, address, vma); 25570ed361deSNick Piggin __SetPageUptodate(page); 25589637a5efSDavid Howells } else { 255954cb8821SNick Piggin /* 256054cb8821SNick Piggin * If the page will be shareable, see if the backing 25619637a5efSDavid Howells * address space wants to know that the page is about 256254cb8821SNick Piggin * to become writable 256354cb8821SNick Piggin */ 256469676147SMark Fasheh if (vma->vm_ops->page_mkwrite) { 256569676147SMark Fasheh unlock_page(page); 256669676147SMark Fasheh if (vma->vm_ops->page_mkwrite(vma, page) < 0) { 2567d0217ac0SNick Piggin ret = VM_FAULT_SIGBUS; 2568d0217ac0SNick Piggin anon = 1; /* no anon but release vmf.page */ 256969676147SMark Fasheh goto out_unlocked; 257069676147SMark Fasheh } 257169676147SMark Fasheh lock_page(page); 2572d0217ac0SNick Piggin /* 2573d0217ac0SNick Piggin * XXX: this is not quite right (racy vs 2574d0217ac0SNick Piggin * invalidate) to unlock and relock the page 2575d0217ac0SNick Piggin * like this, however a better fix requires 2576d0217ac0SNick Piggin * reworking page_mkwrite locking API, which 2577d0217ac0SNick Piggin * is better done later. 2578d0217ac0SNick Piggin */ 2579d0217ac0SNick Piggin if (!page->mapping) { 258083c54070SNick Piggin ret = 0; 2581d0217ac0SNick Piggin anon = 1; /* no anon but release vmf.page */ 2582d0217ac0SNick Piggin goto out; 2583d0217ac0SNick Piggin } 2584a200ee18SPeter Zijlstra page_mkwrite = 1; 25859637a5efSDavid Howells } 25869637a5efSDavid Howells } 258754cb8821SNick Piggin 25881da177e4SLinus Torvalds } 25891da177e4SLinus Torvalds 25908f4e2101SHugh Dickins page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 25911da177e4SLinus Torvalds 25921da177e4SLinus Torvalds /* 25931da177e4SLinus Torvalds * This silly early PAGE_DIRTY setting removes a race 25941da177e4SLinus Torvalds * due to the bad i386 page protection. But it's valid 25951da177e4SLinus Torvalds * for other architectures too. 25961da177e4SLinus Torvalds * 25971da177e4SLinus Torvalds * Note that if write_access is true, we either now have 25981da177e4SLinus Torvalds * an exclusive copy of the page, or this is a shared mapping, 25991da177e4SLinus Torvalds * so we can make it writable and dirty to avoid having to 26001da177e4SLinus Torvalds * handle that later. 26011da177e4SLinus Torvalds */ 26021da177e4SLinus Torvalds /* Only go through if we didn't race with anybody else... 
*/ 260354cb8821SNick Piggin if (likely(pte_same(*page_table, orig_pte))) { 2604d00806b1SNick Piggin flush_icache_page(vma, page); 2605d00806b1SNick Piggin entry = mk_pte(page, vma->vm_page_prot); 260654cb8821SNick Piggin if (flags & FAULT_FLAG_WRITE) 26071da177e4SLinus Torvalds entry = maybe_mkwrite(pte_mkdirty(entry), vma); 26081da177e4SLinus Torvalds if (anon) { 26094294621fSHugh Dickins inc_mm_counter(mm, anon_rss); 2610d00806b1SNick Piggin page_add_new_anon_rmap(page, vma, address); 2611f57e88a8SHugh Dickins } else { 26124294621fSHugh Dickins inc_mm_counter(mm, file_rss); 2613d00806b1SNick Piggin page_add_file_rmap(page); 261454cb8821SNick Piggin if (flags & FAULT_FLAG_WRITE) { 2615d00806b1SNick Piggin dirty_page = page; 2616d08b3851SPeter Zijlstra get_page(dirty_page); 2617d08b3851SPeter Zijlstra } 26184294621fSHugh Dickins } 261964d6519dSLee Schermerhorn set_pte_at(mm, address, page_table, entry); 26201da177e4SLinus Torvalds 2621d00806b1SNick Piggin /* no need to invalidate: a not-present page won't be cached */ 26221da177e4SLinus Torvalds update_mmu_cache(vma, address, entry); 2623d00806b1SNick Piggin } else { 26245b4e655eSKAMEZAWA Hiroyuki if (charged) 26258a9f3ccdSBalbir Singh mem_cgroup_uncharge_page(page); 2626d00806b1SNick Piggin if (anon) 2627d00806b1SNick Piggin page_cache_release(page); 2628d00806b1SNick Piggin else 262954cb8821SNick Piggin anon = 1; /* no anon but release faulted_page */ 2630d00806b1SNick Piggin } 2631d00806b1SNick Piggin 26328f4e2101SHugh Dickins pte_unmap_unlock(page_table, ptl); 2633d00806b1SNick Piggin 2634d00806b1SNick Piggin out: 2635d0217ac0SNick Piggin unlock_page(vmf.page); 263669676147SMark Fasheh out_unlocked: 2637d00806b1SNick Piggin if (anon) 2638d0217ac0SNick Piggin page_cache_release(vmf.page); 2639d00806b1SNick Piggin else if (dirty_page) { 26408f7b3d15SAnton Salikhmetov if (vma->vm_file) 26418f7b3d15SAnton Salikhmetov file_update_time(vma->vm_file); 26428f7b3d15SAnton Salikhmetov 2643a200ee18SPeter Zijlstra set_page_dirty_balance(dirty_page, page_mkwrite); 2644d08b3851SPeter Zijlstra put_page(dirty_page); 2645d08b3851SPeter Zijlstra } 2646d00806b1SNick Piggin 264783c54070SNick Piggin return ret; 264854cb8821SNick Piggin } 2649d00806b1SNick Piggin 265054cb8821SNick Piggin static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 265154cb8821SNick Piggin unsigned long address, pte_t *page_table, pmd_t *pmd, 265254cb8821SNick Piggin int write_access, pte_t orig_pte) 265354cb8821SNick Piggin { 265454cb8821SNick Piggin pgoff_t pgoff = (((address & PAGE_MASK) 26550da7e01fSDean Nelson - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 265654cb8821SNick Piggin unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0); 265754cb8821SNick Piggin 265816abfa08SHugh Dickins pte_unmap(page_table); 265916abfa08SHugh Dickins return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 266054cb8821SNick Piggin } 266154cb8821SNick Piggin 2662f4b81804SJes Sorensen /* 26631da177e4SLinus Torvalds * Fault of a previously existing named mapping. Repopulate the pte 26641da177e4SLinus Torvalds * from the encoded file_pte if possible. This enables swappable 26651da177e4SLinus Torvalds * nonlinear vmas. 26668f4e2101SHugh Dickins * 26678f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 26688f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 26698f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
26701da177e4SLinus Torvalds */ 2671d0217ac0SNick Piggin static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 267265500d23SHugh Dickins unsigned long address, pte_t *page_table, pmd_t *pmd, 267365500d23SHugh Dickins int write_access, pte_t orig_pte) 26741da177e4SLinus Torvalds { 2675d0217ac0SNick Piggin unsigned int flags = FAULT_FLAG_NONLINEAR | 2676d0217ac0SNick Piggin (write_access ? FAULT_FLAG_WRITE : 0); 267765500d23SHugh Dickins pgoff_t pgoff; 26781da177e4SLinus Torvalds 26794c21e2f2SHugh Dickins if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 268083c54070SNick Piggin return 0; 26811da177e4SLinus Torvalds 2682d0217ac0SNick Piggin if (unlikely(!(vma->vm_flags & VM_NONLINEAR) || 2683d0217ac0SNick Piggin !(vma->vm_flags & VM_CAN_NONLINEAR))) { 268465500d23SHugh Dickins /* 268565500d23SHugh Dickins * Page table corrupted: show pte and kill process. 268665500d23SHugh Dickins */ 2687b5810039SNick Piggin print_bad_pte(vma, orig_pte, address); 268865500d23SHugh Dickins return VM_FAULT_OOM; 268965500d23SHugh Dickins } 269065500d23SHugh Dickins 269165500d23SHugh Dickins pgoff = pte_to_pgoff(orig_pte); 269216abfa08SHugh Dickins return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 26931da177e4SLinus Torvalds } 26941da177e4SLinus Torvalds 26951da177e4SLinus Torvalds /* 26961da177e4SLinus Torvalds * These routines also need to handle stuff like marking pages dirty 26971da177e4SLinus Torvalds * and/or accessed for architectures that don't do it in hardware (most 26981da177e4SLinus Torvalds * RISC architectures). The early dirtying is also good on the i386. 26991da177e4SLinus Torvalds * 27001da177e4SLinus Torvalds * There is also a hook called "update_mmu_cache()" that architectures 27011da177e4SLinus Torvalds * with external mmu caches can use to update those (ie the Sparc or 27021da177e4SLinus Torvalds * PowerPC hashed page tables that act as extended TLBs). 27031da177e4SLinus Torvalds * 2704c74df32cSHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 2705c74df32cSHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 2706c74df32cSHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
27071da177e4SLinus Torvalds */ 27081da177e4SLinus Torvalds static inline int handle_pte_fault(struct mm_struct *mm, 27091da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long address, 271065500d23SHugh Dickins pte_t *pte, pmd_t *pmd, int write_access) 27111da177e4SLinus Torvalds { 27121da177e4SLinus Torvalds pte_t entry; 27138f4e2101SHugh Dickins spinlock_t *ptl; 27141da177e4SLinus Torvalds 27158dab5241SBenjamin Herrenschmidt entry = *pte; 27161da177e4SLinus Torvalds if (!pte_present(entry)) { 271765500d23SHugh Dickins if (pte_none(entry)) { 2718f4b81804SJes Sorensen if (vma->vm_ops) { 27193c18ddd1SNick Piggin if (likely(vma->vm_ops->fault)) 272054cb8821SNick Piggin return do_linear_fault(mm, vma, address, 272154cb8821SNick Piggin pte, pmd, write_access, entry); 2722f4b81804SJes Sorensen } 2723f4b81804SJes Sorensen return do_anonymous_page(mm, vma, address, 272465500d23SHugh Dickins pte, pmd, write_access); 272565500d23SHugh Dickins } 27261da177e4SLinus Torvalds if (pte_file(entry)) 2727d0217ac0SNick Piggin return do_nonlinear_fault(mm, vma, address, 272865500d23SHugh Dickins pte, pmd, write_access, entry); 272965500d23SHugh Dickins return do_swap_page(mm, vma, address, 273065500d23SHugh Dickins pte, pmd, write_access, entry); 27311da177e4SLinus Torvalds } 27321da177e4SLinus Torvalds 27334c21e2f2SHugh Dickins ptl = pte_lockptr(mm, pmd); 27348f4e2101SHugh Dickins spin_lock(ptl); 27358f4e2101SHugh Dickins if (unlikely(!pte_same(*pte, entry))) 27368f4e2101SHugh Dickins goto unlock; 27371da177e4SLinus Torvalds if (write_access) { 27381da177e4SLinus Torvalds if (!pte_write(entry)) 27398f4e2101SHugh Dickins return do_wp_page(mm, vma, address, 27408f4e2101SHugh Dickins pte, pmd, ptl, entry); 27411da177e4SLinus Torvalds entry = pte_mkdirty(entry); 27421da177e4SLinus Torvalds } 27431da177e4SLinus Torvalds entry = pte_mkyoung(entry); 27448dab5241SBenjamin Herrenschmidt if (ptep_set_access_flags(vma, address, pte, entry, write_access)) { 27451da177e4SLinus Torvalds update_mmu_cache(vma, address, entry); 27461a44e149SAndrea Arcangeli } else { 27471a44e149SAndrea Arcangeli /* 27481a44e149SAndrea Arcangeli * This is needed only for protection faults but the arch code 27491a44e149SAndrea Arcangeli * is not yet telling us if this is a protection fault or not. 27501a44e149SAndrea Arcangeli * This still avoids useless tlb flushes for .text page faults 27511a44e149SAndrea Arcangeli * with threads. 
27521a44e149SAndrea Arcangeli */ 27531a44e149SAndrea Arcangeli if (write_access) 27541a44e149SAndrea Arcangeli flush_tlb_page(vma, address); 27551a44e149SAndrea Arcangeli } 27568f4e2101SHugh Dickins unlock: 27578f4e2101SHugh Dickins pte_unmap_unlock(pte, ptl); 275883c54070SNick Piggin return 0; 27591da177e4SLinus Torvalds } 27601da177e4SLinus Torvalds 27611da177e4SLinus Torvalds /* 27621da177e4SLinus Torvalds * By the time we get here, we already hold the mm semaphore 27631da177e4SLinus Torvalds */ 276483c54070SNick Piggin int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 27651da177e4SLinus Torvalds unsigned long address, int write_access) 27661da177e4SLinus Torvalds { 27671da177e4SLinus Torvalds pgd_t *pgd; 27681da177e4SLinus Torvalds pud_t *pud; 27691da177e4SLinus Torvalds pmd_t *pmd; 27701da177e4SLinus Torvalds pte_t *pte; 27711da177e4SLinus Torvalds 27721da177e4SLinus Torvalds __set_current_state(TASK_RUNNING); 27731da177e4SLinus Torvalds 2774f8891e5eSChristoph Lameter count_vm_event(PGFAULT); 27751da177e4SLinus Torvalds 2776ac9b9c66SHugh Dickins if (unlikely(is_vm_hugetlb_page(vma))) 2777ac9b9c66SHugh Dickins return hugetlb_fault(mm, vma, address, write_access); 27781da177e4SLinus Torvalds 27791da177e4SLinus Torvalds pgd = pgd_offset(mm, address); 27801da177e4SLinus Torvalds pud = pud_alloc(mm, pgd, address); 27811da177e4SLinus Torvalds if (!pud) 2782c74df32cSHugh Dickins return VM_FAULT_OOM; 27831da177e4SLinus Torvalds pmd = pmd_alloc(mm, pud, address); 27841da177e4SLinus Torvalds if (!pmd) 2785c74df32cSHugh Dickins return VM_FAULT_OOM; 27861da177e4SLinus Torvalds pte = pte_alloc_map(mm, pmd, address); 27871da177e4SLinus Torvalds if (!pte) 2788c74df32cSHugh Dickins return VM_FAULT_OOM; 27891da177e4SLinus Torvalds 279065500d23SHugh Dickins return handle_pte_fault(mm, vma, address, pte, pmd, write_access); 27911da177e4SLinus Torvalds } 27921da177e4SLinus Torvalds 27931da177e4SLinus Torvalds #ifndef __PAGETABLE_PUD_FOLDED 27941da177e4SLinus Torvalds /* 27951da177e4SLinus Torvalds * Allocate page upper directory. 2796872fec16SHugh Dickins * We've already handled the fast-path in-line. 27971da177e4SLinus Torvalds */ 27981bb3630eSHugh Dickins int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 27991da177e4SLinus Torvalds { 2800c74df32cSHugh Dickins pud_t *new = pud_alloc_one(mm, address); 2801c74df32cSHugh Dickins if (!new) 28021bb3630eSHugh Dickins return -ENOMEM; 28031da177e4SLinus Torvalds 2804362a61adSNick Piggin smp_wmb(); /* See comment in __pte_alloc */ 2805362a61adSNick Piggin 2806872fec16SHugh Dickins spin_lock(&mm->page_table_lock); 28071bb3630eSHugh Dickins if (pgd_present(*pgd)) /* Another has populated it */ 28085e541973SBenjamin Herrenschmidt pud_free(mm, new); 28091bb3630eSHugh Dickins else 28101da177e4SLinus Torvalds pgd_populate(mm, pgd, new); 2811872fec16SHugh Dickins spin_unlock(&mm->page_table_lock); 28121bb3630eSHugh Dickins return 0; 28131da177e4SLinus Torvalds } 28141da177e4SLinus Torvalds #endif /* __PAGETABLE_PUD_FOLDED */ 28151da177e4SLinus Torvalds 28161da177e4SLinus Torvalds #ifndef __PAGETABLE_PMD_FOLDED 28171da177e4SLinus Torvalds /* 28181da177e4SLinus Torvalds * Allocate page middle directory. 2819872fec16SHugh Dickins * We've already handled the fast-path in-line. 
28201da177e4SLinus Torvalds */ 28211bb3630eSHugh Dickins int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 28221da177e4SLinus Torvalds { 2823c74df32cSHugh Dickins pmd_t *new = pmd_alloc_one(mm, address); 2824c74df32cSHugh Dickins if (!new) 28251bb3630eSHugh Dickins return -ENOMEM; 28261da177e4SLinus Torvalds 2827362a61adSNick Piggin smp_wmb(); /* See comment in __pte_alloc */ 2828362a61adSNick Piggin 2829872fec16SHugh Dickins spin_lock(&mm->page_table_lock); 28301da177e4SLinus Torvalds #ifndef __ARCH_HAS_4LEVEL_HACK 28311bb3630eSHugh Dickins if (pud_present(*pud)) /* Another has populated it */ 28325e541973SBenjamin Herrenschmidt pmd_free(mm, new); 28331bb3630eSHugh Dickins else 28341da177e4SLinus Torvalds pud_populate(mm, pud, new); 28351da177e4SLinus Torvalds #else 28361bb3630eSHugh Dickins if (pgd_present(*pud)) /* Another has populated it */ 28375e541973SBenjamin Herrenschmidt pmd_free(mm, new); 28381bb3630eSHugh Dickins else 28391da177e4SLinus Torvalds pgd_populate(mm, pud, new); 28401da177e4SLinus Torvalds #endif /* __ARCH_HAS_4LEVEL_HACK */ 2841872fec16SHugh Dickins spin_unlock(&mm->page_table_lock); 28421bb3630eSHugh Dickins return 0; 28431da177e4SLinus Torvalds } 28441da177e4SLinus Torvalds #endif /* __PAGETABLE_PMD_FOLDED */ 28451da177e4SLinus Torvalds 28461da177e4SLinus Torvalds int make_pages_present(unsigned long addr, unsigned long end) 28471da177e4SLinus Torvalds { 28481da177e4SLinus Torvalds int ret, len, write; 28491da177e4SLinus Torvalds struct vm_area_struct * vma; 28501da177e4SLinus Torvalds 28511da177e4SLinus Torvalds vma = find_vma(current->mm, addr); 28521da177e4SLinus Torvalds if (!vma) 2853a477097dSKOSAKI Motohiro return -ENOMEM; 28541da177e4SLinus Torvalds write = (vma->vm_flags & VM_WRITE) != 0; 28555bcb28b1SEric Sesterhenn BUG_ON(addr >= end); 28565bcb28b1SEric Sesterhenn BUG_ON(end > vma->vm_end); 285768e116a3SRolf Eike Beer len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE; 28581da177e4SLinus Torvalds ret = get_user_pages(current, current->mm, addr, 28591da177e4SLinus Torvalds len, write, 0, NULL, NULL); 2860c11d69d8SLee Schermerhorn if (ret < 0) 28611da177e4SLinus Torvalds return ret; 28629978ad58SLee Schermerhorn return ret == len ? 0 : -EFAULT; 28631da177e4SLinus Torvalds } 28641da177e4SLinus Torvalds 28651da177e4SLinus Torvalds #if !defined(__HAVE_ARCH_GATE_AREA) 28661da177e4SLinus Torvalds 28671da177e4SLinus Torvalds #if defined(AT_SYSINFO_EHDR) 28685ce7852cSAdrian Bunk static struct vm_area_struct gate_vma; 28691da177e4SLinus Torvalds 28701da177e4SLinus Torvalds static int __init gate_vma_init(void) 28711da177e4SLinus Torvalds { 28721da177e4SLinus Torvalds gate_vma.vm_mm = NULL; 28731da177e4SLinus Torvalds gate_vma.vm_start = FIXADDR_USER_START; 28741da177e4SLinus Torvalds gate_vma.vm_end = FIXADDR_USER_END; 2875b6558c4aSRoland McGrath gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 2876b6558c4aSRoland McGrath gate_vma.vm_page_prot = __P101; 2877f47aef55SRoland McGrath /* 2878f47aef55SRoland McGrath * Make sure the vDSO gets into every core dump. 2879f47aef55SRoland McGrath * Dumping its contents makes post-mortem fully interpretable later 2880f47aef55SRoland McGrath * without matching up the same kernel and hardware config to see 2881f47aef55SRoland McGrath * what PC values meant. 
2882f47aef55SRoland McGrath */ 2883f47aef55SRoland McGrath gate_vma.vm_flags |= VM_ALWAYSDUMP; 28841da177e4SLinus Torvalds return 0; 28851da177e4SLinus Torvalds } 28861da177e4SLinus Torvalds __initcall(gate_vma_init); 28871da177e4SLinus Torvalds #endif 28881da177e4SLinus Torvalds 28891da177e4SLinus Torvalds struct vm_area_struct *get_gate_vma(struct task_struct *tsk) 28901da177e4SLinus Torvalds { 28911da177e4SLinus Torvalds #ifdef AT_SYSINFO_EHDR 28921da177e4SLinus Torvalds return &gate_vma; 28931da177e4SLinus Torvalds #else 28941da177e4SLinus Torvalds return NULL; 28951da177e4SLinus Torvalds #endif 28961da177e4SLinus Torvalds } 28971da177e4SLinus Torvalds 28981da177e4SLinus Torvalds int in_gate_area_no_task(unsigned long addr) 28991da177e4SLinus Torvalds { 29001da177e4SLinus Torvalds #ifdef AT_SYSINFO_EHDR 29011da177e4SLinus Torvalds if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) 29021da177e4SLinus Torvalds return 1; 29031da177e4SLinus Torvalds #endif 29041da177e4SLinus Torvalds return 0; 29051da177e4SLinus Torvalds } 29061da177e4SLinus Torvalds 29071da177e4SLinus Torvalds #endif /* __HAVE_ARCH_GATE_AREA */ 29080ec76a11SDavid Howells 290928b2ee20SRik van Riel #ifdef CONFIG_HAVE_IOREMAP_PROT 2910d87fe660Svenkatesh.pallipadi@intel.com int follow_phys(struct vm_area_struct *vma, 291128b2ee20SRik van Riel unsigned long address, unsigned int flags, 2912d87fe660Svenkatesh.pallipadi@intel.com unsigned long *prot, resource_size_t *phys) 291328b2ee20SRik van Riel { 291428b2ee20SRik van Riel pgd_t *pgd; 291528b2ee20SRik van Riel pud_t *pud; 291628b2ee20SRik van Riel pmd_t *pmd; 291728b2ee20SRik van Riel pte_t *ptep, pte; 291828b2ee20SRik van Riel spinlock_t *ptl; 291928b2ee20SRik van Riel resource_size_t phys_addr = 0; 292028b2ee20SRik van Riel struct mm_struct *mm = vma->vm_mm; 2921d87fe660Svenkatesh.pallipadi@intel.com int ret = -EINVAL; 292228b2ee20SRik van Riel 2923d87fe660Svenkatesh.pallipadi@intel.com if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 2924d87fe660Svenkatesh.pallipadi@intel.com goto out; 292528b2ee20SRik van Riel 292628b2ee20SRik van Riel pgd = pgd_offset(mm, address); 292728b2ee20SRik van Riel if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 2928d87fe660Svenkatesh.pallipadi@intel.com goto out; 292928b2ee20SRik van Riel 293028b2ee20SRik van Riel pud = pud_offset(pgd, address); 293128b2ee20SRik van Riel if (pud_none(*pud) || unlikely(pud_bad(*pud))) 2932d87fe660Svenkatesh.pallipadi@intel.com goto out; 293328b2ee20SRik van Riel 293428b2ee20SRik van Riel pmd = pmd_offset(pud, address); 293528b2ee20SRik van Riel if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 2936d87fe660Svenkatesh.pallipadi@intel.com goto out; 293728b2ee20SRik van Riel 293828b2ee20SRik van Riel /* We cannot handle huge page PFN maps. Luckily they don't exist. 
*/ 293928b2ee20SRik van Riel if (pmd_huge(*pmd)) 2940d87fe660Svenkatesh.pallipadi@intel.com goto out; 294128b2ee20SRik van Riel 294228b2ee20SRik van Riel ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 294328b2ee20SRik van Riel if (!ptep) 294428b2ee20SRik van Riel goto out; 294528b2ee20SRik van Riel 294628b2ee20SRik van Riel pte = *ptep; 294728b2ee20SRik van Riel if (!pte_present(pte)) 294828b2ee20SRik van Riel goto unlock; 294928b2ee20SRik van Riel if ((flags & FOLL_WRITE) && !pte_write(pte)) 295028b2ee20SRik van Riel goto unlock; 295128b2ee20SRik van Riel phys_addr = pte_pfn(pte); 295228b2ee20SRik van Riel phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */ 295328b2ee20SRik van Riel 295428b2ee20SRik van Riel *prot = pgprot_val(pte_pgprot(pte)); 2955d87fe660Svenkatesh.pallipadi@intel.com *phys = phys_addr; 2956d87fe660Svenkatesh.pallipadi@intel.com ret = 0; 295728b2ee20SRik van Riel 295828b2ee20SRik van Riel unlock: 295928b2ee20SRik van Riel pte_unmap_unlock(ptep, ptl); 296028b2ee20SRik van Riel out: 2961d87fe660Svenkatesh.pallipadi@intel.com return ret; 296228b2ee20SRik van Riel } 296328b2ee20SRik van Riel 296428b2ee20SRik van Riel int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 296528b2ee20SRik van Riel void *buf, int len, int write) 296628b2ee20SRik van Riel { 296728b2ee20SRik van Riel resource_size_t phys_addr; 296828b2ee20SRik van Riel unsigned long prot = 0; 2969*2bc7273bSKOSAKI Motohiro void __iomem *maddr; 297028b2ee20SRik van Riel int offset = addr & (PAGE_SIZE-1); 297128b2ee20SRik van Riel 2972d87fe660Svenkatesh.pallipadi@intel.com if (follow_phys(vma, addr, write, &prot, &phys_addr)) 297328b2ee20SRik van Riel return -EINVAL; 297428b2ee20SRik van Riel 297528b2ee20SRik van Riel maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); 297628b2ee20SRik van Riel if (write) 297728b2ee20SRik van Riel memcpy_toio(maddr + offset, buf, len); 297828b2ee20SRik van Riel else 297928b2ee20SRik van Riel memcpy_fromio(buf, maddr + offset, len); 298028b2ee20SRik van Riel iounmap(maddr); 298128b2ee20SRik van Riel 298228b2ee20SRik van Riel return len; 298328b2ee20SRik van Riel } 298428b2ee20SRik van Riel #endif 298528b2ee20SRik van Riel 29860ec76a11SDavid Howells /* 29870ec76a11SDavid Howells * Access another process' address space. 
29880ec76a11SDavid Howells * Source/target buffer must be kernel space, 29890ec76a11SDavid Howells * Do not walk the page table directly, use get_user_pages 29900ec76a11SDavid Howells */ 29910ec76a11SDavid Howells int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 29920ec76a11SDavid Howells { 29930ec76a11SDavid Howells struct mm_struct *mm; 29940ec76a11SDavid Howells struct vm_area_struct *vma; 29950ec76a11SDavid Howells void *old_buf = buf; 29960ec76a11SDavid Howells 29970ec76a11SDavid Howells mm = get_task_mm(tsk); 29980ec76a11SDavid Howells if (!mm) 29990ec76a11SDavid Howells return 0; 30000ec76a11SDavid Howells 30010ec76a11SDavid Howells down_read(&mm->mmap_sem); 3002183ff22bSSimon Arlott /* ignore errors, just check how much was successfully transferred */ 30030ec76a11SDavid Howells while (len) { 30040ec76a11SDavid Howells int bytes, ret, offset; 30050ec76a11SDavid Howells void *maddr; 300628b2ee20SRik van Riel struct page *page = NULL; 30070ec76a11SDavid Howells 30080ec76a11SDavid Howells ret = get_user_pages(tsk, mm, addr, 1, 30090ec76a11SDavid Howells write, 1, &page, &vma); 301028b2ee20SRik van Riel if (ret <= 0) { 301128b2ee20SRik van Riel /* 301228b2ee20SRik van Riel * Check if this is a VM_IO | VM_PFNMAP VMA, which 301328b2ee20SRik van Riel * we can access using slightly different code. 301428b2ee20SRik van Riel */ 301528b2ee20SRik van Riel #ifdef CONFIG_HAVE_IOREMAP_PROT 301628b2ee20SRik van Riel vma = find_vma(mm, addr); 301728b2ee20SRik van Riel if (!vma) 30180ec76a11SDavid Howells break; 301928b2ee20SRik van Riel if (vma->vm_ops && vma->vm_ops->access) 302028b2ee20SRik van Riel ret = vma->vm_ops->access(vma, addr, buf, 302128b2ee20SRik van Riel len, write); 302228b2ee20SRik van Riel if (ret <= 0) 302328b2ee20SRik van Riel #endif 302428b2ee20SRik van Riel break; 302528b2ee20SRik van Riel bytes = ret; 302628b2ee20SRik van Riel } else { 30270ec76a11SDavid Howells bytes = len; 30280ec76a11SDavid Howells offset = addr & (PAGE_SIZE-1); 30290ec76a11SDavid Howells if (bytes > PAGE_SIZE-offset) 30300ec76a11SDavid Howells bytes = PAGE_SIZE-offset; 30310ec76a11SDavid Howells 30320ec76a11SDavid Howells maddr = kmap(page); 30330ec76a11SDavid Howells if (write) { 30340ec76a11SDavid Howells copy_to_user_page(vma, page, addr, 30350ec76a11SDavid Howells maddr + offset, buf, bytes); 30360ec76a11SDavid Howells set_page_dirty_lock(page); 30370ec76a11SDavid Howells } else { 30380ec76a11SDavid Howells copy_from_user_page(vma, page, addr, 30390ec76a11SDavid Howells buf, maddr + offset, bytes); 30400ec76a11SDavid Howells } 30410ec76a11SDavid Howells kunmap(page); 30420ec76a11SDavid Howells page_cache_release(page); 304328b2ee20SRik van Riel } 30440ec76a11SDavid Howells len -= bytes; 30450ec76a11SDavid Howells buf += bytes; 30460ec76a11SDavid Howells addr += bytes; 30470ec76a11SDavid Howells } 30480ec76a11SDavid Howells up_read(&mm->mmap_sem); 30490ec76a11SDavid Howells mmput(mm); 30500ec76a11SDavid Howells 30510ec76a11SDavid Howells return buf - old_buf; 30520ec76a11SDavid Howells } 305303252919SAndi Kleen 305403252919SAndi Kleen /* 305503252919SAndi Kleen * Print the name of a VMA. 
305603252919SAndi Kleen */ 305703252919SAndi Kleen void print_vma_addr(char *prefix, unsigned long ip) 305803252919SAndi Kleen { 305903252919SAndi Kleen struct mm_struct *mm = current->mm; 306003252919SAndi Kleen struct vm_area_struct *vma; 306103252919SAndi Kleen 3062e8bff74aSIngo Molnar /* 3063e8bff74aSIngo Molnar * Do not print if we are in atomic 3064e8bff74aSIngo Molnar * contexts (in exception stacks, etc.): 3065e8bff74aSIngo Molnar */ 3066e8bff74aSIngo Molnar if (preempt_count()) 3067e8bff74aSIngo Molnar return; 3068e8bff74aSIngo Molnar 306903252919SAndi Kleen down_read(&mm->mmap_sem); 307003252919SAndi Kleen vma = find_vma(mm, ip); 307103252919SAndi Kleen if (vma && vma->vm_file) { 307203252919SAndi Kleen struct file *f = vma->vm_file; 307303252919SAndi Kleen char *buf = (char *)__get_free_page(GFP_KERNEL); 307403252919SAndi Kleen if (buf) { 307503252919SAndi Kleen char *p, *s; 307603252919SAndi Kleen 3077cf28b486SJan Blunck p = d_path(&f->f_path, buf, PAGE_SIZE); 307803252919SAndi Kleen if (IS_ERR(p)) 307903252919SAndi Kleen p = "?"; 308003252919SAndi Kleen s = strrchr(p, '/'); 308103252919SAndi Kleen if (s) 308203252919SAndi Kleen p = s+1; 308303252919SAndi Kleen printk("%s%s[%lx+%lx]", prefix, p, 308403252919SAndi Kleen vma->vm_start, 308503252919SAndi Kleen vma->vm_end - vma->vm_start); 308603252919SAndi Kleen free_page((unsigned long)buf); 308703252919SAndi Kleen } 308803252919SAndi Kleen } 308903252919SAndi Kleen up_read(&current->mm->mmap_sem); 309003252919SAndi Kleen } 30913ee1afa3SNick Piggin 30923ee1afa3SNick Piggin #ifdef CONFIG_PROVE_LOCKING 30933ee1afa3SNick Piggin void might_fault(void) 30943ee1afa3SNick Piggin { 30953ee1afa3SNick Piggin might_sleep(); 30963ee1afa3SNick Piggin /* 30973ee1afa3SNick Piggin * it would be nicer only to annotate paths which are not under 30983ee1afa3SNick Piggin * pagefault_disable, however that requires a larger audit and 30993ee1afa3SNick Piggin * providing helpers like get_user_atomic. 31003ee1afa3SNick Piggin */ 31013ee1afa3SNick Piggin if (!in_atomic() && current->mm) 31023ee1afa3SNick Piggin might_lock_read(&current->mm->mmap_sem); 31033ee1afa3SNick Piggin } 31043ee1afa3SNick Piggin EXPORT_SYMBOL(might_fault); 31053ee1afa3SNick Piggin #endif 3106
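/*
 * Usage sketch (hypothetical, not part of mm/memory.c): one way a
 * ptrace-style caller could read a single word from another task's
 * address space through access_process_vm() above.  access_process_vm()
 * returns the number of bytes actually transferred (0 when the task has
 * no mm), so a short copy must be treated as failure.  The helper name
 * peek_task_long() is invented for illustration only.
 */
static long peek_task_long(struct task_struct *tsk, unsigned long addr,
			   long *val)
{
	int copied;

	/* write == 0: copy from the remote address space into *val */
	copied = access_process_vm(tsk, addr, val, sizeof(*val), 0);
	if (copied != sizeof(*val))
		return -EIO;
	return 0;
}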