/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *              Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *              (Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/dma-debug.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "internal.h"

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
        randomize_va_space = 0;
        return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
        zero_pfn = page_to_pfn(ZERO_PAGE(0));
        return 0;
}
core_initcall(init_zero_pfn);


#if defined(SPLIT_RSS_COUNTING)

void sync_mm_rss(struct mm_struct *mm)
{
        int i;

        for (i = 0; i < NR_MM_COUNTERS; i++) {
                if (current->rss_stat.count[i]) {
                        add_mm_counter(mm, i, current->rss_stat.count[i]);
                        current->rss_stat.count[i] = 0;
                }
        }
        current->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
        struct task_struct *task = current;

        if (likely(task->mm == mm))
                task->rss_stat.count[member] += val;
        else
                add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH  (64)
static void check_sync_rss_stat(struct task_struct *task)
{
        if (unlikely(task != current))
                return;
        if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
                sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */

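/*
 * A rough sketch of how the split counting above behaves: fault-path
 * updates go through add_mm_counter_fast() into current->rss_stat and are
 * folded back into the mm by sync_mm_rss() - either explicitly (see
 * add_mm_rss_vec() below), or at the latest after TASK_RSS_EVENTS_THRESH
 * faults via check_sync_rss_stat(). Between syncs, get_mm_counter()
 * readers can therefore lag by up to that many pages per thread.
 */
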
#ifdef HAVE_GENERIC_MMU_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        batch = tlb->active;
        if (batch->next) {
                tlb->active = batch->next;
                return true;
        }

        if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                return false;

        batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
        if (!batch)
                return false;

        tlb->batch_count++;
        batch->next = NULL;
        batch->nr   = 0;
        batch->max  = MAX_GATHER_BATCH;

        tlb->active->next = batch;
        tlb->active = batch;

        return true;
}

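/*
 * Note that tlb_next_batch() allocates with GFP_NOWAIT | __GFP_NOWARN:
 * it runs in the middle of a teardown where entering reclaim is
 * unwelcome, and failure is harmless - we simply stop growing the batch
 * list and flush more often. MAX_GATHER_BATCH_COUNT (see
 * asm-generic/tlb.h) bounds how much memory a single gather can pin.
 */
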
void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        tlb->mm = mm;

        /* Is it from 0 to ~0? */
        tlb->fullmm     = !(start | (end+1));
        tlb->need_flush_all = 0;
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
        tlb->local.max  = ARRAY_SIZE(tlb->__pages);
        tlb->active     = &tlb->local;
        tlb->batch_count = 0;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
#endif
        tlb->page_size = 0;

        __tlb_reset_range(tlb);
}

static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        if (!tlb->end)
                return;

        tlb_flush(tlb);
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
#endif
        __tlb_reset_range(tlb);
}

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
        tlb->active = &tlb->local;
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}

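/*
 * The two-phase split above is a correctness requirement, not just
 * structure: tlb_flush_mmu_tlbonly() must shoot down stale TLB entries
 * before tlb_flush_mmu_free() hands the gathered pages back to the
 * allocator, otherwise another CPU could still write through a stale
 * translation into a page that has already been reused.
 */
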
/* tlb_finish_mmu
 *      Called at the end of the shootdown operation to free up any resources
 *      that were required.
 */
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end, bool force)
{
        struct mmu_gather_batch *batch, *next;

        if (force)
                __tlb_adjust_range(tlb, start, end - start);

        tlb_flush_mmu(tlb);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        for (batch = tlb->local.next; batch; batch = next) {
                next = batch->next;
                free_pages((unsigned long)batch, 0);
        }
        tlb->local.next = NULL;
}

/* __tlb_remove_page
 *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *      while handling the additional races in SMP caused by other CPUs
 *      caching valid mappings in their TLBs. When out of page slots we must
 *      call tlb_flush_mmu().
 *      Returns true if the caller should flush.
 */
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
        struct mmu_gather_batch *batch;

        VM_BUG_ON(!tlb->end);
        VM_WARN_ON(tlb->page_size != page_size);

        batch = tlb->active;
        /*
         * Add the page and check if we are full. If so
         * force a flush.
         */
        batch->pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return true;
                batch = tlb->active;
        }
        VM_BUG_ON_PAGE(batch->nr > batch->max, page);

        return false;
}

#endif /* HAVE_GENERIC_MMU_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling. See the comment near struct mmu_table_batch.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        /*
         * When there are fewer than two users of this mm there cannot be a
         * concurrent page-table walk.
         */
        if (atomic_read(&tlb->mm->mm_users) < 2) {
                __tlb_remove_table(table);
                return;
        }

        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_table_flush(tlb);
}

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

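/*
 * A simplified sketch of the mmu_gather lifecycle, as driven by callers
 * such as unmap_region() in mm/mmap.c:
 *
 *      struct mmu_gather tlb;
 *
 *      tlb_gather_mmu(&tlb, mm, start, end);
 *      unmap_vmas(&tlb, vma, start, end);
 *      free_pgtables(&tlb, vma, floor, ceiling);
 *      tlb_finish_mmu(&tlb, start, end);
 */
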
/* tlb_gather_mmu
 *      Called to initialize an (on-stack) mmu_gather structure for page-table
 *      tear-down from @mm. The @fullmm argument is used when @mm is without
 *      users and we're going to destroy the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                        unsigned long start, unsigned long end)
{
        arch_tlb_gather_mmu(tlb, mm, start, end);
        inc_tlb_flush_pending(tlb->mm);
}

void tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end)
{
        /*
         * If parallel threads are doing PTE changes on the same range under
         * a non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
         * flush by batching, a thread with a stale TLB entry can fail to
         * flush it (by observing pte_none or !pte_dirty, for example), so
         * flush the TLB forcefully if we detect parallel PTE batching
         * threads.
         */
        bool force = mm_tlb_flush_nested(tlb->mm);

        arch_tlb_finish_mmu(tlb, start, end, force);
        dec_tlb_flush_pending(tlb->mm);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                           unsigned long addr)
{
        pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
        mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                free_pmd_range(tlb, pud, addr, next, floor, ceiling);
        } while (pud++, addr = next, addr != end);

        start &= P4D_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= P4D_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(p4d, start);
        p4d_clear(p4d);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        p4d_t *p4d;
        unsigned long next;
        unsigned long start;

        start = addr;
        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                free_pud_range(tlb, p4d, addr, next, floor, ceiling);
        } while (p4d++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        p4d = p4d_offset(pgd, start);
        pgd_clear(pgd);
        p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * The next few lines have given us lots of grief...
         *
         * Why are we testing PMD* at this top level?  Because often
         * there will be no work to do at all, and we'd prefer not to
         * go all the way down to the bottom just to discover that.
         *
         * Why all these "- 1"s?  Because 0 represents both the bottom
         * of the address space and the top of it (using -1 for the
         * top wouldn't help much: the masks would do the wrong thing).
         * The rule is that addr 0 and floor 0 refer to the bottom of
         * the address space, but end 0 and ceiling 0 refer to the top.
         * Comparisons need to use "end - 1" and "ceiling - 1" (though
         * that end 0 case should be mythical).
         *
         * Wherever addr is brought up or ceiling brought down, we must
         * be careful to reject "the opposite 0" before it confuses the
         * subsequent tests.  But what about where end is brought down
         * by PMD_SIZE below? no, end can't go down to 0 there.
         *
         * Whereas we round start (addr) and ceiling down, by different
         * masks at different levels, in order to test whether a table
         * now has no other vmas using it, so can be freed, we don't
         * bother to round floor or end up - the tests don't need that.
         */

        addr &= PMD_MASK;
        if (addr < floor) {
                addr += PMD_SIZE;
                if (!addr)
                        return;
        }
        if (ceiling) {
                ceiling &= PMD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                end -= PMD_SIZE;
        if (addr > end - 1)
                return;
        /*
         * We add page table cache pages with PAGE_SIZE
         * (see pte_free_tlb()), so flush the tlb if we need to.
         */
        tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long floor, unsigned long ceiling)
{
        while (vma) {
                struct vm_area_struct *next = vma->vm_next;
                unsigned long addr = vma->vm_start;

                /*
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
                 */
                unlink_anon_vmas(vma);
                unlink_file_vma(vma);

                if (is_vm_hugetlb_page(vma)) {
                        hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next ? next->vm_start : ceiling);
                } else {
                        /*
                         * Optimization: gather nearby vmas into one call down
                         */
                        while (next && next->vm_start <= vma->vm_end + PMD_SIZE
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = vma->vm_next;
                                unlink_anon_vmas(vma);
                                unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next ? next->vm_start : ceiling);
                }
                vma = next;
        }
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
        spinlock_t *ptl;
        pgtable_t new = pte_alloc_one(mm, address);
        if (!new)
                return -ENOMEM;

        /*
         * Ensure all pte setup (eg. pte page lock and page clearing) are
         * visible before the pte is made visible to other CPUs by being
         * put into page tables.
         *
         * The other side of the story is the pointer chasing in the page
         * table walking code (when walking the page table without locking;
         * ie. most of the time). Fortunately, these data accesses consist
         * of a chain of data-dependent loads, meaning most CPUs (alpha
         * being the notable exception) will already guarantee loads are
         * seen in-order. See the alpha page table accessors for the
         * smp_read_barrier_depends() barriers in page table walking code.
         */
        smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

        ptl = pmd_lock(mm, pmd);
        if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
                mm_inc_nr_ptes(mm);
                pmd_populate(mm, pmd, new);
                new = NULL;
        }
        spin_unlock(ptl);
        if (new)
                pte_free(mm, new);
        return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        pte_t *new = pte_alloc_one_kernel(&init_mm, address);
        if (!new)
                return -ENOMEM;

        smp_wmb(); /* See comment in __pte_alloc */

        spin_lock(&init_mm.page_table_lock);
        if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
                pmd_populate_kernel(&init_mm, pmd, new);
                new = NULL;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (new)
                pte_free_kernel(&init_mm, new);
        return 0;
}

static inline void init_rss_vec(int *rss)
{
        memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
        int i;

        if (current->mm == mm)
                sync_mm_rss(mm);
        for (i = 0; i < NR_MM_COUNTERS; i++)
                if (rss[i])
                        add_mm_counter(mm, i, rss[i]);
}

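/*
 * The rss vector helpers above bracket batched pte updates; the pattern,
 * as used by copy_pte_range() and the zap path, is roughly:
 *
 *      int rss[NR_MM_COUNTERS];
 *
 *      init_rss_vec(rss);
 *      ...                     (rss[mm_counter(page)]++ per pte touched)
 *      add_mm_rss_vec(mm, rss);
 *
 * so the shared per-mm counters are hit once per batch, not once per pte.
 */
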
/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                          pte_t pte, struct page *page)
{
        pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd = pmd_offset(pud, addr);
        struct address_space *mapping;
        pgoff_t index;
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        return;
                }
                if (nr_unshown) {
                        pr_alert("BUG: Bad page map: %lu messages suppressed\n",
                                 nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
        index = linear_page_index(vma, addr);

        pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
                 current->comm,
                 (long long)pte_val(pte), (long long)pmd_val(*pmd));
        if (page)
                dump_page(page, "bad pte");
        pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
                 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
        /*
         * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
         */
        pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
                 vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->fault : NULL,
                 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
                 mapping ? mapping->a_ops->readpage : NULL);
        dump_stack();
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

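/*
 * The rate limiting above works out as follows: the first report of a
 * burst sets "resume" one minute ahead; up to 60 reports are printed in
 * that window, after which further reports only bump nr_unshown until
 * "resume" passes, when the suppressed count is dumped and a new burst
 * may begin.
 */
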
/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif

struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                             pte_t pte, bool with_public_device)
{
        unsigned long pfn = pte_pfn(pte);

        if (HAVE_PTE_SPECIAL) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
                if (vma->vm_ops && vma->vm_ops->find_special_page)
                        return vma->vm_ops->find_special_page(vma, addr);
                if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                        return NULL;
                if (is_zero_pfn(pfn))
                        return NULL;

                /*
                 * Device public pages are special pages (they are ZONE_DEVICE
                 * pages but different from persistent memory). They behave
                 * almost like normal pages. The difference is that they are
                 * not on the lru and thus should never be involved with
                 * anything that involves lru manipulation (mlock, numa
                 * balancing, ...).
                 *
                 * This is why we still want to return NULL for such a page
                 * from vm_normal_page() so that we do not have to special
                 * case every call site of vm_normal_page().
                 */
                if (likely(pfn <= highest_memmap_pfn)) {
                        struct page *page = pfn_to_page(pfn);

                        if (is_device_public_page(page)) {
                                if (with_public_device)
                                        return page;
                                return NULL;
                        }
                }
                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }

        /* !HAVE_PTE_SPECIAL case follows: */

        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
                                return NULL;
                        goto out;
                } else {
                        unsigned long off;
                        off = (addr - vma->vm_start) >> PAGE_SHIFT;
                        if (pfn == vma->vm_pgoff + off)
                                return NULL;
                        if (!is_cow_mapping(vma->vm_flags))
                                return NULL;
                }
        }

        if (is_zero_pfn(pfn))
                return NULL;
check_pfn:
        if (unlikely(pfn > highest_memmap_pfn)) {
                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }

        /*
         * NOTE! We still have PageReserved() pages in the page tables.
         * eg. VDSO mappings can cause them to exist.
         */
out:
        return pfn_to_page(pfn);
}

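/*
 * A sketch of the usual caller pattern (vm_normal_page() itself is a
 * thin wrapper around _vm_normal_page() with with_public_device false):
 *
 *      page = vm_normal_page(vma, addr, pte);
 *      if (page)
 *              ...     refcount/rmap/LRU work on the struct page
 *      else
 *              ...     raw pfn or zero page: nothing to operate on
 */
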
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                                pmd_t pmd)
{
        unsigned long pfn = pmd_pfn(pmd);

        /*
         * There is no pmd_special() but there may be special pmds, e.g.
         * in a direct-access (dax) mapping, so let's just replicate the
         * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
         */
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
                                return NULL;
                        goto out;
                } else {
                        unsigned long off;
                        off = (addr - vma->vm_start) >> PAGE_SHIFT;
                        if (pfn == vma->vm_pgoff + off)
                                return NULL;
                        if (!is_cow_mapping(vma->vm_flags))
                                return NULL;
                }
        }

        if (is_zero_pfn(pfn))
                return NULL;
        if (unlikely(pfn > highest_memmap_pfn))
                return NULL;

        /*
         * NOTE! We still have PageReserved() pages in the page tables.
         * eg. VDSO mappings can cause them to exist.
         */
out:
        return pfn_to_page(pfn);
}
#endif

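/*
 * The copy side of fork() follows. copy_one_pte() distinguishes three
 * non-present cases before the common present-pte path: real swap
 * entries (whose swap count is duplicated), migration entries and device
 * private entries (where write entries may be downgraded to read-only
 * for COW).
 */
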
/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
                unsigned long addr, int *rss)
{
        unsigned long vm_flags = vma->vm_flags;
        pte_t pte = *src_pte;
        struct page *page;

        /* pte contains position in swap or file, so copy. */
        if (unlikely(!pte_present(pte))) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                if (likely(!non_swap_entry(entry))) {
                        if (swap_duplicate(entry) < 0)
                                return entry.val;

                        /* make sure dst_mm is on swapoff's mmlist. */
                        if (unlikely(list_empty(&dst_mm->mmlist))) {
                                spin_lock(&mmlist_lock);
                                if (list_empty(&dst_mm->mmlist))
                                        list_add(&dst_mm->mmlist,
                                                 &src_mm->mmlist);
                                spin_unlock(&mmlist_lock);
                        }
                        rss[MM_SWAPENTS]++;
                } else if (is_migration_entry(entry)) {
                        page = migration_entry_to_page(entry);

                        rss[mm_counter(page)]++;

                        if (is_write_migration_entry(entry) &&
                                        is_cow_mapping(vm_flags)) {
                                /*
                                 * COW mappings require pages in both
                                 * parent and child to be set to read.
                                 */
                                make_migration_entry_read(&entry);
                                pte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(*src_pte))
                                        pte = pte_swp_mksoft_dirty(pte);
                                set_pte_at(src_mm, addr, src_pte, pte);
                        }
                } else if (is_device_private_entry(entry)) {
                        page = device_private_entry_to_page(entry);

                        /*
                         * Update rss count even for unaddressable pages, as
                         * they should be treated just like normal pages in
                         * this respect.
                         *
                         * We will likely want to have some new rss counters
                         * for unaddressable pages, at some point. But for now
                         * keep things as they are.
                         */
                        get_page(page);
                        rss[mm_counter(page)]++;
                        page_dup_rmap(page, false);

                        /*
                         * We do not preserve soft-dirty information, because so
                         * far, checkpoint/restore is the only feature that
                         * requires that. And checkpoint/restore does not work
                         * when a device driver is involved (you cannot easily
                         * save and restore device driver state).
                         */
                        if (is_write_device_private_entry(entry) &&
                            is_cow_mapping(vm_flags)) {
                                make_device_private_entry_read(&entry);
                                pte = swp_entry_to_pte(entry);
                                set_pte_at(src_mm, addr, src_pte, pte);
                        }
                }
                goto out_set_pte;
        }

        /*
         * If it's a COW mapping, write protect it both
         * in the parent and the child
         */
        if (is_cow_mapping(vm_flags)) {
                ptep_set_wrprotect(src_mm, addr, src_pte);
                pte = pte_wrprotect(pte);
        }

        /*
         * If it's a shared mapping, mark it clean in
         * the child
         */
        if (vm_flags & VM_SHARED)
                pte = pte_mkclean(pte);
        pte = pte_mkold(pte);

        page = vm_normal_page(vma, addr, pte);
        if (page) {
                get_page(page);
                page_dup_rmap(page, false);
                rss[mm_counter(page)]++;
        } else if (pte_devmap(pte)) {
                page = pte_page(pte);

                /*
                 * Cache coherent device memory behaves like a regular page
                 * and not like a persistent memory page. For more
                 * information see MEMORY_DEVICE_CACHE_COHERENT in
                 * memory_hotplug.h.
                 */
                if (is_device_public_page(page)) {
                        get_page(page);
                        page_dup_rmap(page, false);
                        rss[mm_counter(page)]++;
                }
        }

out_set_pte:
        set_pte_at(dst_mm, addr, dst_pte, pte);
        return 0;
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
                   unsigned long addr, unsigned long end)
{
        pte_t *orig_src_pte, *orig_dst_pte;
        pte_t *src_pte, *dst_pte;
        spinlock_t *src_ptl, *dst_ptl;
        int progress = 0;
        int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};

again:
        init_rss_vec(rss);

        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte)
                return -ENOMEM;
        src_pte = pte_offset_map(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
        orig_src_pte = src_pte;
        orig_dst_pte = dst_pte;
        arch_enter_lazy_mmu_mode();

        do {
                /*
                 * We are holding two locks at this point - either of them
                 * could generate latencies in another task on another CPU.
                 */
                if (progress >= 32) {
                        progress = 0;
                        if (need_resched() ||
                            spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
                                break;
                }
                if (pte_none(*src_pte)) {
                        progress++;
                        continue;
                }
                entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
                                                        vma, addr, rss);
                if (entry.val)
                        break;
                progress += 8;
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

        arch_leave_lazy_mmu_mode();
        spin_unlock(src_ptl);
        pte_unmap(orig_src_pte);
        add_mm_rss_vec(dst_mm, rss);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();

        if (entry.val) {
                if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
                        return -ENOMEM;
                progress = 0;
        }
        if (addr != end)
                goto again;
        return 0;
}

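/*
 * Note the retry protocol above: when swap_duplicate() needs a swap
 * count continuation page, copy_one_pte() hands the entry back,
 * copy_pte_range() drops both pte locks so add_swap_count_continuation()
 * can allocate with GFP_KERNEL, and the "again" loop resumes at the pte
 * that failed.
 */
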
1157c2febafcSKirill A. Shutemov p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma, 11581da177e4SLinus Torvalds unsigned long addr, unsigned long end) 11591da177e4SLinus Torvalds { 11601da177e4SLinus Torvalds pud_t *src_pud, *dst_pud; 11611da177e4SLinus Torvalds unsigned long next; 11621da177e4SLinus Torvalds 1163c2febafcSKirill A. Shutemov dst_pud = pud_alloc(dst_mm, dst_p4d, addr); 11641da177e4SLinus Torvalds if (!dst_pud) 11651da177e4SLinus Torvalds return -ENOMEM; 1166c2febafcSKirill A. Shutemov src_pud = pud_offset(src_p4d, addr); 11671da177e4SLinus Torvalds do { 11681da177e4SLinus Torvalds next = pud_addr_end(addr, end); 1169a00cc7d9SMatthew Wilcox if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) { 1170a00cc7d9SMatthew Wilcox int err; 1171a00cc7d9SMatthew Wilcox 1172a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma); 1173a00cc7d9SMatthew Wilcox err = copy_huge_pud(dst_mm, src_mm, 1174a00cc7d9SMatthew Wilcox dst_pud, src_pud, addr, vma); 1175a00cc7d9SMatthew Wilcox if (err == -ENOMEM) 1176a00cc7d9SMatthew Wilcox return -ENOMEM; 1177a00cc7d9SMatthew Wilcox if (!err) 1178a00cc7d9SMatthew Wilcox continue; 1179a00cc7d9SMatthew Wilcox /* fall through */ 1180a00cc7d9SMatthew Wilcox } 11811da177e4SLinus Torvalds if (pud_none_or_clear_bad(src_pud)) 11821da177e4SLinus Torvalds continue; 11831da177e4SLinus Torvalds if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, 11841da177e4SLinus Torvalds vma, addr, next)) 11851da177e4SLinus Torvalds return -ENOMEM; 11861da177e4SLinus Torvalds } while (dst_pud++, src_pud++, addr = next, addr != end); 11871da177e4SLinus Torvalds return 0; 11881da177e4SLinus Torvalds } 11891da177e4SLinus Torvalds 1190c2febafcSKirill A. Shutemov static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1191c2febafcSKirill A. Shutemov pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, 1192c2febafcSKirill A. Shutemov unsigned long addr, unsigned long end) 1193c2febafcSKirill A. Shutemov { 1194c2febafcSKirill A. Shutemov p4d_t *src_p4d, *dst_p4d; 1195c2febafcSKirill A. Shutemov unsigned long next; 1196c2febafcSKirill A. Shutemov 1197c2febafcSKirill A. Shutemov dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr); 1198c2febafcSKirill A. Shutemov if (!dst_p4d) 1199c2febafcSKirill A. Shutemov return -ENOMEM; 1200c2febafcSKirill A. Shutemov src_p4d = p4d_offset(src_pgd, addr); 1201c2febafcSKirill A. Shutemov do { 1202c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 1203c2febafcSKirill A. Shutemov if (p4d_none_or_clear_bad(src_p4d)) 1204c2febafcSKirill A. Shutemov continue; 1205c2febafcSKirill A. Shutemov if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d, 1206c2febafcSKirill A. Shutemov vma, addr, next)) 1207c2febafcSKirill A. Shutemov return -ENOMEM; 1208c2febafcSKirill A. Shutemov } while (dst_p4d++, src_p4d++, addr = next, addr != end); 1209c2febafcSKirill A. Shutemov return 0; 1210c2febafcSKirill A. Shutemov } 1211c2febafcSKirill A. 
Shutemov 12121da177e4SLinus Torvalds int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 12131da177e4SLinus Torvalds struct vm_area_struct *vma) 12141da177e4SLinus Torvalds { 12151da177e4SLinus Torvalds pgd_t *src_pgd, *dst_pgd; 12161da177e4SLinus Torvalds unsigned long next; 12171da177e4SLinus Torvalds unsigned long addr = vma->vm_start; 12181da177e4SLinus Torvalds unsigned long end = vma->vm_end; 12192ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 12202ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 12212ec74c3eSSagi Grimberg bool is_cow; 1222cddb8a5cSAndrea Arcangeli int ret; 12231da177e4SLinus Torvalds 1224d992895bSNick Piggin /* 1225d992895bSNick Piggin * Don't copy ptes where a page fault will fill them correctly. 1226d992895bSNick Piggin * Fork becomes much lighter when there are big shared or private 1227d992895bSNick Piggin * readonly mappings. The tradeoff is that copy_page_range is more 1228d992895bSNick Piggin * efficient than faulting. 1229d992895bSNick Piggin */ 12300661a336SKirill A. Shutemov if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) && 12310661a336SKirill A. Shutemov !vma->anon_vma) 1232d992895bSNick Piggin return 0; 1233d992895bSNick Piggin 12341da177e4SLinus Torvalds if (is_vm_hugetlb_page(vma)) 12351da177e4SLinus Torvalds return copy_hugetlb_page_range(dst_mm, src_mm, vma); 12361da177e4SLinus Torvalds 1237b3b9c293SKonstantin Khlebnikov if (unlikely(vma->vm_flags & VM_PFNMAP)) { 12382ab64037Svenkatesh.pallipadi@intel.com /* 12392ab64037Svenkatesh.pallipadi@intel.com * We do not free on error cases below as remove_vma 12402ab64037Svenkatesh.pallipadi@intel.com * gets called on error from higher level routine 12412ab64037Svenkatesh.pallipadi@intel.com */ 12425180da41SSuresh Siddha ret = track_pfn_copy(vma); 12432ab64037Svenkatesh.pallipadi@intel.com if (ret) 12442ab64037Svenkatesh.pallipadi@intel.com return ret; 12452ab64037Svenkatesh.pallipadi@intel.com } 12462ab64037Svenkatesh.pallipadi@intel.com 1247cddb8a5cSAndrea Arcangeli /* 1248cddb8a5cSAndrea Arcangeli * We need to invalidate the secondary MMU mappings only when 1249cddb8a5cSAndrea Arcangeli * there could be a permission downgrade on the ptes of the 1250cddb8a5cSAndrea Arcangeli * parent mm. And a permission downgrade will only happen if 1251cddb8a5cSAndrea Arcangeli * is_cow_mapping() returns true. 1252cddb8a5cSAndrea Arcangeli */ 12532ec74c3eSSagi Grimberg is_cow = is_cow_mapping(vma->vm_flags); 12542ec74c3eSSagi Grimberg mmun_start = addr; 12552ec74c3eSSagi Grimberg mmun_end = end; 12562ec74c3eSSagi Grimberg if (is_cow) 12572ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(src_mm, mmun_start, 12582ec74c3eSSagi Grimberg mmun_end); 1259cddb8a5cSAndrea Arcangeli 1260cddb8a5cSAndrea Arcangeli ret = 0; 12611da177e4SLinus Torvalds dst_pgd = pgd_offset(dst_mm, addr); 12621da177e4SLinus Torvalds src_pgd = pgd_offset(src_mm, addr); 12631da177e4SLinus Torvalds do { 12641da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 12651da177e4SLinus Torvalds if (pgd_none_or_clear_bad(src_pgd)) 12661da177e4SLinus Torvalds continue; 1267c2febafcSKirill A. 
Shutemov if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd, 1268cddb8a5cSAndrea Arcangeli vma, addr, next))) { 1269cddb8a5cSAndrea Arcangeli ret = -ENOMEM; 1270cddb8a5cSAndrea Arcangeli break; 1271cddb8a5cSAndrea Arcangeli } 12721da177e4SLinus Torvalds } while (dst_pgd++, src_pgd++, addr = next, addr != end); 1273cddb8a5cSAndrea Arcangeli 12742ec74c3eSSagi Grimberg if (is_cow) 12752ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end); 1276cddb8a5cSAndrea Arcangeli return ret; 12771da177e4SLinus Torvalds } 12781da177e4SLinus Torvalds 127951c6f666SRobin Holt static unsigned long zap_pte_range(struct mmu_gather *tlb, 1280b5810039SNick Piggin struct vm_area_struct *vma, pmd_t *pmd, 12811da177e4SLinus Torvalds unsigned long addr, unsigned long end, 128297a89413SPeter Zijlstra struct zap_details *details) 12831da177e4SLinus Torvalds { 1284b5810039SNick Piggin struct mm_struct *mm = tlb->mm; 1285d16dfc55SPeter Zijlstra int force_flush = 0; 1286d559db08SKAMEZAWA Hiroyuki int rss[NR_MM_COUNTERS]; 128797a89413SPeter Zijlstra spinlock_t *ptl; 12885f1a1907SSteven Rostedt pte_t *start_pte; 128997a89413SPeter Zijlstra pte_t *pte; 12908a5f14a2SKirill A. Shutemov swp_entry_t entry; 1291d559db08SKAMEZAWA Hiroyuki 129207e32661SAneesh Kumar K.V tlb_remove_check_page_size_change(tlb, PAGE_SIZE); 1293d16dfc55SPeter Zijlstra again: 1294e303297eSPeter Zijlstra init_rss_vec(rss); 12955f1a1907SSteven Rostedt start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 12965f1a1907SSteven Rostedt pte = start_pte; 12973ea27719SMel Gorman flush_tlb_batched_pending(mm); 12986606c3e0SZachary Amsden arch_enter_lazy_mmu_mode(); 12991da177e4SLinus Torvalds do { 13001da177e4SLinus Torvalds pte_t ptent = *pte; 1301166f61b9STobin C Harding if (pte_none(ptent)) 13021da177e4SLinus Torvalds continue; 130351c6f666SRobin Holt 13046f5e6b9eSHugh Dickins if (pte_present(ptent)) { 13056f5e6b9eSHugh Dickins struct page *page; 13066f5e6b9eSHugh Dickins 1307df6ad698SJérôme Glisse page = _vm_normal_page(vma, addr, ptent, true); 13081da177e4SLinus Torvalds if (unlikely(details) && page) { 13091da177e4SLinus Torvalds /* 13101da177e4SLinus Torvalds * unmap_shared_mapping_pages() wants to 13111da177e4SLinus Torvalds * invalidate cache without truncating: 13121da177e4SLinus Torvalds * unmap shared but keep private pages. 13131da177e4SLinus Torvalds */ 13141da177e4SLinus Torvalds if (details->check_mapping && 1315800d8c63SKirill A. Shutemov details->check_mapping != page_rmapping(page)) 13161da177e4SLinus Torvalds continue; 13171da177e4SLinus Torvalds } 1318b5810039SNick Piggin ptent = ptep_get_and_clear_full(mm, addr, pte, 1319a600388dSZachary Amsden tlb->fullmm); 13201da177e4SLinus Torvalds tlb_remove_tlb_entry(tlb, pte, addr); 13211da177e4SLinus Torvalds if (unlikely(!page)) 13221da177e4SLinus Torvalds continue; 1323eca56ff9SJerome Marchand 1324eca56ff9SJerome Marchand if (!PageAnon(page)) { 13251cf35d47SLinus Torvalds if (pte_dirty(ptent)) { 13261cf35d47SLinus Torvalds force_flush = 1; 13276237bcd9SHugh Dickins set_page_dirty(page); 13281cf35d47SLinus Torvalds } 13294917e5d0SJohannes Weiner if (pte_young(ptent) && 133064363aadSJoe Perches likely(!(vma->vm_flags & VM_SEQ_READ))) 1331bf3f3bc5SNick Piggin mark_page_accessed(page); 13326237bcd9SHugh Dickins } 1333eca56ff9SJerome Marchand rss[mm_counter(page)]--; 1334d281ee61SKirill A. 
Shutemov page_remove_rmap(page, false); 13353dc14741SHugh Dickins if (unlikely(page_mapcount(page) < 0)) 13363dc14741SHugh Dickins print_bad_pte(vma, addr, ptent, page); 1337e9d55e15SAneesh Kumar K.V if (unlikely(__tlb_remove_page(tlb, page))) { 13381cf35d47SLinus Torvalds force_flush = 1; 1339ce9ec37bSWill Deacon addr += PAGE_SIZE; 1340d16dfc55SPeter Zijlstra break; 13411cf35d47SLinus Torvalds } 13421da177e4SLinus Torvalds continue; 13431da177e4SLinus Torvalds } 13445042db43SJérôme Glisse 13455042db43SJérôme Glisse entry = pte_to_swp_entry(ptent); 13465042db43SJérôme Glisse if (non_swap_entry(entry) && is_device_private_entry(entry)) { 13475042db43SJérôme Glisse struct page *page = device_private_entry_to_page(entry); 13485042db43SJérôme Glisse 13495042db43SJérôme Glisse if (unlikely(details && details->check_mapping)) { 13505042db43SJérôme Glisse /* 13515042db43SJérôme Glisse * unmap_shared_mapping_pages() wants to 13525042db43SJérôme Glisse * invalidate cache without truncating: 13535042db43SJérôme Glisse * unmap shared but keep private pages. 13545042db43SJérôme Glisse */ 13555042db43SJérôme Glisse if (details->check_mapping != 13565042db43SJérôme Glisse page_rmapping(page)) 13575042db43SJérôme Glisse continue; 13585042db43SJérôme Glisse } 13595042db43SJérôme Glisse 13605042db43SJérôme Glisse pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); 13615042db43SJérôme Glisse rss[mm_counter(page)]--; 13625042db43SJérôme Glisse page_remove_rmap(page, false); 13635042db43SJérôme Glisse put_page(page); 13645042db43SJérôme Glisse continue; 13655042db43SJérôme Glisse } 13665042db43SJérôme Glisse 13673e8715fdSKirill A. Shutemov /* If details->check_mapping, we leave swap entries. */ 13683e8715fdSKirill A. Shutemov if (unlikely(details)) 13691da177e4SLinus Torvalds continue; 1370b084d435SKAMEZAWA Hiroyuki 13718a5f14a2SKirill A. Shutemov entry = pte_to_swp_entry(ptent); 1372b084d435SKAMEZAWA Hiroyuki if (!non_swap_entry(entry)) 1373b084d435SKAMEZAWA Hiroyuki rss[MM_SWAPENTS]--; 13749f9f1acdSKonstantin Khlebnikov else if (is_migration_entry(entry)) { 13759f9f1acdSKonstantin Khlebnikov struct page *page; 13769f9f1acdSKonstantin Khlebnikov 13779f9f1acdSKonstantin Khlebnikov page = migration_entry_to_page(entry); 1378eca56ff9SJerome Marchand rss[mm_counter(page)]--; 13799f9f1acdSKonstantin Khlebnikov } 1380b084d435SKAMEZAWA Hiroyuki if (unlikely(!free_swap_and_cache(entry))) 13812509ef26SHugh Dickins print_bad_pte(vma, addr, ptent, NULL); 13829888a1caSZachary Amsden pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); 138397a89413SPeter Zijlstra } while (pte++, addr += PAGE_SIZE, addr != end); 1384ae859762SHugh Dickins 1385d559db08SKAMEZAWA Hiroyuki add_mm_rss_vec(mm, rss); 13866606c3e0SZachary Amsden arch_leave_lazy_mmu_mode(); 138751c6f666SRobin Holt 13881cf35d47SLinus Torvalds /* Do the actual TLB flush before dropping ptl */ 1389fb7332a9SWill Deacon if (force_flush) 13901cf35d47SLinus Torvalds tlb_flush_mmu_tlbonly(tlb); 13911cf35d47SLinus Torvalds pte_unmap_unlock(start_pte, ptl); 13921cf35d47SLinus Torvalds 13931cf35d47SLinus Torvalds /* 13941cf35d47SLinus Torvalds * If we forced a TLB flush (either due to running out of 13951cf35d47SLinus Torvalds * batch buffers or because we needed to flush dirty TLB 13961cf35d47SLinus Torvalds * entries before releasing the ptl), free the batched 13971cf35d47SLinus Torvalds * memory too. Restart if we didn't do everything. 
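 * (force_flush is set in two places above: when __tlb_remove_page()
 * reports that the batch buffers are full, and when a dirty pte is
 * cleared and its TLB entry must be flushed before the ptl is
 * dropped.)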
13981cf35d47SLinus Torvalds */ 13991cf35d47SLinus Torvalds if (force_flush) { 14001cf35d47SLinus Torvalds force_flush = 0; 14011cf35d47SLinus Torvalds tlb_flush_mmu_free(tlb); 14022b047252SLinus Torvalds if (addr != end) 1403d16dfc55SPeter Zijlstra goto again; 1404d16dfc55SPeter Zijlstra } 1405d16dfc55SPeter Zijlstra 140651c6f666SRobin Holt return addr; 14071da177e4SLinus Torvalds } 14081da177e4SLinus Torvalds 140951c6f666SRobin Holt static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, 1410b5810039SNick Piggin struct vm_area_struct *vma, pud_t *pud, 14111da177e4SLinus Torvalds unsigned long addr, unsigned long end, 141297a89413SPeter Zijlstra struct zap_details *details) 14131da177e4SLinus Torvalds { 14141da177e4SLinus Torvalds pmd_t *pmd; 14151da177e4SLinus Torvalds unsigned long next; 14161da177e4SLinus Torvalds 14171da177e4SLinus Torvalds pmd = pmd_offset(pud, addr); 14181da177e4SLinus Torvalds do { 14191da177e4SLinus Torvalds next = pmd_addr_end(addr, end); 142084c3fc4eSZi Yan if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { 142114d1a55cSAndrea Arcangeli if (next - addr != HPAGE_PMD_SIZE) { 142268428398SHugh Dickins VM_BUG_ON_VMA(vma_is_anonymous(vma) && 142368428398SHugh Dickins !rwsem_is_locked(&tlb->mm->mmap_sem), vma); 1424fd60775aSDavid Rientjes __split_huge_pmd(vma, pmd, addr, false, NULL); 1425f21760b1SShaohua Li } else if (zap_huge_pmd(tlb, vma, pmd, addr)) 14261a5a9906SAndrea Arcangeli goto next; 142771e3aac0SAndrea Arcangeli /* fall through */ 142871e3aac0SAndrea Arcangeli } 14291a5a9906SAndrea Arcangeli /* 14301a5a9906SAndrea Arcangeli * Here there can be other concurrent MADV_DONTNEED or 14311a5a9906SAndrea Arcangeli * trans huge page faults running, and if the pmd is 14321a5a9906SAndrea Arcangeli * none or trans huge it can change under us. This is 14331a5a9906SAndrea Arcangeli * because MADV_DONTNEED holds the mmap_sem in read 14341a5a9906SAndrea Arcangeli * mode. 14351a5a9906SAndrea Arcangeli */ 14361a5a9906SAndrea Arcangeli if (pmd_none_or_trans_huge_or_clear_bad(pmd)) 14371a5a9906SAndrea Arcangeli goto next; 143897a89413SPeter Zijlstra next = zap_pte_range(tlb, vma, pmd, addr, next, details); 14391a5a9906SAndrea Arcangeli next: 144097a89413SPeter Zijlstra cond_resched(); 144197a89413SPeter Zijlstra } while (pmd++, addr = next, addr != end); 144251c6f666SRobin Holt 144351c6f666SRobin Holt return addr; 14441da177e4SLinus Torvalds } 14451da177e4SLinus Torvalds 144651c6f666SRobin Holt static inline unsigned long zap_pud_range(struct mmu_gather *tlb, 1447c2febafcSKirill A. Shutemov struct vm_area_struct *vma, p4d_t *p4d, 14481da177e4SLinus Torvalds unsigned long addr, unsigned long end, 144997a89413SPeter Zijlstra struct zap_details *details) 14501da177e4SLinus Torvalds { 14511da177e4SLinus Torvalds pud_t *pud; 14521da177e4SLinus Torvalds unsigned long next; 14531da177e4SLinus Torvalds 1454c2febafcSKirill A. 
Shutemov pud = pud_offset(p4d, addr); 14551da177e4SLinus Torvalds do { 14561da177e4SLinus Torvalds next = pud_addr_end(addr, end); 1457a00cc7d9SMatthew Wilcox if (pud_trans_huge(*pud) || pud_devmap(*pud)) { 1458a00cc7d9SMatthew Wilcox if (next - addr != HPAGE_PUD_SIZE) { 1459a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma); 1460a00cc7d9SMatthew Wilcox split_huge_pud(vma, pud, addr); 1461a00cc7d9SMatthew Wilcox } else if (zap_huge_pud(tlb, vma, pud, addr)) 1462a00cc7d9SMatthew Wilcox goto next; 1463a00cc7d9SMatthew Wilcox /* fall through */ 1464a00cc7d9SMatthew Wilcox } 146597a89413SPeter Zijlstra if (pud_none_or_clear_bad(pud)) 14661da177e4SLinus Torvalds continue; 146797a89413SPeter Zijlstra next = zap_pmd_range(tlb, vma, pud, addr, next, details); 1468a00cc7d9SMatthew Wilcox next: 1469a00cc7d9SMatthew Wilcox cond_resched(); 147097a89413SPeter Zijlstra } while (pud++, addr = next, addr != end); 147151c6f666SRobin Holt 147251c6f666SRobin Holt return addr; 14731da177e4SLinus Torvalds } 14741da177e4SLinus Torvalds 1475c2febafcSKirill A. Shutemov static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, 1476c2febafcSKirill A. Shutemov struct vm_area_struct *vma, pgd_t *pgd, 1477c2febafcSKirill A. Shutemov unsigned long addr, unsigned long end, 1478c2febafcSKirill A. Shutemov struct zap_details *details) 1479c2febafcSKirill A. Shutemov { 1480c2febafcSKirill A. Shutemov p4d_t *p4d; 1481c2febafcSKirill A. Shutemov unsigned long next; 1482c2febafcSKirill A. Shutemov 1483c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, addr); 1484c2febafcSKirill A. Shutemov do { 1485c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 1486c2febafcSKirill A. Shutemov if (p4d_none_or_clear_bad(p4d)) 1487c2febafcSKirill A. Shutemov continue; 1488c2febafcSKirill A. Shutemov next = zap_pud_range(tlb, vma, p4d, addr, next, details); 1489c2febafcSKirill A. Shutemov } while (p4d++, addr = next, addr != end); 1490c2febafcSKirill A. Shutemov 1491c2febafcSKirill A. Shutemov return addr; 1492c2febafcSKirill A. Shutemov } 1493c2febafcSKirill A. Shutemov 1494aac45363SMichal Hocko void unmap_page_range(struct mmu_gather *tlb, 149551c6f666SRobin Holt struct vm_area_struct *vma, 14961da177e4SLinus Torvalds unsigned long addr, unsigned long end, 149797a89413SPeter Zijlstra struct zap_details *details) 14981da177e4SLinus Torvalds { 14991da177e4SLinus Torvalds pgd_t *pgd; 15001da177e4SLinus Torvalds unsigned long next; 15011da177e4SLinus Torvalds 15021da177e4SLinus Torvalds BUG_ON(addr >= end); 15031da177e4SLinus Torvalds tlb_start_vma(tlb, vma); 15041da177e4SLinus Torvalds pgd = pgd_offset(vma->vm_mm, addr); 15051da177e4SLinus Torvalds do { 15061da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 150797a89413SPeter Zijlstra if (pgd_none_or_clear_bad(pgd)) 15081da177e4SLinus Torvalds continue; 1509c2febafcSKirill A. 
Shutemov next = zap_p4d_range(tlb, vma, pgd, addr, next, details); 151097a89413SPeter Zijlstra } while (pgd++, addr = next, addr != end); 15111da177e4SLinus Torvalds tlb_end_vma(tlb, vma); 15121da177e4SLinus Torvalds } 15131da177e4SLinus Torvalds 1514f5cc4eefSAl Viro 1515f5cc4eefSAl Viro static void unmap_single_vma(struct mmu_gather *tlb, 15161da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long start_addr, 15174f74d2c8SLinus Torvalds unsigned long end_addr, 15181da177e4SLinus Torvalds struct zap_details *details) 15191da177e4SLinus Torvalds { 1520f5cc4eefSAl Viro unsigned long start = max(vma->vm_start, start_addr); 15211da177e4SLinus Torvalds unsigned long end; 15221da177e4SLinus Torvalds 15231da177e4SLinus Torvalds if (start >= vma->vm_end) 1524f5cc4eefSAl Viro return; 15251da177e4SLinus Torvalds end = min(vma->vm_end, end_addr); 15261da177e4SLinus Torvalds if (end <= vma->vm_start) 1527f5cc4eefSAl Viro return; 15281da177e4SLinus Torvalds 1529cbc91f71SSrikar Dronamraju if (vma->vm_file) 1530cbc91f71SSrikar Dronamraju uprobe_munmap(vma, start, end); 1531cbc91f71SSrikar Dronamraju 1532b3b9c293SKonstantin Khlebnikov if (unlikely(vma->vm_flags & VM_PFNMAP)) 15335180da41SSuresh Siddha untrack_pfn(vma, 0, 0); 15342ab64037Svenkatesh.pallipadi@intel.com 15358b2a1238SAl Viro if (start != end) { 153651c6f666SRobin Holt if (unlikely(is_vm_hugetlb_page(vma))) { 1537a137e1ccSAndi Kleen /* 1538a137e1ccSAndi Kleen * It is undesirable to test vma->vm_file as it 1539a137e1ccSAndi Kleen * should be non-null for valid hugetlb area. 1540a137e1ccSAndi Kleen * However, vm_file will be NULL in the error 15417aa6b4adSDavidlohr Bueso * cleanup path of mmap_region. When 1542a137e1ccSAndi Kleen * hugetlbfs ->mmap method fails, 15437aa6b4adSDavidlohr Bueso * mmap_region() nullifies vma->vm_file 1544a137e1ccSAndi Kleen * before calling this function to clean up. 1545a137e1ccSAndi Kleen * Since no pte has actually been setup, it is 1546a137e1ccSAndi Kleen * safe to do nothing in this case. 1547a137e1ccSAndi Kleen */ 154824669e58SAneesh Kumar K.V if (vma->vm_file) { 154983cde9e8SDavidlohr Bueso i_mmap_lock_write(vma->vm_file->f_mapping); 1550d833352aSMel Gorman __unmap_hugepage_range_final(tlb, vma, start, end, NULL); 155183cde9e8SDavidlohr Bueso i_mmap_unlock_write(vma->vm_file->f_mapping); 155224669e58SAneesh Kumar K.V } 155351c6f666SRobin Holt } else 1554038c7aa1SAl Viro unmap_page_range(tlb, vma, start, end, details); 155597a89413SPeter Zijlstra } 155651c6f666SRobin Holt } 15571da177e4SLinus Torvalds 1558f5cc4eefSAl Viro /** 1559f5cc4eefSAl Viro * unmap_vmas - unmap a range of memory covered by a list of vma's 1560f5cc4eefSAl Viro * @tlb: address of the caller's struct mmu_gather 1561f5cc4eefSAl Viro * @vma: the starting vma 1562f5cc4eefSAl Viro * @start_addr: virtual address at which to start unmapping 1563f5cc4eefSAl Viro * @end_addr: virtual address at which to end unmapping 1564f5cc4eefSAl Viro * 1565f5cc4eefSAl Viro * Unmap all pages in the vma list. 1566f5cc4eefSAl Viro * 1567f5cc4eefSAl Viro * Only addresses between `start' and `end' will be unmapped. 1568f5cc4eefSAl Viro * 1569f5cc4eefSAl Viro * The VMA list must be sorted in ascending virtual address order. 1570f5cc4eefSAl Viro * 1571f5cc4eefSAl Viro * unmap_vmas() assumes that the caller will flush the whole unmapped address 1572f5cc4eefSAl Viro * range after unmap_vmas() returns. 
So the only responsibility here is to 1573f5cc4eefSAl Viro * ensure that any thus-far unmapped pages are flushed before unmap_vmas() 1574f5cc4eefSAl Viro * drops the lock and schedules. 1575f5cc4eefSAl Viro */ 1576f5cc4eefSAl Viro void unmap_vmas(struct mmu_gather *tlb, 1577f5cc4eefSAl Viro struct vm_area_struct *vma, unsigned long start_addr, 15784f74d2c8SLinus Torvalds unsigned long end_addr) 1579f5cc4eefSAl Viro { 1580f5cc4eefSAl Viro struct mm_struct *mm = vma->vm_mm; 1581f5cc4eefSAl Viro 1582f5cc4eefSAl Viro mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); 1583f5cc4eefSAl Viro for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) 15844f74d2c8SLinus Torvalds unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); 1585cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); 15861da177e4SLinus Torvalds } 15871da177e4SLinus Torvalds 15881da177e4SLinus Torvalds /** 15891da177e4SLinus Torvalds * zap_page_range - remove user pages in a given range 15901da177e4SLinus Torvalds * @vma: vm_area_struct holding the applicable pages 1591eb4546bbSRandy Dunlap * @start: starting address of pages to zap 15921da177e4SLinus Torvalds * @size: number of bytes to zap 1593f5cc4eefSAl Viro * 1594f5cc4eefSAl Viro * Caller must protect the VMA list 15951da177e4SLinus Torvalds */ 15967e027b14SLinus Torvalds void zap_page_range(struct vm_area_struct *vma, unsigned long start, 1597ecf1385dSKirill A. Shutemov unsigned long size) 15981da177e4SLinus Torvalds { 15991da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 1600d16dfc55SPeter Zijlstra struct mmu_gather tlb; 16017e027b14SLinus Torvalds unsigned long end = start + size; 16021da177e4SLinus Torvalds 16031da177e4SLinus Torvalds lru_add_drain(); 16042b047252SLinus Torvalds tlb_gather_mmu(&tlb, mm, start, end); 1605365e9c87SHugh Dickins update_hiwater_rss(mm); 16067e027b14SLinus Torvalds mmu_notifier_invalidate_range_start(mm, start, end); 16074647706eSMel Gorman for ( ; vma && vma->vm_start < end; vma = vma->vm_next) { 1608ecf1385dSKirill A. Shutemov unmap_single_vma(&tlb, vma, start, end, NULL); 16094647706eSMel Gorman 16104647706eSMel Gorman /* 16114647706eSMel Gorman * zap_page_range does not specify whether mmap_sem should be 16124647706eSMel Gorman * held for read or write. That allows parallel zap_page_range 16134647706eSMel Gorman * operations to unmap a PTE and defer a flush meaning that 16144647706eSMel Gorman * this call observes pte_none and fails to flush the TLB. 16154647706eSMel Gorman * Rather than adding a complex API, ensure that no stale 16164647706eSMel Gorman * TLB entries exist when this call returns. 16174647706eSMel Gorman */ 16184647706eSMel Gorman flush_tlb_range(vma, start, end); 16194647706eSMel Gorman } 16204647706eSMel Gorman 16217e027b14SLinus Torvalds mmu_notifier_invalidate_range_end(mm, start, end); 16227e027b14SLinus Torvalds tlb_finish_mmu(&tlb, start, end); 16231da177e4SLinus Torvalds } 16241da177e4SLinus Torvalds 1625c627f9ccSJack Steiner /** 1626f5cc4eefSAl Viro * zap_page_range_single - remove user pages in a given range 1627f5cc4eefSAl Viro * @vma: vm_area_struct holding the applicable pages 1628f5cc4eefSAl Viro * @address: starting address of pages to zap 1629f5cc4eefSAl Viro * @size: number of bytes to zap 16308a5f14a2SKirill A. Shutemov * @details: details of shared cache invalidation 1631f5cc4eefSAl Viro * 1632f5cc4eefSAl Viro * The range must fit into one VMA. 
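 * (Contrast with zap_page_range() above, which follows vma->vm_next
 * and may therefore cross several VMAs.)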
1633f5cc4eefSAl Viro */ 1634f5cc4eefSAl Viro static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, 1635f5cc4eefSAl Viro unsigned long size, struct zap_details *details) 1636f5cc4eefSAl Viro { 1637f5cc4eefSAl Viro struct mm_struct *mm = vma->vm_mm; 1638f5cc4eefSAl Viro struct mmu_gather tlb; 1639f5cc4eefSAl Viro unsigned long end = address + size; 1640f5cc4eefSAl Viro 1641f5cc4eefSAl Viro lru_add_drain(); 16422b047252SLinus Torvalds tlb_gather_mmu(&tlb, mm, address, end); 1643f5cc4eefSAl Viro update_hiwater_rss(mm); 1644f5cc4eefSAl Viro mmu_notifier_invalidate_range_start(mm, address, end); 16454f74d2c8SLinus Torvalds unmap_single_vma(&tlb, vma, address, end, details); 1646f5cc4eefSAl Viro mmu_notifier_invalidate_range_end(mm, address, end); 1647f5cc4eefSAl Viro tlb_finish_mmu(&tlb, address, end); 16481da177e4SLinus Torvalds } 16491da177e4SLinus Torvalds 1650c627f9ccSJack Steiner /** 1651c627f9ccSJack Steiner * zap_vma_ptes - remove ptes mapping the vma 1652c627f9ccSJack Steiner * @vma: vm_area_struct holding ptes to be zapped 1653c627f9ccSJack Steiner * @address: starting address of pages to zap 1654c627f9ccSJack Steiner * @size: number of bytes to zap 1655c627f9ccSJack Steiner * 1656c627f9ccSJack Steiner * This function only unmaps ptes assigned to VM_PFNMAP vmas. 1657c627f9ccSJack Steiner * 1658c627f9ccSJack Steiner * The entire address range must be fully contained within the vma. 1659c627f9ccSJack Steiner * 1660c627f9ccSJack Steiner * Returns 0 if successful. 1661c627f9ccSJack Steiner */ 1662c627f9ccSJack Steiner int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1663c627f9ccSJack Steiner unsigned long size) 1664c627f9ccSJack Steiner { 1665c627f9ccSJack Steiner if (address < vma->vm_start || address + size > vma->vm_end || 1666c627f9ccSJack Steiner !(vma->vm_flags & VM_PFNMAP)) 1667c627f9ccSJack Steiner return -1; 1668f5cc4eefSAl Viro zap_page_range_single(vma, address, size, NULL); 1669c627f9ccSJack Steiner return 0; 1670c627f9ccSJack Steiner } 1671c627f9ccSJack Steiner EXPORT_SYMBOL_GPL(zap_vma_ptes); 1672c627f9ccSJack Steiner 167325ca1d6cSNamhyung Kim pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, 1674920c7a5dSHarvey Harrison spinlock_t **ptl) 1675c9cfcddfSLinus Torvalds { 1676c2febafcSKirill A. Shutemov pgd_t *pgd; 1677c2febafcSKirill A. Shutemov p4d_t *p4d; 1678c2febafcSKirill A. Shutemov pud_t *pud; 1679c2febafcSKirill A. Shutemov pmd_t *pmd; 1680c2febafcSKirill A. Shutemov 1681c2febafcSKirill A. Shutemov pgd = pgd_offset(mm, addr); 1682c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, addr); 1683c2febafcSKirill A. Shutemov if (!p4d) 1684c2febafcSKirill A. Shutemov return NULL; 1685c2febafcSKirill A. Shutemov pud = pud_alloc(mm, p4d, addr); 1686c2febafcSKirill A. Shutemov if (!pud) 1687c2febafcSKirill A. Shutemov return NULL; 1688c2febafcSKirill A. Shutemov pmd = pmd_alloc(mm, pud, addr); 1689c2febafcSKirill A. Shutemov if (!pmd) 1690c2febafcSKirill A. Shutemov return NULL; 1691c2febafcSKirill A. Shutemov 1692f66055abSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*pmd)); 1693c9cfcddfSLinus Torvalds return pte_alloc_map_lock(mm, pmd, addr, ptl); 1694c9cfcddfSLinus Torvalds } 1695c9cfcddfSLinus Torvalds 16961da177e4SLinus Torvalds /* 1697238f58d8SLinus Torvalds * This is the old fallback for page remapping. 1698238f58d8SLinus Torvalds * 1699238f58d8SLinus Torvalds * For historical reasons, it only allows reserved pages. 
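 * (That is what the PageAnon() check below enforces: anonymous
 * pages are rejected with -EINVAL.)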
Only 1700238f58d8SLinus Torvalds * old drivers should use this, and they needed to mark their 1701238f58d8SLinus Torvalds * pages reserved for the old functions anyway. 1702238f58d8SLinus Torvalds */ 1703423bad60SNick Piggin static int insert_page(struct vm_area_struct *vma, unsigned long addr, 1704423bad60SNick Piggin struct page *page, pgprot_t prot) 1705238f58d8SLinus Torvalds { 1706423bad60SNick Piggin struct mm_struct *mm = vma->vm_mm; 1707238f58d8SLinus Torvalds int retval; 1708238f58d8SLinus Torvalds pte_t *pte; 1709238f58d8SLinus Torvalds spinlock_t *ptl; 1710238f58d8SLinus Torvalds 1711238f58d8SLinus Torvalds retval = -EINVAL; 1712a145dd41SLinus Torvalds if (PageAnon(page)) 17135b4e655eSKAMEZAWA Hiroyuki goto out; 1714238f58d8SLinus Torvalds retval = -ENOMEM; 1715238f58d8SLinus Torvalds flush_dcache_page(page); 1716c9cfcddfSLinus Torvalds pte = get_locked_pte(mm, addr, &ptl); 1717238f58d8SLinus Torvalds if (!pte) 17185b4e655eSKAMEZAWA Hiroyuki goto out; 1719238f58d8SLinus Torvalds retval = -EBUSY; 1720238f58d8SLinus Torvalds if (!pte_none(*pte)) 1721238f58d8SLinus Torvalds goto out_unlock; 1722238f58d8SLinus Torvalds 1723238f58d8SLinus Torvalds /* Ok, finally just insert the thing.. */ 1724238f58d8SLinus Torvalds get_page(page); 1725eca56ff9SJerome Marchand inc_mm_counter_fast(mm, mm_counter_file(page)); 1726dd78feddSKirill A. Shutemov page_add_file_rmap(page, false); 1727238f58d8SLinus Torvalds set_pte_at(mm, addr, pte, mk_pte(page, prot)); 1728238f58d8SLinus Torvalds 1729238f58d8SLinus Torvalds retval = 0; 17308a9f3ccdSBalbir Singh pte_unmap_unlock(pte, ptl); 17318a9f3ccdSBalbir Singh return retval; 1732238f58d8SLinus Torvalds out_unlock: 1733238f58d8SLinus Torvalds pte_unmap_unlock(pte, ptl); 1734238f58d8SLinus Torvalds out: 1735238f58d8SLinus Torvalds return retval; 1736238f58d8SLinus Torvalds } 1737238f58d8SLinus Torvalds 1738bfa5bf6dSRolf Eike Beer /** 1739bfa5bf6dSRolf Eike Beer * vm_insert_page - insert single page into user vma 1740bfa5bf6dSRolf Eike Beer * @vma: user vma to map to 1741bfa5bf6dSRolf Eike Beer * @addr: target user address of this page 1742bfa5bf6dSRolf Eike Beer * @page: source kernel page 1743bfa5bf6dSRolf Eike Beer * 1744a145dd41SLinus Torvalds * This allows drivers to insert individual pages they've allocated 1745a145dd41SLinus Torvalds * into a user vma. 1746a145dd41SLinus Torvalds * 1747a145dd41SLinus Torvalds * The page has to be a nice clean _individual_ kernel allocation. 1748a145dd41SLinus Torvalds * If you allocate a compound page, you need to have marked it as 1749a145dd41SLinus Torvalds * such (__GFP_COMP), or manually just split the page up yourself 17508dfcc9baSNick Piggin * (see split_page()). 1751a145dd41SLinus Torvalds * 1752a145dd41SLinus Torvalds * NOTE! Traditionally this was done with "remap_pfn_range()" which 1753a145dd41SLinus Torvalds * took an arbitrary page protection parameter. This doesn't allow 1754a145dd41SLinus Torvalds * that. Your vma protection will have to be set up correctly, which 1755a145dd41SLinus Torvalds * means that if you want a shared writable mapping, you'd better 1756a145dd41SLinus Torvalds * ask for a shared writable mapping! 1757a145dd41SLinus Torvalds * 1758a145dd41SLinus Torvalds * The page does not need to be reserved. 17594b6e1e37SKonstantin Khlebnikov * 17604b6e1e37SKonstantin Khlebnikov * Usually this function is called from f_op->mmap() handler 17614b6e1e37SKonstantin Khlebnikov * under mm->mmap_sem write-lock, so it can change vma->vm_flags. 
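 * A minimal sketch of such an ->mmap handler (hypothetical driver;
 * the "mydrv" names are illustrative only, not part of this file):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 *			return -EINVAL;
 *		return vm_insert_page(vma, vma->vm_start, drv->page);
 *	}
 *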
17624b6e1e37SKonstantin Khlebnikov * Caller must set VM_MIXEDMAP on vma if it wants to call this 17634b6e1e37SKonstantin Khlebnikov * function from other places, for example from page-fault handler. 1764a145dd41SLinus Torvalds */ 1765423bad60SNick Piggin int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, 1766423bad60SNick Piggin struct page *page) 1767a145dd41SLinus Torvalds { 1768a145dd41SLinus Torvalds if (addr < vma->vm_start || addr >= vma->vm_end) 1769a145dd41SLinus Torvalds return -EFAULT; 1770a145dd41SLinus Torvalds if (!page_count(page)) 1771a145dd41SLinus Torvalds return -EINVAL; 17724b6e1e37SKonstantin Khlebnikov if (!(vma->vm_flags & VM_MIXEDMAP)) { 17734b6e1e37SKonstantin Khlebnikov BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); 17744b6e1e37SKonstantin Khlebnikov BUG_ON(vma->vm_flags & VM_PFNMAP); 17754b6e1e37SKonstantin Khlebnikov vma->vm_flags |= VM_MIXEDMAP; 17764b6e1e37SKonstantin Khlebnikov } 1777423bad60SNick Piggin return insert_page(vma, addr, page, vma->vm_page_prot); 1778a145dd41SLinus Torvalds } 1779e3c3374fSLinus Torvalds EXPORT_SYMBOL(vm_insert_page); 1780a145dd41SLinus Torvalds 1781423bad60SNick Piggin static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, 1782b2770da6SRoss Zwisler pfn_t pfn, pgprot_t prot, bool mkwrite) 1783423bad60SNick Piggin { 1784423bad60SNick Piggin struct mm_struct *mm = vma->vm_mm; 1785423bad60SNick Piggin int retval; 1786423bad60SNick Piggin pte_t *pte, entry; 1787423bad60SNick Piggin spinlock_t *ptl; 1788423bad60SNick Piggin 1789423bad60SNick Piggin retval = -ENOMEM; 1790423bad60SNick Piggin pte = get_locked_pte(mm, addr, &ptl); 1791423bad60SNick Piggin if (!pte) 1792423bad60SNick Piggin goto out; 1793423bad60SNick Piggin retval = -EBUSY; 1794b2770da6SRoss Zwisler if (!pte_none(*pte)) { 1795b2770da6SRoss Zwisler if (mkwrite) { 1796b2770da6SRoss Zwisler /* 1797b2770da6SRoss Zwisler * For read faults on private mappings the PFN passed 1798b2770da6SRoss Zwisler * in may not match the PFN we have mapped if the 1799b2770da6SRoss Zwisler * mapped PFN is a writeable COW page. In the mkwrite 1800b2770da6SRoss Zwisler * case we are creating a writable PTE for a shared 1801b2770da6SRoss Zwisler * mapping and we expect the PFNs to match. 1802b2770da6SRoss Zwisler */ 1803b2770da6SRoss Zwisler if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn))) 1804423bad60SNick Piggin goto out_unlock; 1805b2770da6SRoss Zwisler entry = *pte; 1806b2770da6SRoss Zwisler goto out_mkwrite; 1807b2770da6SRoss Zwisler } else 1808b2770da6SRoss Zwisler goto out_unlock; 1809b2770da6SRoss Zwisler } 1810423bad60SNick Piggin 1811423bad60SNick Piggin /* Ok, finally just insert the thing.. */ 181201c8f1c4SDan Williams if (pfn_t_devmap(pfn)) 181301c8f1c4SDan Williams entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); 181401c8f1c4SDan Williams else 181501c8f1c4SDan Williams entry = pte_mkspecial(pfn_t_pte(pfn, prot)); 1816b2770da6SRoss Zwisler 1817b2770da6SRoss Zwisler out_mkwrite: 1818b2770da6SRoss Zwisler if (mkwrite) { 1819b2770da6SRoss Zwisler entry = pte_mkyoung(entry); 1820b2770da6SRoss Zwisler entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1821b2770da6SRoss Zwisler } 1822b2770da6SRoss Zwisler 1823423bad60SNick Piggin set_pte_at(mm, addr, pte, entry); 18244b3073e1SRussell King update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? 
*/ 1825423bad60SNick Piggin 1826423bad60SNick Piggin retval = 0; 1827423bad60SNick Piggin out_unlock: 1828423bad60SNick Piggin pte_unmap_unlock(pte, ptl); 1829423bad60SNick Piggin out: 1830423bad60SNick Piggin return retval; 1831423bad60SNick Piggin } 1832423bad60SNick Piggin 1833e0dc0d8fSNick Piggin /** 1834e0dc0d8fSNick Piggin * vm_insert_pfn - insert single pfn into user vma 1835e0dc0d8fSNick Piggin * @vma: user vma to map to 1836e0dc0d8fSNick Piggin * @addr: target user address of this page 1837e0dc0d8fSNick Piggin * @pfn: source kernel pfn 1838e0dc0d8fSNick Piggin * 1839c462f179SRobert P. J. Day * Similar to vm_insert_page, this allows drivers to insert individual pages 1840e0dc0d8fSNick Piggin * they've allocated into a user vma. Same comments apply. 1841e0dc0d8fSNick Piggin * 1842e0dc0d8fSNick Piggin * This function should only be called from a vm_ops->fault handler, and 1843e0dc0d8fSNick Piggin * in that case the handler should return NULL. 18440d71d10aSNick Piggin * 18450d71d10aSNick Piggin * vma cannot be a COW mapping. 18460d71d10aSNick Piggin * 18470d71d10aSNick Piggin * As this is called only for pages that do not currently exist, we 18480d71d10aSNick Piggin * do not need to flush old virtual caches or the TLB. 1849e0dc0d8fSNick Piggin */ 1850e0dc0d8fSNick Piggin int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 1851e0dc0d8fSNick Piggin unsigned long pfn) 1852e0dc0d8fSNick Piggin { 18531745cbc5SAndy Lutomirski return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); 18541745cbc5SAndy Lutomirski } 18551745cbc5SAndy Lutomirski EXPORT_SYMBOL(vm_insert_pfn); 18561745cbc5SAndy Lutomirski 18571745cbc5SAndy Lutomirski /** 18581745cbc5SAndy Lutomirski * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot 18591745cbc5SAndy Lutomirski * @vma: user vma to map to 18601745cbc5SAndy Lutomirski * @addr: target user address of this page 18611745cbc5SAndy Lutomirski * @pfn: source kernel pfn 18621745cbc5SAndy Lutomirski * @pgprot: pgprot flags for the inserted page 18631745cbc5SAndy Lutomirski * 18641745cbc5SAndy Lutomirski * This is exactly like vm_insert_pfn, except that it allows drivers 18651745cbc5SAndy Lutomirski * to override pgprot on a per-page basis. 18661745cbc5SAndy Lutomirski * 18671745cbc5SAndy Lutomirski * This only makes sense for IO mappings, and it makes no sense for 18681745cbc5SAndy Lutomirski * cow mappings. In general, using multiple vmas is preferable; 18691745cbc5SAndy Lutomirski * vm_insert_pfn_prot should only be used if using multiple VMAs is 18701745cbc5SAndy Lutomirski * impractical. 18711745cbc5SAndy Lutomirski */ 18721745cbc5SAndy Lutomirski int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 18731745cbc5SAndy Lutomirski unsigned long pfn, pgprot_t pgprot) 18741745cbc5SAndy Lutomirski { 18752ab64037Svenkatesh.pallipadi@intel.com int ret; 18767e675137SNick Piggin /* 18777e675137SNick Piggin * Technically, architectures with pte_special can avoid all these 18787e675137SNick Piggin * restrictions (same for remap_pfn_range). However we would like 18797e675137SNick Piggin * consistency in testing and feature parity among all, so we should 18807e675137SNick Piggin * try to keep these invariants in place for everybody.
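 * (The BUG_ON()s just below spell the invariants out: exactly one of
 * VM_PFNMAP and VM_MIXEDMAP must be set, a VM_PFNMAP vma must not be
 * a COW mapping, and a VM_MIXEDMAP pfn must not be pfn_valid().)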
18817e675137SNick Piggin */ 1882b379d790SJared Hulbert BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 1883b379d790SJared Hulbert BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 1884b379d790SJared Hulbert (VM_PFNMAP|VM_MIXEDMAP)); 1885b379d790SJared Hulbert BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 1886b379d790SJared Hulbert BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 1887e0dc0d8fSNick Piggin 1888423bad60SNick Piggin if (addr < vma->vm_start || addr >= vma->vm_end) 1889423bad60SNick Piggin return -EFAULT; 1890308a047cSBorislav Petkov 1891308a047cSBorislav Petkov track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); 18922ab64037Svenkatesh.pallipadi@intel.com 1893b2770da6SRoss Zwisler ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, 1894b2770da6SRoss Zwisler false); 18952ab64037Svenkatesh.pallipadi@intel.com 18962ab64037Svenkatesh.pallipadi@intel.com return ret; 1897e0dc0d8fSNick Piggin } 18981745cbc5SAndy Lutomirski EXPORT_SYMBOL(vm_insert_pfn_prot); 1899e0dc0d8fSNick Piggin 1900b2770da6SRoss Zwisler static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 1901b2770da6SRoss Zwisler pfn_t pfn, bool mkwrite) 1902423bad60SNick Piggin { 190387744ab3SDan Williams pgprot_t pgprot = vma->vm_page_prot; 190487744ab3SDan Williams 1905423bad60SNick Piggin BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); 1906423bad60SNick Piggin 1907423bad60SNick Piggin if (addr < vma->vm_start || addr >= vma->vm_end) 1908423bad60SNick Piggin return -EFAULT; 1909308a047cSBorislav Petkov 1910308a047cSBorislav Petkov track_pfn_insert(vma, &pgprot, pfn); 1911423bad60SNick Piggin 1912423bad60SNick Piggin /* 1913423bad60SNick Piggin * If we don't have pte special, then we have to use the pfn_valid() 1914423bad60SNick Piggin * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* 1915423bad60SNick Piggin * refcount the page if pfn_valid is true (hence insert_page rather 191662eede62SHugh Dickins * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP 191762eede62SHugh Dickins * vma without pte special, it would then be refcounted as a normal page. 1918423bad60SNick Piggin */ 191903fc2da6SDan Williams if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) { 1920423bad60SNick Piggin struct page *page; 1921423bad60SNick Piggin 192203fc2da6SDan Williams /* 192303fc2da6SDan Williams * At this point we are committed to insert_page() 192403fc2da6SDan Williams * regardless of whether the caller specified flags that 192503fc2da6SDan Williams * result in pfn_t_has_page() == false.
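 * (The pfn_t_valid() test in the branch condition above is what
 * makes the pfn_to_page() below safe.)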
192603fc2da6SDan Williams */ 192703fc2da6SDan Williams page = pfn_to_page(pfn_t_to_pfn(pfn)); 192887744ab3SDan Williams return insert_page(vma, addr, page, pgprot); 1929423bad60SNick Piggin } 1930b2770da6SRoss Zwisler return insert_pfn(vma, addr, pfn, pgprot, mkwrite); 1931b2770da6SRoss Zwisler } 1932b2770da6SRoss Zwisler 1933b2770da6SRoss Zwisler int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 1934b2770da6SRoss Zwisler pfn_t pfn) 1935b2770da6SRoss Zwisler { 1936b2770da6SRoss Zwisler return __vm_insert_mixed(vma, addr, pfn, false); 1937b2770da6SRoss Zwisler 1938423bad60SNick Piggin } 1939423bad60SNick Piggin EXPORT_SYMBOL(vm_insert_mixed); 1940423bad60SNick Piggin 1941b2770da6SRoss Zwisler int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, 1942b2770da6SRoss Zwisler pfn_t pfn) 1943b2770da6SRoss Zwisler { 1944b2770da6SRoss Zwisler return __vm_insert_mixed(vma, addr, pfn, true); 1945b2770da6SRoss Zwisler } 1946b2770da6SRoss Zwisler EXPORT_SYMBOL(vm_insert_mixed_mkwrite); 1947b2770da6SRoss Zwisler 1948a145dd41SLinus Torvalds /* 19491da177e4SLinus Torvalds * maps a range of physical memory into the requested pages. the old 19501da177e4SLinus Torvalds * mappings are removed. any references to nonexistent pages results 19511da177e4SLinus Torvalds * in null mappings (currently treated as "copy-on-access") 19521da177e4SLinus Torvalds */ 19531da177e4SLinus Torvalds static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, 19541da177e4SLinus Torvalds unsigned long addr, unsigned long end, 19551da177e4SLinus Torvalds unsigned long pfn, pgprot_t prot) 19561da177e4SLinus Torvalds { 19571da177e4SLinus Torvalds pte_t *pte; 1958c74df32cSHugh Dickins spinlock_t *ptl; 19591da177e4SLinus Torvalds 1960c74df32cSHugh Dickins pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 19611da177e4SLinus Torvalds if (!pte) 19621da177e4SLinus Torvalds return -ENOMEM; 19636606c3e0SZachary Amsden arch_enter_lazy_mmu_mode(); 19641da177e4SLinus Torvalds do { 19651da177e4SLinus Torvalds BUG_ON(!pte_none(*pte)); 19667e675137SNick Piggin set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); 19671da177e4SLinus Torvalds pfn++; 19681da177e4SLinus Torvalds } while (pte++, addr += PAGE_SIZE, addr != end); 19696606c3e0SZachary Amsden arch_leave_lazy_mmu_mode(); 1970c74df32cSHugh Dickins pte_unmap_unlock(pte - 1, ptl); 19711da177e4SLinus Torvalds return 0; 19721da177e4SLinus Torvalds } 19731da177e4SLinus Torvalds 19741da177e4SLinus Torvalds static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, 19751da177e4SLinus Torvalds unsigned long addr, unsigned long end, 19761da177e4SLinus Torvalds unsigned long pfn, pgprot_t prot) 19771da177e4SLinus Torvalds { 19781da177e4SLinus Torvalds pmd_t *pmd; 19791da177e4SLinus Torvalds unsigned long next; 19801da177e4SLinus Torvalds 19811da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT; 19821da177e4SLinus Torvalds pmd = pmd_alloc(mm, pud, addr); 19831da177e4SLinus Torvalds if (!pmd) 19841da177e4SLinus Torvalds return -ENOMEM; 1985f66055abSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*pmd)); 19861da177e4SLinus Torvalds do { 19871da177e4SLinus Torvalds next = pmd_addr_end(addr, end); 19881da177e4SLinus Torvalds if (remap_pte_range(mm, pmd, addr, next, 19891da177e4SLinus Torvalds pfn + (addr >> PAGE_SHIFT), prot)) 19901da177e4SLinus Torvalds return -ENOMEM; 19911da177e4SLinus Torvalds } while (pmd++, addr = next, addr != end); 19921da177e4SLinus Torvalds return 0; 19931da177e4SLinus Torvalds } 19941da177e4SLinus Torvalds 1995c2febafcSKirill A. 
Shutemov static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, 19961da177e4SLinus Torvalds unsigned long addr, unsigned long end, 19971da177e4SLinus Torvalds unsigned long pfn, pgprot_t prot) 19981da177e4SLinus Torvalds { 19991da177e4SLinus Torvalds pud_t *pud; 20001da177e4SLinus Torvalds unsigned long next; 20011da177e4SLinus Torvalds 20021da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT; 2003c2febafcSKirill A. Shutemov pud = pud_alloc(mm, p4d, addr); 20041da177e4SLinus Torvalds if (!pud) 20051da177e4SLinus Torvalds return -ENOMEM; 20061da177e4SLinus Torvalds do { 20071da177e4SLinus Torvalds next = pud_addr_end(addr, end); 20081da177e4SLinus Torvalds if (remap_pmd_range(mm, pud, addr, next, 20091da177e4SLinus Torvalds pfn + (addr >> PAGE_SHIFT), prot)) 20101da177e4SLinus Torvalds return -ENOMEM; 20111da177e4SLinus Torvalds } while (pud++, addr = next, addr != end); 20121da177e4SLinus Torvalds return 0; 20131da177e4SLinus Torvalds } 20141da177e4SLinus Torvalds 2015c2febafcSKirill A. Shutemov static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2016c2febafcSKirill A. Shutemov unsigned long addr, unsigned long end, 2017c2febafcSKirill A. Shutemov unsigned long pfn, pgprot_t prot) 2018c2febafcSKirill A. Shutemov { 2019c2febafcSKirill A. Shutemov p4d_t *p4d; 2020c2febafcSKirill A. Shutemov unsigned long next; 2021c2febafcSKirill A. Shutemov 2022c2febafcSKirill A. Shutemov pfn -= addr >> PAGE_SHIFT; 2023c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, addr); 2024c2febafcSKirill A. Shutemov if (!p4d) 2025c2febafcSKirill A. Shutemov return -ENOMEM; 2026c2febafcSKirill A. Shutemov do { 2027c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 2028c2febafcSKirill A. Shutemov if (remap_pud_range(mm, p4d, addr, next, 2029c2febafcSKirill A. Shutemov pfn + (addr >> PAGE_SHIFT), prot)) 2030c2febafcSKirill A. Shutemov return -ENOMEM; 2031c2febafcSKirill A. Shutemov } while (p4d++, addr = next, addr != end); 2032c2febafcSKirill A. Shutemov return 0; 2033c2febafcSKirill A. Shutemov } 2034c2febafcSKirill A. Shutemov 2035bfa5bf6dSRolf Eike Beer /** 2036bfa5bf6dSRolf Eike Beer * remap_pfn_range - remap kernel memory to userspace 2037bfa5bf6dSRolf Eike Beer * @vma: user vma to map to 2038bfa5bf6dSRolf Eike Beer * @addr: target user address to start at 2039bfa5bf6dSRolf Eike Beer * @pfn: physical address of kernel memory 2040bfa5bf6dSRolf Eike Beer * @size: size of map area 2041bfa5bf6dSRolf Eike Beer * @prot: page protection flags for this mapping 2042bfa5bf6dSRolf Eike Beer * 2043bfa5bf6dSRolf Eike Beer * Note: this is only safe if the mm semaphore is held when called. 2044bfa5bf6dSRolf Eike Beer */ 20451da177e4SLinus Torvalds int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 20461da177e4SLinus Torvalds unsigned long pfn, unsigned long size, pgprot_t prot) 20471da177e4SLinus Torvalds { 20481da177e4SLinus Torvalds pgd_t *pgd; 20491da177e4SLinus Torvalds unsigned long next; 20502d15cab8SHugh Dickins unsigned long end = addr + PAGE_ALIGN(size); 20511da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 2052d5957d2fSYongji Xie unsigned long remap_pfn = pfn; 20531da177e4SLinus Torvalds int err; 20541da177e4SLinus Torvalds 20551da177e4SLinus Torvalds /* 20561da177e4SLinus Torvalds * Physically remapped pages are special. Tell the 20571da177e4SLinus Torvalds * rest of the world about it: 20581da177e4SLinus Torvalds * VM_IO tells people not to look at these pages 20591da177e4SLinus Torvalds * (accesses can have side effects). 
20606aab341eSLinus Torvalds * VM_PFNMAP tells the core MM that the base pages are just 20616aab341eSLinus Torvalds * raw PFN mappings, and do not have a "struct page" associated 20626aab341eSLinus Torvalds * with them. 2063314e51b9SKonstantin Khlebnikov * VM_DONTEXPAND 2064314e51b9SKonstantin Khlebnikov * Disable vma merging and expanding with mremap(). 2065314e51b9SKonstantin Khlebnikov * VM_DONTDUMP 2066314e51b9SKonstantin Khlebnikov * Omit vma from core dump, even when VM_IO turned off. 2067fb155c16SLinus Torvalds * 2068fb155c16SLinus Torvalds * There's a horrible special case to handle copy-on-write 2069fb155c16SLinus Torvalds * behaviour that some programs depend on. We mark the "original" 2070fb155c16SLinus Torvalds * un-COW'ed pages by matching them up with "vma->vm_pgoff". 2071b3b9c293SKonstantin Khlebnikov * See vm_normal_page() for details. 20721da177e4SLinus Torvalds */ 2073b3b9c293SKonstantin Khlebnikov if (is_cow_mapping(vma->vm_flags)) { 2074b3b9c293SKonstantin Khlebnikov if (addr != vma->vm_start || end != vma->vm_end) 2075b3b9c293SKonstantin Khlebnikov return -EINVAL; 20766aab341eSLinus Torvalds vma->vm_pgoff = pfn; 2077b3b9c293SKonstantin Khlebnikov } 2078b3b9c293SKonstantin Khlebnikov 2079d5957d2fSYongji Xie err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size)); 2080b3b9c293SKonstantin Khlebnikov if (err) 20813c8bb73aSvenkatesh.pallipadi@intel.com return -EINVAL; 2082fb155c16SLinus Torvalds 2083314e51b9SKonstantin Khlebnikov vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 20841da177e4SLinus Torvalds 20851da177e4SLinus Torvalds BUG_ON(addr >= end); 20861da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT; 20871da177e4SLinus Torvalds pgd = pgd_offset(mm, addr); 20881da177e4SLinus Torvalds flush_cache_range(vma, addr, end); 20891da177e4SLinus Torvalds do { 20901da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 2091c2febafcSKirill A. Shutemov err = remap_p4d_range(mm, pgd, addr, next, 20921da177e4SLinus Torvalds pfn + (addr >> PAGE_SHIFT), prot); 20931da177e4SLinus Torvalds if (err) 20941da177e4SLinus Torvalds break; 20951da177e4SLinus Torvalds } while (pgd++, addr = next, addr != end); 20962ab64037Svenkatesh.pallipadi@intel.com 20972ab64037Svenkatesh.pallipadi@intel.com if (err) 2098d5957d2fSYongji Xie untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size)); 20992ab64037Svenkatesh.pallipadi@intel.com 21001da177e4SLinus Torvalds return err; 21011da177e4SLinus Torvalds } 21021da177e4SLinus Torvalds EXPORT_SYMBOL(remap_pfn_range); 21031da177e4SLinus Torvalds 2104b4cbb197SLinus Torvalds /** 2105b4cbb197SLinus Torvalds * vm_iomap_memory - remap memory to userspace 2106b4cbb197SLinus Torvalds * @vma: user vma to map to 2107b4cbb197SLinus Torvalds * @start: start of area 2108b4cbb197SLinus Torvalds * @len: size of area 2109b4cbb197SLinus Torvalds * 2110b4cbb197SLinus Torvalds * This is a simplified io_remap_pfn_range() for common driver use. The 2111b4cbb197SLinus Torvalds * driver just needs to give us the physical memory range to be mapped, 2112b4cbb197SLinus Torvalds * we'll figure out the rest from the vma information. 2113b4cbb197SLinus Torvalds * 2114b4cbb197SLinus Torvalds * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get 2115b4cbb197SLinus Torvalds * whatever write-combining details or similar. 
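 *
 * A minimal sketch of a caller (hypothetical driver; the "mydrv"
 * names and fields are illustrative only, not part of this file):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return vm_iomap_memory(vma, drv->phys_base, drv->region_len);
 *	}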
2116b4cbb197SLinus Torvalds */ 2117b4cbb197SLinus Torvalds int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) 2118b4cbb197SLinus Torvalds { 2119b4cbb197SLinus Torvalds unsigned long vm_len, pfn, pages; 2120b4cbb197SLinus Torvalds 2121b4cbb197SLinus Torvalds /* Check that the physical memory area passed in looks valid */ 2122b4cbb197SLinus Torvalds if (start + len < start) 2123b4cbb197SLinus Torvalds return -EINVAL; 2124b4cbb197SLinus Torvalds /* 2125b4cbb197SLinus Torvalds * You *really* shouldn't map things that aren't page-aligned, 2126b4cbb197SLinus Torvalds * but we've historically allowed it because IO memory might 2127b4cbb197SLinus Torvalds * just have smaller alignment. 2128b4cbb197SLinus Torvalds */ 2129b4cbb197SLinus Torvalds len += start & ~PAGE_MASK; 2130b4cbb197SLinus Torvalds pfn = start >> PAGE_SHIFT; 2131b4cbb197SLinus Torvalds pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; 2132b4cbb197SLinus Torvalds if (pfn + pages < pfn) 2133b4cbb197SLinus Torvalds return -EINVAL; 2134b4cbb197SLinus Torvalds 2135b4cbb197SLinus Torvalds /* We start the mapping 'vm_pgoff' pages into the area */ 2136b4cbb197SLinus Torvalds if (vma->vm_pgoff > pages) 2137b4cbb197SLinus Torvalds return -EINVAL; 2138b4cbb197SLinus Torvalds pfn += vma->vm_pgoff; 2139b4cbb197SLinus Torvalds pages -= vma->vm_pgoff; 2140b4cbb197SLinus Torvalds 2141b4cbb197SLinus Torvalds /* Can we fit all of the mapping? */ 2142b4cbb197SLinus Torvalds vm_len = vma->vm_end - vma->vm_start; 2143b4cbb197SLinus Torvalds if (vm_len >> PAGE_SHIFT > pages) 2144b4cbb197SLinus Torvalds return -EINVAL; 2145b4cbb197SLinus Torvalds 2146b4cbb197SLinus Torvalds /* Ok, let it rip */ 2147b4cbb197SLinus Torvalds return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); 2148b4cbb197SLinus Torvalds } 2149b4cbb197SLinus Torvalds EXPORT_SYMBOL(vm_iomap_memory); 2150b4cbb197SLinus Torvalds 2151aee16b3cSJeremy Fitzhardinge static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 2152aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 2153aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 2154aee16b3cSJeremy Fitzhardinge { 2155aee16b3cSJeremy Fitzhardinge pte_t *pte; 2156aee16b3cSJeremy Fitzhardinge int err; 21572f569afdSMartin Schwidefsky pgtable_t token; 215894909914SBorislav Petkov spinlock_t *uninitialized_var(ptl); 2159aee16b3cSJeremy Fitzhardinge 2160aee16b3cSJeremy Fitzhardinge pte = (mm == &init_mm) ? 
2161aee16b3cSJeremy Fitzhardinge pte_alloc_kernel(pmd, addr) : 2162aee16b3cSJeremy Fitzhardinge pte_alloc_map_lock(mm, pmd, addr, &ptl); 2163aee16b3cSJeremy Fitzhardinge if (!pte) 2164aee16b3cSJeremy Fitzhardinge return -ENOMEM; 2165aee16b3cSJeremy Fitzhardinge 2166aee16b3cSJeremy Fitzhardinge BUG_ON(pmd_huge(*pmd)); 2167aee16b3cSJeremy Fitzhardinge 216838e0edb1SJeremy Fitzhardinge arch_enter_lazy_mmu_mode(); 216938e0edb1SJeremy Fitzhardinge 21702f569afdSMartin Schwidefsky token = pmd_pgtable(*pmd); 2171aee16b3cSJeremy Fitzhardinge 2172aee16b3cSJeremy Fitzhardinge do { 2173c36987e2SDaisuke Nishimura err = fn(pte++, token, addr, data); 2174aee16b3cSJeremy Fitzhardinge if (err) 2175aee16b3cSJeremy Fitzhardinge break; 2176c36987e2SDaisuke Nishimura } while (addr += PAGE_SIZE, addr != end); 2177aee16b3cSJeremy Fitzhardinge 217838e0edb1SJeremy Fitzhardinge arch_leave_lazy_mmu_mode(); 217938e0edb1SJeremy Fitzhardinge 2180aee16b3cSJeremy Fitzhardinge if (mm != &init_mm) 2181aee16b3cSJeremy Fitzhardinge pte_unmap_unlock(pte-1, ptl); 2182aee16b3cSJeremy Fitzhardinge return err; 2183aee16b3cSJeremy Fitzhardinge } 2184aee16b3cSJeremy Fitzhardinge 2185aee16b3cSJeremy Fitzhardinge static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 2186aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 2187aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 2188aee16b3cSJeremy Fitzhardinge { 2189aee16b3cSJeremy Fitzhardinge pmd_t *pmd; 2190aee16b3cSJeremy Fitzhardinge unsigned long next; 2191aee16b3cSJeremy Fitzhardinge int err; 2192aee16b3cSJeremy Fitzhardinge 2193ceb86879SAndi Kleen BUG_ON(pud_huge(*pud)); 2194ceb86879SAndi Kleen 2195aee16b3cSJeremy Fitzhardinge pmd = pmd_alloc(mm, pud, addr); 2196aee16b3cSJeremy Fitzhardinge if (!pmd) 2197aee16b3cSJeremy Fitzhardinge return -ENOMEM; 2198aee16b3cSJeremy Fitzhardinge do { 2199aee16b3cSJeremy Fitzhardinge next = pmd_addr_end(addr, end); 2200aee16b3cSJeremy Fitzhardinge err = apply_to_pte_range(mm, pmd, addr, next, fn, data); 2201aee16b3cSJeremy Fitzhardinge if (err) 2202aee16b3cSJeremy Fitzhardinge break; 2203aee16b3cSJeremy Fitzhardinge } while (pmd++, addr = next, addr != end); 2204aee16b3cSJeremy Fitzhardinge return err; 2205aee16b3cSJeremy Fitzhardinge } 2206aee16b3cSJeremy Fitzhardinge 2207c2febafcSKirill A. Shutemov static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, 2208aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 2209aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 2210aee16b3cSJeremy Fitzhardinge { 2211aee16b3cSJeremy Fitzhardinge pud_t *pud; 2212aee16b3cSJeremy Fitzhardinge unsigned long next; 2213aee16b3cSJeremy Fitzhardinge int err; 2214aee16b3cSJeremy Fitzhardinge 2215c2febafcSKirill A. Shutemov pud = pud_alloc(mm, p4d, addr); 2216aee16b3cSJeremy Fitzhardinge if (!pud) 2217aee16b3cSJeremy Fitzhardinge return -ENOMEM; 2218aee16b3cSJeremy Fitzhardinge do { 2219aee16b3cSJeremy Fitzhardinge next = pud_addr_end(addr, end); 2220aee16b3cSJeremy Fitzhardinge err = apply_to_pmd_range(mm, pud, addr, next, fn, data); 2221aee16b3cSJeremy Fitzhardinge if (err) 2222aee16b3cSJeremy Fitzhardinge break; 2223aee16b3cSJeremy Fitzhardinge } while (pud++, addr = next, addr != end); 2224aee16b3cSJeremy Fitzhardinge return err; 2225aee16b3cSJeremy Fitzhardinge } 2226aee16b3cSJeremy Fitzhardinge 2227c2febafcSKirill A. Shutemov static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2228c2febafcSKirill A. Shutemov unsigned long addr, unsigned long end, 2229c2febafcSKirill A. 
Shutemov pte_fn_t fn, void *data) 2230c2febafcSKirill A. Shutemov { 2231c2febafcSKirill A. Shutemov p4d_t *p4d; 2232c2febafcSKirill A. Shutemov unsigned long next; 2233c2febafcSKirill A. Shutemov int err; 2234c2febafcSKirill A. Shutemov 2235c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, addr); 2236c2febafcSKirill A. Shutemov if (!p4d) 2237c2febafcSKirill A. Shutemov return -ENOMEM; 2238c2febafcSKirill A. Shutemov do { 2239c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 2240c2febafcSKirill A. Shutemov err = apply_to_pud_range(mm, p4d, addr, next, fn, data); 2241c2febafcSKirill A. Shutemov if (err) 2242c2febafcSKirill A. Shutemov break; 2243c2febafcSKirill A. Shutemov } while (p4d++, addr = next, addr != end); 2244c2febafcSKirill A. Shutemov return err; 2245c2febafcSKirill A. Shutemov } 2246c2febafcSKirill A. Shutemov 2247aee16b3cSJeremy Fitzhardinge /* 2248aee16b3cSJeremy Fitzhardinge * Scan a region of virtual memory, filling in page tables as necessary 2249aee16b3cSJeremy Fitzhardinge * and calling a provided function on each leaf page table. 2250aee16b3cSJeremy Fitzhardinge */ 2251aee16b3cSJeremy Fitzhardinge int apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2252aee16b3cSJeremy Fitzhardinge unsigned long size, pte_fn_t fn, void *data) 2253aee16b3cSJeremy Fitzhardinge { 2254aee16b3cSJeremy Fitzhardinge pgd_t *pgd; 2255aee16b3cSJeremy Fitzhardinge unsigned long next; 225657250a5bSJeremy Fitzhardinge unsigned long end = addr + size; 2257aee16b3cSJeremy Fitzhardinge int err; 2258aee16b3cSJeremy Fitzhardinge 22599cb65bc3SMika Penttilä if (WARN_ON(addr >= end)) 22609cb65bc3SMika Penttilä return -EINVAL; 22619cb65bc3SMika Penttilä 2262aee16b3cSJeremy Fitzhardinge pgd = pgd_offset(mm, addr); 2263aee16b3cSJeremy Fitzhardinge do { 2264aee16b3cSJeremy Fitzhardinge next = pgd_addr_end(addr, end); 2265c2febafcSKirill A. Shutemov err = apply_to_p4d_range(mm, pgd, addr, next, fn, data); 2266aee16b3cSJeremy Fitzhardinge if (err) 2267aee16b3cSJeremy Fitzhardinge break; 2268aee16b3cSJeremy Fitzhardinge } while (pgd++, addr = next, addr != end); 226957250a5bSJeremy Fitzhardinge 2270aee16b3cSJeremy Fitzhardinge return err; 2271aee16b3cSJeremy Fitzhardinge } 2272aee16b3cSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(apply_to_page_range); 2273aee16b3cSJeremy Fitzhardinge 22741da177e4SLinus Torvalds /* 22759b4bdd2fSKirill A. Shutemov * handle_pte_fault chooses page fault handler according to an entry which was 22769b4bdd2fSKirill A. Shutemov * read non-atomically. Before making any commitment, on those architectures 22779b4bdd2fSKirill A. Shutemov * or configurations (e.g. i386 with PAE) which might give a mix of unmatched 22789b4bdd2fSKirill A. Shutemov * parts, do_swap_page must check under lock before unmapping the pte and 22799b4bdd2fSKirill A. Shutemov * proceeding (but do_wp_page is only called after already making such a check; 2280a335b2e1SRyota Ozaki * and do_anonymous_page can safely check later on). 
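 *
 * A minimal sketch of the check this enables, mirroring the call that
 * do_swap_page() makes below (not a separate API):
 *
 *	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
 *		goto out;	- the pte changed under us, bail out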
22818f4e2101SHugh Dickins */ 22824c21e2f2SHugh Dickins static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, 22838f4e2101SHugh Dickins pte_t *page_table, pte_t orig_pte) 22848f4e2101SHugh Dickins { 22858f4e2101SHugh Dickins int same = 1; 22868f4e2101SHugh Dickins #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) 22878f4e2101SHugh Dickins if (sizeof(pte_t) > sizeof(unsigned long)) { 22884c21e2f2SHugh Dickins spinlock_t *ptl = pte_lockptr(mm, pmd); 22894c21e2f2SHugh Dickins spin_lock(ptl); 22908f4e2101SHugh Dickins same = pte_same(*page_table, orig_pte); 22914c21e2f2SHugh Dickins spin_unlock(ptl); 22928f4e2101SHugh Dickins } 22938f4e2101SHugh Dickins #endif 22948f4e2101SHugh Dickins pte_unmap(page_table); 22958f4e2101SHugh Dickins return same; 22968f4e2101SHugh Dickins } 22978f4e2101SHugh Dickins 22989de455b2SAtsushi Nemoto static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) 22996aab341eSLinus Torvalds { 23000abdd7a8SDan Williams debug_dma_assert_idle(src); 23010abdd7a8SDan Williams 23026aab341eSLinus Torvalds /* 23036aab341eSLinus Torvalds * If the source page was a PFN mapping, we don't have 23046aab341eSLinus Torvalds * a "struct page" for it. We do a best-effort copy by 23056aab341eSLinus Torvalds * just copying from the original user address. If that 23066aab341eSLinus Torvalds * fails, we just zero-fill it. Live with it. 23076aab341eSLinus Torvalds */ 23086aab341eSLinus Torvalds if (unlikely(!src)) { 23099b04c5feSCong Wang void *kaddr = kmap_atomic(dst); 23105d2a2dbbSLinus Torvalds void __user *uaddr = (void __user *)(va & PAGE_MASK); 23115d2a2dbbSLinus Torvalds 23125d2a2dbbSLinus Torvalds /* 23135d2a2dbbSLinus Torvalds * This really shouldn't fail, because the page is there 23145d2a2dbbSLinus Torvalds * in the page tables. But it might just be unreadable, 23155d2a2dbbSLinus Torvalds * in which case we just give up and fill the result with 23165d2a2dbbSLinus Torvalds * zeroes. 23175d2a2dbbSLinus Torvalds */ 23185d2a2dbbSLinus Torvalds if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) 23193ecb01dfSJan Beulich clear_page(kaddr); 23209b04c5feSCong Wang kunmap_atomic(kaddr); 2321c4ec7b0dSDmitriy Monakhov flush_dcache_page(dst); 23220ed361deSNick Piggin } else 23239de455b2SAtsushi Nemoto copy_user_highpage(dst, src, va, vma); 23246aab341eSLinus Torvalds } 23256aab341eSLinus Torvalds 2326c20cd45eSMichal Hocko static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) 2327c20cd45eSMichal Hocko { 2328c20cd45eSMichal Hocko struct file *vm_file = vma->vm_file; 2329c20cd45eSMichal Hocko 2330c20cd45eSMichal Hocko if (vm_file) 2331c20cd45eSMichal Hocko return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; 2332c20cd45eSMichal Hocko 2333c20cd45eSMichal Hocko /* 2334c20cd45eSMichal Hocko * Special mappings (e.g. VDSO) do not have any file so fake 2335c20cd45eSMichal Hocko * a default GFP_KERNEL for them. 2336c20cd45eSMichal Hocko */ 2337c20cd45eSMichal Hocko return GFP_KERNEL; 2338c20cd45eSMichal Hocko } 2339c20cd45eSMichal Hocko 23401da177e4SLinus Torvalds /* 2341fb09a464SKirill A. Shutemov * Notify the address space that the page is about to become writable so that 2342fb09a464SKirill A. Shutemov * it can prohibit this or wait for the page to get into an appropriate state. 2343fb09a464SKirill A. Shutemov * 2344fb09a464SKirill A. Shutemov * We do this without the lock held, so that it can sleep if it needs to. 2345fb09a464SKirill A. 
Shutemov */ 234638b8cb7fSJan Kara static int do_page_mkwrite(struct vm_fault *vmf) 2347fb09a464SKirill A. Shutemov { 2348fb09a464SKirill A. Shutemov int ret; 234938b8cb7fSJan Kara struct page *page = vmf->page; 235038b8cb7fSJan Kara unsigned int old_flags = vmf->flags; 2351fb09a464SKirill A. Shutemov 235238b8cb7fSJan Kara vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 2353fb09a464SKirill A. Shutemov 235411bac800SDave Jiang ret = vmf->vma->vm_ops->page_mkwrite(vmf); 235538b8cb7fSJan Kara /* Restore original flags so that caller is not surprised */ 235638b8cb7fSJan Kara vmf->flags = old_flags; 2357fb09a464SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 2358fb09a464SKirill A. Shutemov return ret; 2359fb09a464SKirill A. Shutemov if (unlikely(!(ret & VM_FAULT_LOCKED))) { 2360fb09a464SKirill A. Shutemov lock_page(page); 2361fb09a464SKirill A. Shutemov if (!page->mapping) { 2362fb09a464SKirill A. Shutemov unlock_page(page); 2363fb09a464SKirill A. Shutemov return 0; /* retry */ 2364fb09a464SKirill A. Shutemov } 2365fb09a464SKirill A. Shutemov ret |= VM_FAULT_LOCKED; 2366fb09a464SKirill A. Shutemov } else 2367fb09a464SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 2368fb09a464SKirill A. Shutemov return ret; 2369fb09a464SKirill A. Shutemov } 2370fb09a464SKirill A. Shutemov 2371fb09a464SKirill A. Shutemov /* 237297ba0c2bSJan Kara * Handle dirtying of a page in shared file mapping on a write fault. 23734e047f89SShachar Raindel * 237497ba0c2bSJan Kara * The function expects the page to be locked and unlocks it. 23754e047f89SShachar Raindel */ 237697ba0c2bSJan Kara static void fault_dirty_shared_page(struct vm_area_struct *vma, 237797ba0c2bSJan Kara struct page *page) 23784e047f89SShachar Raindel { 23794e047f89SShachar Raindel struct address_space *mapping; 238097ba0c2bSJan Kara bool dirtied; 238197ba0c2bSJan Kara bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; 23824e047f89SShachar Raindel 23834e047f89SShachar Raindel dirtied = set_page_dirty(page); 23844e047f89SShachar Raindel VM_BUG_ON_PAGE(PageAnon(page), page); 238597ba0c2bSJan Kara /* 238697ba0c2bSJan Kara * Take a local copy of the address_space - page.mapping may be zeroed 238797ba0c2bSJan Kara * by truncate after unlock_page(). The address_space itself remains 238897ba0c2bSJan Kara * pinned by vma->vm_file's reference. We rely on unlock_page()'s 238997ba0c2bSJan Kara * release semantics to prevent the compiler from undoing this copying. 
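 *
 * A sketch of the ordering described here (each step happens just
 * below in this function):
 *
 *	mapping = page_rmapping(page);		- read while page is locked
 *	unlock_page(page);			- truncate may now clear
 *						  page->mapping
 *	balance_dirty_pages_ratelimited(mapping); - local copy stays valid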
239097ba0c2bSJan Kara */ 239197ba0c2bSJan Kara mapping = page_rmapping(page); 23924e047f89SShachar Raindel unlock_page(page); 23934e047f89SShachar Raindel 23944e047f89SShachar Raindel if ((dirtied || page_mkwrite) && mapping) { 23954e047f89SShachar Raindel /* 23964e047f89SShachar Raindel * Some device drivers do not set page.mapping 23974e047f89SShachar Raindel * but still dirty their pages 23984e047f89SShachar Raindel */ 23994e047f89SShachar Raindel balance_dirty_pages_ratelimited(mapping); 24004e047f89SShachar Raindel } 24014e047f89SShachar Raindel 24024e047f89SShachar Raindel if (!page_mkwrite) 24034e047f89SShachar Raindel file_update_time(vma->vm_file); 24044e047f89SShachar Raindel } 24054e047f89SShachar Raindel 240697ba0c2bSJan Kara /* 24074e047f89SShachar Raindel * Handle write page faults for pages that can be reused in the current vma 24084e047f89SShachar Raindel * 24094e047f89SShachar Raindel * This can happen either due to the mapping having the VM_SHARED flag, 24104e047f89SShachar Raindel * or due to us being the last reference standing to the page. In either 24114e047f89SShachar Raindel * case, all we need to do here is to mark the page as writable and update 24124e047f89SShachar Raindel * any related book-keeping. 24134e047f89SShachar Raindel */ 2414997dd98dSJan Kara static inline void wp_page_reuse(struct vm_fault *vmf) 241582b0f8c3SJan Kara __releases(vmf->ptl) 24164e047f89SShachar Raindel { 241782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 2418a41b70d6SJan Kara struct page *page = vmf->page; 24194e047f89SShachar Raindel pte_t entry; 24204e047f89SShachar Raindel /* 24214e047f89SShachar Raindel * Clear the page's cpupid information as the existing 24224e047f89SShachar Raindel * information potentially belongs to a now completely 24234e047f89SShachar Raindel * unrelated process. 24244e047f89SShachar Raindel */ 24254e047f89SShachar Raindel if (page) 24264e047f89SShachar Raindel page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); 24274e047f89SShachar Raindel 24282994302bSJan Kara flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 24292994302bSJan Kara entry = pte_mkyoung(vmf->orig_pte); 24304e047f89SShachar Raindel entry = maybe_mkwrite(pte_mkdirty(entry), vma); 243182b0f8c3SJan Kara if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) 243282b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 243382b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 24344e047f89SShachar Raindel } 24354e047f89SShachar Raindel 24364e047f89SShachar Raindel /* 24372f38ab2cSShachar Raindel * Handle the case of a page which we actually need to copy to a new page. 24382f38ab2cSShachar Raindel * 24392f38ab2cSShachar Raindel * Called with mmap_sem locked and the old page referenced, but 24402f38ab2cSShachar Raindel * without the ptl held. 24412f38ab2cSShachar Raindel * 24422f38ab2cSShachar Raindel * High level logic flow: 24432f38ab2cSShachar Raindel * 24442f38ab2cSShachar Raindel * - Allocate a page, copy the content of the old page to the new one. 24452f38ab2cSShachar Raindel * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. 24462f38ab2cSShachar Raindel * - Take the PTL. If the pte changed, bail out and release the allocated page 24472f38ab2cSShachar Raindel * - If the pte is still the way we remember it, update the page table and all 24482f38ab2cSShachar Raindel * relevant references. This includes dropping the reference the page-table 24492f38ab2cSShachar Raindel * held to the old page, as well as updating the rmap.
24502f38ab2cSShachar Raindel * - In any case, unlock the PTL and drop the reference we took to the old page. 24512f38ab2cSShachar Raindel */ 2452a41b70d6SJan Kara static int wp_page_copy(struct vm_fault *vmf) 24532f38ab2cSShachar Raindel { 245482b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 2455bae473a4SKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2456a41b70d6SJan Kara struct page *old_page = vmf->page; 24572f38ab2cSShachar Raindel struct page *new_page = NULL; 24582f38ab2cSShachar Raindel pte_t entry; 24592f38ab2cSShachar Raindel int page_copied = 0; 246082b0f8c3SJan Kara const unsigned long mmun_start = vmf->address & PAGE_MASK; 2461bae473a4SKirill A. Shutemov const unsigned long mmun_end = mmun_start + PAGE_SIZE; 24622f38ab2cSShachar Raindel struct mem_cgroup *memcg; 24632f38ab2cSShachar Raindel 24642f38ab2cSShachar Raindel if (unlikely(anon_vma_prepare(vma))) 24652f38ab2cSShachar Raindel goto oom; 24662f38ab2cSShachar Raindel 24672994302bSJan Kara if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { 246882b0f8c3SJan Kara new_page = alloc_zeroed_user_highpage_movable(vma, 246982b0f8c3SJan Kara vmf->address); 24702f38ab2cSShachar Raindel if (!new_page) 24712f38ab2cSShachar Raindel goto oom; 24722f38ab2cSShachar Raindel } else { 2473bae473a4SKirill A. Shutemov new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, 247482b0f8c3SJan Kara vmf->address); 24752f38ab2cSShachar Raindel if (!new_page) 24762f38ab2cSShachar Raindel goto oom; 247782b0f8c3SJan Kara cow_user_page(new_page, old_page, vmf->address, vma); 24782f38ab2cSShachar Raindel } 24792f38ab2cSShachar Raindel 2480f627c2f5SKirill A. Shutemov if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) 24812f38ab2cSShachar Raindel goto oom_free_new; 24822f38ab2cSShachar Raindel 2483eb3c24f3SMel Gorman __SetPageUptodate(new_page); 2484eb3c24f3SMel Gorman 24852f38ab2cSShachar Raindel mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 24862f38ab2cSShachar Raindel 24872f38ab2cSShachar Raindel /* 24882f38ab2cSShachar Raindel * Re-check the pte - we dropped the lock 24892f38ab2cSShachar Raindel */ 249082b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); 24912994302bSJan Kara if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { 24922f38ab2cSShachar Raindel if (old_page) { 24932f38ab2cSShachar Raindel if (!PageAnon(old_page)) { 2494eca56ff9SJerome Marchand dec_mm_counter_fast(mm, 2495eca56ff9SJerome Marchand mm_counter_file(old_page)); 24962f38ab2cSShachar Raindel inc_mm_counter_fast(mm, MM_ANONPAGES); 24972f38ab2cSShachar Raindel } 24982f38ab2cSShachar Raindel } else { 24992f38ab2cSShachar Raindel inc_mm_counter_fast(mm, MM_ANONPAGES); 25002f38ab2cSShachar Raindel } 25012994302bSJan Kara flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 25022f38ab2cSShachar Raindel entry = mk_pte(new_page, vma->vm_page_prot); 25032f38ab2cSShachar Raindel entry = maybe_mkwrite(pte_mkdirty(entry), vma); 25042f38ab2cSShachar Raindel /* 25052f38ab2cSShachar Raindel * Clear the pte entry and flush it first, before updating the 25062f38ab2cSShachar Raindel * pte with the new entry. This will avoid a race condition 25072f38ab2cSShachar Raindel * seen in the presence of one thread doing SMC and another 25082f38ab2cSShachar Raindel * thread doing COW. 25092f38ab2cSShachar Raindel */ 251082b0f8c3SJan Kara ptep_clear_flush_notify(vma, vmf->address, vmf->pte); 251182b0f8c3SJan Kara page_add_new_anon_rmap(new_page, vma, vmf->address, false); 2512f627c2f5SKirill A. 
Shutemov mem_cgroup_commit_charge(new_page, memcg, false, false); 25132f38ab2cSShachar Raindel lru_cache_add_active_or_unevictable(new_page, vma); 25142f38ab2cSShachar Raindel /* 25152f38ab2cSShachar Raindel * We call the notify macro here because, when using secondary 25162f38ab2cSShachar Raindel * mmu page tables (such as kvm shadow page tables), we want the 25172f38ab2cSShachar Raindel * new page to be mapped directly into the secondary page table. 25182f38ab2cSShachar Raindel */ 251982b0f8c3SJan Kara set_pte_at_notify(mm, vmf->address, vmf->pte, entry); 252082b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 25212f38ab2cSShachar Raindel if (old_page) { 25222f38ab2cSShachar Raindel /* 25232f38ab2cSShachar Raindel * Only after switching the pte to the new page may 25242f38ab2cSShachar Raindel * we remove the mapcount here. Otherwise another 25252f38ab2cSShachar Raindel * process may come and find the rmap count decremented 25262f38ab2cSShachar Raindel * before the pte is switched to the new page, and 25272f38ab2cSShachar Raindel * "reuse" the old page writing into it while our pte 25282f38ab2cSShachar Raindel * here still points into it and can be read by other 25292f38ab2cSShachar Raindel * threads. 25302f38ab2cSShachar Raindel * 25312f38ab2cSShachar Raindel * The critical issue is to order this 25322f38ab2cSShachar Raindel * page_remove_rmap with the ptep_clear_flush above. 25332f38ab2cSShachar Raindel * Those stores are ordered by (if nothing else) 25342f38ab2cSShachar Raindel * the barrier present in the atomic_add_negative 25352f38ab2cSShachar Raindel * in page_remove_rmap. 25362f38ab2cSShachar Raindel * 25372f38ab2cSShachar Raindel * Then the TLB flush in ptep_clear_flush ensures that 25382f38ab2cSShachar Raindel * no process can access the old page before the 25392f38ab2cSShachar Raindel * decremented mapcount is visible. And the old page 25402f38ab2cSShachar Raindel * cannot be reused until after the decremented 25412f38ab2cSShachar Raindel * mapcount is visible. So transitively, TLBs to 25422f38ab2cSShachar Raindel * old page will be flushed before it can be reused. 25432f38ab2cSShachar Raindel */ 2544d281ee61SKirill A. Shutemov page_remove_rmap(old_page, false); 25452f38ab2cSShachar Raindel } 25462f38ab2cSShachar Raindel 25472f38ab2cSShachar Raindel /* Free the old page.. */ 25482f38ab2cSShachar Raindel new_page = old_page; 25492f38ab2cSShachar Raindel page_copied = 1; 25502f38ab2cSShachar Raindel } else { 2551f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(new_page, memcg, false); 25522f38ab2cSShachar Raindel } 25532f38ab2cSShachar Raindel 25542f38ab2cSShachar Raindel if (new_page) 255509cbfeafSKirill A. Shutemov put_page(new_page); 25562f38ab2cSShachar Raindel 255782b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 25584645b9feSJérôme Glisse /* 25594645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 25604645b9feSJérôme Glisse * the above ptep_clear_flush_notify() did already call it. 25614645b9feSJérôme Glisse */ 25624645b9feSJérôme Glisse mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end); 25632f38ab2cSShachar Raindel if (old_page) { 25642f38ab2cSShachar Raindel /* 25652f38ab2cSShachar Raindel * Don't let another task, with possibly unlocked vma, 25662f38ab2cSShachar Raindel * keep the mlocked page.
25672f38ab2cSShachar Raindel */ 25682f38ab2cSShachar Raindel if (page_copied && (vma->vm_flags & VM_LOCKED)) { 25692f38ab2cSShachar Raindel lock_page(old_page); /* LRU manipulation */ 2570e90309c9SKirill A. Shutemov if (PageMlocked(old_page)) 25712f38ab2cSShachar Raindel munlock_vma_page(old_page); 25722f38ab2cSShachar Raindel unlock_page(old_page); 25732f38ab2cSShachar Raindel } 257409cbfeafSKirill A. Shutemov put_page(old_page); 25752f38ab2cSShachar Raindel } 25762f38ab2cSShachar Raindel return page_copied ? VM_FAULT_WRITE : 0; 25772f38ab2cSShachar Raindel oom_free_new: 257809cbfeafSKirill A. Shutemov put_page(new_page); 25792f38ab2cSShachar Raindel oom: 25802f38ab2cSShachar Raindel if (old_page) 258109cbfeafSKirill A. Shutemov put_page(old_page); 25822f38ab2cSShachar Raindel return VM_FAULT_OOM; 25832f38ab2cSShachar Raindel } 25842f38ab2cSShachar Raindel 258566a6197cSJan Kara /** 258666a6197cSJan Kara * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE 258766a6197cSJan Kara * writeable once the page is prepared 258866a6197cSJan Kara * 258966a6197cSJan Kara * @vmf: structure describing the fault 259066a6197cSJan Kara * 259166a6197cSJan Kara * This function handles all that is needed to finish a write page fault in a 259266a6197cSJan Kara * shared mapping due to PTE being read-only once the mapped page is prepared. 259366a6197cSJan Kara * It handles locking of PTE and modifying it. The function returns 259466a6197cSJan Kara * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE 259566a6197cSJan Kara * lock. 259666a6197cSJan Kara * 259766a6197cSJan Kara * The function expects the page to be locked or other protection against 259866a6197cSJan Kara * concurrent faults / writeback (such as DAX radix tree locks). 259966a6197cSJan Kara */ 260066a6197cSJan Kara int finish_mkwrite_fault(struct vm_fault *vmf) 260166a6197cSJan Kara { 260266a6197cSJan Kara WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); 260366a6197cSJan Kara vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, 260466a6197cSJan Kara &vmf->ptl); 260566a6197cSJan Kara /* 260666a6197cSJan Kara * We might have raced with another page fault while we released the 260766a6197cSJan Kara * pte_offset_map_lock. 260866a6197cSJan Kara */ 260966a6197cSJan Kara if (!pte_same(*vmf->pte, vmf->orig_pte)) { 261066a6197cSJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2611a19e2553SJan Kara return VM_FAULT_NOPAGE; 261266a6197cSJan Kara } 261366a6197cSJan Kara wp_page_reuse(vmf); 2614a19e2553SJan Kara return 0; 261566a6197cSJan Kara } 261666a6197cSJan Kara 2617dd906184SBoaz Harrosh /* 2618dd906184SBoaz Harrosh * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED 2619dd906184SBoaz Harrosh * mapping 2620dd906184SBoaz Harrosh */ 26212994302bSJan Kara static int wp_pfn_shared(struct vm_fault *vmf) 2622dd906184SBoaz Harrosh { 262382b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 2624bae473a4SKirill A. 
Shutemov 2625dd906184SBoaz Harrosh if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { 2626dd906184SBoaz Harrosh int ret; 2627dd906184SBoaz Harrosh 262882b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2629fe82221fSJan Kara vmf->flags |= FAULT_FLAG_MKWRITE; 263011bac800SDave Jiang ret = vma->vm_ops->pfn_mkwrite(vmf); 26312f89dc12SJan Kara if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) 2632dd906184SBoaz Harrosh return ret; 263366a6197cSJan Kara return finish_mkwrite_fault(vmf); 2634dd906184SBoaz Harrosh } 2635997dd98dSJan Kara wp_page_reuse(vmf); 2636997dd98dSJan Kara return VM_FAULT_WRITE; 2637dd906184SBoaz Harrosh } 2638dd906184SBoaz Harrosh 2639a41b70d6SJan Kara static int wp_page_shared(struct vm_fault *vmf) 264082b0f8c3SJan Kara __releases(vmf->ptl) 264193e478d4SShachar Raindel { 264282b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 264393e478d4SShachar Raindel 2644a41b70d6SJan Kara get_page(vmf->page); 264593e478d4SShachar Raindel 264693e478d4SShachar Raindel if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 264793e478d4SShachar Raindel int tmp; 264893e478d4SShachar Raindel 264982b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 265038b8cb7fSJan Kara tmp = do_page_mkwrite(vmf); 265193e478d4SShachar Raindel if (unlikely(!tmp || (tmp & 265293e478d4SShachar Raindel (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 2653a41b70d6SJan Kara put_page(vmf->page); 265493e478d4SShachar Raindel return tmp; 265593e478d4SShachar Raindel } 265666a6197cSJan Kara tmp = finish_mkwrite_fault(vmf); 2657a19e2553SJan Kara if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 2658a41b70d6SJan Kara unlock_page(vmf->page); 2659a41b70d6SJan Kara put_page(vmf->page); 266066a6197cSJan Kara return tmp; 266193e478d4SShachar Raindel } 266266a6197cSJan Kara } else { 2663997dd98dSJan Kara wp_page_reuse(vmf); 2664997dd98dSJan Kara lock_page(vmf->page); 266593e478d4SShachar Raindel } 2666997dd98dSJan Kara fault_dirty_shared_page(vma, vmf->page); 2667997dd98dSJan Kara put_page(vmf->page); 266893e478d4SShachar Raindel 2669997dd98dSJan Kara return VM_FAULT_WRITE; 267093e478d4SShachar Raindel } 267193e478d4SShachar Raindel 26722f38ab2cSShachar Raindel /* 26731da177e4SLinus Torvalds * This routine handles present pages, when users try to write 26741da177e4SLinus Torvalds * to a shared page. It is done by copying the page to a new address 26751da177e4SLinus Torvalds * and decrementing the shared-page counter for the old page. 26761da177e4SLinus Torvalds * 26771da177e4SLinus Torvalds * Note that this routine assumes that the protection checks have been 26781da177e4SLinus Torvalds * done by the caller (the low-level page fault routine in most cases). 26791da177e4SLinus Torvalds * Thus we can safely just mark it writable once we've done any necessary 26801da177e4SLinus Torvalds * COW. 26811da177e4SLinus Torvalds * 26821da177e4SLinus Torvalds * We also mark the page dirty at this point even though the page will 26831da177e4SLinus Torvalds * change only once the write actually happens. This avoids a few races, 26841da177e4SLinus Torvalds * and potentially makes it more efficient. 26851da177e4SLinus Torvalds * 26868f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 26878f4e2101SHugh Dickins * but allow concurrent faults), with pte both mapped and locked. 26888f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
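 *
 * In outline, the dispatch below is (a summary of the code, not extra
 * behaviour):
 *
 *	- no struct page (VM_PFNMAP/VM_MIXEDMAP): wp_pfn_shared() for a
 *	  shared writable mapping, otherwise wp_page_copy()
 *	- anonymous non-KSM page mapped only by us: wp_page_reuse()
 *	- shared writable file mapping: wp_page_shared()
 *	- everything else (e.g. COW of a private file page): wp_page_copy()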
26891da177e4SLinus Torvalds */ 26902994302bSJan Kara static int do_wp_page(struct vm_fault *vmf) 269182b0f8c3SJan Kara __releases(vmf->ptl) 26921da177e4SLinus Torvalds { 269382b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 26941da177e4SLinus Torvalds 2695a41b70d6SJan Kara vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); 2696a41b70d6SJan Kara if (!vmf->page) { 2697251b97f5SPeter Zijlstra /* 269864e45507SPeter Feiner * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a 269964e45507SPeter Feiner * VM_PFNMAP VMA. 2700251b97f5SPeter Zijlstra * 2701251b97f5SPeter Zijlstra * We should not cow pages in a shared writeable mapping. 2702dd906184SBoaz Harrosh * Just mark the pages writable and/or call ops->pfn_mkwrite. 2703251b97f5SPeter Zijlstra */ 2704251b97f5SPeter Zijlstra if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 2705251b97f5SPeter Zijlstra (VM_WRITE|VM_SHARED)) 27062994302bSJan Kara return wp_pfn_shared(vmf); 27072f38ab2cSShachar Raindel 270882b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2709a41b70d6SJan Kara return wp_page_copy(vmf); 2710251b97f5SPeter Zijlstra } 27111da177e4SLinus Torvalds 2712d08b3851SPeter Zijlstra /* 2713ee6a6457SPeter Zijlstra * Take out anonymous pages first, anonymous shared vmas are 2714ee6a6457SPeter Zijlstra * not dirty accountable. 2715d08b3851SPeter Zijlstra */ 2716a41b70d6SJan Kara if (PageAnon(vmf->page) && !PageKsm(vmf->page)) { 2717ba3c4ce6SHuang Ying int total_map_swapcount; 2718a41b70d6SJan Kara if (!trylock_page(vmf->page)) { 2719a41b70d6SJan Kara get_page(vmf->page); 272082b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2721a41b70d6SJan Kara lock_page(vmf->page); 272282b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 272382b0f8c3SJan Kara vmf->address, &vmf->ptl); 27242994302bSJan Kara if (!pte_same(*vmf->pte, vmf->orig_pte)) { 2725a41b70d6SJan Kara unlock_page(vmf->page); 272682b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2727a41b70d6SJan Kara put_page(vmf->page); 272828766805SShachar Raindel return 0; 2729ab967d86SHugh Dickins } 2730a41b70d6SJan Kara put_page(vmf->page); 2731ab967d86SHugh Dickins } 2732ba3c4ce6SHuang Ying if (reuse_swap_page(vmf->page, &total_map_swapcount)) { 2733ba3c4ce6SHuang Ying if (total_map_swapcount == 1) { 2734c44b6743SRik van Riel /* 27356d0a07edSAndrea Arcangeli * The page is all ours. Move it to 27366d0a07edSAndrea Arcangeli * our anon_vma so the rmap code will 27376d0a07edSAndrea Arcangeli * not search our parent or siblings. 27386d0a07edSAndrea Arcangeli * Protected against the rmap code by 27396d0a07edSAndrea Arcangeli * the page lock. 2740c44b6743SRik van Riel */ 2741a41b70d6SJan Kara page_move_anon_rmap(vmf->page, vma); 27426d0a07edSAndrea Arcangeli } 2743a41b70d6SJan Kara unlock_page(vmf->page); 2744997dd98dSJan Kara wp_page_reuse(vmf); 2745997dd98dSJan Kara return VM_FAULT_WRITE; 2746b009c024SMichel Lespinasse } 2747a41b70d6SJan Kara unlock_page(vmf->page); 2748ee6a6457SPeter Zijlstra } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 2749d08b3851SPeter Zijlstra (VM_WRITE|VM_SHARED))) { 2750a41b70d6SJan Kara return wp_page_shared(vmf); 27511da177e4SLinus Torvalds } 27521da177e4SLinus Torvalds 27531da177e4SLinus Torvalds /* 27541da177e4SLinus Torvalds * Ok, we need to copy. Oh, well.. 
27551da177e4SLinus Torvalds */ 2756a41b70d6SJan Kara get_page(vmf->page); 275728766805SShachar Raindel 275882b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2759a41b70d6SJan Kara return wp_page_copy(vmf); 27601da177e4SLinus Torvalds } 27611da177e4SLinus Torvalds 276297a89413SPeter Zijlstra static void unmap_mapping_range_vma(struct vm_area_struct *vma, 27631da177e4SLinus Torvalds unsigned long start_addr, unsigned long end_addr, 27641da177e4SLinus Torvalds struct zap_details *details) 27651da177e4SLinus Torvalds { 2766f5cc4eefSAl Viro zap_page_range_single(vma, start_addr, end_addr - start_addr, details); 27671da177e4SLinus Torvalds } 27681da177e4SLinus Torvalds 2769f808c13fSDavidlohr Bueso static inline void unmap_mapping_range_tree(struct rb_root_cached *root, 27701da177e4SLinus Torvalds struct zap_details *details) 27711da177e4SLinus Torvalds { 27721da177e4SLinus Torvalds struct vm_area_struct *vma; 27731da177e4SLinus Torvalds pgoff_t vba, vea, zba, zea; 27741da177e4SLinus Torvalds 27756b2dbba8SMichel Lespinasse vma_interval_tree_foreach(vma, root, 27761da177e4SLinus Torvalds details->first_index, details->last_index) { 27771da177e4SLinus Torvalds 27781da177e4SLinus Torvalds vba = vma->vm_pgoff; 2779d6e93217SLibin vea = vba + vma_pages(vma) - 1; 27801da177e4SLinus Torvalds zba = details->first_index; 27811da177e4SLinus Torvalds if (zba < vba) 27821da177e4SLinus Torvalds zba = vba; 27831da177e4SLinus Torvalds zea = details->last_index; 27841da177e4SLinus Torvalds if (zea > vea) 27851da177e4SLinus Torvalds zea = vea; 27861da177e4SLinus Torvalds 278797a89413SPeter Zijlstra unmap_mapping_range_vma(vma, 27881da177e4SLinus Torvalds ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 27891da177e4SLinus Torvalds ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 279097a89413SPeter Zijlstra details); 27911da177e4SLinus Torvalds } 27921da177e4SLinus Torvalds } 27931da177e4SLinus Torvalds 27941da177e4SLinus Torvalds /** 27958a5f14a2SKirill A. Shutemov * unmap_mapping_range - unmap the portion of all mmaps in the specified 27968a5f14a2SKirill A. Shutemov * address_space corresponding to the specified page range in the underlying 27978a5f14a2SKirill A. Shutemov * file. 27988a5f14a2SKirill A. Shutemov * 27993d41088fSMartin Waitz * @mapping: the address space containing mmaps to be unmapped. 28001da177e4SLinus Torvalds * @holebegin: byte in first page to unmap, relative to the start of 28011da177e4SLinus Torvalds * the underlying file. This will be rounded down to a PAGE_SIZE 280225d9e2d1Snpiggin@suse.de * boundary. Note that this is different from truncate_pagecache(), which 28031da177e4SLinus Torvalds * must keep the partial page. In contrast, we must get rid of 28041da177e4SLinus Torvalds * partial pages. 28051da177e4SLinus Torvalds * @holelen: size of prospective hole in bytes. This will be rounded 28061da177e4SLinus Torvalds * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 28071da177e4SLinus Torvalds * end of the file. 28081da177e4SLinus Torvalds * @even_cows: 1 when truncating a file, unmap even private COWed pages; 28091da177e4SLinus Torvalds * but 0 when invalidating pagecache, don't throw away private data. 
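 *
 * Example - a sketch of the typical truncation-time call (compare the
 * callers in mm/truncate.c, such as truncate_pagecache()):
 *
 *	unmap_mapping_range(mapping, newsize, 0, 1);
 *
 * which unmaps every mapping of the file from 'newsize' (rounded down
 * to a page boundary) through the end of the file, COWed pages included.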
28101da177e4SLinus Torvalds */ 28111da177e4SLinus Torvalds void unmap_mapping_range(struct address_space *mapping, 28121da177e4SLinus Torvalds loff_t const holebegin, loff_t const holelen, int even_cows) 28131da177e4SLinus Torvalds { 2814aac45363SMichal Hocko struct zap_details details = { }; 28151da177e4SLinus Torvalds pgoff_t hba = holebegin >> PAGE_SHIFT; 28161da177e4SLinus Torvalds pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 28171da177e4SLinus Torvalds 28181da177e4SLinus Torvalds /* Check for overflow. */ 28191da177e4SLinus Torvalds if (sizeof(holelen) > sizeof(hlen)) { 28201da177e4SLinus Torvalds long long holeend = 28211da177e4SLinus Torvalds (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 28221da177e4SLinus Torvalds if (holeend & ~(long long)ULONG_MAX) 28231da177e4SLinus Torvalds hlen = ULONG_MAX - hba + 1; 28241da177e4SLinus Torvalds } 28251da177e4SLinus Torvalds 28261da177e4SLinus Torvalds details.check_mapping = even_cows ? NULL : mapping; 28271da177e4SLinus Torvalds details.first_index = hba; 28281da177e4SLinus Torvalds details.last_index = hba + hlen - 1; 28291da177e4SLinus Torvalds if (details.last_index < details.first_index) 28301da177e4SLinus Torvalds details.last_index = ULONG_MAX; 28311da177e4SLinus Torvalds 283248ec833bSKirill A. Shutemov i_mmap_lock_write(mapping); 2833f808c13fSDavidlohr Bueso if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 28341da177e4SLinus Torvalds unmap_mapping_range_tree(&mapping->i_mmap, &details); 283548ec833bSKirill A. Shutemov i_mmap_unlock_write(mapping); 28361da177e4SLinus Torvalds } 28371da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_mapping_range); 28381da177e4SLinus Torvalds 28391da177e4SLinus Torvalds /* 28408f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 28418f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 28429a95f3cfSPaul Cassella * We return with pte unmapped and unlocked. 28439a95f3cfSPaul Cassella * 28449a95f3cfSPaul Cassella * We return with the mmap_sem locked or unlocked in the same cases 28459a95f3cfSPaul Cassella * as does filemap_fault(). 
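 *
 * Note that a "swap" pte may also encode a non-swap entry; those are
 * dispatched first, before any swapcache lookup, roughly:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (unlikely(non_swap_entry(entry)))
 *		- handle migration, device-private or hwpoison entries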
28461da177e4SLinus Torvalds */ 28472994302bSJan Kara int do_swap_page(struct vm_fault *vmf) 28481da177e4SLinus Torvalds { 284982b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 28500bcac06fSMinchan Kim struct page *page = NULL, *swapcache = NULL; 285100501b53SJohannes Weiner struct mem_cgroup *memcg; 2852ec560175SHuang Ying struct vma_swap_readahead swap_ra; 285365500d23SHugh Dickins swp_entry_t entry; 28541da177e4SLinus Torvalds pte_t pte; 2855d065bd81SMichel Lespinasse int locked; 2856ad8c2ee8SRik van Riel int exclusive = 0; 285783c54070SNick Piggin int ret = 0; 2858ec560175SHuang Ying bool vma_readahead = swap_use_vma_readahead(); 28591da177e4SLinus Torvalds 2860ec560175SHuang Ying if (vma_readahead) 2861ec560175SHuang Ying page = swap_readahead_detect(vmf, &swap_ra); 2862ec560175SHuang Ying if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) { 2863ec560175SHuang Ying if (page) 2864ec560175SHuang Ying put_page(page); 28658f4e2101SHugh Dickins goto out; 2866ec560175SHuang Ying } 286765500d23SHugh Dickins 28682994302bSJan Kara entry = pte_to_swp_entry(vmf->orig_pte); 2869d1737fdbSAndi Kleen if (unlikely(non_swap_entry(entry))) { 28700697212aSChristoph Lameter if (is_migration_entry(entry)) { 287182b0f8c3SJan Kara migration_entry_wait(vma->vm_mm, vmf->pmd, 287282b0f8c3SJan Kara vmf->address); 28735042db43SJérôme Glisse } else if (is_device_private_entry(entry)) { 28745042db43SJérôme Glisse /* 28755042db43SJérôme Glisse * For un-addressable device memory we call the pgmap 28765042db43SJérôme Glisse * fault handler callback. The callback must migrate 28775042db43SJérôme Glisse * the page back to some CPU accessible page. 28785042db43SJérôme Glisse */ 28795042db43SJérôme Glisse ret = device_private_entry_fault(vma, vmf->address, entry, 28805042db43SJérôme Glisse vmf->flags, vmf->pmd); 2881d1737fdbSAndi Kleen } else if (is_hwpoison_entry(entry)) { 2882d1737fdbSAndi Kleen ret = VM_FAULT_HWPOISON; 2883d1737fdbSAndi Kleen } else { 28842994302bSJan Kara print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); 2885d99be1a8SHugh Dickins ret = VM_FAULT_SIGBUS; 2886d1737fdbSAndi Kleen } 28870697212aSChristoph Lameter goto out; 28880697212aSChristoph Lameter } 28890bcac06fSMinchan Kim 28900bcac06fSMinchan Kim 28910ff92245SShailabh Nagar delayacct_set_flag(DELAYACCT_PF_SWAPIN); 2892ec560175SHuang Ying if (!page) 2893ec560175SHuang Ying page = lookup_swap_cache(entry, vma_readahead ? 
vma : NULL, 289482b0f8c3SJan Kara vmf->address); 28951da177e4SLinus Torvalds if (!page) { 28960bcac06fSMinchan Kim struct swap_info_struct *si = swp_swap_info(entry); 28970bcac06fSMinchan Kim 2898aa8d22a1SMinchan Kim if (si->flags & SWP_SYNCHRONOUS_IO && 2899aa8d22a1SMinchan Kim __swap_count(si, entry) == 1) { 29000bcac06fSMinchan Kim /* skip swapcache */ 29010bcac06fSMinchan Kim page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); 29020bcac06fSMinchan Kim if (page) { 29030bcac06fSMinchan Kim __SetPageLocked(page); 29040bcac06fSMinchan Kim __SetPageSwapBacked(page); 29050bcac06fSMinchan Kim set_page_private(page, entry.val); 29060bcac06fSMinchan Kim lru_cache_add_anon(page); 29070bcac06fSMinchan Kim swap_readpage(page, true); 29080bcac06fSMinchan Kim } 2909aa8d22a1SMinchan Kim } else { 2910aa8d22a1SMinchan Kim if (vma_readahead) 2911aa8d22a1SMinchan Kim page = do_swap_page_readahead(entry, 2912aa8d22a1SMinchan Kim GFP_HIGHUSER_MOVABLE, vmf, &swap_ra); 2913aa8d22a1SMinchan Kim else 2914aa8d22a1SMinchan Kim page = swapin_readahead(entry, 2915aa8d22a1SMinchan Kim GFP_HIGHUSER_MOVABLE, vma, vmf->address); 2916aa8d22a1SMinchan Kim swapcache = page; 29170bcac06fSMinchan Kim } 29180bcac06fSMinchan Kim 2919ec560175SHuang Ying if (!page) { 29201da177e4SLinus Torvalds /* 29218f4e2101SHugh Dickins * Back out if somebody else faulted in this pte 29228f4e2101SHugh Dickins * while we released the pte lock. 29231da177e4SLinus Torvalds */ 292482b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 292582b0f8c3SJan Kara vmf->address, &vmf->ptl); 29262994302bSJan Kara if (likely(pte_same(*vmf->pte, vmf->orig_pte))) 29271da177e4SLinus Torvalds ret = VM_FAULT_OOM; 29280ff92245SShailabh Nagar delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 292965500d23SHugh Dickins goto unlock; 29301da177e4SLinus Torvalds } 29311da177e4SLinus Torvalds 29321da177e4SLinus Torvalds /* Had to read the page from swap area: Major fault */ 29331da177e4SLinus Torvalds ret = VM_FAULT_MAJOR; 2934f8891e5eSChristoph Lameter count_vm_event(PGMAJFAULT); 29352262185cSRoman Gushchin count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 2936d1737fdbSAndi Kleen } else if (PageHWPoison(page)) { 293771f72525SWu Fengguang /* 293871f72525SWu Fengguang * hwpoisoned dirty swapcache pages are kept for killing 293971f72525SWu Fengguang * owner processes (which may be unknown at hwpoison time) 294071f72525SWu Fengguang */ 2941d1737fdbSAndi Kleen ret = VM_FAULT_HWPOISON; 2942d1737fdbSAndi Kleen delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 294356f31801SHugh Dickins swapcache = page; 29444779cb31SAndi Kleen goto out_release; 29451da177e4SLinus Torvalds } 29461da177e4SLinus Torvalds 294782b0f8c3SJan Kara locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); 2948e709ffd6SRik van Riel 294920a1022dSBalbir Singh delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2950d065bd81SMichel Lespinasse if (!locked) { 2951d065bd81SMichel Lespinasse ret |= VM_FAULT_RETRY; 2952d065bd81SMichel Lespinasse goto out_release; 2953d065bd81SMichel Lespinasse } 29541da177e4SLinus Torvalds 29554969c119SAndrea Arcangeli /* 295631c4a3d3SHugh Dickins * Make sure try_to_free_swap or reuse_swap_page or swapoff did not 295731c4a3d3SHugh Dickins * release the swapcache from under us. The page pin, and pte_same 295831c4a3d3SHugh Dickins * test below, are not enough to exclude that. Even if it is still 295931c4a3d3SHugh Dickins * swapcache, we need to check that the page's swap has not changed. 
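 *
 * Concretely, the test below re-validates both facts at once:
 *
 *	PageSwapCache(page) && page_private(page) == entry.val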
29604969c119SAndrea Arcangeli */ 29610bcac06fSMinchan Kim if (unlikely((!PageSwapCache(page) || 29620bcac06fSMinchan Kim page_private(page) != entry.val)) && swapcache) 29634969c119SAndrea Arcangeli goto out_page; 29644969c119SAndrea Arcangeli 296582b0f8c3SJan Kara page = ksm_might_need_to_copy(page, vma, vmf->address); 29664969c119SAndrea Arcangeli if (unlikely(!page)) { 29675ad64688SHugh Dickins ret = VM_FAULT_OOM; 29684969c119SAndrea Arcangeli page = swapcache; 29694969c119SAndrea Arcangeli goto out_page; 29704969c119SAndrea Arcangeli } 29715ad64688SHugh Dickins 2972bae473a4SKirill A. Shutemov if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, 2973bae473a4SKirill A. Shutemov &memcg, false)) { 2974073e587eSKAMEZAWA Hiroyuki ret = VM_FAULT_OOM; 2975bc43f75cSJohannes Weiner goto out_page; 2976073e587eSKAMEZAWA Hiroyuki } 2977073e587eSKAMEZAWA Hiroyuki 29781da177e4SLinus Torvalds /* 29798f4e2101SHugh Dickins * Back out if somebody else already faulted in this pte. 29801da177e4SLinus Torvalds */ 298182b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 298282b0f8c3SJan Kara &vmf->ptl); 29832994302bSJan Kara if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) 2984b8107480SKirill Korotaev goto out_nomap; 2985b8107480SKirill Korotaev 2986b8107480SKirill Korotaev if (unlikely(!PageUptodate(page))) { 2987b8107480SKirill Korotaev ret = VM_FAULT_SIGBUS; 2988b8107480SKirill Korotaev goto out_nomap; 29891da177e4SLinus Torvalds } 29901da177e4SLinus Torvalds 29918c7c6e34SKAMEZAWA Hiroyuki /* 29928c7c6e34SKAMEZAWA Hiroyuki * The page isn't present yet, go ahead with the fault. 29938c7c6e34SKAMEZAWA Hiroyuki * 29948c7c6e34SKAMEZAWA Hiroyuki * Be careful about the sequence of operations here. 29958c7c6e34SKAMEZAWA Hiroyuki * To get its accounting right, reuse_swap_page() must be called 29968c7c6e34SKAMEZAWA Hiroyuki * while the page is counted on swap but not yet in mapcount i.e. 29978c7c6e34SKAMEZAWA Hiroyuki * before page_add_anon_rmap() and swap_free(); try_to_free_swap() 29988c7c6e34SKAMEZAWA Hiroyuki * must be called after the swap_free(), or it will never succeed. 29998c7c6e34SKAMEZAWA Hiroyuki */ 30001da177e4SLinus Torvalds 3001bae473a4SKirill A. Shutemov inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 3002bae473a4SKirill A. Shutemov dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); 30031da177e4SLinus Torvalds pte = mk_pte(page, vma->vm_page_prot); 300482b0f8c3SJan Kara if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { 30051da177e4SLinus Torvalds pte = maybe_mkwrite(pte_mkdirty(pte), vma); 300682b0f8c3SJan Kara vmf->flags &= ~FAULT_FLAG_WRITE; 30079a5b489bSAndrea Arcangeli ret |= VM_FAULT_WRITE; 3008d281ee61SKirill A. Shutemov exclusive = RMAP_EXCLUSIVE; 30091da177e4SLinus Torvalds } 30101da177e4SLinus Torvalds flush_icache_page(vma, page); 30112994302bSJan Kara if (pte_swp_soft_dirty(vmf->orig_pte)) 3012179ef71cSCyrill Gorcunov pte = pte_mksoft_dirty(pte); 301382b0f8c3SJan Kara set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); 30142994302bSJan Kara vmf->orig_pte = pte; 30150bcac06fSMinchan Kim 30160bcac06fSMinchan Kim /* ksm created a completely new copy */ 30170bcac06fSMinchan Kim if (unlikely(page != swapcache && swapcache)) { 301882b0f8c3SJan Kara page_add_new_anon_rmap(page, vma, vmf->address, false); 3019f627c2f5SKirill A. 
Shutemov mem_cgroup_commit_charge(page, memcg, false, false); 302000501b53SJohannes Weiner lru_cache_add_active_or_unevictable(page, vma); 30210bcac06fSMinchan Kim } else { 30220bcac06fSMinchan Kim do_page_add_anon_rmap(page, vma, vmf->address, exclusive); 30230bcac06fSMinchan Kim mem_cgroup_commit_charge(page, memcg, true, false); 30240bcac06fSMinchan Kim activate_page(page); 302500501b53SJohannes Weiner } 30261da177e4SLinus Torvalds 3027c475a8abSHugh Dickins swap_free(entry); 30285ccc5abaSVladimir Davydov if (mem_cgroup_swap_full(page) || 30295ccc5abaSVladimir Davydov (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) 3030a2c43eedSHugh Dickins try_to_free_swap(page); 3031c475a8abSHugh Dickins unlock_page(page); 30320bcac06fSMinchan Kim if (page != swapcache && swapcache) { 30334969c119SAndrea Arcangeli /* 30344969c119SAndrea Arcangeli * Hold the lock to avoid the swap entry to be reused 30354969c119SAndrea Arcangeli * until we take the PT lock for the pte_same() check 30364969c119SAndrea Arcangeli * (to avoid false positives from pte_same). For 30374969c119SAndrea Arcangeli * further safety release the lock after the swap_free 30384969c119SAndrea Arcangeli * so that the swap count won't change under a 30394969c119SAndrea Arcangeli * parallel locked swapcache. 30404969c119SAndrea Arcangeli */ 30414969c119SAndrea Arcangeli unlock_page(swapcache); 304209cbfeafSKirill A. Shutemov put_page(swapcache); 30434969c119SAndrea Arcangeli } 3044c475a8abSHugh Dickins 304582b0f8c3SJan Kara if (vmf->flags & FAULT_FLAG_WRITE) { 30462994302bSJan Kara ret |= do_wp_page(vmf); 304761469f1dSHugh Dickins if (ret & VM_FAULT_ERROR) 304861469f1dSHugh Dickins ret &= VM_FAULT_ERROR; 30491da177e4SLinus Torvalds goto out; 30501da177e4SLinus Torvalds } 30511da177e4SLinus Torvalds 30521da177e4SLinus Torvalds /* No need to invalidate - it was non-present before */ 305382b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 305465500d23SHugh Dickins unlock: 305582b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 30561da177e4SLinus Torvalds out: 30571da177e4SLinus Torvalds return ret; 3058b8107480SKirill Korotaev out_nomap: 3059f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 306082b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 3061bc43f75cSJohannes Weiner out_page: 3062b8107480SKirill Korotaev unlock_page(page); 30634779cb31SAndi Kleen out_release: 306409cbfeafSKirill A. Shutemov put_page(page); 30650bcac06fSMinchan Kim if (page != swapcache && swapcache) { 30664969c119SAndrea Arcangeli unlock_page(swapcache); 306709cbfeafSKirill A. Shutemov put_page(swapcache); 30684969c119SAndrea Arcangeli } 306965500d23SHugh Dickins return ret; 30701da177e4SLinus Torvalds } 30711da177e4SLinus Torvalds 30721da177e4SLinus Torvalds /* 30738f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 30748f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 30758f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 30761da177e4SLinus Torvalds */ 307782b0f8c3SJan Kara static int do_anonymous_page(struct vm_fault *vmf) 30781da177e4SLinus Torvalds { 307982b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 308000501b53SJohannes Weiner struct mem_cgroup *memcg; 30818f4e2101SHugh Dickins struct page *page; 30826b31d595SMichal Hocko int ret = 0; 30831da177e4SLinus Torvalds pte_t entry; 30841da177e4SLinus Torvalds 30856b7339f4SKirill A. Shutemov /* File mapping without ->vm_ops ? */ 30866b7339f4SKirill A. 
Shutemov if (vma->vm_flags & VM_SHARED) 30876b7339f4SKirill A. Shutemov return VM_FAULT_SIGBUS; 30886b7339f4SKirill A. Shutemov 30897267ec00SKirill A. Shutemov /* 30907267ec00SKirill A. Shutemov * Use pte_alloc() instead of pte_alloc_map(). We can't run 30917267ec00SKirill A. Shutemov * pte_offset_map() on pmds where a huge pmd might be created 30927267ec00SKirill A. Shutemov * from a different thread. 30937267ec00SKirill A. Shutemov * 30947267ec00SKirill A. Shutemov * pte_alloc_map() is safe to use under down_write(mmap_sem) or when 30957267ec00SKirill A. Shutemov * parallel threads are excluded by other means. 30967267ec00SKirill A. Shutemov * 30977267ec00SKirill A. Shutemov * Here we only have down_read(mmap_sem). 30987267ec00SKirill A. Shutemov */ 309982b0f8c3SJan Kara if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address)) 31007267ec00SKirill A. Shutemov return VM_FAULT_OOM; 31017267ec00SKirill A. Shutemov 31027267ec00SKirill A. Shutemov /* See the comment in pte_alloc_one_map() */ 310382b0f8c3SJan Kara if (unlikely(pmd_trans_unstable(vmf->pmd))) 31047267ec00SKirill A. Shutemov return 0; 31057267ec00SKirill A. Shutemov 310611ac5524SLinus Torvalds /* Use the zero-page for reads */ 310782b0f8c3SJan Kara if (!(vmf->flags & FAULT_FLAG_WRITE) && 3108bae473a4SKirill A. Shutemov !mm_forbids_zeropage(vma->vm_mm)) { 310982b0f8c3SJan Kara entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), 311062eede62SHugh Dickins vma->vm_page_prot)); 311182b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 311282b0f8c3SJan Kara vmf->address, &vmf->ptl); 311382b0f8c3SJan Kara if (!pte_none(*vmf->pte)) 3114a13ea5b7SHugh Dickins goto unlock; 31156b31d595SMichal Hocko ret = check_stable_address_space(vma->vm_mm); 31166b31d595SMichal Hocko if (ret) 31176b31d595SMichal Hocko goto unlock; 31186b251fc9SAndrea Arcangeli /* Deliver the page fault to userland, check inside PT lock */ 31196b251fc9SAndrea Arcangeli if (userfaultfd_missing(vma)) { 312082b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 312182b0f8c3SJan Kara return handle_userfault(vmf, VM_UFFD_MISSING); 31226b251fc9SAndrea Arcangeli } 3123a13ea5b7SHugh Dickins goto setpte; 3124a13ea5b7SHugh Dickins } 3125a13ea5b7SHugh Dickins 31261da177e4SLinus Torvalds /* Allocate our own private page. */ 31271da177e4SLinus Torvalds if (unlikely(anon_vma_prepare(vma))) 312865500d23SHugh Dickins goto oom; 312982b0f8c3SJan Kara page = alloc_zeroed_user_highpage_movable(vma, vmf->address); 31301da177e4SLinus Torvalds if (!page) 313165500d23SHugh Dickins goto oom; 3132eb3c24f3SMel Gorman 3133bae473a4SKirill A. Shutemov if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false)) 3134eb3c24f3SMel Gorman goto oom_free_page; 3135eb3c24f3SMel Gorman 313652f37629SMinchan Kim /* 313752f37629SMinchan Kim * The memory barrier inside __SetPageUptodate makes sure that 313852f37629SMinchan Kim * preceding stores to the page contents become visible before 313952f37629SMinchan Kim * the set_pte_at() write.
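 *
 * Ordering sketch (each step is performed in this function):
 *
 *	alloc_zeroed_user_highpage_movable()	- fill the page with zeroes
 *	__SetPageUptodate(page)			- publish the page contents
 *	set_pte_at(mm, addr, pte, entry)	- publish the mapping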
314052f37629SMinchan Kim */ 31410ed361deSNick Piggin __SetPageUptodate(page); 31421da177e4SLinus Torvalds 314365500d23SHugh Dickins entry = mk_pte(page, vma->vm_page_prot); 31441ac0cb5dSHugh Dickins if (vma->vm_flags & VM_WRITE) 31451ac0cb5dSHugh Dickins entry = pte_mkwrite(pte_mkdirty(entry)); 31468f4e2101SHugh Dickins 314782b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 314882b0f8c3SJan Kara &vmf->ptl); 314982b0f8c3SJan Kara if (!pte_none(*vmf->pte)) 31508f4e2101SHugh Dickins goto release; 31519ba69294SHugh Dickins 31526b31d595SMichal Hocko ret = check_stable_address_space(vma->vm_mm); 31536b31d595SMichal Hocko if (ret) 31546b31d595SMichal Hocko goto release; 31556b31d595SMichal Hocko 31566b251fc9SAndrea Arcangeli /* Deliver the page fault to userland, check inside PT lock */ 31576b251fc9SAndrea Arcangeli if (userfaultfd_missing(vma)) { 315882b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 3159f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 316009cbfeafSKirill A. Shutemov put_page(page); 316182b0f8c3SJan Kara return handle_userfault(vmf, VM_UFFD_MISSING); 31626b251fc9SAndrea Arcangeli } 31636b251fc9SAndrea Arcangeli 3164bae473a4SKirill A. Shutemov inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 316582b0f8c3SJan Kara page_add_new_anon_rmap(page, vma, vmf->address, false); 3166f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, false); 316700501b53SJohannes Weiner lru_cache_add_active_or_unevictable(page, vma); 3168a13ea5b7SHugh Dickins setpte: 316982b0f8c3SJan Kara set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); 31701da177e4SLinus Torvalds 31711da177e4SLinus Torvalds /* No need to invalidate - it was non-present before */ 317282b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 317365500d23SHugh Dickins unlock: 317482b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 31756b31d595SMichal Hocko return ret; 31768f4e2101SHugh Dickins release: 3177f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 317809cbfeafSKirill A. Shutemov put_page(page); 31798f4e2101SHugh Dickins goto unlock; 31808a9f3ccdSBalbir Singh oom_free_page: 318109cbfeafSKirill A. Shutemov put_page(page); 318265500d23SHugh Dickins oom: 31831da177e4SLinus Torvalds return VM_FAULT_OOM; 31841da177e4SLinus Torvalds } 31851da177e4SLinus Torvalds 31869a95f3cfSPaul Cassella /* 31879a95f3cfSPaul Cassella * The mmap_sem must have been held on entry, and may have been 31889a95f3cfSPaul Cassella * released depending on flags and vma->vm_ops->fault() return value. 31899a95f3cfSPaul Cassella * See filemap_fault() and __lock_page_retry(). 31909a95f3cfSPaul Cassella */ 3191936ca80dSJan Kara static int __do_fault(struct vm_fault *vmf) 31927eae74afSKirill A. Shutemov { 319382b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 31947eae74afSKirill A. Shutemov int ret; 31957eae74afSKirill A. Shutemov 319611bac800SDave Jiang ret = vma->vm_ops->fault(vmf); 31973917048dSJan Kara if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 3198b1aa812bSJan Kara VM_FAULT_DONE_COW))) 31997eae74afSKirill A. Shutemov return ret; 32007eae74afSKirill A. Shutemov 3201667240e0SJan Kara if (unlikely(PageHWPoison(vmf->page))) { 32027eae74afSKirill A. Shutemov if (ret & VM_FAULT_LOCKED) 3203667240e0SJan Kara unlock_page(vmf->page); 3204667240e0SJan Kara put_page(vmf->page); 3205936ca80dSJan Kara vmf->page = NULL; 32067eae74afSKirill A. Shutemov return VM_FAULT_HWPOISON; 32077eae74afSKirill A. 
Shutemov } 32087eae74afSKirill A. Shutemov 32097eae74afSKirill A. Shutemov if (unlikely(!(ret & VM_FAULT_LOCKED))) 3210667240e0SJan Kara lock_page(vmf->page); 32117eae74afSKirill A. Shutemov else 3212667240e0SJan Kara VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); 32137eae74afSKirill A. Shutemov 32147eae74afSKirill A. Shutemov return ret; 32157eae74afSKirill A. Shutemov } 32167eae74afSKirill A. Shutemov 3217d0f0931dSRoss Zwisler /* 3218d0f0931dSRoss Zwisler * The ordering of these checks is important for pmds with _PAGE_DEVMAP set. 3219d0f0931dSRoss Zwisler * If we check pmd_trans_unstable() first we will trip the bad_pmd() check 3220d0f0931dSRoss Zwisler * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly 3221d0f0931dSRoss Zwisler * returning 1 but not before it spams dmesg with the pmd_clear_bad() output. 3222d0f0931dSRoss Zwisler */ 3223d0f0931dSRoss Zwisler static int pmd_devmap_trans_unstable(pmd_t *pmd) 3224d0f0931dSRoss Zwisler { 3225d0f0931dSRoss Zwisler return pmd_devmap(*pmd) || pmd_trans_unstable(pmd); 3226d0f0931dSRoss Zwisler } 3227d0f0931dSRoss Zwisler 322882b0f8c3SJan Kara static int pte_alloc_one_map(struct vm_fault *vmf) 32297267ec00SKirill A. Shutemov { 323082b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 32317267ec00SKirill A. Shutemov 323282b0f8c3SJan Kara if (!pmd_none(*vmf->pmd)) 32337267ec00SKirill A. Shutemov goto map_pte; 323482b0f8c3SJan Kara if (vmf->prealloc_pte) { 323582b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 323682b0f8c3SJan Kara if (unlikely(!pmd_none(*vmf->pmd))) { 323782b0f8c3SJan Kara spin_unlock(vmf->ptl); 32387267ec00SKirill A. Shutemov goto map_pte; 32397267ec00SKirill A. Shutemov } 32407267ec00SKirill A. Shutemov 3241c4812909SKirill A. Shutemov mm_inc_nr_ptes(vma->vm_mm); 324282b0f8c3SJan Kara pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 324382b0f8c3SJan Kara spin_unlock(vmf->ptl); 32447f2b6ce8STobin C Harding vmf->prealloc_pte = NULL; 324582b0f8c3SJan Kara } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { 32467267ec00SKirill A. Shutemov return VM_FAULT_OOM; 32477267ec00SKirill A. Shutemov } 32487267ec00SKirill A. Shutemov map_pte: 32497267ec00SKirill A. Shutemov /* 32507267ec00SKirill A. Shutemov * If a huge pmd materialized under us just retry later. Use 3251d0f0931dSRoss Zwisler * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of 3252d0f0931dSRoss Zwisler * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge 3253d0f0931dSRoss Zwisler * under us and then back to pmd_none, as a result of MADV_DONTNEED 3254d0f0931dSRoss Zwisler * running immediately after a huge pmd fault in a different thread of 3255d0f0931dSRoss Zwisler * this mm, in turn leading to a misleading pmd_trans_huge() retval. 3256d0f0931dSRoss Zwisler * All we have to ensure is that it is a regular pmd that we can walk 3257d0f0931dSRoss Zwisler * with pte_offset_map() and we can do that through an atomic read in 3258d0f0931dSRoss Zwisler * C, which is what pmd_trans_unstable() provides. 32597267ec00SKirill A. Shutemov */ 3260d0f0931dSRoss Zwisler if (pmd_devmap_trans_unstable(vmf->pmd)) 32617267ec00SKirill A. Shutemov return VM_FAULT_NOPAGE; 32627267ec00SKirill A. Shutemov 3263d0f0931dSRoss Zwisler /* 3264d0f0931dSRoss Zwisler * At this point we know that our vmf->pmd points to a page of ptes 3265d0f0931dSRoss Zwisler * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge() 3266d0f0931dSRoss Zwisler * for the duration of the fault. 
If a racing MADV_DONTNEED runs and 3267d0f0931dSRoss Zwisler * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still 3268d0f0931dSRoss Zwisler * be valid and we will re-check to make sure the vmf->pte isn't 3269d0f0931dSRoss Zwisler * pte_none() under vmf->ptl protection when we return to 3270d0f0931dSRoss Zwisler * alloc_set_pte(). 3271d0f0931dSRoss Zwisler */ 327282b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 327382b0f8c3SJan Kara &vmf->ptl); 32747267ec00SKirill A. Shutemov return 0; 32757267ec00SKirill A. Shutemov } 32767267ec00SKirill A. Shutemov 3277e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 327810102459SKirill A. Shutemov 327810102459SKirill A. Shutemov #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1) 328010102459SKirill A. Shutemov static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, 328110102459SKirill A. Shutemov unsigned long haddr) 328210102459SKirill A. Shutemov { 328310102459SKirill A. Shutemov if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != 328410102459SKirill A. Shutemov (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) 328510102459SKirill A. Shutemov return false; 328610102459SKirill A. Shutemov if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) 328710102459SKirill A. Shutemov return false; 328810102459SKirill A. Shutemov return true; 328910102459SKirill A. Shutemov } 329010102459SKirill A. Shutemov 329182b0f8c3SJan Kara static void deposit_prealloc_pte(struct vm_fault *vmf) 3292953c66c2SAneesh Kumar K.V { 329382b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 3294953c66c2SAneesh Kumar K.V 329582b0f8c3SJan Kara pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 3296953c66c2SAneesh Kumar K.V /* 3297953c66c2SAneesh Kumar K.V * We are going to consume the prealloc table, 3298953c66c2SAneesh Kumar K.V * count that as nr_ptes. 3299953c66c2SAneesh Kumar K.V */ 3300c4812909SKirill A. Shutemov mm_inc_nr_ptes(vma->vm_mm); 33017f2b6ce8STobin C Harding vmf->prealloc_pte = NULL; 3302953c66c2SAneesh Kumar K.V } 3303953c66c2SAneesh Kumar K.V 330482b0f8c3SJan Kara static int do_set_pmd(struct vm_fault *vmf, struct page *page) 330510102459SKirill A. Shutemov { 330682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 330782b0f8c3SJan Kara bool write = vmf->flags & FAULT_FLAG_WRITE; 330882b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 330910102459SKirill A. Shutemov pmd_t entry; 331010102459SKirill A. Shutemov int i, ret; 331110102459SKirill A. Shutemov 331210102459SKirill A. Shutemov if (!transhuge_vma_suitable(vma, haddr)) 331310102459SKirill A. Shutemov return VM_FAULT_FALLBACK; 331410102459SKirill A. Shutemov 331510102459SKirill A. Shutemov ret = VM_FAULT_FALLBACK; 331610102459SKirill A. Shutemov page = compound_head(page); 331710102459SKirill A. Shutemov 3318953c66c2SAneesh Kumar K.V /* 3319953c66c2SAneesh Kumar K.V * Archs like ppc64 need additional space to store information 3320953c66c2SAneesh Kumar K.V * related to pte entry. Use the preallocated table for that.
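* (On ppc64 book3s with the hash MMU, for example, the deposited table is used to remember the hash-slot information for each base page backing the huge mapping.)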
3321953c66c2SAneesh Kumar K.V */ 332282b0f8c3SJan Kara if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { 332382b0f8c3SJan Kara vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address); 332482b0f8c3SJan Kara if (!vmf->prealloc_pte) 3325953c66c2SAneesh Kumar K.V return VM_FAULT_OOM; 3326953c66c2SAneesh Kumar K.V smp_wmb(); /* See comment in __pte_alloc() */ 3327953c66c2SAneesh Kumar K.V } 3328953c66c2SAneesh Kumar K.V 332982b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 333082b0f8c3SJan Kara if (unlikely(!pmd_none(*vmf->pmd))) 333110102459SKirill A. Shutemov goto out; 333210102459SKirill A. Shutemov 333310102459SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 333410102459SKirill A. Shutemov flush_icache_page(vma, page + i); 333510102459SKirill A. Shutemov 333610102459SKirill A. Shutemov entry = mk_huge_pmd(page, vma->vm_page_prot); 333710102459SKirill A. Shutemov if (write) 333810102459SKirill A. Shutemov entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 333910102459SKirill A. Shutemov 334010102459SKirill A. Shutemov add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR); 334110102459SKirill A. Shutemov page_add_file_rmap(page, true); 3342953c66c2SAneesh Kumar K.V /* 3343953c66c2SAneesh Kumar K.V * deposit and withdraw with pmd lock held 3344953c66c2SAneesh Kumar K.V */ 3345953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 334682b0f8c3SJan Kara deposit_prealloc_pte(vmf); 334710102459SKirill A. Shutemov 334882b0f8c3SJan Kara set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 334910102459SKirill A. Shutemov 335082b0f8c3SJan Kara update_mmu_cache_pmd(vma, haddr, vmf->pmd); 335110102459SKirill A. Shutemov 335210102459SKirill A. Shutemov /* fault is handled */ 335310102459SKirill A. Shutemov ret = 0; 335495ecedcdSKirill A. Shutemov count_vm_event(THP_FILE_MAPPED); 335510102459SKirill A. Shutemov out: 335682b0f8c3SJan Kara spin_unlock(vmf->ptl); 335710102459SKirill A. Shutemov return ret; 335810102459SKirill A. Shutemov } 335910102459SKirill A. Shutemov #else 336082b0f8c3SJan Kara static int do_set_pmd(struct vm_fault *vmf, struct page *page) 336110102459SKirill A. Shutemov { 336210102459SKirill A. Shutemov BUILD_BUG(); 336310102459SKirill A. Shutemov return 0; 336410102459SKirill A. Shutemov } 336510102459SKirill A. Shutemov #endif 336610102459SKirill A. Shutemov 33678c6e50b0SKirill A. Shutemov /** 33687267ec00SKirill A. Shutemov * alloc_set_pte - set up a new PTE entry for the given page and add the reverse 33697267ec00SKirill A. Shutemov * page mapping. If needed, the function allocates a page table or uses a pre-allocated one. 33708c6e50b0SKirill A. Shutemov * 337182b0f8c3SJan Kara * @vmf: fault environment 33727267ec00SKirill A. Shutemov * @memcg: memcg to charge page (only for private mappings) 33738c6e50b0SKirill A. Shutemov * @page: page to map 33748c6e50b0SKirill A. Shutemov * 337582b0f8c3SJan Kara * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on 337682b0f8c3SJan Kara * return. 33778c6e50b0SKirill A. Shutemov * 33788c6e50b0SKirill A. Shutemov * Target users are the page fault handler itself and implementations of 33798c6e50b0SKirill A. Shutemov * vm_ops->map_pages. 33808c6e50b0SKirill A. Shutemov */ 338182b0f8c3SJan Kara int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, 33827267ec00SKirill A. Shutemov struct page *page) 33833bb97794SKirill A.
Shutemov pte_t entry; 338710102459SKirill A. Shutemov int ret; 338810102459SKirill A. Shutemov 338982b0f8c3SJan Kara if (pmd_none(*vmf->pmd) && PageTransCompound(page) && 3390e496cf3dSKirill A. Shutemov IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { 339110102459SKirill A. Shutemov /* THP on COW? */ 339210102459SKirill A. Shutemov VM_BUG_ON_PAGE(memcg, page); 339310102459SKirill A. Shutemov 339482b0f8c3SJan Kara ret = do_set_pmd(vmf, page); 339510102459SKirill A. Shutemov if (ret != VM_FAULT_FALLBACK) 3396b0b9b3dfSHugh Dickins return ret; 339710102459SKirill A. Shutemov } 33983bb97794SKirill A. Shutemov 339982b0f8c3SJan Kara if (!vmf->pte) { 340082b0f8c3SJan Kara ret = pte_alloc_one_map(vmf); 34017267ec00SKirill A. Shutemov if (ret) 3402b0b9b3dfSHugh Dickins return ret; 34037267ec00SKirill A. Shutemov } 34047267ec00SKirill A. Shutemov 34057267ec00SKirill A. Shutemov /* Re-check under ptl */ 3406b0b9b3dfSHugh Dickins if (unlikely(!pte_none(*vmf->pte))) 3407b0b9b3dfSHugh Dickins return VM_FAULT_NOPAGE; 34087267ec00SKirill A. Shutemov 34093bb97794SKirill A. Shutemov flush_icache_page(vma, page); 34103bb97794SKirill A. Shutemov entry = mk_pte(page, vma->vm_page_prot); 34113bb97794SKirill A. Shutemov if (write) 34123bb97794SKirill A. Shutemov entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3413bae473a4SKirill A. Shutemov /* copy-on-write page */ 3414bae473a4SKirill A. Shutemov if (write && !(vma->vm_flags & VM_SHARED)) { 34153bb97794SKirill A. Shutemov inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 341682b0f8c3SJan Kara page_add_new_anon_rmap(page, vma, vmf->address, false); 34177267ec00SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, false); 34187267ec00SKirill A. Shutemov lru_cache_add_active_or_unevictable(page, vma); 34193bb97794SKirill A. Shutemov } else { 3420eca56ff9SJerome Marchand inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); 3421dd78feddSKirill A. Shutemov page_add_file_rmap(page, false); 34223bb97794SKirill A. Shutemov } 342382b0f8c3SJan Kara set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); 34243bb97794SKirill A. Shutemov 34253bb97794SKirill A. Shutemov /* no need to invalidate: a not-present page won't be cached */ 342682b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 34277267ec00SKirill A. Shutemov 3428b0b9b3dfSHugh Dickins return 0; 34293bb97794SKirill A. Shutemov } 34303bb97794SKirill A. Shutemov 34319118c0cbSJan Kara 34329118c0cbSJan Kara /** 34339118c0cbSJan Kara * finish_fault - finish page fault once we have prepared the page to fault 34349118c0cbSJan Kara * 34359118c0cbSJan Kara * @vmf: structure describing the fault 34369118c0cbSJan Kara * 34379118c0cbSJan Kara * This function handles all that is needed to finish a page fault once the 34389118c0cbSJan Kara * page to fault in is prepared. It handles locking of PTEs, inserts the PTE 34399118c0cbSJan Kara * for the given page, adds the reverse page mapping, and handles memcg charges 34409118c0cbSJan Kara * and LRU addition. The function returns 0 on success, or a VM_FAULT_ code in 34419118c0cbSJan Kara * case of error. 34429118c0cbSJan Kara * 34439118c0cbSJan Kara * The function expects the page to be locked and on success it consumes a 34449118c0cbSJan Kara * reference on the page being mapped (for the PTE which maps it). 34459118c0cbSJan Kara */ 34469118c0cbSJan Kara int finish_fault(struct vm_fault *vmf) 34479118c0cbSJan Kara { 34489118c0cbSJan Kara struct page *page; 34496b31d595SMichal Hocko int ret = 0; 34509118c0cbSJan Kara 34519118c0cbSJan Kara /* Did we COW the page?
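* If so (a write fault on a private mapping), we must map our own copy; otherwise we map the page that ->fault() returned.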
*/ 34529118c0cbSJan Kara if ((vmf->flags & FAULT_FLAG_WRITE) && 34539118c0cbSJan Kara !(vmf->vma->vm_flags & VM_SHARED)) 34549118c0cbSJan Kara page = vmf->cow_page; 34559118c0cbSJan Kara else 34569118c0cbSJan Kara page = vmf->page; 34576b31d595SMichal Hocko 34586b31d595SMichal Hocko /* 34596b31d595SMichal Hocko * Check even for read faults because we might have lost our CoWed 34606b31d595SMichal Hocko * page 34616b31d595SMichal Hocko */ 34626b31d595SMichal Hocko if (!(vmf->vma->vm_flags & VM_SHARED)) 34636b31d595SMichal Hocko ret = check_stable_address_space(vmf->vma->vm_mm); 34646b31d595SMichal Hocko if (!ret) 34659118c0cbSJan Kara ret = alloc_set_pte(vmf, vmf->memcg, page); 34669118c0cbSJan Kara if (vmf->pte) 34679118c0cbSJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 34689118c0cbSJan Kara return ret; 34699118c0cbSJan Kara } 34709118c0cbSJan Kara 34713a91053aSKirill A. Shutemov static unsigned long fault_around_bytes __read_mostly = 34723a91053aSKirill A. Shutemov rounddown_pow_of_two(65536); 3473a9b0f861SKirill A. Shutemov 34741592eef0SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 3475a9b0f861SKirill A. Shutemov static int fault_around_bytes_get(void *data, u64 *val) 34761592eef0SKirill A. Shutemov { 3477a9b0f861SKirill A. Shutemov *val = fault_around_bytes; 34781592eef0SKirill A. Shutemov return 0; 34791592eef0SKirill A. Shutemov } 34801592eef0SKirill A. Shutemov 3481b4903d6eSAndrey Ryabinin /* 3482b4903d6eSAndrey Ryabinin * fault_around_pages() and fault_around_mask() expect fault_around_bytes 3483b4903d6eSAndrey Ryabinin * rounded down to the nearest page order. It's what do_fault_around() expects 3484b4903d6eSAndrey Ryabinin * to see. 3485b4903d6eSAndrey Ryabinin */ 3486a9b0f861SKirill A. Shutemov static int fault_around_bytes_set(void *data, u64 val) 34871592eef0SKirill A. Shutemov { 3488a9b0f861SKirill A. Shutemov if (val / PAGE_SIZE > PTRS_PER_PTE) 34891592eef0SKirill A. Shutemov return -EINVAL; 3490b4903d6eSAndrey Ryabinin if (val > PAGE_SIZE) 3491b4903d6eSAndrey Ryabinin fault_around_bytes = rounddown_pow_of_two(val); 3492b4903d6eSAndrey Ryabinin else 3493b4903d6eSAndrey Ryabinin fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */ 34941592eef0SKirill A. Shutemov return 0; 34951592eef0SKirill A. Shutemov } 34960a1345f8SYevgen Pronenko DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, 3497a9b0f861SKirill A. Shutemov fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); 34981592eef0SKirill A. Shutemov 34991592eef0SKirill A. Shutemov static int __init fault_around_debugfs(void) 35001592eef0SKirill A. Shutemov { 35011592eef0SKirill A. Shutemov void *ret; 35021592eef0SKirill A. Shutemov 35030a1345f8SYevgen Pronenko ret = debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, 3504a9b0f861SKirill A. Shutemov &fault_around_bytes_fops); 35051592eef0SKirill A. Shutemov if (!ret) 3506a9b0f861SKirill A. Shutemov pr_warn("Failed to create fault_around_bytes in debugfs"); 35071592eef0SKirill A. Shutemov return 0; 35081592eef0SKirill A. Shutemov } 35091592eef0SKirill A. Shutemov late_initcall(fault_around_debugfs); 35101592eef0SKirill A. Shutemov #endif 35118c6e50b0SKirill A. Shutemov 35121fdb412bSKirill A. Shutemov /* 35131fdb412bSKirill A. Shutemov * do_fault_around() tries to map a few pages around the fault address. The hope 35141fdb412bSKirill A. Shutemov * is that the pages will be needed soon and this will lower the number of 35151fdb412bSKirill A. Shutemov * faults to handle. 35161fdb412bSKirill A. Shutemov * 35171fdb412bSKirill A.
Shutemov * It uses vm_ops->map_pages() to map the pages, which skips the page if it's 35181fdb412bSKirill A. Shutemov * not ready to be mapped: not up-to-date, locked, etc. 35191fdb412bSKirill A. Shutemov * 35201fdb412bSKirill A. Shutemov * This function is called with the page table lock taken. In the split ptlock 35211fdb412bSKirill A. Shutemov * case the page table lock protects only those entries which belong to 35221fdb412bSKirill A. Shutemov * the page table corresponding to the fault address. 35231fdb412bSKirill A. Shutemov * 35241fdb412bSKirill A. Shutemov * This function doesn't cross the VMA boundaries, in order to call map_pages() 35251fdb412bSKirill A. Shutemov * only once. 35261fdb412bSKirill A. Shutemov * 35271fdb412bSKirill A. Shutemov * fault_around_pages() defines how many pages we'll try to map. 35281fdb412bSKirill A. Shutemov * do_fault_around() expects it to return a power of two less than or equal to 35291fdb412bSKirill A. Shutemov * PTRS_PER_PTE. 35301fdb412bSKirill A. Shutemov * 35311fdb412bSKirill A. Shutemov * The virtual address of the area that we map is naturally aligned to the 35321fdb412bSKirill A. Shutemov * fault_around_pages() value (and therefore to page order). This way it's 35331fdb412bSKirill A. Shutemov * easier to guarantee that we don't cross page table boundaries. 35341fdb412bSKirill A. Shutemov */ 35350721ec8bSJan Kara static int do_fault_around(struct vm_fault *vmf) 35368c6e50b0SKirill A. Shutemov { 353782b0f8c3SJan Kara unsigned long address = vmf->address, nr_pages, mask; 35380721ec8bSJan Kara pgoff_t start_pgoff = vmf->pgoff; 3539bae473a4SKirill A. Shutemov pgoff_t end_pgoff; 35407267ec00SKirill A. Shutemov int off, ret = 0; 35418c6e50b0SKirill A. Shutemov 35424db0c3c2SJason Low nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; 3543aecd6f44SKirill A. Shutemov mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK; 3544aecd6f44SKirill A. Shutemov 354582b0f8c3SJan Kara vmf->address = max(address & mask, vmf->vma->vm_start); 354682b0f8c3SJan Kara off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); 3547bae473a4SKirill A. Shutemov start_pgoff -= off; 35488c6e50b0SKirill A. Shutemov 35488c6e50b0SKirill A. Shutemov /* 3550bae473a4SKirill A. Shutemov * end_pgoff is either end of page table or end of vma 3551bae473a4SKirill A. Shutemov * or fault_around_pages() from start_pgoff, depending on what is nearest. 35528c6e50b0SKirill A. Shutemov */ 3553bae473a4SKirill A. Shutemov end_pgoff = start_pgoff - 355482b0f8c3SJan Kara ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + 35558c6e50b0SKirill A. Shutemov PTRS_PER_PTE - 1; 355682b0f8c3SJan Kara end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, 3557bae473a4SKirill A. Shutemov start_pgoff + nr_pages - 1); 35588c6e50b0SKirill A. Shutemov 355982b0f8c3SJan Kara if (pmd_none(*vmf->pmd)) { 356082b0f8c3SJan Kara vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm, 356182b0f8c3SJan Kara vmf->address); 356282b0f8c3SJan Kara if (!vmf->prealloc_pte) 3563c5f88bd2SVegard Nossum goto out; 35647267ec00SKirill A. Shutemov smp_wmb(); /* See comment in __pte_alloc() */ 35658c6e50b0SKirill A. Shutemov } 35668c6e50b0SKirill A. Shutemov 356782b0f8c3SJan Kara vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); 35687267ec00SKirill A. Shutemov 35697267ec00SKirill A. Shutemov /* Huge page is mapped? Page fault is solved */ 357082b0f8c3SJan Kara if (pmd_trans_huge(*vmf->pmd)) { 35717267ec00SKirill A. Shutemov ret = VM_FAULT_NOPAGE; 35727267ec00SKirill A.
Shutemov goto out; 35738c6e50b0SKirill A. Shutemov } 35748c6e50b0SKirill A. Shutemov 35757267ec00SKirill A. Shutemov /* ->map_pages() hasn't done anything useful. Cold page cache? */ 357682b0f8c3SJan Kara if (!vmf->pte) 35777267ec00SKirill A. Shutemov goto out; 35787267ec00SKirill A. Shutemov 35797267ec00SKirill A. Shutemov /* check if the page fault is solved */ 358082b0f8c3SJan Kara vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT); 358182b0f8c3SJan Kara if (!pte_none(*vmf->pte)) 35827267ec00SKirill A. Shutemov ret = VM_FAULT_NOPAGE; 358382b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 35847267ec00SKirill A. Shutemov out: 358582b0f8c3SJan Kara vmf->address = address; 358682b0f8c3SJan Kara vmf->pte = NULL; 35877267ec00SKirill A. Shutemov return ret; 35887267ec00SKirill A. Shutemov } 35897267ec00SKirill A. Shutemov 35900721ec8bSJan Kara static int do_read_fault(struct vm_fault *vmf) 3591e655fb29SKirill A. Shutemov { 359282b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 35938c6e50b0SKirill A. Shutemov int ret = 0; 35948c6e50b0SKirill A. Shutemov 35958c6e50b0SKirill A. Shutemov /* 35968c6e50b0SKirill A. Shutemov * Let's call ->map_pages() first and use ->fault() as fallback 35978c6e50b0SKirill A. Shutemov * if the page at the offset is not ready to be mapped (cold cache or 35988c6e50b0SKirill A. Shutemov * something). 35998c6e50b0SKirill A. Shutemov */ 36009b4bdd2fSKirill A. Shutemov if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { 36010721ec8bSJan Kara ret = do_fault_around(vmf); 36027267ec00SKirill A. Shutemov if (ret) 36037267ec00SKirill A. Shutemov return ret; 36048c6e50b0SKirill A. Shutemov } 3605e655fb29SKirill A. Shutemov 3606936ca80dSJan Kara ret = __do_fault(vmf); 3607e655fb29SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 3608e655fb29SKirill A. Shutemov return ret; 3609e655fb29SKirill A. Shutemov 36109118c0cbSJan Kara ret |= finish_fault(vmf); 3611936ca80dSJan Kara unlock_page(vmf->page); 36127267ec00SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 3613936ca80dSJan Kara put_page(vmf->page); 3614e655fb29SKirill A. Shutemov return ret; 3615e655fb29SKirill A. Shutemov } 3616e655fb29SKirill A. Shutemov 36170721ec8bSJan Kara static int do_cow_fault(struct vm_fault *vmf) 3618ec47c3b9SKirill A. Shutemov { 361982b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 3620ec47c3b9SKirill A. Shutemov int ret; 3621ec47c3b9SKirill A. Shutemov 3622ec47c3b9SKirill A. Shutemov if (unlikely(anon_vma_prepare(vma))) 3623ec47c3b9SKirill A. Shutemov return VM_FAULT_OOM; 3624ec47c3b9SKirill A. Shutemov 3625936ca80dSJan Kara vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); 3626936ca80dSJan Kara if (!vmf->cow_page) 3627ec47c3b9SKirill A. Shutemov return VM_FAULT_OOM; 3628ec47c3b9SKirill A. Shutemov 3629936ca80dSJan Kara if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL, 36303917048dSJan Kara &vmf->memcg, false)) { 3631936ca80dSJan Kara put_page(vmf->cow_page); 3632ec47c3b9SKirill A. Shutemov return VM_FAULT_OOM; 3633ec47c3b9SKirill A. Shutemov } 3634ec47c3b9SKirill A. Shutemov 3635936ca80dSJan Kara ret = __do_fault(vmf); 3636ec47c3b9SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 3637ec47c3b9SKirill A. Shutemov goto uncharge_out; 36383917048dSJan Kara if (ret & VM_FAULT_DONE_COW) 36393917048dSJan Kara return ret; 3640ec47c3b9SKirill A.
Shutemov 3641936ca80dSJan Kara copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); 3642936ca80dSJan Kara __SetPageUptodate(vmf->cow_page); 3643ec47c3b9SKirill A. Shutemov 36449118c0cbSJan Kara ret |= finish_fault(vmf); 3645936ca80dSJan Kara unlock_page(vmf->page); 3646936ca80dSJan Kara put_page(vmf->page); 36477267ec00SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 36487267ec00SKirill A. Shutemov goto uncharge_out; 3649ec47c3b9SKirill A. Shutemov return ret; 3650ec47c3b9SKirill A. Shutemov uncharge_out: 36513917048dSJan Kara mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false); 3652936ca80dSJan Kara put_page(vmf->cow_page); 3653ec47c3b9SKirill A. Shutemov return ret; 3654ec47c3b9SKirill A. Shutemov } 3655ec47c3b9SKirill A. Shutemov 36560721ec8bSJan Kara static int do_shared_fault(struct vm_fault *vmf) 36571da177e4SLinus Torvalds { 365882b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 3659f0c6d4d2SKirill A. Shutemov int ret, tmp; 36601d65f86dSKAMEZAWA Hiroyuki 3661936ca80dSJan Kara ret = __do_fault(vmf); 36627eae74afSKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 3663f0c6d4d2SKirill A. Shutemov return ret; 36641da177e4SLinus Torvalds 36651da177e4SLinus Torvalds /* 3666f0c6d4d2SKirill A. Shutemov * Check if the backing address space wants to know that the page is 3667f0c6d4d2SKirill A. Shutemov * about to become writable 36681da177e4SLinus Torvalds */ 3669fb09a464SKirill A. Shutemov if (vma->vm_ops->page_mkwrite) { 3670936ca80dSJan Kara unlock_page(vmf->page); 367138b8cb7fSJan Kara tmp = do_page_mkwrite(vmf); 3672fb09a464SKirill A. Shutemov if (unlikely(!tmp || 3673fb09a464SKirill A. Shutemov (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 3674936ca80dSJan Kara put_page(vmf->page); 3675f0c6d4d2SKirill A. Shutemov return tmp; 367669676147SMark Fasheh } 3677d0217ac0SNick Piggin } 3678fb09a464SKirill A. Shutemov 36799118c0cbSJan Kara ret |= finish_fault(vmf); 36807267ec00SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | 36817267ec00SKirill A. Shutemov VM_FAULT_RETRY))) { 3682936ca80dSJan Kara unlock_page(vmf->page); 3683936ca80dSJan Kara put_page(vmf->page); 3684f0c6d4d2SKirill A. Shutemov return ret; 36859637a5efSDavid Howells } 3686d00806b1SNick Piggin 368797ba0c2bSJan Kara fault_dirty_shared_page(vma, vmf->page); 3688b827e496SNick Piggin return ret; 368954cb8821SNick Piggin } 3690d00806b1SNick Piggin 36919a95f3cfSPaul Cassella /* 36929a95f3cfSPaul Cassella * We enter with non-exclusive mmap_sem (to exclude vma changes, 36939a95f3cfSPaul Cassella * but allow concurrent faults). 36949a95f3cfSPaul Cassella * The mmap_sem may have been released depending on flags and our 36959a95f3cfSPaul Cassella * return value. See filemap_fault() and __lock_page_or_retry(). 36969a95f3cfSPaul Cassella */ 369782b0f8c3SJan Kara static int do_fault(struct vm_fault *vmf) 369854cb8821SNick Piggin { 369982b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 3700b0b9b3dfSHugh Dickins int ret; 370154cb8821SNick Piggin 37026b7339f4SKirill A. Shutemov /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ 37036b7339f4SKirill A. 
Shutemov if (!vma->vm_ops->fault) 3704b0b9b3dfSHugh Dickins ret = VM_FAULT_SIGBUS; 3705b0b9b3dfSHugh Dickins else if (!(vmf->flags & FAULT_FLAG_WRITE)) 3706b0b9b3dfSHugh Dickins ret = do_read_fault(vmf); 3707b0b9b3dfSHugh Dickins else if (!(vma->vm_flags & VM_SHARED)) 3708b0b9b3dfSHugh Dickins ret = do_cow_fault(vmf); 3709b0b9b3dfSHugh Dickins else 3710b0b9b3dfSHugh Dickins ret = do_shared_fault(vmf); 3711b0b9b3dfSHugh Dickins 3712b0b9b3dfSHugh Dickins /* preallocated pagetable is unused: free it */ 3713b0b9b3dfSHugh Dickins if (vmf->prealloc_pte) { 3714b0b9b3dfSHugh Dickins pte_free(vma->vm_mm, vmf->prealloc_pte); 37157f2b6ce8STobin C Harding vmf->prealloc_pte = NULL; 3716b0b9b3dfSHugh Dickins } 3717b0b9b3dfSHugh Dickins return ret; 371854cb8821SNick Piggin } 371954cb8821SNick Piggin 3720b19a9939SRashika Kheria static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, 372104bb2f94SRik van Riel unsigned long addr, int page_nid, 372204bb2f94SRik van Riel int *flags) 37239532fec1SMel Gorman { 37249532fec1SMel Gorman get_page(page); 37259532fec1SMel Gorman 37269532fec1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS); 372704bb2f94SRik van Riel if (page_nid == numa_node_id()) { 37289532fec1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 372904bb2f94SRik van Riel *flags |= TNF_FAULT_LOCAL; 373004bb2f94SRik van Riel } 37319532fec1SMel Gorman 37329532fec1SMel Gorman return mpol_misplaced(page, vma, addr); 37339532fec1SMel Gorman } 37349532fec1SMel Gorman 37352994302bSJan Kara static int do_numa_page(struct vm_fault *vmf) 3736d10e63f2SMel Gorman { 373782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 37384daae3b4SMel Gorman struct page *page = NULL; 37398191acbdSMel Gorman int page_nid = -1; 374090572890SPeter Zijlstra int last_cpupid; 3741cbee9f88SPeter Zijlstra int target_nid; 3742b8593bfdSMel Gorman bool migrated = false; 3743cee216a6SAneesh Kumar K.V pte_t pte; 3744288bc549SAneesh Kumar K.V bool was_writable = pte_savedwrite(vmf->orig_pte); 37456688cc05SPeter Zijlstra int flags = 0; 3746d10e63f2SMel Gorman 3747d10e63f2SMel Gorman /* 3748d10e63f2SMel Gorman * The "pte" at this point cannot be used safely without 3749d10e63f2SMel Gorman * validation through pte_unmap_same(). It's of NUMA type but 3750d10e63f2SMel Gorman * the pfn may be screwed if the read is non-atomic. 3751d10e63f2SMel Gorman */ 375282b0f8c3SJan Kara vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); 375382b0f8c3SJan Kara spin_lock(vmf->ptl); 3754cee216a6SAneesh Kumar K.V if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { 375582b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 37564daae3b4SMel Gorman goto out; 37574daae3b4SMel Gorman } 37584daae3b4SMel Gorman 3759cee216a6SAneesh Kumar K.V /* 3760cee216a6SAneesh Kumar K.V * Make it present again. Depending on how the arch implements non 3761cee216a6SAneesh Kumar K.V * accessible ptes, some can allow access by kernel mode.
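* This was only a NUMA hinting fault: restore a usable pte here and let the code below decide whether the page should be migrated.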
3762cee216a6SAneesh Kumar K.V */ 3763cee216a6SAneesh Kumar K.V pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte); 37644d942466SMel Gorman pte = pte_modify(pte, vma->vm_page_prot); 37654d942466SMel Gorman pte = pte_mkyoung(pte); 3766b191f9b1SMel Gorman if (was_writable) 3767b191f9b1SMel Gorman pte = pte_mkwrite(pte); 3768cee216a6SAneesh Kumar K.V ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte); 376982b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 3770d10e63f2SMel Gorman 377182b0f8c3SJan Kara page = vm_normal_page(vma, vmf->address, pte); 3772d10e63f2SMel Gorman if (!page) { 377382b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 3774d10e63f2SMel Gorman return 0; 3775d10e63f2SMel Gorman } 3776d10e63f2SMel Gorman 3777e81c4802SKirill A. Shutemov /* TODO: handle PTE-mapped THP */ 3778e81c4802SKirill A. Shutemov if (PageCompound(page)) { 377982b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 3780e81c4802SKirill A. Shutemov return 0; 3781e81c4802SKirill A. Shutemov } 3782e81c4802SKirill A. Shutemov 37836688cc05SPeter Zijlstra /* 3784bea66fbdSMel Gorman * Avoid grouping on RO pages in general. RO pages shouldn't hurt as 3785bea66fbdSMel Gorman * much anyway since they can be in shared cache state. This misses 3786bea66fbdSMel Gorman * the case where a mapping is writable but the process never writes 3787bea66fbdSMel Gorman * to it but pte_write gets cleared during protection updates and 3788bea66fbdSMel Gorman * pte_dirty has unpredictable behaviour between PTE scan updates, 3789bea66fbdSMel Gorman * background writeback, dirty balancing and application behaviour. 37906688cc05SPeter Zijlstra */ 3791d59dc7bcSRik van Riel if (!pte_write(pte)) 37926688cc05SPeter Zijlstra flags |= TNF_NO_GROUP; 37936688cc05SPeter Zijlstra 3794dabe1d99SRik van Riel /* 3795dabe1d99SRik van Riel * Flag if the page is shared between multiple address spaces. This 3796dabe1d99SRik van Riel * is later used when determining whether to group tasks together 3797dabe1d99SRik van Riel */ 3798dabe1d99SRik van Riel if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) 3799dabe1d99SRik van Riel flags |= TNF_SHARED; 3800dabe1d99SRik van Riel 380190572890SPeter Zijlstra last_cpupid = page_cpupid_last(page); 38028191acbdSMel Gorman page_nid = page_to_nid(page); 380382b0f8c3SJan Kara target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, 3804bae473a4SKirill A. 
Shutemov &flags); 380582b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 38064daae3b4SMel Gorman if (target_nid == -1) { 38074daae3b4SMel Gorman put_page(page); 38084daae3b4SMel Gorman goto out; 38094daae3b4SMel Gorman } 38104daae3b4SMel Gorman 38114daae3b4SMel Gorman /* Migrate to the requested node */ 38121bc115d8SMel Gorman migrated = migrate_misplaced_page(page, vma, target_nid); 38136688cc05SPeter Zijlstra if (migrated) { 38148191acbdSMel Gorman page_nid = target_nid; 38156688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 3816074c2381SMel Gorman } else 3817074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 38184daae3b4SMel Gorman 38194daae3b4SMel Gorman out: 38208191acbdSMel Gorman if (page_nid != -1) 38216688cc05SPeter Zijlstra task_numa_fault(last_cpupid, page_nid, 1, flags); 3822d10e63f2SMel Gorman return 0; 3823d10e63f2SMel Gorman } 3824d10e63f2SMel Gorman 382591a90140SGeert Uytterhoeven static inline int create_huge_pmd(struct vm_fault *vmf) 3826b96375f7SMatthew Wilcox { 3827f4200391SDave Jiang if (vma_is_anonymous(vmf->vma)) 382882b0f8c3SJan Kara return do_huge_pmd_anonymous_page(vmf); 3829a2d58167SDave Jiang if (vmf->vma->vm_ops->huge_fault) 3830c791ace1SDave Jiang return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 3831b96375f7SMatthew Wilcox return VM_FAULT_FALLBACK; 3832b96375f7SMatthew Wilcox } 3833b96375f7SMatthew Wilcox 383482b0f8c3SJan Kara static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) 3835b96375f7SMatthew Wilcox { 383682b0f8c3SJan Kara if (vma_is_anonymous(vmf->vma)) 383782b0f8c3SJan Kara return do_huge_pmd_wp_page(vmf, orig_pmd); 3838a2d58167SDave Jiang if (vmf->vma->vm_ops->huge_fault) 3839c791ace1SDave Jiang return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 3840af9e4d5fSKirill A. Shutemov 3841af9e4d5fSKirill A. Shutemov /* COW handled on pte level: split pmd */ 384282b0f8c3SJan Kara VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); 384382b0f8c3SJan Kara __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); 3844af9e4d5fSKirill A. 
Shutemov 3845b96375f7SMatthew Wilcox return VM_FAULT_FALLBACK; 3846b96375f7SMatthew Wilcox } 3847b96375f7SMatthew Wilcox 384838e08854SLorenzo Stoakes static inline bool vma_is_accessible(struct vm_area_struct *vma) 384938e08854SLorenzo Stoakes { 385038e08854SLorenzo Stoakes return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE); 385138e08854SLorenzo Stoakes } 385238e08854SLorenzo Stoakes 3853a00cc7d9SMatthew Wilcox static int create_huge_pud(struct vm_fault *vmf) 3854a00cc7d9SMatthew Wilcox { 3855a00cc7d9SMatthew Wilcox #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3856a00cc7d9SMatthew Wilcox /* No support for anonymous transparent PUD pages yet */ 3857a00cc7d9SMatthew Wilcox if (vma_is_anonymous(vmf->vma)) 3858a00cc7d9SMatthew Wilcox return VM_FAULT_FALLBACK; 3859a00cc7d9SMatthew Wilcox if (vmf->vma->vm_ops->huge_fault) 3860c791ace1SDave Jiang return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 3861a00cc7d9SMatthew Wilcox #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3862a00cc7d9SMatthew Wilcox return VM_FAULT_FALLBACK; 3863a00cc7d9SMatthew Wilcox } 3864a00cc7d9SMatthew Wilcox 3865a00cc7d9SMatthew Wilcox static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) 3866a00cc7d9SMatthew Wilcox { 3867a00cc7d9SMatthew Wilcox #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3868a00cc7d9SMatthew Wilcox /* No support for anonymous transparent PUD pages yet */ 3869a00cc7d9SMatthew Wilcox if (vma_is_anonymous(vmf->vma)) 3870a00cc7d9SMatthew Wilcox return VM_FAULT_FALLBACK; 3871a00cc7d9SMatthew Wilcox if (vmf->vma->vm_ops->huge_fault) 3872c791ace1SDave Jiang return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 3873a00cc7d9SMatthew Wilcox #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3874a00cc7d9SMatthew Wilcox return VM_FAULT_FALLBACK; 3875a00cc7d9SMatthew Wilcox } 3876a00cc7d9SMatthew Wilcox 38771da177e4SLinus Torvalds /* 38781da177e4SLinus Torvalds * These routines also need to handle stuff like marking pages dirty 38791da177e4SLinus Torvalds * and/or accessed for architectures that don't do it in hardware (most 38801da177e4SLinus Torvalds * RISC architectures). The early dirtying is also good on the i386. 38811da177e4SLinus Torvalds * 38821da177e4SLinus Torvalds * There is also a hook called "update_mmu_cache()" that architectures 38831da177e4SLinus Torvalds * with external mmu caches can use to update those (ie the Sparc or 38841da177e4SLinus Torvalds * PowerPC hashed page tables that act as extended TLBs). 38851da177e4SLinus Torvalds * 38867267ec00SKirill A. Shutemov * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow 38877267ec00SKirill A. Shutemov * concurrent faults). 38889a95f3cfSPaul Cassella * 38897267ec00SKirill A. Shutemov * The mmap_sem may have been released depending on flags and our return value. 38907267ec00SKirill A. Shutemov * See filemap_fault() and __lock_page_or_retry(). 38911da177e4SLinus Torvalds */ 389282b0f8c3SJan Kara static int handle_pte_fault(struct vm_fault *vmf) 38931da177e4SLinus Torvalds { 38941da177e4SLinus Torvalds pte_t entry; 38951da177e4SLinus Torvalds 389682b0f8c3SJan Kara if (unlikely(pmd_none(*vmf->pmd))) { 38977267ec00SKirill A. Shutemov /* 38987267ec00SKirill A. Shutemov * Leave __pte_alloc() until later: because vm_ops->fault may 38997267ec00SKirill A. Shutemov * want to allocate huge page, and if we expose page table 39007267ec00SKirill A. Shutemov * for an instant, it will be difficult to retract from 39017267ec00SKirill A. Shutemov * concurrent faults and from rmap lookups. 39027267ec00SKirill A. 
Shutemov */ 390382b0f8c3SJan Kara vmf->pte = NULL; 39047267ec00SKirill A. Shutemov } else { 39057267ec00SKirill A. Shutemov /* See comment in pte_alloc_one_map() */ 3906d0f0931dSRoss Zwisler if (pmd_devmap_trans_unstable(vmf->pmd)) 39077267ec00SKirill A. Shutemov return 0; 39087267ec00SKirill A. Shutemov /* 39097267ec00SKirill A. Shutemov * A regular pmd is established and it can't morph into a huge 39107267ec00SKirill A. Shutemov * pmd from under us anymore at this point because we hold the 39117267ec00SKirill A. Shutemov * mmap_sem in read mode and khugepaged takes it in write mode. 39127267ec00SKirill A. Shutemov * So now it's safe to run pte_offset_map(). 39137267ec00SKirill A. Shutemov */ 391482b0f8c3SJan Kara vmf->pte = pte_offset_map(vmf->pmd, vmf->address); 39152994302bSJan Kara vmf->orig_pte = *vmf->pte; 39167267ec00SKirill A. Shutemov 3917e37c6982SChristian Borntraeger /* 3918e37c6982SChristian Borntraeger * some architectures can have larger ptes than wordsize, 39197267ec00SKirill A. Shutemov * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and 3920b03a0fe0SPaul E. McKenney * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic 3921b03a0fe0SPaul E. McKenney * accesses. The code below just needs a consistent view 3922b03a0fe0SPaul E. McKenney * for the ifs and we later double check anyway with the 39237267ec00SKirill A. Shutemov * ptl lock held. So here a barrier will do. 3924e37c6982SChristian Borntraeger */ 3925e37c6982SChristian Borntraeger barrier(); 39262994302bSJan Kara if (pte_none(vmf->orig_pte)) { 392782b0f8c3SJan Kara pte_unmap(vmf->pte); 392882b0f8c3SJan Kara vmf->pte = NULL; 39297267ec00SKirill A. Shutemov } 39307267ec00SKirill A. Shutemov } 39317267ec00SKirill A. Shutemov 393282b0f8c3SJan Kara if (!vmf->pte) { 393382b0f8c3SJan Kara if (vma_is_anonymous(vmf->vma)) 393482b0f8c3SJan Kara return do_anonymous_page(vmf); 3935b5330628SOleg Nesterov else 393682b0f8c3SJan Kara return do_fault(vmf); 393765500d23SHugh Dickins } 39387267ec00SKirill A. Shutemov 39392994302bSJan Kara if (!pte_present(vmf->orig_pte)) 39402994302bSJan Kara return do_swap_page(vmf); 39411da177e4SLinus Torvalds 39422994302bSJan Kara if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) 39432994302bSJan Kara return do_numa_page(vmf); 3944d10e63f2SMel Gorman 394582b0f8c3SJan Kara vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); 394682b0f8c3SJan Kara spin_lock(vmf->ptl); 39472994302bSJan Kara entry = vmf->orig_pte; 394882b0f8c3SJan Kara if (unlikely(!pte_same(*vmf->pte, entry))) 39498f4e2101SHugh Dickins goto unlock; 395082b0f8c3SJan Kara if (vmf->flags & FAULT_FLAG_WRITE) { 39511da177e4SLinus Torvalds if (!pte_write(entry)) 39522994302bSJan Kara return do_wp_page(vmf); 39531da177e4SLinus Torvalds entry = pte_mkdirty(entry); 39541da177e4SLinus Torvalds } 39551da177e4SLinus Torvalds entry = pte_mkyoung(entry); 395682b0f8c3SJan Kara if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, 395782b0f8c3SJan Kara vmf->flags & FAULT_FLAG_WRITE)) { 395882b0f8c3SJan Kara update_mmu_cache(vmf->vma, vmf->address, vmf->pte); 39591a44e149SAndrea Arcangeli } else { 39601a44e149SAndrea Arcangeli /* 39611a44e149SAndrea Arcangeli * This is needed only for protection faults but the arch code 39621a44e149SAndrea Arcangeli * is not yet telling us if this is a protection fault or not. 39631a44e149SAndrea Arcangeli * This still avoids useless tlb flushes for .text page faults 39641a44e149SAndrea Arcangeli * with threads.
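* (A spurious fault can be left behind when a pte's permissions were relaxed but another CPU still holds the old, more restrictive entry in its TLB.)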
39651a44e149SAndrea Arcangeli */ 396682b0f8c3SJan Kara if (vmf->flags & FAULT_FLAG_WRITE) 396782b0f8c3SJan Kara flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); 39681a44e149SAndrea Arcangeli } 39698f4e2101SHugh Dickins unlock: 397082b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 397183c54070SNick Piggin return 0; 39721da177e4SLinus Torvalds } 39731da177e4SLinus Torvalds 39741da177e4SLinus Torvalds /* 39751da177e4SLinus Torvalds * By the time we get here, we already hold the mm semaphore 39769a95f3cfSPaul Cassella * 39779a95f3cfSPaul Cassella * The mmap_sem may have been released depending on flags and our 39789a95f3cfSPaul Cassella * return value. See filemap_fault() and __lock_page_or_retry(). 39791da177e4SLinus Torvalds */ 3980dcddffd4SKirill A. Shutemov static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, 3981dcddffd4SKirill A. Shutemov unsigned int flags) 39821da177e4SLinus Torvalds { 398382b0f8c3SJan Kara struct vm_fault vmf = { 3984bae473a4SKirill A. Shutemov .vma = vma, 39851a29d85eSJan Kara .address = address & PAGE_MASK, 3986bae473a4SKirill A. Shutemov .flags = flags, 39870721ec8bSJan Kara .pgoff = linear_page_index(vma, address), 3988667240e0SJan Kara .gfp_mask = __get_fault_gfp_mask(vma), 3989bae473a4SKirill A. Shutemov }; 3990fde26bedSAnshuman Khandual unsigned int dirty = flags & FAULT_FLAG_WRITE; 3991dcddffd4SKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 39921da177e4SLinus Torvalds pgd_t *pgd; 3993c2febafcSKirill A. Shutemov p4d_t *p4d; 3994a2d58167SDave Jiang int ret; 39951da177e4SLinus Torvalds 39961da177e4SLinus Torvalds pgd = pgd_offset(mm, address); 3997c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, address); 3998c2febafcSKirill A. Shutemov if (!p4d) 3999c2febafcSKirill A. Shutemov return VM_FAULT_OOM; 4000a00cc7d9SMatthew Wilcox 4001c2febafcSKirill A. Shutemov vmf.pud = pud_alloc(mm, p4d, address); 4002a00cc7d9SMatthew Wilcox if (!vmf.pud) 4003c74df32cSHugh Dickins return VM_FAULT_OOM; 4004a00cc7d9SMatthew Wilcox if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { 4005a00cc7d9SMatthew Wilcox ret = create_huge_pud(&vmf); 4006a00cc7d9SMatthew Wilcox if (!(ret & VM_FAULT_FALLBACK)) 4007a00cc7d9SMatthew Wilcox return ret; 4008a00cc7d9SMatthew Wilcox } else { 4009a00cc7d9SMatthew Wilcox pud_t orig_pud = *vmf.pud; 4010a00cc7d9SMatthew Wilcox 4011a00cc7d9SMatthew Wilcox barrier(); 4012a00cc7d9SMatthew Wilcox if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { 4013a00cc7d9SMatthew Wilcox 4014a00cc7d9SMatthew Wilcox /* NUMA case for anonymous PUDs would go here */ 4015a00cc7d9SMatthew Wilcox 4016a00cc7d9SMatthew Wilcox if (dirty && !pud_write(orig_pud)) { 4017a00cc7d9SMatthew Wilcox ret = wp_huge_pud(&vmf, orig_pud); 4018a00cc7d9SMatthew Wilcox if (!(ret & VM_FAULT_FALLBACK)) 4019a00cc7d9SMatthew Wilcox return ret; 4020a00cc7d9SMatthew Wilcox } else { 4021a00cc7d9SMatthew Wilcox huge_pud_set_accessed(&vmf, orig_pud); 4022a00cc7d9SMatthew Wilcox return 0; 4023a00cc7d9SMatthew Wilcox } 4024a00cc7d9SMatthew Wilcox } 4025a00cc7d9SMatthew Wilcox } 4026a00cc7d9SMatthew Wilcox 4027a00cc7d9SMatthew Wilcox vmf.pmd = pmd_alloc(mm, vmf.pud, address); 402882b0f8c3SJan Kara if (!vmf.pmd) 4029c74df32cSHugh Dickins return VM_FAULT_OOM; 403082b0f8c3SJan Kara if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { 4031a2d58167SDave Jiang ret = create_huge_pmd(&vmf); 4032c0292554SKirill A. Shutemov if (!(ret & VM_FAULT_FALLBACK)) 4033c0292554SKirill A. 
Shutemov return ret; 403471e3aac0SAndrea Arcangeli } else { 403582b0f8c3SJan Kara pmd_t orig_pmd = *vmf.pmd; 40361f1d06c3SDavid Rientjes 403771e3aac0SAndrea Arcangeli barrier(); 403884c3fc4eSZi Yan if (unlikely(is_swap_pmd(orig_pmd))) { 403984c3fc4eSZi Yan VM_BUG_ON(thp_migration_supported() && 404084c3fc4eSZi Yan !is_pmd_migration_entry(orig_pmd)); 404184c3fc4eSZi Yan if (is_pmd_migration_entry(orig_pmd)) 404284c3fc4eSZi Yan pmd_migration_entry_wait(mm, vmf.pmd); 404384c3fc4eSZi Yan return 0; 404484c3fc4eSZi Yan } 40455c7fb56eSDan Williams if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { 404638e08854SLorenzo Stoakes if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) 404782b0f8c3SJan Kara return do_huge_pmd_numa_page(&vmf, orig_pmd); 4048d10e63f2SMel Gorman 4049fde26bedSAnshuman Khandual if (dirty && !pmd_write(orig_pmd)) { 405082b0f8c3SJan Kara ret = wp_huge_pmd(&vmf, orig_pmd); 40519845cbbdSKirill A. Shutemov if (!(ret & VM_FAULT_FALLBACK)) 40521f1d06c3SDavid Rientjes return ret; 4053a1dd450bSWill Deacon } else { 405482b0f8c3SJan Kara huge_pmd_set_accessed(&vmf, orig_pmd); 405571e3aac0SAndrea Arcangeli return 0; 405671e3aac0SAndrea Arcangeli } 405771e3aac0SAndrea Arcangeli } 40589845cbbdSKirill A. Shutemov } 405971e3aac0SAndrea Arcangeli 406082b0f8c3SJan Kara return handle_pte_fault(&vmf); 40611da177e4SLinus Torvalds } 40621da177e4SLinus Torvalds 40639a95f3cfSPaul Cassella /* 40649a95f3cfSPaul Cassella * By the time we get here, we already hold the mm semaphore 40659a95f3cfSPaul Cassella * 40669a95f3cfSPaul Cassella * The mmap_sem may have been released depending on flags and our 40679a95f3cfSPaul Cassella * return value. See filemap_fault() and __lock_page_or_retry(). 40689a95f3cfSPaul Cassella */ 4069dcddffd4SKirill A. Shutemov int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, 4070dcddffd4SKirill A. Shutemov unsigned int flags) 4071519e5247SJohannes Weiner { 4072519e5247SJohannes Weiner int ret; 4073519e5247SJohannes Weiner 4074519e5247SJohannes Weiner __set_current_state(TASK_RUNNING); 4075519e5247SJohannes Weiner 4076519e5247SJohannes Weiner count_vm_event(PGFAULT); 40772262185cSRoman Gushchin count_memcg_event_mm(vma->vm_mm, PGFAULT); 4078519e5247SJohannes Weiner 4079519e5247SJohannes Weiner /* do counter updates before entering really critical section. */ 4080519e5247SJohannes Weiner check_sync_rss_stat(current); 4081519e5247SJohannes Weiner 4082de0c799bSLaurent Dufour if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, 4083de0c799bSLaurent Dufour flags & FAULT_FLAG_INSTRUCTION, 4084de0c799bSLaurent Dufour flags & FAULT_FLAG_REMOTE)) 4085de0c799bSLaurent Dufour return VM_FAULT_SIGSEGV; 4086de0c799bSLaurent Dufour 4087519e5247SJohannes Weiner /* 4088519e5247SJohannes Weiner * Enable the memcg OOM handling for faults triggered in user 4089519e5247SJohannes Weiner * space. Kernel faults are handled more gracefully. 4090519e5247SJohannes Weiner */ 4091519e5247SJohannes Weiner if (flags & FAULT_FLAG_USER) 409249426420SJohannes Weiner mem_cgroup_oom_enable(); 4093519e5247SJohannes Weiner 4094bae473a4SKirill A. Shutemov if (unlikely(is_vm_hugetlb_page(vma))) 4095bae473a4SKirill A. Shutemov ret = hugetlb_fault(vma->vm_mm, vma, address, flags); 4096bae473a4SKirill A. Shutemov else 4097dcddffd4SKirill A. 
Shutemov ret = __handle_mm_fault(vma, address, flags); 4098519e5247SJohannes Weiner 409949426420SJohannes Weiner if (flags & FAULT_FLAG_USER) { 410049426420SJohannes Weiner mem_cgroup_oom_disable(); 410149426420SJohannes Weiner /* 410249426420SJohannes Weiner * The task may have entered a memcg OOM situation but 410349426420SJohannes Weiner * if the allocation error was handled gracefully (no 410449426420SJohannes Weiner * VM_FAULT_OOM), there is no need to kill anything. 410549426420SJohannes Weiner * Just clean up the OOM state peacefully. 410649426420SJohannes Weiner */ 410749426420SJohannes Weiner if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) 410849426420SJohannes Weiner mem_cgroup_oom_synchronize(false); 410949426420SJohannes Weiner } 41103812c8c8SJohannes Weiner 4111519e5247SJohannes Weiner return ret; 4112519e5247SJohannes Weiner } 4113e1d6d01aSJesse Barnes EXPORT_SYMBOL_GPL(handle_mm_fault); 4114519e5247SJohannes Weiner 411590eceff1SKirill A. Shutemov #ifndef __PAGETABLE_P4D_FOLDED 411690eceff1SKirill A. Shutemov /* 411790eceff1SKirill A. Shutemov * Allocate p4d page table. 411890eceff1SKirill A. Shutemov * We've already handled the fast-path in-line. 411990eceff1SKirill A. Shutemov */ 412090eceff1SKirill A. Shutemov int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 412190eceff1SKirill A. Shutemov { 412290eceff1SKirill A. Shutemov p4d_t *new = p4d_alloc_one(mm, address); 412390eceff1SKirill A. Shutemov if (!new) 412490eceff1SKirill A. Shutemov return -ENOMEM; 412590eceff1SKirill A. Shutemov 412690eceff1SKirill A. Shutemov smp_wmb(); /* See comment in __pte_alloc */ 412790eceff1SKirill A. Shutemov 412890eceff1SKirill A. Shutemov spin_lock(&mm->page_table_lock); 412990eceff1SKirill A. Shutemov if (pgd_present(*pgd)) /* Another has populated it */ 413090eceff1SKirill A. Shutemov p4d_free(mm, new); 413190eceff1SKirill A. Shutemov else 413290eceff1SKirill A. Shutemov pgd_populate(mm, pgd, new); 413390eceff1SKirill A. Shutemov spin_unlock(&mm->page_table_lock); 413490eceff1SKirill A. Shutemov return 0; 413590eceff1SKirill A. Shutemov } 413690eceff1SKirill A. Shutemov #endif /* __PAGETABLE_P4D_FOLDED */ 413790eceff1SKirill A. Shutemov 41381da177e4SLinus Torvalds #ifndef __PAGETABLE_PUD_FOLDED 41391da177e4SLinus Torvalds /* 41401da177e4SLinus Torvalds * Allocate page upper directory. 4141872fec16SHugh Dickins * We've already handled the fast-path in-line. 41421da177e4SLinus Torvalds */ 4143c2febafcSKirill A. Shutemov int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) 41441da177e4SLinus Torvalds { 4145c74df32cSHugh Dickins pud_t *new = pud_alloc_one(mm, address); 4146c74df32cSHugh Dickins if (!new) 41471bb3630eSHugh Dickins return -ENOMEM; 41481da177e4SLinus Torvalds 4149362a61adSNick Piggin smp_wmb(); /* See comment in __pte_alloc */ 4150362a61adSNick Piggin 4151872fec16SHugh Dickins spin_lock(&mm->page_table_lock); 4152c2febafcSKirill A. Shutemov #ifndef __ARCH_HAS_5LEVEL_HACK 4153b4e98d9aSKirill A. Shutemov if (!p4d_present(*p4d)) { 4154b4e98d9aSKirill A. Shutemov mm_inc_nr_puds(mm); 4155c2febafcSKirill A. Shutemov p4d_populate(mm, p4d, new); 4156b4e98d9aSKirill A. Shutemov } else /* Another has populated it */ 4157c2febafcSKirill A. Shutemov pud_free(mm, new); 4158b4e98d9aSKirill A. Shutemov #else 4159b4e98d9aSKirill A. Shutemov if (!pgd_present(*p4d)) { 4160b4e98d9aSKirill A. Shutemov mm_inc_nr_puds(mm); 4161c2febafcSKirill A. Shutemov pgd_populate(mm, p4d, new); 4162b4e98d9aSKirill A. 
Shutemov } else /* Another has populated it */ 4163b4e98d9aSKirill A. Shutemov pud_free(mm, new); 4164c2febafcSKirill A. Shutemov #endif /* __ARCH_HAS_5LEVEL_HACK */ 4165872fec16SHugh Dickins spin_unlock(&mm->page_table_lock); 41661bb3630eSHugh Dickins return 0; 41671da177e4SLinus Torvalds } 41681da177e4SLinus Torvalds #endif /* __PAGETABLE_PUD_FOLDED */ 41691da177e4SLinus Torvalds 41701da177e4SLinus Torvalds #ifndef __PAGETABLE_PMD_FOLDED 41711da177e4SLinus Torvalds /* 41721da177e4SLinus Torvalds * Allocate page middle directory. 4173872fec16SHugh Dickins * We've already handled the fast-path in-line. 41741da177e4SLinus Torvalds */ 41751bb3630eSHugh Dickins int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 41761da177e4SLinus Torvalds { 4177a00cc7d9SMatthew Wilcox spinlock_t *ptl; 4178c74df32cSHugh Dickins pmd_t *new = pmd_alloc_one(mm, address); 4179c74df32cSHugh Dickins if (!new) 41801bb3630eSHugh Dickins return -ENOMEM; 41811da177e4SLinus Torvalds 4182362a61adSNick Piggin smp_wmb(); /* See comment in __pte_alloc */ 4183362a61adSNick Piggin 4184a00cc7d9SMatthew Wilcox ptl = pud_lock(mm, pud); 41851da177e4SLinus Torvalds #ifndef __ARCH_HAS_4LEVEL_HACK 4186dc6c9a35SKirill A. Shutemov if (!pud_present(*pud)) { 4187dc6c9a35SKirill A. Shutemov mm_inc_nr_pmds(mm); 41881da177e4SLinus Torvalds pud_populate(mm, pud, new); 4189dc6c9a35SKirill A. Shutemov } else /* Another has populated it */ 41905e541973SBenjamin Herrenschmidt pmd_free(mm, new); 4191dc6c9a35SKirill A. Shutemov #else 4192dc6c9a35SKirill A. Shutemov if (!pgd_present(*pud)) { 4193dc6c9a35SKirill A. Shutemov mm_inc_nr_pmds(mm); 41941da177e4SLinus Torvalds pgd_populate(mm, pud, new); 4195dc6c9a35SKirill A. Shutemov } else /* Another has populated it */ 4196dc6c9a35SKirill A. Shutemov pmd_free(mm, new); 41971da177e4SLinus Torvalds #endif /* __ARCH_HAS_4LEVEL_HACK */ 4198a00cc7d9SMatthew Wilcox spin_unlock(ptl); 41991bb3630eSHugh Dickins return 0; 42001da177e4SLinus Torvalds } 42011da177e4SLinus Torvalds #endif /* __PAGETABLE_PMD_FOLDED */ 42021da177e4SLinus Torvalds 420309796395SRoss Zwisler static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, 4204a4d1a885SJérôme Glisse unsigned long *start, unsigned long *end, 420509796395SRoss Zwisler pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) 4206f8ad0f49SJohannes Weiner { 4207f8ad0f49SJohannes Weiner pgd_t *pgd; 4208c2febafcSKirill A. Shutemov p4d_t *p4d; 4209f8ad0f49SJohannes Weiner pud_t *pud; 4210f8ad0f49SJohannes Weiner pmd_t *pmd; 4211f8ad0f49SJohannes Weiner pte_t *ptep; 4212f8ad0f49SJohannes Weiner 4213f8ad0f49SJohannes Weiner pgd = pgd_offset(mm, address); 4214f8ad0f49SJohannes Weiner if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 4215f8ad0f49SJohannes Weiner goto out; 4216f8ad0f49SJohannes Weiner 4217c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, address); 4218c2febafcSKirill A. Shutemov if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) 4219c2febafcSKirill A. Shutemov goto out; 4220c2febafcSKirill A. Shutemov 4221c2febafcSKirill A. 
Shutemov pud = pud_offset(p4d, address); 4222f8ad0f49SJohannes Weiner if (pud_none(*pud) || unlikely(pud_bad(*pud))) 4223f8ad0f49SJohannes Weiner goto out; 4224f8ad0f49SJohannes Weiner 4225f8ad0f49SJohannes Weiner pmd = pmd_offset(pud, address); 4226f66055abSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*pmd)); 422709796395SRoss Zwisler 422809796395SRoss Zwisler if (pmd_huge(*pmd)) { 422909796395SRoss Zwisler if (!pmdpp) 4230f8ad0f49SJohannes Weiner goto out; 4231f8ad0f49SJohannes Weiner 4232a4d1a885SJérôme Glisse if (start && end) { 4233a4d1a885SJérôme Glisse *start = address & PMD_MASK; 4234a4d1a885SJérôme Glisse *end = *start + PMD_SIZE; 4235a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_start(mm, *start, *end); 4236a4d1a885SJérôme Glisse } 423709796395SRoss Zwisler *ptlp = pmd_lock(mm, pmd); 423809796395SRoss Zwisler if (pmd_huge(*pmd)) { 423909796395SRoss Zwisler *pmdpp = pmd; 424009796395SRoss Zwisler return 0; 424109796395SRoss Zwisler } 424209796395SRoss Zwisler spin_unlock(*ptlp); 4243a4d1a885SJérôme Glisse if (start && end) 4244a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_end(mm, *start, *end); 424509796395SRoss Zwisler } 424609796395SRoss Zwisler 424709796395SRoss Zwisler if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 4248f8ad0f49SJohannes Weiner goto out; 4249f8ad0f49SJohannes Weiner 4250a4d1a885SJérôme Glisse if (start && end) { 4251a4d1a885SJérôme Glisse *start = address & PAGE_MASK; 4252a4d1a885SJérôme Glisse *end = *start + PAGE_SIZE; 4253a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_start(mm, *start, *end); 4254a4d1a885SJérôme Glisse } 4255f8ad0f49SJohannes Weiner ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 4256f8ad0f49SJohannes Weiner if (!pte_present(*ptep)) 4257f8ad0f49SJohannes Weiner goto unlock; 4258f8ad0f49SJohannes Weiner *ptepp = ptep; 4259f8ad0f49SJohannes Weiner return 0; 4260f8ad0f49SJohannes Weiner unlock: 4261f8ad0f49SJohannes Weiner pte_unmap_unlock(ptep, *ptlp); 4262a4d1a885SJérôme Glisse if (start && end) 4263a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_end(mm, *start, *end); 4264f8ad0f49SJohannes Weiner out: 4265f8ad0f49SJohannes Weiner return -EINVAL; 4266f8ad0f49SJohannes Weiner } 4267f8ad0f49SJohannes Weiner 4268f729c8c9SRoss Zwisler static inline int follow_pte(struct mm_struct *mm, unsigned long address, 4269f729c8c9SRoss Zwisler pte_t **ptepp, spinlock_t **ptlp) 42701b36ba81SNamhyung Kim { 42711b36ba81SNamhyung Kim int res; 42721b36ba81SNamhyung Kim 42731b36ba81SNamhyung Kim /* (void) is needed to make gcc happy */ 42741b36ba81SNamhyung Kim (void) __cond_lock(*ptlp, 4275a4d1a885SJérôme Glisse !(res = __follow_pte_pmd(mm, address, NULL, NULL, 4276a4d1a885SJérôme Glisse ptepp, NULL, ptlp))); 42771b36ba81SNamhyung Kim return res; 42781b36ba81SNamhyung Kim } 42791b36ba81SNamhyung Kim 428009796395SRoss Zwisler int follow_pte_pmd(struct mm_struct *mm, unsigned long address, 4281a4d1a885SJérôme Glisse unsigned long *start, unsigned long *end, 428209796395SRoss Zwisler pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) 428309796395SRoss Zwisler { 428409796395SRoss Zwisler int res; 428509796395SRoss Zwisler 428609796395SRoss Zwisler /* (void) is needed to make gcc happy */ 428709796395SRoss Zwisler (void) __cond_lock(*ptlp, 4288a4d1a885SJérôme Glisse !(res = __follow_pte_pmd(mm, address, start, end, 4289a4d1a885SJérôme Glisse ptepp, pmdpp, ptlp))); 429009796395SRoss Zwisler return res; 429109796395SRoss Zwisler } 429209796395SRoss Zwisler EXPORT_SYMBOL(follow_pte_pmd); 429309796395SRoss Zwisler 42943b6748e2SJohannes Weiner /** 
42953b6748e2SJohannes Weiner * follow_pfn - look up PFN at a user virtual address 42963b6748e2SJohannes Weiner * @vma: memory mapping 42973b6748e2SJohannes Weiner * @address: user virtual address 42983b6748e2SJohannes Weiner * @pfn: location to store found PFN 42993b6748e2SJohannes Weiner * 43003b6748e2SJohannes Weiner * Only IO mappings and raw PFN mappings are allowed. 43013b6748e2SJohannes Weiner * 43023b6748e2SJohannes Weiner * Returns zero and the pfn at @pfn on success, -ve otherwise. 43033b6748e2SJohannes Weiner */ 43043b6748e2SJohannes Weiner int follow_pfn(struct vm_area_struct *vma, unsigned long address, 43053b6748e2SJohannes Weiner unsigned long *pfn) 43063b6748e2SJohannes Weiner { 43073b6748e2SJohannes Weiner int ret = -EINVAL; 43083b6748e2SJohannes Weiner spinlock_t *ptl; 43093b6748e2SJohannes Weiner pte_t *ptep; 43103b6748e2SJohannes Weiner 43113b6748e2SJohannes Weiner if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 43123b6748e2SJohannes Weiner return ret; 43133b6748e2SJohannes Weiner 43143b6748e2SJohannes Weiner ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); 43153b6748e2SJohannes Weiner if (ret) 43163b6748e2SJohannes Weiner return ret; 43173b6748e2SJohannes Weiner *pfn = pte_pfn(*ptep); 43183b6748e2SJohannes Weiner pte_unmap_unlock(ptep, ptl); 43193b6748e2SJohannes Weiner return 0; 43203b6748e2SJohannes Weiner } 43213b6748e2SJohannes Weiner EXPORT_SYMBOL(follow_pfn); 43223b6748e2SJohannes Weiner 432328b2ee20SRik van Riel #ifdef CONFIG_HAVE_IOREMAP_PROT 4324d87fe660Svenkatesh.pallipadi@intel.com int follow_phys(struct vm_area_struct *vma, 432528b2ee20SRik van Riel unsigned long address, unsigned int flags, 4326d87fe660Svenkatesh.pallipadi@intel.com unsigned long *prot, resource_size_t *phys) 432728b2ee20SRik van Riel { 432803668a4dSJohannes Weiner int ret = -EINVAL; 432928b2ee20SRik van Riel pte_t *ptep, pte; 433028b2ee20SRik van Riel spinlock_t *ptl; 433128b2ee20SRik van Riel 4332d87fe660Svenkatesh.pallipadi@intel.com if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 4333d87fe660Svenkatesh.pallipadi@intel.com goto out; 433428b2ee20SRik van Riel 433503668a4dSJohannes Weiner if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) 4336d87fe660Svenkatesh.pallipadi@intel.com goto out; 433728b2ee20SRik van Riel pte = *ptep; 433803668a4dSJohannes Weiner 433928b2ee20SRik van Riel if ((flags & FOLL_WRITE) && !pte_write(pte)) 434028b2ee20SRik van Riel goto unlock; 434128b2ee20SRik van Riel 434228b2ee20SRik van Riel *prot = pgprot_val(pte_pgprot(pte)); 434303668a4dSJohannes Weiner *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 434428b2ee20SRik van Riel 434503668a4dSJohannes Weiner ret = 0; 434628b2ee20SRik van Riel unlock: 434728b2ee20SRik van Riel pte_unmap_unlock(ptep, ptl); 434828b2ee20SRik van Riel out: 4349d87fe660Svenkatesh.pallipadi@intel.com return ret; 435028b2ee20SRik van Riel } 435128b2ee20SRik van Riel 435228b2ee20SRik van Riel int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 435328b2ee20SRik van Riel void *buf, int len, int write) 435428b2ee20SRik van Riel { 435528b2ee20SRik van Riel resource_size_t phys_addr; 435628b2ee20SRik van Riel unsigned long prot = 0; 43572bc7273bSKOSAKI Motohiro void __iomem *maddr; 435828b2ee20SRik van Riel int offset = addr & (PAGE_SIZE-1); 435928b2ee20SRik van Riel 4360d87fe660Svenkatesh.pallipadi@intel.com if (follow_phys(vma, addr, write, &prot, &phys_addr)) 436128b2ee20SRik van Riel return -EINVAL; 436228b2ee20SRik van Riel 43639cb12d7bSGrazvydas Ignotas maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 
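/* phys_addr came from a pfn and is page aligned, so the sub-page part of the user address is re-applied via offset below. */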
#ifdef CONFIG_HAVE_IOREMAP_PROT
int follow_phys(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags,
		unsigned long *prot, resource_size_t *phys)
{
	int ret = -EINVAL;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;

	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
		goto out;
	pte = *ptep;

	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	*prot = pgprot_val(pte_pgprot(pte));
	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;

	ret = 0;
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return ret;
}

int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write)
{
	resource_size_t phys_addr;
	unsigned long prot = 0;
	void __iomem *maddr;
	int offset = addr & (PAGE_SIZE-1);

	/* pass an explicit gup flag instead of relying on FOLL_WRITE == 1 */
	if (follow_phys(vma, addr, write ? FOLL_WRITE : 0, &prot, &phys_addr))
		return -EINVAL;

	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
	if (!maddr)	/* ioremap_prot() can fail; don't dereference NULL */
		return -ENOMEM;

	if (write)
		memcpy_toio(maddr + offset, buf, len);
	else
		memcpy_fromio(buf, maddr + offset, len);
	iounmap(maddr);

	return len;
}
EXPORT_SYMBOL_GPL(generic_access_phys);
#endif
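/*
 * Usage sketch (illustrative only, hypothetical name): a driver that maps
 * device memory with remap_pfn_range() can make that mapping reachable
 * through ptrace() and /proc/<pid>/mem by plugging generic_access_phys()
 * into its vm_operations_struct, the way /dev/mem does:
 */
static const struct vm_operations_struct example_phys_vm_ops __maybe_unused = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};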
/*
 * Access another process' address space as given in mm.  If non-NULL, use the
 * given task for page fault accounting.
 */
int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	void *old_buf = buf;
	int write = gup_flags & FOLL_WRITE;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages_remote(tsk, mm, addr, 1,
				gup_flags, &page, &vma, NULL);
		if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
			break;
#else
			/*
			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
			 * we can access using slightly different code.
			 */
			vma = find_vma(mm, addr);
			if (!vma || vma->vm_start > addr)
				break;
			if (vma->vm_ops && vma->vm_ops->access)
				ret = vma->vm_ops->access(vma, addr, buf,
							  len, write);
			if (ret <= 0)
				break;
			bytes = ret;
#endif
		} else {
			bytes = len;
			offset = addr & (PAGE_SIZE-1);
			if (bytes > PAGE_SIZE-offset)
				bytes = PAGE_SIZE-offset;

			maddr = kmap(page);
			if (write) {
				copy_to_user_page(vma, page, addr,
						  maddr + offset, buf, bytes);
				set_page_dirty_lock(page);
			} else {
				copy_from_user_page(vma, page, addr,
						    buf, maddr + offset, bytes);
			}
			kunmap(page);
			put_page(page);
		}
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);

	return buf - old_buf;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * Source/target buffer must be kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);

	mmput(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);
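/*
 * Usage sketch (illustrative only, hypothetical helper): this is
 * essentially what ptrace(PTRACE_PEEKDATA) boils down to -- copying bytes
 * out of another task's address space into a kernel buffer.  FOLL_FORCE
 * mirrors the ptrace behaviour of reading through read-protected
 * mappings; the caller must hold a reference on @tsk.
 */
static int __maybe_unused example_peek_remote(struct task_struct *tsk,
					      unsigned long addr,
					      void *kbuf, int len)
{
	int copied;

	/* no FOLL_WRITE: this is a pure read of the remote address space */
	copied = access_process_vm(tsk, addr, kbuf, len, FOLL_FORCE);
	return copied == len ? 0 : -EFAULT;
}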
/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * we might be running from an atomic context so we cannot sleep
	 */
	if (!down_read_trylock(&mm->mmap_sem))
		return;

	vma = find_vma(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		char *buf = (char *)__get_free_page(GFP_NOWAIT);
		if (buf) {
			char *p;

			p = file_path(f, buf, PAGE_SIZE);
			if (IS_ERR(p))
				p = "?";
			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
					vma->vm_start,
					vma->vm_end - vma->vm_start);
			free_page((unsigned long)buf);
		}
	}
	up_read(&mm->mmap_sem);
}

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
	/*
	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
	 * holding the mmap_sem, this is safe because kernel memory doesn't
	 * get paged out, therefore we'll never actually fault, and the
	 * below annotations will generate false positives.
	 */
	if (uaccess_kernel())
		return;
	if (pagefault_disabled())
		return;
	__might_sleep(file, line, 0);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif
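/*
 * Usage sketch (illustrative only, hypothetical function): __might_fault()
 * backs the might_fault() annotation that uaccess primitives place before
 * touching user memory, so that CONFIG_DEBUG_ATOMIC_SLEEP catches callers
 * that hold a spinlock or have pagefaults disabled.  A copy routine would
 * annotate itself like this:
 */
static inline int __maybe_unused
example_copy_flags_from_user(u32 *dst, const u32 __user *src)
{
	might_fault();	/* on debug kernels: __might_fault(__FILE__, __LINE__) */
	return get_user(*dst, src);
}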
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
static void clear_gigantic_page(struct page *page,
				unsigned long addr,
				unsigned int pages_per_huge_page)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < pages_per_huge_page;
	     i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}
void clear_huge_page(struct page *page,
		     unsigned long addr_hint, unsigned int pages_per_huge_page)
{
	int i, n, base, l;
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, pages_per_huge_page);
		return;
	}

	/* Clear sub-page to access last to keep its cache lines hot */
	might_sleep();
	n = (addr_hint - addr) / PAGE_SIZE;
	if (2 * n <= pages_per_huge_page) {
		/* If sub-page to access is in first half of huge page */
		base = 0;
		l = n;
		/* Clear sub-pages at the end of huge page */
		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
			cond_resched();
			clear_user_highpage(page + i, addr + i * PAGE_SIZE);
		}
	} else {
		/* If sub-page to access is in second half of huge page */
		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
		l = pages_per_huge_page - n;
		/* Clear sub-pages at the beginning of huge page */
		for (i = 0; i < base; i++) {
			cond_resched();
			clear_user_highpage(page + i, addr + i * PAGE_SIZE);
		}
	}
	/*
	 * Clear remaining sub-pages in left-right-left-right pattern
	 * towards the sub-page to access
	 */
	for (i = 0; i < l; i++) {
		int left_idx = base + i;
		int right_idx = base + 2 * l - 1 - i;

		cond_resched();
		clear_user_highpage(page + left_idx,
				    addr + left_idx * PAGE_SIZE);
		cond_resched();
		clear_user_highpage(page + right_idx,
				    addr + right_idx * PAGE_SIZE);
	}
}
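/*
 * Worked example (illustrative note): with pages_per_huge_page == 8 and a
 * fault in sub-page n == 5 (second half), base = 8 - 2 * (8 - 5) = 2 and
 * l = 3.  Sub-pages 0 and 1 are cleared first, then the loop above clears
 * 2, 7, 3, 6, 4, 5 -- converging on sub-page 5, so its cache lines are
 * the hottest when the faulting access finally executes.
 */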
static void copy_user_gigantic_page(struct page *dst, struct page *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page; ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr, struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page)
{
	int i;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		copy_user_gigantic_page(dst, src, addr, vma,
					pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault)
{
	void *src = (void *)usr_src;
	void *page_kaddr;
	unsigned long i, rc = 0;
	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;

	for (i = 0; i < pages_per_huge_page; i++) {
		if (allow_pagefault)
			page_kaddr = kmap(dst_page + i);
		else
			page_kaddr = kmap_atomic(dst_page + i);
		rc = copy_from_user(page_kaddr,
				(const void __user *)(src + i * PAGE_SIZE),
				PAGE_SIZE);
		if (allow_pagefault)
			kunmap(dst_page + i);
		else
			kunmap_atomic(page_kaddr);

		ret_val -= (PAGE_SIZE - rc);
		if (rc)
			break;

		cond_resched();
	}
	return ret_val;
}
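/*
 * Usage sketch (illustrative only, hypothetical helper): like
 * copy_from_user(), copy_huge_page_from_user() returns the number of
 * bytes that could NOT be copied, so zero means complete success.  Its
 * userfaultfd callers follow this pattern:
 */
static inline int __maybe_unused
example_fill_huge_page(struct page *page, const void __user *src,
		       unsigned int pages_per_huge_page)
{
	/* false: use kmap_atomic() and fail instead of faulting pages in */
	return copy_huge_page_from_user(page, src, pages_per_huge_page,
					false) ? -EFAULT : 0;
}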
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS

static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct page *page)
{
	spinlock_t *ptl;

	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	page->ptl = ptl;
	return true;
}

void ptlock_free(struct page *page)
{
	kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif
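/*
 * Usage sketch (illustrative only, hypothetical allocator): arch code does
 * not call ptlock_alloc()/ptlock_free() directly; they sit behind
 * pgtable_page_ctor()/pgtable_page_dtor(), which every architecture runs
 * when it allocates or frees a user page-table page, roughly like this:
 */
static struct page *__maybe_unused example_alloc_pte_page(gfp_t gfp)
{
	struct page *page = alloc_page(gfp | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {	/* allocates the split ptlock */
		__free_page(page);
		return NULL;
	}
	return page;
}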