/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/dma-debug.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "internal.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, then end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);


#if defined(SPLIT_RSS_COUNTING)

void sync_mm_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
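
/*
 * Without SPLIT_RSS_COUNTING the fast helpers above update the mm counters
 * directly, so there is nothing to synchronize on a page fault.
 */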
static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */

#ifdef HAVE_GENERIC_MMU_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	tlb->mm = mm;

	/* Is it from 0 to ~0? */
	tlb->fullmm     = !(start | (end+1));
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
	tlb->page_size = 0;

	__tlb_reset_range(tlb);
}

static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
	__tlb_reset_range(tlb);
}

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	struct mmu_gather_batch *batch, *next;

	if (force)
		__tlb_adjust_range(tlb, start, end - start);

	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

/* __tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
 *	handling the additional races in SMP caused by other CPUs caching valid
 *	mappings in their TLBs. Returns the number of free page slots left.
 *	When out of page slots we must call tlb_flush_mmu().
 *	Returns true if the caller should flush.
 */
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);
	VM_WARN_ON(tlb->page_size != page_size);

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	return false;
}

#endif /* HAVE_GENERIC_MMU_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

static void tlb_remove_table_smp_sync(void *arg)
{
	struct mm_struct __maybe_unused *mm = arg;
	/*
	 * On most architectures this does nothing. Simply delivering the
	 * interrupt is enough to prevent races with software page table
	 * walking like that done in get_user_pages_fast.
	 *
	 * See the comment near struct mmu_table_batch.
	 */
	tlb_flush_remove_tables_local(mm);
}

static void tlb_remove_table_one(void *table, struct mmu_gather *tlb)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, tlb->mm, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb_flush_remove_tables(tlb->mm);

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	/*
	 * When there are fewer than two users of this mm there cannot be a
	 * concurrent page-table walk.
	 */
	if (atomic_read(&tlb->mm->mm_users) < 2) {
		__tlb_remove_table(table);
		return;
	}

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_remove_table_one(table, tlb);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	arch_tlb_gather_mmu(tlb, mm, start, end);
	inc_tlb_flush_pending(tlb->mm);
}

void tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end)
{
	/*
	 * If there are parallel threads doing PTE changes on the same range
	 * under a non-exclusive lock (e.g., mmap_sem read-side) but deferring
	 * the TLB flush by batching, a thread that still holds a stale TLB
	 * entry can fail to flush it (by observing pte_none or !pte_dirty,
	 * for example), so flush the TLB forcefully if we detect parallel
	 * PTE batching threads.
	 */
	bool force = mm_tlb_flush_nested(tlb->mm);

	arch_tlb_finish_mmu(tlb, start, end, force);
	dec_tlb_flush_pending(tlb->mm);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}
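
/*
 * The free_*_range() helpers below each handle one level of the page-table
 * tree: they free the lower-level tables covering [addr, end) and then, if
 * the region bounded by floor and ceiling allows it, clear and free the
 * table at their own level as well.
 */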
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE,
	 * (see pte_free_tlb()), flush the tlb if we need
	 */
	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	}
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	spinlock_t *ptl;
	pgtable_t new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->readpage : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte, bool with_public_device)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;

		/*
		 * Device public pages are special pages (they are ZONE_DEVICE
		 * pages but different from persistent memory). They behave
		 * almost like normal pages. The difference is that they are
		 * not on the LRU and thus should never be involved with
		 * anything that involves LRU manipulation (mlock, numa
		 * balancing, ...).
		 *
		 * This is why we still want to return NULL for such pages from
		 * vm_normal_page() so that we do not have to special case all
		 * call sites of vm_normal_page().
		 */
		if (likely(pfn <= highest_memmap_pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (is_device_public_page(page)) {
				if (with_public_device)
					return page;
				return NULL;
			}
		}

		if (pte_devmap(pte))
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/*
	 * There is no pmd_special() but there may be special pmds, e.g.
	 * in a direct-access (dax) mapping, so let's just replicate the
	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
	 */
	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_zero_pfn(pfn))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
#endif

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (likely(!non_swap_entry(entry))) {
			if (swap_duplicate(entry) < 0)
				return entry.val;

			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
							&src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			rss[MM_SWAPENTS]++;
		} else if (is_migration_entry(entry)) {
			page = migration_entry_to_page(entry);

			rss[mm_counter(page)]++;

			if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(*src_pte))
					pte = pte_swp_mksoft_dirty(pte);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		} else if (is_device_private_entry(entry)) {
			page = device_private_entry_to_page(entry);

			/*
			 * Update rss count even for unaddressable pages, as
			 * they should be treated just like normal pages in
			 * this respect.
			 *
			 * We will likely want to have some new rss counters
			 * for unaddressable pages, at some point. But for now
			 * keep things as they are.
			 */
			get_page(page);
			rss[mm_counter(page)]++;
			page_dup_rmap(page, false);

			/*
			 * We do not preserve soft-dirty information, because so
			 * far, checkpoint/restore is the only feature that
			 * requires that. And checkpoint/restore does not work
			 * when a device driver is involved (you cannot easily
			 * save and restore device driver state).
			 */
			if (is_write_device_private_entry(entry) &&
			    is_cow_mapping(vm_flags)) {
				make_device_private_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page, false);
		rss[mm_counter(page)]++;
	} else if (pte_devmap(pte)) {
		page = pte_page(pte);

		/*
		 * Cache coherent device memory behaves like regular pages and
		 * not like persistent memory pages. For more information see
		 * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
		 */
		if (is_device_public_page(page)) {
			get_page(page);
			page_dup_rmap(page, false);
			rss[mm_counter(page)]++;
		}
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		   unsigned long addr, unsigned long end)
{
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};

again:
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
10981da177e4SLinus Torvalds */ 1099e040f218SHugh Dickins if (progress >= 32) { 1100e040f218SHugh Dickins progress = 0; 1101e040f218SHugh Dickins if (need_resched() || 110295c354feSNick Piggin spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) 11031da177e4SLinus Torvalds break; 1104e040f218SHugh Dickins } 11051da177e4SLinus Torvalds if (pte_none(*src_pte)) { 11061da177e4SLinus Torvalds progress++; 11071da177e4SLinus Torvalds continue; 11081da177e4SLinus Torvalds } 1109570a335bSHugh Dickins entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, 1110570a335bSHugh Dickins vma, addr, rss); 1111570a335bSHugh Dickins if (entry.val) 1112570a335bSHugh Dickins break; 11131da177e4SLinus Torvalds progress += 8; 11141da177e4SLinus Torvalds } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); 11151da177e4SLinus Torvalds 11166606c3e0SZachary Amsden arch_leave_lazy_mmu_mode(); 1117c74df32cSHugh Dickins spin_unlock(src_ptl); 1118ece0e2b6SPeter Zijlstra pte_unmap(orig_src_pte); 1119d559db08SKAMEZAWA Hiroyuki add_mm_rss_vec(dst_mm, rss); 1120c36987e2SDaisuke Nishimura pte_unmap_unlock(orig_dst_pte, dst_ptl); 1121c74df32cSHugh Dickins cond_resched(); 1122570a335bSHugh Dickins 1123570a335bSHugh Dickins if (entry.val) { 1124570a335bSHugh Dickins if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) 1125570a335bSHugh Dickins return -ENOMEM; 1126570a335bSHugh Dickins progress = 0; 1127570a335bSHugh Dickins } 11281da177e4SLinus Torvalds if (addr != end) 11291da177e4SLinus Torvalds goto again; 11301da177e4SLinus Torvalds return 0; 11311da177e4SLinus Torvalds } 11321da177e4SLinus Torvalds 11331da177e4SLinus Torvalds static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 11341da177e4SLinus Torvalds pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, 11351da177e4SLinus Torvalds unsigned long addr, unsigned long end) 11361da177e4SLinus Torvalds { 11371da177e4SLinus Torvalds pmd_t *src_pmd, *dst_pmd; 11381da177e4SLinus Torvalds unsigned long next; 11391da177e4SLinus Torvalds 11401da177e4SLinus Torvalds dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); 11411da177e4SLinus Torvalds if (!dst_pmd) 11421da177e4SLinus Torvalds return -ENOMEM; 11431da177e4SLinus Torvalds src_pmd = pmd_offset(src_pud, addr); 11441da177e4SLinus Torvalds do { 11451da177e4SLinus Torvalds next = pmd_addr_end(addr, end); 114684c3fc4eSZi Yan if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd) 114784c3fc4eSZi Yan || pmd_devmap(*src_pmd)) { 114871e3aac0SAndrea Arcangeli int err; 1149a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma); 115071e3aac0SAndrea Arcangeli err = copy_huge_pmd(dst_mm, src_mm, 115171e3aac0SAndrea Arcangeli dst_pmd, src_pmd, addr, vma); 115271e3aac0SAndrea Arcangeli if (err == -ENOMEM) 115371e3aac0SAndrea Arcangeli return -ENOMEM; 115471e3aac0SAndrea Arcangeli if (!err) 115571e3aac0SAndrea Arcangeli continue; 115671e3aac0SAndrea Arcangeli /* fall through */ 115771e3aac0SAndrea Arcangeli } 11581da177e4SLinus Torvalds if (pmd_none_or_clear_bad(src_pmd)) 11591da177e4SLinus Torvalds continue; 11601da177e4SLinus Torvalds if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, 11611da177e4SLinus Torvalds vma, addr, next)) 11621da177e4SLinus Torvalds return -ENOMEM; 11631da177e4SLinus Torvalds } while (dst_pmd++, src_pmd++, addr = next, addr != end); 11641da177e4SLinus Torvalds return 0; 11651da177e4SLinus Torvalds } 11661da177e4SLinus Torvalds 11671da177e4SLinus Torvalds static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 
1168c2febafcSKirill A. Shutemov p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma, 11691da177e4SLinus Torvalds unsigned long addr, unsigned long end) 11701da177e4SLinus Torvalds { 11711da177e4SLinus Torvalds pud_t *src_pud, *dst_pud; 11721da177e4SLinus Torvalds unsigned long next; 11731da177e4SLinus Torvalds 1174c2febafcSKirill A. Shutemov dst_pud = pud_alloc(dst_mm, dst_p4d, addr); 11751da177e4SLinus Torvalds if (!dst_pud) 11761da177e4SLinus Torvalds return -ENOMEM; 1177c2febafcSKirill A. Shutemov src_pud = pud_offset(src_p4d, addr); 11781da177e4SLinus Torvalds do { 11791da177e4SLinus Torvalds next = pud_addr_end(addr, end); 1180a00cc7d9SMatthew Wilcox if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) { 1181a00cc7d9SMatthew Wilcox int err; 1182a00cc7d9SMatthew Wilcox 1183a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma); 1184a00cc7d9SMatthew Wilcox err = copy_huge_pud(dst_mm, src_mm, 1185a00cc7d9SMatthew Wilcox dst_pud, src_pud, addr, vma); 1186a00cc7d9SMatthew Wilcox if (err == -ENOMEM) 1187a00cc7d9SMatthew Wilcox return -ENOMEM; 1188a00cc7d9SMatthew Wilcox if (!err) 1189a00cc7d9SMatthew Wilcox continue; 1190a00cc7d9SMatthew Wilcox /* fall through */ 1191a00cc7d9SMatthew Wilcox } 11921da177e4SLinus Torvalds if (pud_none_or_clear_bad(src_pud)) 11931da177e4SLinus Torvalds continue; 11941da177e4SLinus Torvalds if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, 11951da177e4SLinus Torvalds vma, addr, next)) 11961da177e4SLinus Torvalds return -ENOMEM; 11971da177e4SLinus Torvalds } while (dst_pud++, src_pud++, addr = next, addr != end); 11981da177e4SLinus Torvalds return 0; 11991da177e4SLinus Torvalds } 12001da177e4SLinus Torvalds 1201c2febafcSKirill A. Shutemov static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1202c2febafcSKirill A. Shutemov pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, 1203c2febafcSKirill A. Shutemov unsigned long addr, unsigned long end) 1204c2febafcSKirill A. Shutemov { 1205c2febafcSKirill A. Shutemov p4d_t *src_p4d, *dst_p4d; 1206c2febafcSKirill A. Shutemov unsigned long next; 1207c2febafcSKirill A. Shutemov 1208c2febafcSKirill A. Shutemov dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr); 1209c2febafcSKirill A. Shutemov if (!dst_p4d) 1210c2febafcSKirill A. Shutemov return -ENOMEM; 1211c2febafcSKirill A. Shutemov src_p4d = p4d_offset(src_pgd, addr); 1212c2febafcSKirill A. Shutemov do { 1213c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 1214c2febafcSKirill A. Shutemov if (p4d_none_or_clear_bad(src_p4d)) 1215c2febafcSKirill A. Shutemov continue; 1216c2febafcSKirill A. Shutemov if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d, 1217c2febafcSKirill A. Shutemov vma, addr, next)) 1218c2febafcSKirill A. Shutemov return -ENOMEM; 1219c2febafcSKirill A. Shutemov } while (dst_p4d++, src_p4d++, addr = next, addr != end); 1220c2febafcSKirill A. Shutemov return 0; 1221c2febafcSKirill A. Shutemov } 1222c2febafcSKirill A. 
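/*
 * The copy_*_range() helpers above are the page-table walk that fork()
 * performs (via dup_mmap() -> copy_page_range() below) to duplicate the
 * parent's mappings level by level.  For a private (COW) mapping the key
 * step is in copy_one_pte(): both the parent's and the child's PTEs are
 * write-protected, so the first write on either side faults and receives a
 * private copy.  A minimal userspace sketch of the visible behaviour
 * (error handling omitted):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		strcpy(buf, "parent");
 *		if (fork() == 0) {
 *			strcpy(buf, "child");	// write fault, child gets its own copy
 *			_exit(0);
 *		}
 *		wait(NULL);
 *		printf("%s\n", buf);		// still prints "parent"
 *		return 0;
 *	}
 */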
Shutemov 12231da177e4SLinus Torvalds int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 12241da177e4SLinus Torvalds struct vm_area_struct *vma) 12251da177e4SLinus Torvalds { 12261da177e4SLinus Torvalds pgd_t *src_pgd, *dst_pgd; 12271da177e4SLinus Torvalds unsigned long next; 12281da177e4SLinus Torvalds unsigned long addr = vma->vm_start; 12291da177e4SLinus Torvalds unsigned long end = vma->vm_end; 12302ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 12312ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 12322ec74c3eSSagi Grimberg bool is_cow; 1233cddb8a5cSAndrea Arcangeli int ret; 12341da177e4SLinus Torvalds 1235d992895bSNick Piggin /* 1236d992895bSNick Piggin * Don't copy ptes where a page fault will fill them correctly. 1237d992895bSNick Piggin * Fork becomes much lighter when there are big shared or private 1238d992895bSNick Piggin * readonly mappings. The tradeoff is that copy_page_range is more 1239d992895bSNick Piggin * efficient than faulting. 1240d992895bSNick Piggin */ 12410661a336SKirill A. Shutemov if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) && 12420661a336SKirill A. Shutemov !vma->anon_vma) 1243d992895bSNick Piggin return 0; 1244d992895bSNick Piggin 12451da177e4SLinus Torvalds if (is_vm_hugetlb_page(vma)) 12461da177e4SLinus Torvalds return copy_hugetlb_page_range(dst_mm, src_mm, vma); 12471da177e4SLinus Torvalds 1248b3b9c293SKonstantin Khlebnikov if (unlikely(vma->vm_flags & VM_PFNMAP)) { 12492ab64037Svenkatesh.pallipadi@intel.com /* 12502ab64037Svenkatesh.pallipadi@intel.com * We do not free on error cases below as remove_vma 12512ab64037Svenkatesh.pallipadi@intel.com * gets called on error from higher level routine 12522ab64037Svenkatesh.pallipadi@intel.com */ 12535180da41SSuresh Siddha ret = track_pfn_copy(vma); 12542ab64037Svenkatesh.pallipadi@intel.com if (ret) 12552ab64037Svenkatesh.pallipadi@intel.com return ret; 12562ab64037Svenkatesh.pallipadi@intel.com } 12572ab64037Svenkatesh.pallipadi@intel.com 1258cddb8a5cSAndrea Arcangeli /* 1259cddb8a5cSAndrea Arcangeli * We need to invalidate the secondary MMU mappings only when 1260cddb8a5cSAndrea Arcangeli * there could be a permission downgrade on the ptes of the 1261cddb8a5cSAndrea Arcangeli * parent mm. And a permission downgrade will only happen if 1262cddb8a5cSAndrea Arcangeli * is_cow_mapping() returns true. 1263cddb8a5cSAndrea Arcangeli */ 12642ec74c3eSSagi Grimberg is_cow = is_cow_mapping(vma->vm_flags); 12652ec74c3eSSagi Grimberg mmun_start = addr; 12662ec74c3eSSagi Grimberg mmun_end = end; 12672ec74c3eSSagi Grimberg if (is_cow) 12682ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(src_mm, mmun_start, 12692ec74c3eSSagi Grimberg mmun_end); 1270cddb8a5cSAndrea Arcangeli 1271cddb8a5cSAndrea Arcangeli ret = 0; 12721da177e4SLinus Torvalds dst_pgd = pgd_offset(dst_mm, addr); 12731da177e4SLinus Torvalds src_pgd = pgd_offset(src_mm, addr); 12741da177e4SLinus Torvalds do { 12751da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 12761da177e4SLinus Torvalds if (pgd_none_or_clear_bad(src_pgd)) 12771da177e4SLinus Torvalds continue; 1278c2febafcSKirill A. 
Shutemov if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd, 1279cddb8a5cSAndrea Arcangeli vma, addr, next))) { 1280cddb8a5cSAndrea Arcangeli ret = -ENOMEM; 1281cddb8a5cSAndrea Arcangeli break; 1282cddb8a5cSAndrea Arcangeli } 12831da177e4SLinus Torvalds } while (dst_pgd++, src_pgd++, addr = next, addr != end); 1284cddb8a5cSAndrea Arcangeli 12852ec74c3eSSagi Grimberg if (is_cow) 12862ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end); 1287cddb8a5cSAndrea Arcangeli return ret; 12881da177e4SLinus Torvalds } 12891da177e4SLinus Torvalds 129051c6f666SRobin Holt static unsigned long zap_pte_range(struct mmu_gather *tlb, 1291b5810039SNick Piggin struct vm_area_struct *vma, pmd_t *pmd, 12921da177e4SLinus Torvalds unsigned long addr, unsigned long end, 129397a89413SPeter Zijlstra struct zap_details *details) 12941da177e4SLinus Torvalds { 1295b5810039SNick Piggin struct mm_struct *mm = tlb->mm; 1296d16dfc55SPeter Zijlstra int force_flush = 0; 1297d559db08SKAMEZAWA Hiroyuki int rss[NR_MM_COUNTERS]; 129897a89413SPeter Zijlstra spinlock_t *ptl; 12995f1a1907SSteven Rostedt pte_t *start_pte; 130097a89413SPeter Zijlstra pte_t *pte; 13018a5f14a2SKirill A. Shutemov swp_entry_t entry; 1302d559db08SKAMEZAWA Hiroyuki 130307e32661SAneesh Kumar K.V tlb_remove_check_page_size_change(tlb, PAGE_SIZE); 1304d16dfc55SPeter Zijlstra again: 1305e303297eSPeter Zijlstra init_rss_vec(rss); 13065f1a1907SSteven Rostedt start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 13075f1a1907SSteven Rostedt pte = start_pte; 13083ea27719SMel Gorman flush_tlb_batched_pending(mm); 13096606c3e0SZachary Amsden arch_enter_lazy_mmu_mode(); 13101da177e4SLinus Torvalds do { 13111da177e4SLinus Torvalds pte_t ptent = *pte; 1312166f61b9STobin C Harding if (pte_none(ptent)) 13131da177e4SLinus Torvalds continue; 131451c6f666SRobin Holt 13156f5e6b9eSHugh Dickins if (pte_present(ptent)) { 13166f5e6b9eSHugh Dickins struct page *page; 13176f5e6b9eSHugh Dickins 1318df6ad698SJérôme Glisse page = _vm_normal_page(vma, addr, ptent, true); 13191da177e4SLinus Torvalds if (unlikely(details) && page) { 13201da177e4SLinus Torvalds /* 13211da177e4SLinus Torvalds * unmap_shared_mapping_pages() wants to 13221da177e4SLinus Torvalds * invalidate cache without truncating: 13231da177e4SLinus Torvalds * unmap shared but keep private pages. 13241da177e4SLinus Torvalds */ 13251da177e4SLinus Torvalds if (details->check_mapping && 1326800d8c63SKirill A. Shutemov details->check_mapping != page_rmapping(page)) 13271da177e4SLinus Torvalds continue; 13281da177e4SLinus Torvalds } 1329b5810039SNick Piggin ptent = ptep_get_and_clear_full(mm, addr, pte, 1330a600388dSZachary Amsden tlb->fullmm); 13311da177e4SLinus Torvalds tlb_remove_tlb_entry(tlb, pte, addr); 13321da177e4SLinus Torvalds if (unlikely(!page)) 13331da177e4SLinus Torvalds continue; 1334eca56ff9SJerome Marchand 1335eca56ff9SJerome Marchand if (!PageAnon(page)) { 13361cf35d47SLinus Torvalds if (pte_dirty(ptent)) { 13371cf35d47SLinus Torvalds force_flush = 1; 13386237bcd9SHugh Dickins set_page_dirty(page); 13391cf35d47SLinus Torvalds } 13404917e5d0SJohannes Weiner if (pte_young(ptent) && 134164363aadSJoe Perches likely(!(vma->vm_flags & VM_SEQ_READ))) 1342bf3f3bc5SNick Piggin mark_page_accessed(page); 13436237bcd9SHugh Dickins } 1344eca56ff9SJerome Marchand rss[mm_counter(page)]--; 1345d281ee61SKirill A. 
Shutemov page_remove_rmap(page, false); 13463dc14741SHugh Dickins if (unlikely(page_mapcount(page) < 0)) 13473dc14741SHugh Dickins print_bad_pte(vma, addr, ptent, page); 1348e9d55e15SAneesh Kumar K.V if (unlikely(__tlb_remove_page(tlb, page))) { 13491cf35d47SLinus Torvalds force_flush = 1; 1350ce9ec37bSWill Deacon addr += PAGE_SIZE; 1351d16dfc55SPeter Zijlstra break; 13521cf35d47SLinus Torvalds } 13531da177e4SLinus Torvalds continue; 13541da177e4SLinus Torvalds } 13555042db43SJérôme Glisse 13565042db43SJérôme Glisse entry = pte_to_swp_entry(ptent); 13575042db43SJérôme Glisse if (non_swap_entry(entry) && is_device_private_entry(entry)) { 13585042db43SJérôme Glisse struct page *page = device_private_entry_to_page(entry); 13595042db43SJérôme Glisse 13605042db43SJérôme Glisse if (unlikely(details && details->check_mapping)) { 13615042db43SJérôme Glisse /* 13625042db43SJérôme Glisse * unmap_shared_mapping_pages() wants to 13635042db43SJérôme Glisse * invalidate cache without truncating: 13645042db43SJérôme Glisse * unmap shared but keep private pages. 13655042db43SJérôme Glisse */ 13665042db43SJérôme Glisse if (details->check_mapping != 13675042db43SJérôme Glisse page_rmapping(page)) 13685042db43SJérôme Glisse continue; 13695042db43SJérôme Glisse } 13705042db43SJérôme Glisse 13715042db43SJérôme Glisse pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); 13725042db43SJérôme Glisse rss[mm_counter(page)]--; 13735042db43SJérôme Glisse page_remove_rmap(page, false); 13745042db43SJérôme Glisse put_page(page); 13755042db43SJérôme Glisse continue; 13765042db43SJérôme Glisse } 13775042db43SJérôme Glisse 13783e8715fdSKirill A. Shutemov /* If details->check_mapping, we leave swap entries. */ 13793e8715fdSKirill A. Shutemov if (unlikely(details)) 13801da177e4SLinus Torvalds continue; 1381b084d435SKAMEZAWA Hiroyuki 13828a5f14a2SKirill A. Shutemov entry = pte_to_swp_entry(ptent); 1383b084d435SKAMEZAWA Hiroyuki if (!non_swap_entry(entry)) 1384b084d435SKAMEZAWA Hiroyuki rss[MM_SWAPENTS]--; 13859f9f1acdSKonstantin Khlebnikov else if (is_migration_entry(entry)) { 13869f9f1acdSKonstantin Khlebnikov struct page *page; 13879f9f1acdSKonstantin Khlebnikov 13889f9f1acdSKonstantin Khlebnikov page = migration_entry_to_page(entry); 1389eca56ff9SJerome Marchand rss[mm_counter(page)]--; 13909f9f1acdSKonstantin Khlebnikov } 1391b084d435SKAMEZAWA Hiroyuki if (unlikely(!free_swap_and_cache(entry))) 13922509ef26SHugh Dickins print_bad_pte(vma, addr, ptent, NULL); 13939888a1caSZachary Amsden pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); 139497a89413SPeter Zijlstra } while (pte++, addr += PAGE_SIZE, addr != end); 1395ae859762SHugh Dickins 1396d559db08SKAMEZAWA Hiroyuki add_mm_rss_vec(mm, rss); 13976606c3e0SZachary Amsden arch_leave_lazy_mmu_mode(); 139851c6f666SRobin Holt 13991cf35d47SLinus Torvalds /* Do the actual TLB flush before dropping ptl */ 1400fb7332a9SWill Deacon if (force_flush) 14011cf35d47SLinus Torvalds tlb_flush_mmu_tlbonly(tlb); 14021cf35d47SLinus Torvalds pte_unmap_unlock(start_pte, ptl); 14031cf35d47SLinus Torvalds 14041cf35d47SLinus Torvalds /* 14051cf35d47SLinus Torvalds * If we forced a TLB flush (either due to running out of 14061cf35d47SLinus Torvalds * batch buffers or because we needed to flush dirty TLB 14071cf35d47SLinus Torvalds * entries before releasing the ptl), free the batched 14081cf35d47SLinus Torvalds * memory too. Restart if we didn't do everything. 
14091cf35d47SLinus Torvalds */ 14101cf35d47SLinus Torvalds if (force_flush) { 14111cf35d47SLinus Torvalds force_flush = 0; 14121cf35d47SLinus Torvalds tlb_flush_mmu_free(tlb); 14132b047252SLinus Torvalds if (addr != end) 1414d16dfc55SPeter Zijlstra goto again; 1415d16dfc55SPeter Zijlstra } 1416d16dfc55SPeter Zijlstra 141751c6f666SRobin Holt return addr; 14181da177e4SLinus Torvalds } 14191da177e4SLinus Torvalds 142051c6f666SRobin Holt static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, 1421b5810039SNick Piggin struct vm_area_struct *vma, pud_t *pud, 14221da177e4SLinus Torvalds unsigned long addr, unsigned long end, 142397a89413SPeter Zijlstra struct zap_details *details) 14241da177e4SLinus Torvalds { 14251da177e4SLinus Torvalds pmd_t *pmd; 14261da177e4SLinus Torvalds unsigned long next; 14271da177e4SLinus Torvalds 14281da177e4SLinus Torvalds pmd = pmd_offset(pud, addr); 14291da177e4SLinus Torvalds do { 14301da177e4SLinus Torvalds next = pmd_addr_end(addr, end); 143184c3fc4eSZi Yan if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { 143253406ed1SHugh Dickins if (next - addr != HPAGE_PMD_SIZE) 1433fd60775aSDavid Rientjes __split_huge_pmd(vma, pmd, addr, false, NULL); 143453406ed1SHugh Dickins else if (zap_huge_pmd(tlb, vma, pmd, addr)) 14351a5a9906SAndrea Arcangeli goto next; 143671e3aac0SAndrea Arcangeli /* fall through */ 143771e3aac0SAndrea Arcangeli } 14381a5a9906SAndrea Arcangeli /* 14391a5a9906SAndrea Arcangeli * Here there can be other concurrent MADV_DONTNEED or 14401a5a9906SAndrea Arcangeli * trans huge page faults running, and if the pmd is 14411a5a9906SAndrea Arcangeli * none or trans huge it can change under us. This is 14421a5a9906SAndrea Arcangeli * because MADV_DONTNEED holds the mmap_sem in read 14431a5a9906SAndrea Arcangeli * mode. 14441a5a9906SAndrea Arcangeli */ 14451a5a9906SAndrea Arcangeli if (pmd_none_or_trans_huge_or_clear_bad(pmd)) 14461a5a9906SAndrea Arcangeli goto next; 144797a89413SPeter Zijlstra next = zap_pte_range(tlb, vma, pmd, addr, next, details); 14481a5a9906SAndrea Arcangeli next: 144997a89413SPeter Zijlstra cond_resched(); 145097a89413SPeter Zijlstra } while (pmd++, addr = next, addr != end); 145151c6f666SRobin Holt 145251c6f666SRobin Holt return addr; 14531da177e4SLinus Torvalds } 14541da177e4SLinus Torvalds 145551c6f666SRobin Holt static inline unsigned long zap_pud_range(struct mmu_gather *tlb, 1456c2febafcSKirill A. Shutemov struct vm_area_struct *vma, p4d_t *p4d, 14571da177e4SLinus Torvalds unsigned long addr, unsigned long end, 145897a89413SPeter Zijlstra struct zap_details *details) 14591da177e4SLinus Torvalds { 14601da177e4SLinus Torvalds pud_t *pud; 14611da177e4SLinus Torvalds unsigned long next; 14621da177e4SLinus Torvalds 1463c2febafcSKirill A. 
Shutemov pud = pud_offset(p4d, addr); 14641da177e4SLinus Torvalds do { 14651da177e4SLinus Torvalds next = pud_addr_end(addr, end); 1466a00cc7d9SMatthew Wilcox if (pud_trans_huge(*pud) || pud_devmap(*pud)) { 1467a00cc7d9SMatthew Wilcox if (next - addr != HPAGE_PUD_SIZE) { 1468a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma); 1469a00cc7d9SMatthew Wilcox split_huge_pud(vma, pud, addr); 1470a00cc7d9SMatthew Wilcox } else if (zap_huge_pud(tlb, vma, pud, addr)) 1471a00cc7d9SMatthew Wilcox goto next; 1472a00cc7d9SMatthew Wilcox /* fall through */ 1473a00cc7d9SMatthew Wilcox } 147497a89413SPeter Zijlstra if (pud_none_or_clear_bad(pud)) 14751da177e4SLinus Torvalds continue; 147697a89413SPeter Zijlstra next = zap_pmd_range(tlb, vma, pud, addr, next, details); 1477a00cc7d9SMatthew Wilcox next: 1478a00cc7d9SMatthew Wilcox cond_resched(); 147997a89413SPeter Zijlstra } while (pud++, addr = next, addr != end); 148051c6f666SRobin Holt 148151c6f666SRobin Holt return addr; 14821da177e4SLinus Torvalds } 14831da177e4SLinus Torvalds 1484c2febafcSKirill A. Shutemov static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, 1485c2febafcSKirill A. Shutemov struct vm_area_struct *vma, pgd_t *pgd, 1486c2febafcSKirill A. Shutemov unsigned long addr, unsigned long end, 1487c2febafcSKirill A. Shutemov struct zap_details *details) 1488c2febafcSKirill A. Shutemov { 1489c2febafcSKirill A. Shutemov p4d_t *p4d; 1490c2febafcSKirill A. Shutemov unsigned long next; 1491c2febafcSKirill A. Shutemov 1492c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, addr); 1493c2febafcSKirill A. Shutemov do { 1494c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 1495c2febafcSKirill A. Shutemov if (p4d_none_or_clear_bad(p4d)) 1496c2febafcSKirill A. Shutemov continue; 1497c2febafcSKirill A. Shutemov next = zap_pud_range(tlb, vma, p4d, addr, next, details); 1498c2febafcSKirill A. Shutemov } while (p4d++, addr = next, addr != end); 1499c2febafcSKirill A. Shutemov 1500c2febafcSKirill A. Shutemov return addr; 1501c2febafcSKirill A. Shutemov } 1502c2febafcSKirill A. Shutemov 1503aac45363SMichal Hocko void unmap_page_range(struct mmu_gather *tlb, 150451c6f666SRobin Holt struct vm_area_struct *vma, 15051da177e4SLinus Torvalds unsigned long addr, unsigned long end, 150697a89413SPeter Zijlstra struct zap_details *details) 15071da177e4SLinus Torvalds { 15081da177e4SLinus Torvalds pgd_t *pgd; 15091da177e4SLinus Torvalds unsigned long next; 15101da177e4SLinus Torvalds 15111da177e4SLinus Torvalds BUG_ON(addr >= end); 15121da177e4SLinus Torvalds tlb_start_vma(tlb, vma); 15131da177e4SLinus Torvalds pgd = pgd_offset(vma->vm_mm, addr); 15141da177e4SLinus Torvalds do { 15151da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 151697a89413SPeter Zijlstra if (pgd_none_or_clear_bad(pgd)) 15171da177e4SLinus Torvalds continue; 1518c2febafcSKirill A. 
Shutemov next = zap_p4d_range(tlb, vma, pgd, addr, next, details); 151997a89413SPeter Zijlstra } while (pgd++, addr = next, addr != end); 15201da177e4SLinus Torvalds tlb_end_vma(tlb, vma); 15211da177e4SLinus Torvalds } 15221da177e4SLinus Torvalds 1523f5cc4eefSAl Viro 1524f5cc4eefSAl Viro static void unmap_single_vma(struct mmu_gather *tlb, 15251da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long start_addr, 15264f74d2c8SLinus Torvalds unsigned long end_addr, 15271da177e4SLinus Torvalds struct zap_details *details) 15281da177e4SLinus Torvalds { 1529f5cc4eefSAl Viro unsigned long start = max(vma->vm_start, start_addr); 15301da177e4SLinus Torvalds unsigned long end; 15311da177e4SLinus Torvalds 15321da177e4SLinus Torvalds if (start >= vma->vm_end) 1533f5cc4eefSAl Viro return; 15341da177e4SLinus Torvalds end = min(vma->vm_end, end_addr); 15351da177e4SLinus Torvalds if (end <= vma->vm_start) 1536f5cc4eefSAl Viro return; 15371da177e4SLinus Torvalds 1538cbc91f71SSrikar Dronamraju if (vma->vm_file) 1539cbc91f71SSrikar Dronamraju uprobe_munmap(vma, start, end); 1540cbc91f71SSrikar Dronamraju 1541b3b9c293SKonstantin Khlebnikov if (unlikely(vma->vm_flags & VM_PFNMAP)) 15425180da41SSuresh Siddha untrack_pfn(vma, 0, 0); 15432ab64037Svenkatesh.pallipadi@intel.com 15448b2a1238SAl Viro if (start != end) { 154551c6f666SRobin Holt if (unlikely(is_vm_hugetlb_page(vma))) { 1546a137e1ccSAndi Kleen /* 1547a137e1ccSAndi Kleen * It is undesirable to test vma->vm_file as it 1548a137e1ccSAndi Kleen * should be non-null for valid hugetlb area. 1549a137e1ccSAndi Kleen * However, vm_file will be NULL in the error 15507aa6b4adSDavidlohr Bueso * cleanup path of mmap_region. When 1551a137e1ccSAndi Kleen * hugetlbfs ->mmap method fails, 15527aa6b4adSDavidlohr Bueso * mmap_region() nullifies vma->vm_file 1553a137e1ccSAndi Kleen * before calling this function to clean up. 1554a137e1ccSAndi Kleen * Since no pte has actually been setup, it is 1555a137e1ccSAndi Kleen * safe to do nothing in this case. 1556a137e1ccSAndi Kleen */ 155724669e58SAneesh Kumar K.V if (vma->vm_file) { 155883cde9e8SDavidlohr Bueso i_mmap_lock_write(vma->vm_file->f_mapping); 1559d833352aSMel Gorman __unmap_hugepage_range_final(tlb, vma, start, end, NULL); 156083cde9e8SDavidlohr Bueso i_mmap_unlock_write(vma->vm_file->f_mapping); 156124669e58SAneesh Kumar K.V } 156251c6f666SRobin Holt } else 1563038c7aa1SAl Viro unmap_page_range(tlb, vma, start, end, details); 156497a89413SPeter Zijlstra } 156551c6f666SRobin Holt } 15661da177e4SLinus Torvalds 1567f5cc4eefSAl Viro /** 1568f5cc4eefSAl Viro * unmap_vmas - unmap a range of memory covered by a list of vma's 1569f5cc4eefSAl Viro * @tlb: address of the caller's struct mmu_gather 1570f5cc4eefSAl Viro * @vma: the starting vma 1571f5cc4eefSAl Viro * @start_addr: virtual address at which to start unmapping 1572f5cc4eefSAl Viro * @end_addr: virtual address at which to end unmapping 1573f5cc4eefSAl Viro * 1574f5cc4eefSAl Viro * Unmap all pages in the vma list. 1575f5cc4eefSAl Viro * 1576f5cc4eefSAl Viro * Only addresses between `start' and `end' will be unmapped. 1577f5cc4eefSAl Viro * 1578f5cc4eefSAl Viro * The VMA list must be sorted in ascending virtual address order. 1579f5cc4eefSAl Viro * 1580f5cc4eefSAl Viro * unmap_vmas() assumes that the caller will flush the whole unmapped address 1581f5cc4eefSAl Viro * range after unmap_vmas() returns. 
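 *
 * A typical caller therefore brackets the call with the same mmu_gather
 * setup and teardown that zap_page_range() below uses.  Illustrative sketch
 * only, assuming a hypothetical caller that already has mm, vma, start and
 * end in hand:
 *
 *	struct mmu_gather tlb;
 *
 *	lru_add_drain();
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	update_hiwater_rss(mm);
 *	unmap_vmas(&tlb, vma, start, end);
 *	tlb_finish_mmu(&tlb, start, end);	// final flush, frees batched pages
 *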
So the only responsibility here is to 1582f5cc4eefSAl Viro * ensure that any thus-far unmapped pages are flushed before unmap_vmas() 1583f5cc4eefSAl Viro * drops the lock and schedules. 1584f5cc4eefSAl Viro */ 1585f5cc4eefSAl Viro void unmap_vmas(struct mmu_gather *tlb, 1586f5cc4eefSAl Viro struct vm_area_struct *vma, unsigned long start_addr, 15874f74d2c8SLinus Torvalds unsigned long end_addr) 1588f5cc4eefSAl Viro { 1589f5cc4eefSAl Viro struct mm_struct *mm = vma->vm_mm; 1590f5cc4eefSAl Viro 1591f5cc4eefSAl Viro mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); 1592f5cc4eefSAl Viro for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) 15934f74d2c8SLinus Torvalds unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); 1594cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); 15951da177e4SLinus Torvalds } 15961da177e4SLinus Torvalds 15971da177e4SLinus Torvalds /** 15981da177e4SLinus Torvalds * zap_page_range - remove user pages in a given range 15991da177e4SLinus Torvalds * @vma: vm_area_struct holding the applicable pages 1600eb4546bbSRandy Dunlap * @start: starting address of pages to zap 16011da177e4SLinus Torvalds * @size: number of bytes to zap 1602f5cc4eefSAl Viro * 1603f5cc4eefSAl Viro * Caller must protect the VMA list 16041da177e4SLinus Torvalds */ 16057e027b14SLinus Torvalds void zap_page_range(struct vm_area_struct *vma, unsigned long start, 1606ecf1385dSKirill A. Shutemov unsigned long size) 16071da177e4SLinus Torvalds { 16081da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 1609d16dfc55SPeter Zijlstra struct mmu_gather tlb; 16107e027b14SLinus Torvalds unsigned long end = start + size; 16111da177e4SLinus Torvalds 16121da177e4SLinus Torvalds lru_add_drain(); 16132b047252SLinus Torvalds tlb_gather_mmu(&tlb, mm, start, end); 1614365e9c87SHugh Dickins update_hiwater_rss(mm); 16157e027b14SLinus Torvalds mmu_notifier_invalidate_range_start(mm, start, end); 16164647706eSMel Gorman for ( ; vma && vma->vm_start < end; vma = vma->vm_next) { 1617ecf1385dSKirill A. Shutemov unmap_single_vma(&tlb, vma, start, end, NULL); 16184647706eSMel Gorman 16194647706eSMel Gorman /* 16204647706eSMel Gorman * zap_page_range does not specify whether mmap_sem should be 16214647706eSMel Gorman * held for read or write. That allows parallel zap_page_range 16224647706eSMel Gorman * operations to unmap a PTE and defer a flush meaning that 16234647706eSMel Gorman * this call observes pte_none and fails to flush the TLB. 16244647706eSMel Gorman * Rather than adding a complex API, ensure that no stale 16254647706eSMel Gorman * TLB entries exist when this call returns. 16264647706eSMel Gorman */ 16274647706eSMel Gorman flush_tlb_range(vma, start, end); 16284647706eSMel Gorman } 16294647706eSMel Gorman 16307e027b14SLinus Torvalds mmu_notifier_invalidate_range_end(mm, start, end); 16317e027b14SLinus Torvalds tlb_finish_mmu(&tlb, start, end); 16321da177e4SLinus Torvalds } 16331da177e4SLinus Torvalds 1634c627f9ccSJack Steiner /** 1635f5cc4eefSAl Viro * zap_page_range_single - remove user pages in a given range 1636f5cc4eefSAl Viro * @vma: vm_area_struct holding the applicable pages 1637f5cc4eefSAl Viro * @address: starting address of pages to zap 1638f5cc4eefSAl Viro * @size: number of bytes to zap 16398a5f14a2SKirill A. Shutemov * @details: details of shared cache invalidation 1640f5cc4eefSAl Viro * 1641f5cc4eefSAl Viro * The range must fit into one VMA. 
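 *
 * From userspace, the effect of zapping anonymous pages can be observed via
 * madvise(MADV_DONTNEED), one of the paths that leads to zap_page_range()
 * above.  Sketch of a test-program body (headers and error checks omitted):
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	p[0] = 42;
 *	madvise(p, 4096, MADV_DONTNEED);	// the old page is zapped here
 *	assert(p[0] == 0);			// now backed by fresh zero-fill
 *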
1642f5cc4eefSAl Viro */ 1643f5cc4eefSAl Viro static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, 1644f5cc4eefSAl Viro unsigned long size, struct zap_details *details) 1645f5cc4eefSAl Viro { 1646f5cc4eefSAl Viro struct mm_struct *mm = vma->vm_mm; 1647f5cc4eefSAl Viro struct mmu_gather tlb; 1648f5cc4eefSAl Viro unsigned long end = address + size; 1649f5cc4eefSAl Viro 1650f5cc4eefSAl Viro lru_add_drain(); 16512b047252SLinus Torvalds tlb_gather_mmu(&tlb, mm, address, end); 1652f5cc4eefSAl Viro update_hiwater_rss(mm); 1653f5cc4eefSAl Viro mmu_notifier_invalidate_range_start(mm, address, end); 16544f74d2c8SLinus Torvalds unmap_single_vma(&tlb, vma, address, end, details); 1655f5cc4eefSAl Viro mmu_notifier_invalidate_range_end(mm, address, end); 1656f5cc4eefSAl Viro tlb_finish_mmu(&tlb, address, end); 16571da177e4SLinus Torvalds } 16581da177e4SLinus Torvalds 1659c627f9ccSJack Steiner /** 1660c627f9ccSJack Steiner * zap_vma_ptes - remove ptes mapping the vma 1661c627f9ccSJack Steiner * @vma: vm_area_struct holding ptes to be zapped 1662c627f9ccSJack Steiner * @address: starting address of pages to zap 1663c627f9ccSJack Steiner * @size: number of bytes to zap 1664c627f9ccSJack Steiner * 1665c627f9ccSJack Steiner * This function only unmaps ptes assigned to VM_PFNMAP vmas. 1666c627f9ccSJack Steiner * 1667c627f9ccSJack Steiner * The entire address range must be fully contained within the vma. 1668c627f9ccSJack Steiner * 1669c627f9ccSJack Steiner */ 167027d036e3SLeon Romanovsky void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1671c627f9ccSJack Steiner unsigned long size) 1672c627f9ccSJack Steiner { 1673c627f9ccSJack Steiner if (address < vma->vm_start || address + size > vma->vm_end || 1674c627f9ccSJack Steiner !(vma->vm_flags & VM_PFNMAP)) 167527d036e3SLeon Romanovsky return; 167627d036e3SLeon Romanovsky 1677f5cc4eefSAl Viro zap_page_range_single(vma, address, size, NULL); 1678c627f9ccSJack Steiner } 1679c627f9ccSJack Steiner EXPORT_SYMBOL_GPL(zap_vma_ptes); 1680c627f9ccSJack Steiner 168125ca1d6cSNamhyung Kim pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, 1682920c7a5dSHarvey Harrison spinlock_t **ptl) 1683c9cfcddfSLinus Torvalds { 1684c2febafcSKirill A. Shutemov pgd_t *pgd; 1685c2febafcSKirill A. Shutemov p4d_t *p4d; 1686c2febafcSKirill A. Shutemov pud_t *pud; 1687c2febafcSKirill A. Shutemov pmd_t *pmd; 1688c2febafcSKirill A. Shutemov 1689c2febafcSKirill A. Shutemov pgd = pgd_offset(mm, addr); 1690c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, addr); 1691c2febafcSKirill A. Shutemov if (!p4d) 1692c2febafcSKirill A. Shutemov return NULL; 1693c2febafcSKirill A. Shutemov pud = pud_alloc(mm, p4d, addr); 1694c2febafcSKirill A. Shutemov if (!pud) 1695c2febafcSKirill A. Shutemov return NULL; 1696c2febafcSKirill A. Shutemov pmd = pmd_alloc(mm, pud, addr); 1697c2febafcSKirill A. Shutemov if (!pmd) 1698c2febafcSKirill A. Shutemov return NULL; 1699c2febafcSKirill A. Shutemov 1700f66055abSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*pmd)); 1701c9cfcddfSLinus Torvalds return pte_alloc_map_lock(mm, pmd, addr, ptl); 1702c9cfcddfSLinus Torvalds } 1703c9cfcddfSLinus Torvalds 17041da177e4SLinus Torvalds /* 1705238f58d8SLinus Torvalds * This is the old fallback for page remapping. 1706238f58d8SLinus Torvalds * 1707238f58d8SLinus Torvalds * For historical reasons, it only allows reserved pages. 
Only 1708238f58d8SLinus Torvalds * old drivers should use this, and they needed to mark their 1709238f58d8SLinus Torvalds * pages reserved for the old functions anyway. 1710238f58d8SLinus Torvalds */ 1711423bad60SNick Piggin static int insert_page(struct vm_area_struct *vma, unsigned long addr, 1712423bad60SNick Piggin struct page *page, pgprot_t prot) 1713238f58d8SLinus Torvalds { 1714423bad60SNick Piggin struct mm_struct *mm = vma->vm_mm; 1715238f58d8SLinus Torvalds int retval; 1716238f58d8SLinus Torvalds pte_t *pte; 1717238f58d8SLinus Torvalds spinlock_t *ptl; 1718238f58d8SLinus Torvalds 1719238f58d8SLinus Torvalds retval = -EINVAL; 1720a145dd41SLinus Torvalds if (PageAnon(page)) 17215b4e655eSKAMEZAWA Hiroyuki goto out; 1722238f58d8SLinus Torvalds retval = -ENOMEM; 1723238f58d8SLinus Torvalds flush_dcache_page(page); 1724c9cfcddfSLinus Torvalds pte = get_locked_pte(mm, addr, &ptl); 1725238f58d8SLinus Torvalds if (!pte) 17265b4e655eSKAMEZAWA Hiroyuki goto out; 1727238f58d8SLinus Torvalds retval = -EBUSY; 1728238f58d8SLinus Torvalds if (!pte_none(*pte)) 1729238f58d8SLinus Torvalds goto out_unlock; 1730238f58d8SLinus Torvalds 1731238f58d8SLinus Torvalds /* Ok, finally just insert the thing.. */ 1732238f58d8SLinus Torvalds get_page(page); 1733eca56ff9SJerome Marchand inc_mm_counter_fast(mm, mm_counter_file(page)); 1734dd78feddSKirill A. Shutemov page_add_file_rmap(page, false); 1735238f58d8SLinus Torvalds set_pte_at(mm, addr, pte, mk_pte(page, prot)); 1736238f58d8SLinus Torvalds 1737238f58d8SLinus Torvalds retval = 0; 17388a9f3ccdSBalbir Singh pte_unmap_unlock(pte, ptl); 17398a9f3ccdSBalbir Singh return retval; 1740238f58d8SLinus Torvalds out_unlock: 1741238f58d8SLinus Torvalds pte_unmap_unlock(pte, ptl); 1742238f58d8SLinus Torvalds out: 1743238f58d8SLinus Torvalds return retval; 1744238f58d8SLinus Torvalds } 1745238f58d8SLinus Torvalds 1746bfa5bf6dSRolf Eike Beer /** 1747bfa5bf6dSRolf Eike Beer * vm_insert_page - insert single page into user vma 1748bfa5bf6dSRolf Eike Beer * @vma: user vma to map to 1749bfa5bf6dSRolf Eike Beer * @addr: target user address of this page 1750bfa5bf6dSRolf Eike Beer * @page: source kernel page 1751bfa5bf6dSRolf Eike Beer * 1752a145dd41SLinus Torvalds * This allows drivers to insert individual pages they've allocated 1753a145dd41SLinus Torvalds * into a user vma. 1754a145dd41SLinus Torvalds * 1755a145dd41SLinus Torvalds * The page has to be a nice clean _individual_ kernel allocation. 1756a145dd41SLinus Torvalds * If you allocate a compound page, you need to have marked it as 1757a145dd41SLinus Torvalds * such (__GFP_COMP), or manually just split the page up yourself 17588dfcc9baSNick Piggin * (see split_page()). 1759a145dd41SLinus Torvalds * 1760a145dd41SLinus Torvalds * NOTE! Traditionally this was done with "remap_pfn_range()" which 1761a145dd41SLinus Torvalds * took an arbitrary page protection parameter. This doesn't allow 1762a145dd41SLinus Torvalds * that. Your vma protection will have to be set up correctly, which 1763a145dd41SLinus Torvalds * means that if you want a shared writable mapping, you'd better 1764a145dd41SLinus Torvalds * ask for a shared writable mapping! 1765a145dd41SLinus Torvalds * 1766a145dd41SLinus Torvalds * The page does not need to be reserved. 17674b6e1e37SKonstantin Khlebnikov * 17684b6e1e37SKonstantin Khlebnikov * Usually this function is called from f_op->mmap() handler 17694b6e1e37SKonstantin Khlebnikov * under mm->mmap_sem write-lock, so it can change vma->vm_flags. 
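 *
 * In that usual case, a driver exposing a single kernel page might have an
 * ->mmap() handler along these lines (illustrative sketch; "demo_buf_mmap"
 * is made up and the driver's own buffer management is omitted):
 *
 *	static int demo_buf_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *
 *		if (!page)
 *			return -ENOMEM;
 *		return vm_insert_page(vma, vma->vm_start, page);
 *	}
 *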
17704b6e1e37SKonstantin Khlebnikov * Caller must set VM_MIXEDMAP on vma if it wants to call this 17714b6e1e37SKonstantin Khlebnikov * function from other places, for example from page-fault handler. 1772a145dd41SLinus Torvalds */ 1773423bad60SNick Piggin int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, 1774423bad60SNick Piggin struct page *page) 1775a145dd41SLinus Torvalds { 1776a145dd41SLinus Torvalds if (addr < vma->vm_start || addr >= vma->vm_end) 1777a145dd41SLinus Torvalds return -EFAULT; 1778a145dd41SLinus Torvalds if (!page_count(page)) 1779a145dd41SLinus Torvalds return -EINVAL; 17804b6e1e37SKonstantin Khlebnikov if (!(vma->vm_flags & VM_MIXEDMAP)) { 17814b6e1e37SKonstantin Khlebnikov BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); 17824b6e1e37SKonstantin Khlebnikov BUG_ON(vma->vm_flags & VM_PFNMAP); 17834b6e1e37SKonstantin Khlebnikov vma->vm_flags |= VM_MIXEDMAP; 17844b6e1e37SKonstantin Khlebnikov } 1785423bad60SNick Piggin return insert_page(vma, addr, page, vma->vm_page_prot); 1786a145dd41SLinus Torvalds } 1787e3c3374fSLinus Torvalds EXPORT_SYMBOL(vm_insert_page); 1788a145dd41SLinus Torvalds 1789423bad60SNick Piggin static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, 1790b2770da6SRoss Zwisler pfn_t pfn, pgprot_t prot, bool mkwrite) 1791423bad60SNick Piggin { 1792423bad60SNick Piggin struct mm_struct *mm = vma->vm_mm; 1793423bad60SNick Piggin int retval; 1794423bad60SNick Piggin pte_t *pte, entry; 1795423bad60SNick Piggin spinlock_t *ptl; 1796423bad60SNick Piggin 1797423bad60SNick Piggin retval = -ENOMEM; 1798423bad60SNick Piggin pte = get_locked_pte(mm, addr, &ptl); 1799423bad60SNick Piggin if (!pte) 1800423bad60SNick Piggin goto out; 1801423bad60SNick Piggin retval = -EBUSY; 1802b2770da6SRoss Zwisler if (!pte_none(*pte)) { 1803b2770da6SRoss Zwisler if (mkwrite) { 1804b2770da6SRoss Zwisler /* 1805b2770da6SRoss Zwisler * For read faults on private mappings the PFN passed 1806b2770da6SRoss Zwisler * in may not match the PFN we have mapped if the 1807b2770da6SRoss Zwisler * mapped PFN is a writeable COW page. In the mkwrite 1808b2770da6SRoss Zwisler * case we are creating a writable PTE for a shared 1809b2770da6SRoss Zwisler * mapping and we expect the PFNs to match. 1810b2770da6SRoss Zwisler */ 1811b2770da6SRoss Zwisler if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn))) 1812423bad60SNick Piggin goto out_unlock; 1813b2770da6SRoss Zwisler entry = *pte; 1814b2770da6SRoss Zwisler goto out_mkwrite; 1815b2770da6SRoss Zwisler } else 1816b2770da6SRoss Zwisler goto out_unlock; 1817b2770da6SRoss Zwisler } 1818423bad60SNick Piggin 1819423bad60SNick Piggin /* Ok, finally just insert the thing.. */ 182001c8f1c4SDan Williams if (pfn_t_devmap(pfn)) 182101c8f1c4SDan Williams entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); 182201c8f1c4SDan Williams else 182301c8f1c4SDan Williams entry = pte_mkspecial(pfn_t_pte(pfn, prot)); 1824b2770da6SRoss Zwisler 1825b2770da6SRoss Zwisler out_mkwrite: 1826b2770da6SRoss Zwisler if (mkwrite) { 1827b2770da6SRoss Zwisler entry = pte_mkyoung(entry); 1828b2770da6SRoss Zwisler entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1829b2770da6SRoss Zwisler } 1830b2770da6SRoss Zwisler 1831423bad60SNick Piggin set_pte_at(mm, addr, pte, entry); 18324b3073e1SRussell King update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? 
*/ 1834423bad60SNick Piggin retval = 0; 1835423bad60SNick Piggin out_unlock: 1836423bad60SNick Piggin pte_unmap_unlock(pte, ptl); 1837423bad60SNick Piggin out: 1838423bad60SNick Piggin return retval; 1839423bad60SNick Piggin } 1840423bad60SNick Piggin 1841e0dc0d8fSNick Piggin /** 1842e0dc0d8fSNick Piggin * vm_insert_pfn - insert single pfn into user vma 1843e0dc0d8fSNick Piggin * @vma: user vma to map to 1844e0dc0d8fSNick Piggin * @addr: target user address of this page 1845e0dc0d8fSNick Piggin * @pfn: source kernel pfn 1846e0dc0d8fSNick Piggin * 1847c462f179SRobert P. J. Day * Similar to vm_insert_page, this allows drivers to insert individual pages 1848e0dc0d8fSNick Piggin * they've allocated into a user vma. Same comments apply. 1849e0dc0d8fSNick Piggin * 1850e0dc0d8fSNick Piggin * This function should only be called from a vm_ops->fault handler, and 1851e0dc0d8fSNick Piggin * in that case the handler should return NULL. 18520d71d10aSNick Piggin * 18530d71d10aSNick Piggin * vma cannot be a COW mapping. 18540d71d10aSNick Piggin * 18550d71d10aSNick Piggin * As this is called only for pages that do not currently exist, we 18560d71d10aSNick Piggin * do not need to flush old virtual caches or the TLB. 1857e0dc0d8fSNick Piggin */ 1858e0dc0d8fSNick Piggin int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 1859e0dc0d8fSNick Piggin unsigned long pfn) 1860e0dc0d8fSNick Piggin { 18611745cbc5SAndy Lutomirski return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); 18621745cbc5SAndy Lutomirski } 18631745cbc5SAndy Lutomirski EXPORT_SYMBOL(vm_insert_pfn); 18641745cbc5SAndy Lutomirski 18651745cbc5SAndy Lutomirski /** 18661745cbc5SAndy Lutomirski * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot 18671745cbc5SAndy Lutomirski * @vma: user vma to map to 18681745cbc5SAndy Lutomirski * @addr: target user address of this page 18691745cbc5SAndy Lutomirski * @pfn: source kernel pfn 18701745cbc5SAndy Lutomirski * @pgprot: pgprot flags for the inserted page 18711745cbc5SAndy Lutomirski * 18721745cbc5SAndy Lutomirski * This is exactly like vm_insert_pfn, except that it allows drivers to 18731745cbc5SAndy Lutomirski * override pgprot on a per-page basis. 18741745cbc5SAndy Lutomirski * 18751745cbc5SAndy Lutomirski * This only makes sense for IO mappings, and it makes no sense for 18761745cbc5SAndy Lutomirski * cow mappings. In general, using multiple vmas is preferable; 18771745cbc5SAndy Lutomirski * vm_insert_pfn_prot should only be used if using multiple VMAs is 18781745cbc5SAndy Lutomirski * impractical. 18791745cbc5SAndy Lutomirski */ 18801745cbc5SAndy Lutomirski int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 18811745cbc5SAndy Lutomirski unsigned long pfn, pgprot_t pgprot) 18821745cbc5SAndy Lutomirski { 18832ab64037Svenkatesh.pallipadi@intel.com int ret; 18847e675137SNick Piggin /* 18857e675137SNick Piggin * Technically, architectures with pte_special can avoid all these 18867e675137SNick Piggin * restrictions (same for remap_pfn_range). However we would like 18877e675137SNick Piggin * consistency in testing and feature parity among all, so we should 18887e675137SNick Piggin * try to keep these invariants in place for everybody.
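 *
 * The usual caller of vm_insert_pfn()/vm_insert_pfn_prot() is a
 * vm_ops->fault handler, roughly along these lines (sketch only;
 * "demo_base_pfn" is a made-up per-device base pfn, and the error mapping
 * mirrors vmf_insert_mixed_mkwrite() further below):
 *
 *	static vm_fault_t demo_fault(struct vm_fault *vmf)
 *	{
 *		int err = vm_insert_pfn(vmf->vma, vmf->address,
 *					demo_base_pfn + vmf->pgoff);
 *
 *		if (err == -ENOMEM)
 *			return VM_FAULT_OOM;
 *		if (err < 0 && err != -EBUSY)
 *			return VM_FAULT_SIGBUS;
 *		return VM_FAULT_NOPAGE;
 *	}
 *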
18897e675137SNick Piggin */ 1890b379d790SJared Hulbert BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 1891b379d790SJared Hulbert BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 1892b379d790SJared Hulbert (VM_PFNMAP|VM_MIXEDMAP)); 1893b379d790SJared Hulbert BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 1894b379d790SJared Hulbert BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 1895e0dc0d8fSNick Piggin 1896423bad60SNick Piggin if (addr < vma->vm_start || addr >= vma->vm_end) 1897423bad60SNick Piggin return -EFAULT; 1898308a047cSBorislav Petkov 189942e4089cSAndi Kleen if (!pfn_modify_allowed(pfn, pgprot)) 190042e4089cSAndi Kleen return -EACCES; 190142e4089cSAndi Kleen 1902308a047cSBorislav Petkov track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); 19032ab64037Svenkatesh.pallipadi@intel.com 1904b2770da6SRoss Zwisler ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, 1905b2770da6SRoss Zwisler false); 19062ab64037Svenkatesh.pallipadi@intel.com 19072ab64037Svenkatesh.pallipadi@intel.com return ret; 1908e0dc0d8fSNick Piggin } 19091745cbc5SAndy Lutomirski EXPORT_SYMBOL(vm_insert_pfn_prot); 1910e0dc0d8fSNick Piggin 1911785a3fabSDan Williams static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) 1912785a3fabSDan Williams { 1913785a3fabSDan Williams /* these checks mirror the abort conditions in vm_normal_page */ 1914785a3fabSDan Williams if (vma->vm_flags & VM_MIXEDMAP) 1915785a3fabSDan Williams return true; 1916785a3fabSDan Williams if (pfn_t_devmap(pfn)) 1917785a3fabSDan Williams return true; 1918785a3fabSDan Williams if (pfn_t_special(pfn)) 1919785a3fabSDan Williams return true; 1920785a3fabSDan Williams if (is_zero_pfn(pfn_t_to_pfn(pfn))) 1921785a3fabSDan Williams return true; 1922785a3fabSDan Williams return false; 1923785a3fabSDan Williams } 1924785a3fabSDan Williams 1925b2770da6SRoss Zwisler static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 1926b2770da6SRoss Zwisler pfn_t pfn, bool mkwrite) 1927423bad60SNick Piggin { 192887744ab3SDan Williams pgprot_t pgprot = vma->vm_page_prot; 192987744ab3SDan Williams 1930785a3fabSDan Williams BUG_ON(!vm_mixed_ok(vma, pfn)); 1931423bad60SNick Piggin 1932423bad60SNick Piggin if (addr < vma->vm_start || addr >= vma->vm_end) 1933423bad60SNick Piggin return -EFAULT; 1934308a047cSBorislav Petkov 1935308a047cSBorislav Petkov track_pfn_insert(vma, &pgprot, pfn); 1936423bad60SNick Piggin 193742e4089cSAndi Kleen if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) 193842e4089cSAndi Kleen return -EACCES; 193942e4089cSAndi Kleen 1940423bad60SNick Piggin /* 1941423bad60SNick Piggin * If we don't have pte special, then we have to use the pfn_valid() 1942423bad60SNick Piggin * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* 1943423bad60SNick Piggin * refcount the page if pfn_valid is true (hence insert_page rather 194462eede62SHugh Dickins * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP 194562eede62SHugh Dickins * without pte special, it would there be refcounted as a normal page. 
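 *
 * A VM_MIXEDMAP user hands in a pfn_t so the devmap/special bits travel
 * with the pfn, e.g. from a fault handler (sketch; "dev_pfn" is a made-up
 * device pfn):
 *
 *	err = vm_insert_mixed(vmf->vma, vmf->address,
 *			      __pfn_to_pfn_t(dev_pfn, PFN_DEV));
 *
 * Whether that entry ends up refcounted (insert_page()) or installed as a
 * special pte (insert_pfn()) is decided here, not by the caller.
 *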
1946423bad60SNick Piggin */ 194700b3a331SLaurent Dufour if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && 194800b3a331SLaurent Dufour !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) { 1949423bad60SNick Piggin struct page *page; 1950423bad60SNick Piggin 195103fc2da6SDan Williams /* 195203fc2da6SDan Williams * At this point we are committed to insert_page() 195303fc2da6SDan Williams * regardless of whether the caller specified flags that 195403fc2da6SDan Williams * result in pfn_t_has_page() == false. 195503fc2da6SDan Williams */ 195603fc2da6SDan Williams page = pfn_to_page(pfn_t_to_pfn(pfn)); 195787744ab3SDan Williams return insert_page(vma, addr, page, pgprot); 1958423bad60SNick Piggin } 1959b2770da6SRoss Zwisler return insert_pfn(vma, addr, pfn, pgprot, mkwrite); 1960b2770da6SRoss Zwisler } 1961b2770da6SRoss Zwisler 1962b2770da6SRoss Zwisler int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 1963b2770da6SRoss Zwisler pfn_t pfn) 1964b2770da6SRoss Zwisler { 1965b2770da6SRoss Zwisler return __vm_insert_mixed(vma, addr, pfn, false); 1966b2770da6SRoss Zwisler 1967423bad60SNick Piggin } 1968423bad60SNick Piggin EXPORT_SYMBOL(vm_insert_mixed); 1969423bad60SNick Piggin 1970ab77dab4SSouptick Joarder /* 1971ab77dab4SSouptick Joarder * If the insertion of PTE failed because someone else already added a 1972ab77dab4SSouptick Joarder * different entry in the mean time, we treat that as success as we assume 1973ab77dab4SSouptick Joarder * the same entry was actually inserted. 1974ab77dab4SSouptick Joarder */ 1975ab77dab4SSouptick Joarder 1976ab77dab4SSouptick Joarder vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, 1977ab77dab4SSouptick Joarder unsigned long addr, pfn_t pfn) 1978b2770da6SRoss Zwisler { 1979ab77dab4SSouptick Joarder int err; 1980ab77dab4SSouptick Joarder 1981ab77dab4SSouptick Joarder err = __vm_insert_mixed(vma, addr, pfn, true); 1982ab77dab4SSouptick Joarder if (err == -ENOMEM) 1983ab77dab4SSouptick Joarder return VM_FAULT_OOM; 1984ab77dab4SSouptick Joarder if (err < 0 && err != -EBUSY) 1985ab77dab4SSouptick Joarder return VM_FAULT_SIGBUS; 1986ab77dab4SSouptick Joarder return VM_FAULT_NOPAGE; 1987b2770da6SRoss Zwisler } 1988ab77dab4SSouptick Joarder EXPORT_SYMBOL(vmf_insert_mixed_mkwrite); 1989b2770da6SRoss Zwisler 1990a145dd41SLinus Torvalds /* 19911da177e4SLinus Torvalds * maps a range of physical memory into the requested pages. the old 19921da177e4SLinus Torvalds * mappings are removed. 
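 *
 * remap_pfn_range() below is the public entry point for this machinery;
 * the classic use is a character-device ->mmap() that exposes a physical
 * region, as sketched here ("demo_phys" stands in for a made-up device
 * address):
 *
 *	static int demo_phys_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       demo_phys >> PAGE_SHIFT, size,
 *				       vma->vm_page_prot);
 *	}
 *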
any references to nonexistent pages result 19931da177e4SLinus Torvalds * in null mappings (currently treated as "copy-on-access") 19941da177e4SLinus Torvalds */ 19951da177e4SLinus Torvalds static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, 19961da177e4SLinus Torvalds unsigned long addr, unsigned long end, 19971da177e4SLinus Torvalds unsigned long pfn, pgprot_t prot) 19981da177e4SLinus Torvalds { 19991da177e4SLinus Torvalds pte_t *pte; 2000c74df32cSHugh Dickins spinlock_t *ptl; 200142e4089cSAndi Kleen int err = 0; 20021da177e4SLinus Torvalds 2003c74df32cSHugh Dickins pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 20041da177e4SLinus Torvalds if (!pte) 20051da177e4SLinus Torvalds return -ENOMEM; 20066606c3e0SZachary Amsden arch_enter_lazy_mmu_mode(); 20071da177e4SLinus Torvalds do { 20081da177e4SLinus Torvalds BUG_ON(!pte_none(*pte)); 200942e4089cSAndi Kleen if (!pfn_modify_allowed(pfn, prot)) { 201042e4089cSAndi Kleen err = -EACCES; 201142e4089cSAndi Kleen break; 201242e4089cSAndi Kleen } 20137e675137SNick Piggin set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); 20141da177e4SLinus Torvalds pfn++; 20151da177e4SLinus Torvalds } while (pte++, addr += PAGE_SIZE, addr != end); 20166606c3e0SZachary Amsden arch_leave_lazy_mmu_mode(); 2017c74df32cSHugh Dickins pte_unmap_unlock(pte - 1, ptl); 201842e4089cSAndi Kleen return err; 20191da177e4SLinus Torvalds } 20201da177e4SLinus Torvalds 20211da177e4SLinus Torvalds static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, 20221da177e4SLinus Torvalds unsigned long addr, unsigned long end, 20231da177e4SLinus Torvalds unsigned long pfn, pgprot_t prot) 20241da177e4SLinus Torvalds { 20251da177e4SLinus Torvalds pmd_t *pmd; 20261da177e4SLinus Torvalds unsigned long next; 202742e4089cSAndi Kleen int err; 20281da177e4SLinus Torvalds 20291da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT; 20301da177e4SLinus Torvalds pmd = pmd_alloc(mm, pud, addr); 20311da177e4SLinus Torvalds if (!pmd) 20321da177e4SLinus Torvalds return -ENOMEM; 2033f66055abSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*pmd)); 20341da177e4SLinus Torvalds do { 20351da177e4SLinus Torvalds next = pmd_addr_end(addr, end); 203642e4089cSAndi Kleen err = remap_pte_range(mm, pmd, addr, next, 203742e4089cSAndi Kleen pfn + (addr >> PAGE_SHIFT), prot); 203842e4089cSAndi Kleen if (err) 203942e4089cSAndi Kleen return err; 20401da177e4SLinus Torvalds } while (pmd++, addr = next, addr != end); 20411da177e4SLinus Torvalds return 0; 20421da177e4SLinus Torvalds } 20431da177e4SLinus Torvalds 2044c2febafcSKirill A. Shutemov static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, 20451da177e4SLinus Torvalds unsigned long addr, unsigned long end, 20461da177e4SLinus Torvalds unsigned long pfn, pgprot_t prot) 20471da177e4SLinus Torvalds { 20481da177e4SLinus Torvalds pud_t *pud; 20491da177e4SLinus Torvalds unsigned long next; 205042e4089cSAndi Kleen int err; 20511da177e4SLinus Torvalds 20521da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT; 2053c2febafcSKirill A.
Shutemov pud = pud_alloc(mm, p4d, addr); 20541da177e4SLinus Torvalds if (!pud) 20551da177e4SLinus Torvalds return -ENOMEM; 20561da177e4SLinus Torvalds do { 20571da177e4SLinus Torvalds next = pud_addr_end(addr, end); 205842e4089cSAndi Kleen err = remap_pmd_range(mm, pud, addr, next, 205942e4089cSAndi Kleen pfn + (addr >> PAGE_SHIFT), prot); 206042e4089cSAndi Kleen if (err) 206142e4089cSAndi Kleen return err; 20621da177e4SLinus Torvalds } while (pud++, addr = next, addr != end); 20631da177e4SLinus Torvalds return 0; 20641da177e4SLinus Torvalds } 20651da177e4SLinus Torvalds 2066c2febafcSKirill A. Shutemov static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2067c2febafcSKirill A. Shutemov unsigned long addr, unsigned long end, 2068c2febafcSKirill A. Shutemov unsigned long pfn, pgprot_t prot) 2069c2febafcSKirill A. Shutemov { 2070c2febafcSKirill A. Shutemov p4d_t *p4d; 2071c2febafcSKirill A. Shutemov unsigned long next; 207242e4089cSAndi Kleen int err; 2073c2febafcSKirill A. Shutemov 2074c2febafcSKirill A. Shutemov pfn -= addr >> PAGE_SHIFT; 2075c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, addr); 2076c2febafcSKirill A. Shutemov if (!p4d) 2077c2febafcSKirill A. Shutemov return -ENOMEM; 2078c2febafcSKirill A. Shutemov do { 2079c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 208042e4089cSAndi Kleen err = remap_pud_range(mm, p4d, addr, next, 208142e4089cSAndi Kleen pfn + (addr >> PAGE_SHIFT), prot); 208242e4089cSAndi Kleen if (err) 208342e4089cSAndi Kleen return err; 2084c2febafcSKirill A. Shutemov } while (p4d++, addr = next, addr != end); 2085c2febafcSKirill A. Shutemov return 0; 2086c2febafcSKirill A. Shutemov } 2087c2febafcSKirill A. Shutemov 2088bfa5bf6dSRolf Eike Beer /** 2089bfa5bf6dSRolf Eike Beer * remap_pfn_range - remap kernel memory to userspace 2090bfa5bf6dSRolf Eike Beer * @vma: user vma to map to 2091bfa5bf6dSRolf Eike Beer * @addr: target user address to start at 2092bfa5bf6dSRolf Eike Beer * @pfn: physical address of kernel memory 2093bfa5bf6dSRolf Eike Beer * @size: size of map area 2094bfa5bf6dSRolf Eike Beer * @prot: page protection flags for this mapping 2095bfa5bf6dSRolf Eike Beer * 2096bfa5bf6dSRolf Eike Beer * Note: this is only safe if the mm semaphore is held when called. 2097bfa5bf6dSRolf Eike Beer */ 20981da177e4SLinus Torvalds int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 20991da177e4SLinus Torvalds unsigned long pfn, unsigned long size, pgprot_t prot) 21001da177e4SLinus Torvalds { 21011da177e4SLinus Torvalds pgd_t *pgd; 21021da177e4SLinus Torvalds unsigned long next; 21032d15cab8SHugh Dickins unsigned long end = addr + PAGE_ALIGN(size); 21041da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 2105d5957d2fSYongji Xie unsigned long remap_pfn = pfn; 21061da177e4SLinus Torvalds int err; 21071da177e4SLinus Torvalds 21081da177e4SLinus Torvalds /* 21091da177e4SLinus Torvalds * Physically remapped pages are special. Tell the 21101da177e4SLinus Torvalds * rest of the world about it: 21111da177e4SLinus Torvalds * VM_IO tells people not to look at these pages 21121da177e4SLinus Torvalds * (accesses can have side effects). 21136aab341eSLinus Torvalds * VM_PFNMAP tells the core MM that the base pages are just 21146aab341eSLinus Torvalds * raw PFN mappings, and do not have a "struct page" associated 21156aab341eSLinus Torvalds * with them. 2116314e51b9SKonstantin Khlebnikov * VM_DONTEXPAND 2117314e51b9SKonstantin Khlebnikov * Disable vma merging and expanding with mremap(). 
2118314e51b9SKonstantin Khlebnikov * VM_DONTDUMP 2119314e51b9SKonstantin Khlebnikov * Omit vma from core dump, even when VM_IO turned off. 2120fb155c16SLinus Torvalds * 2121fb155c16SLinus Torvalds * There's a horrible special case to handle copy-on-write 2122fb155c16SLinus Torvalds * behaviour that some programs depend on. We mark the "original" 2123fb155c16SLinus Torvalds * un-COW'ed pages by matching them up with "vma->vm_pgoff". 2124b3b9c293SKonstantin Khlebnikov * See vm_normal_page() for details. 21251da177e4SLinus Torvalds */ 2126b3b9c293SKonstantin Khlebnikov if (is_cow_mapping(vma->vm_flags)) { 2127b3b9c293SKonstantin Khlebnikov if (addr != vma->vm_start || end != vma->vm_end) 2128b3b9c293SKonstantin Khlebnikov return -EINVAL; 21296aab341eSLinus Torvalds vma->vm_pgoff = pfn; 2130b3b9c293SKonstantin Khlebnikov } 2131b3b9c293SKonstantin Khlebnikov 2132d5957d2fSYongji Xie err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size)); 2133b3b9c293SKonstantin Khlebnikov if (err) 21343c8bb73aSvenkatesh.pallipadi@intel.com return -EINVAL; 2135fb155c16SLinus Torvalds 2136314e51b9SKonstantin Khlebnikov vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 21371da177e4SLinus Torvalds 21381da177e4SLinus Torvalds BUG_ON(addr >= end); 21391da177e4SLinus Torvalds pfn -= addr >> PAGE_SHIFT; 21401da177e4SLinus Torvalds pgd = pgd_offset(mm, addr); 21411da177e4SLinus Torvalds flush_cache_range(vma, addr, end); 21421da177e4SLinus Torvalds do { 21431da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 2144c2febafcSKirill A. Shutemov err = remap_p4d_range(mm, pgd, addr, next, 21451da177e4SLinus Torvalds pfn + (addr >> PAGE_SHIFT), prot); 21461da177e4SLinus Torvalds if (err) 21471da177e4SLinus Torvalds break; 21481da177e4SLinus Torvalds } while (pgd++, addr = next, addr != end); 21492ab64037Svenkatesh.pallipadi@intel.com 21502ab64037Svenkatesh.pallipadi@intel.com if (err) 2151d5957d2fSYongji Xie untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size)); 21522ab64037Svenkatesh.pallipadi@intel.com 21531da177e4SLinus Torvalds return err; 21541da177e4SLinus Torvalds } 21551da177e4SLinus Torvalds EXPORT_SYMBOL(remap_pfn_range); 21561da177e4SLinus Torvalds 2157b4cbb197SLinus Torvalds /** 2158b4cbb197SLinus Torvalds * vm_iomap_memory - remap memory to userspace 2159b4cbb197SLinus Torvalds * @vma: user vma to map to 2160b4cbb197SLinus Torvalds * @start: start of area 2161b4cbb197SLinus Torvalds * @len: size of area 2162b4cbb197SLinus Torvalds * 2163b4cbb197SLinus Torvalds * This is a simplified io_remap_pfn_range() for common driver use. The 2164b4cbb197SLinus Torvalds * driver just needs to give us the physical memory range to be mapped, 2165b4cbb197SLinus Torvalds * we'll figure out the rest from the vma information. 2166b4cbb197SLinus Torvalds * 2167b4cbb197SLinus Torvalds * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get 2168b4cbb197SLinus Torvalds * whatever write-combining details or similar. 
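 *
 * For example, a framebuffer-style driver's ->mmap() handler might do
 * (illustrative sketch; "demo_phys_base" and "demo_phys_len" stand in for
 * the device aperture):
 *
 *	static int demo_io_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return vm_iomap_memory(vma, demo_phys_base, demo_phys_len);
 *	}
 *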
2169b4cbb197SLinus Torvalds */ 2170b4cbb197SLinus Torvalds int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) 2171b4cbb197SLinus Torvalds { 2172b4cbb197SLinus Torvalds unsigned long vm_len, pfn, pages; 2173b4cbb197SLinus Torvalds 2174b4cbb197SLinus Torvalds /* Check that the physical memory area passed in looks valid */ 2175b4cbb197SLinus Torvalds if (start + len < start) 2176b4cbb197SLinus Torvalds return -EINVAL; 2177b4cbb197SLinus Torvalds /* 2178b4cbb197SLinus Torvalds * You *really* shouldn't map things that aren't page-aligned, 2179b4cbb197SLinus Torvalds * but we've historically allowed it because IO memory might 2180b4cbb197SLinus Torvalds * just have smaller alignment. 2181b4cbb197SLinus Torvalds */ 2182b4cbb197SLinus Torvalds len += start & ~PAGE_MASK; 2183b4cbb197SLinus Torvalds pfn = start >> PAGE_SHIFT; 2184b4cbb197SLinus Torvalds pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; 2185b4cbb197SLinus Torvalds if (pfn + pages < pfn) 2186b4cbb197SLinus Torvalds return -EINVAL; 2187b4cbb197SLinus Torvalds 2188b4cbb197SLinus Torvalds /* We start the mapping 'vm_pgoff' pages into the area */ 2189b4cbb197SLinus Torvalds if (vma->vm_pgoff > pages) 2190b4cbb197SLinus Torvalds return -EINVAL; 2191b4cbb197SLinus Torvalds pfn += vma->vm_pgoff; 2192b4cbb197SLinus Torvalds pages -= vma->vm_pgoff; 2193b4cbb197SLinus Torvalds 2194b4cbb197SLinus Torvalds /* Can we fit all of the mapping? */ 2195b4cbb197SLinus Torvalds vm_len = vma->vm_end - vma->vm_start; 2196b4cbb197SLinus Torvalds if (vm_len >> PAGE_SHIFT > pages) 2197b4cbb197SLinus Torvalds return -EINVAL; 2198b4cbb197SLinus Torvalds 2199b4cbb197SLinus Torvalds /* Ok, let it rip */ 2200b4cbb197SLinus Torvalds return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); 2201b4cbb197SLinus Torvalds } 2202b4cbb197SLinus Torvalds EXPORT_SYMBOL(vm_iomap_memory); 2203b4cbb197SLinus Torvalds 2204aee16b3cSJeremy Fitzhardinge static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 2205aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 2206aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 2207aee16b3cSJeremy Fitzhardinge { 2208aee16b3cSJeremy Fitzhardinge pte_t *pte; 2209aee16b3cSJeremy Fitzhardinge int err; 22102f569afdSMartin Schwidefsky pgtable_t token; 221194909914SBorislav Petkov spinlock_t *uninitialized_var(ptl); 2212aee16b3cSJeremy Fitzhardinge 2213aee16b3cSJeremy Fitzhardinge pte = (mm == &init_mm) ? 
2214aee16b3cSJeremy Fitzhardinge pte_alloc_kernel(pmd, addr) : 2215aee16b3cSJeremy Fitzhardinge pte_alloc_map_lock(mm, pmd, addr, &ptl); 2216aee16b3cSJeremy Fitzhardinge if (!pte) 2217aee16b3cSJeremy Fitzhardinge return -ENOMEM; 2218aee16b3cSJeremy Fitzhardinge 2219aee16b3cSJeremy Fitzhardinge BUG_ON(pmd_huge(*pmd)); 2220aee16b3cSJeremy Fitzhardinge 222138e0edb1SJeremy Fitzhardinge arch_enter_lazy_mmu_mode(); 222238e0edb1SJeremy Fitzhardinge 22232f569afdSMartin Schwidefsky token = pmd_pgtable(*pmd); 2224aee16b3cSJeremy Fitzhardinge 2225aee16b3cSJeremy Fitzhardinge do { 2226c36987e2SDaisuke Nishimura err = fn(pte++, token, addr, data); 2227aee16b3cSJeremy Fitzhardinge if (err) 2228aee16b3cSJeremy Fitzhardinge break; 2229c36987e2SDaisuke Nishimura } while (addr += PAGE_SIZE, addr != end); 2230aee16b3cSJeremy Fitzhardinge 223138e0edb1SJeremy Fitzhardinge arch_leave_lazy_mmu_mode(); 223238e0edb1SJeremy Fitzhardinge 2233aee16b3cSJeremy Fitzhardinge if (mm != &init_mm) 2234aee16b3cSJeremy Fitzhardinge pte_unmap_unlock(pte-1, ptl); 2235aee16b3cSJeremy Fitzhardinge return err; 2236aee16b3cSJeremy Fitzhardinge } 2237aee16b3cSJeremy Fitzhardinge 2238aee16b3cSJeremy Fitzhardinge static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 2239aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 2240aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 2241aee16b3cSJeremy Fitzhardinge { 2242aee16b3cSJeremy Fitzhardinge pmd_t *pmd; 2243aee16b3cSJeremy Fitzhardinge unsigned long next; 2244aee16b3cSJeremy Fitzhardinge int err; 2245aee16b3cSJeremy Fitzhardinge 2246ceb86879SAndi Kleen BUG_ON(pud_huge(*pud)); 2247ceb86879SAndi Kleen 2248aee16b3cSJeremy Fitzhardinge pmd = pmd_alloc(mm, pud, addr); 2249aee16b3cSJeremy Fitzhardinge if (!pmd) 2250aee16b3cSJeremy Fitzhardinge return -ENOMEM; 2251aee16b3cSJeremy Fitzhardinge do { 2252aee16b3cSJeremy Fitzhardinge next = pmd_addr_end(addr, end); 2253aee16b3cSJeremy Fitzhardinge err = apply_to_pte_range(mm, pmd, addr, next, fn, data); 2254aee16b3cSJeremy Fitzhardinge if (err) 2255aee16b3cSJeremy Fitzhardinge break; 2256aee16b3cSJeremy Fitzhardinge } while (pmd++, addr = next, addr != end); 2257aee16b3cSJeremy Fitzhardinge return err; 2258aee16b3cSJeremy Fitzhardinge } 2259aee16b3cSJeremy Fitzhardinge 2260c2febafcSKirill A. Shutemov static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, 2261aee16b3cSJeremy Fitzhardinge unsigned long addr, unsigned long end, 2262aee16b3cSJeremy Fitzhardinge pte_fn_t fn, void *data) 2263aee16b3cSJeremy Fitzhardinge { 2264aee16b3cSJeremy Fitzhardinge pud_t *pud; 2265aee16b3cSJeremy Fitzhardinge unsigned long next; 2266aee16b3cSJeremy Fitzhardinge int err; 2267aee16b3cSJeremy Fitzhardinge 2268c2febafcSKirill A. Shutemov pud = pud_alloc(mm, p4d, addr); 2269aee16b3cSJeremy Fitzhardinge if (!pud) 2270aee16b3cSJeremy Fitzhardinge return -ENOMEM; 2271aee16b3cSJeremy Fitzhardinge do { 2272aee16b3cSJeremy Fitzhardinge next = pud_addr_end(addr, end); 2273aee16b3cSJeremy Fitzhardinge err = apply_to_pmd_range(mm, pud, addr, next, fn, data); 2274aee16b3cSJeremy Fitzhardinge if (err) 2275aee16b3cSJeremy Fitzhardinge break; 2276aee16b3cSJeremy Fitzhardinge } while (pud++, addr = next, addr != end); 2277aee16b3cSJeremy Fitzhardinge return err; 2278aee16b3cSJeremy Fitzhardinge } 2279aee16b3cSJeremy Fitzhardinge 2280c2febafcSKirill A. Shutemov static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2281c2febafcSKirill A. Shutemov unsigned long addr, unsigned long end, 2282c2febafcSKirill A. 
Shutemov pte_fn_t fn, void *data) 2283c2febafcSKirill A. Shutemov { 2284c2febafcSKirill A. Shutemov p4d_t *p4d; 2285c2febafcSKirill A. Shutemov unsigned long next; 2286c2febafcSKirill A. Shutemov int err; 2287c2febafcSKirill A. Shutemov 2288c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, addr); 2289c2febafcSKirill A. Shutemov if (!p4d) 2290c2febafcSKirill A. Shutemov return -ENOMEM; 2291c2febafcSKirill A. Shutemov do { 2292c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 2293c2febafcSKirill A. Shutemov err = apply_to_pud_range(mm, p4d, addr, next, fn, data); 2294c2febafcSKirill A. Shutemov if (err) 2295c2febafcSKirill A. Shutemov break; 2296c2febafcSKirill A. Shutemov } while (p4d++, addr = next, addr != end); 2297c2febafcSKirill A. Shutemov return err; 2298c2febafcSKirill A. Shutemov } 2299c2febafcSKirill A. Shutemov 2300aee16b3cSJeremy Fitzhardinge /* 2301aee16b3cSJeremy Fitzhardinge * Scan a region of virtual memory, filling in page tables as necessary 2302aee16b3cSJeremy Fitzhardinge * and calling a provided function on each leaf page table. 2303aee16b3cSJeremy Fitzhardinge */ 2304aee16b3cSJeremy Fitzhardinge int apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2305aee16b3cSJeremy Fitzhardinge unsigned long size, pte_fn_t fn, void *data) 2306aee16b3cSJeremy Fitzhardinge { 2307aee16b3cSJeremy Fitzhardinge pgd_t *pgd; 2308aee16b3cSJeremy Fitzhardinge unsigned long next; 230957250a5bSJeremy Fitzhardinge unsigned long end = addr + size; 2310aee16b3cSJeremy Fitzhardinge int err; 2311aee16b3cSJeremy Fitzhardinge 23129cb65bc3SMika Penttilä if (WARN_ON(addr >= end)) 23139cb65bc3SMika Penttilä return -EINVAL; 23149cb65bc3SMika Penttilä 2315aee16b3cSJeremy Fitzhardinge pgd = pgd_offset(mm, addr); 2316aee16b3cSJeremy Fitzhardinge do { 2317aee16b3cSJeremy Fitzhardinge next = pgd_addr_end(addr, end); 2318c2febafcSKirill A. Shutemov err = apply_to_p4d_range(mm, pgd, addr, next, fn, data); 2319aee16b3cSJeremy Fitzhardinge if (err) 2320aee16b3cSJeremy Fitzhardinge break; 2321aee16b3cSJeremy Fitzhardinge } while (pgd++, addr = next, addr != end); 232257250a5bSJeremy Fitzhardinge 2323aee16b3cSJeremy Fitzhardinge return err; 2324aee16b3cSJeremy Fitzhardinge } 2325aee16b3cSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(apply_to_page_range); 2326aee16b3cSJeremy Fitzhardinge 23271da177e4SLinus Torvalds /* 23289b4bdd2fSKirill A. Shutemov * handle_pte_fault chooses page fault handler according to an entry which was 23299b4bdd2fSKirill A. Shutemov * read non-atomically. Before making any commitment, on those architectures 23309b4bdd2fSKirill A. Shutemov * or configurations (e.g. i386 with PAE) which might give a mix of unmatched 23319b4bdd2fSKirill A. Shutemov * parts, do_swap_page must check under lock before unmapping the pte and 23329b4bdd2fSKirill A. Shutemov * proceeding (but do_wp_page is only called after already making such a check; 2333a335b2e1SRyota Ozaki * and do_anonymous_page can safely check later on). 
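 *
 * (The classic case is i386 with PAE: there a pte is 64 bits wide but may
 * be read as two 32-bit halves, so a concurrent update can transiently
 * expose a mix of old and new words.)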
23348f4e2101SHugh Dickins */ 23354c21e2f2SHugh Dickins static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, 23368f4e2101SHugh Dickins pte_t *page_table, pte_t orig_pte) 23378f4e2101SHugh Dickins { 23388f4e2101SHugh Dickins int same = 1; 23398f4e2101SHugh Dickins #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) 23408f4e2101SHugh Dickins if (sizeof(pte_t) > sizeof(unsigned long)) { 23414c21e2f2SHugh Dickins spinlock_t *ptl = pte_lockptr(mm, pmd); 23424c21e2f2SHugh Dickins spin_lock(ptl); 23438f4e2101SHugh Dickins same = pte_same(*page_table, orig_pte); 23444c21e2f2SHugh Dickins spin_unlock(ptl); 23458f4e2101SHugh Dickins } 23468f4e2101SHugh Dickins #endif 23478f4e2101SHugh Dickins pte_unmap(page_table); 23488f4e2101SHugh Dickins return same; 23498f4e2101SHugh Dickins } 23508f4e2101SHugh Dickins 23519de455b2SAtsushi Nemoto static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) 23526aab341eSLinus Torvalds { 23530abdd7a8SDan Williams debug_dma_assert_idle(src); 23540abdd7a8SDan Williams 23556aab341eSLinus Torvalds /* 23566aab341eSLinus Torvalds * If the source page was a PFN mapping, we don't have 23576aab341eSLinus Torvalds * a "struct page" for it. We do a best-effort copy by 23586aab341eSLinus Torvalds * just copying from the original user address. If that 23596aab341eSLinus Torvalds * fails, we just zero-fill it. Live with it. 23606aab341eSLinus Torvalds */ 23616aab341eSLinus Torvalds if (unlikely(!src)) { 23629b04c5feSCong Wang void *kaddr = kmap_atomic(dst); 23635d2a2dbbSLinus Torvalds void __user *uaddr = (void __user *)(va & PAGE_MASK); 23645d2a2dbbSLinus Torvalds 23655d2a2dbbSLinus Torvalds /* 23665d2a2dbbSLinus Torvalds * This really shouldn't fail, because the page is there 23675d2a2dbbSLinus Torvalds * in the page tables. But it might just be unreadable, 23685d2a2dbbSLinus Torvalds * in which case we just give up and fill the result with 23695d2a2dbbSLinus Torvalds * zeroes. 23705d2a2dbbSLinus Torvalds */ 23715d2a2dbbSLinus Torvalds if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) 23723ecb01dfSJan Beulich clear_page(kaddr); 23739b04c5feSCong Wang kunmap_atomic(kaddr); 2374c4ec7b0dSDmitriy Monakhov flush_dcache_page(dst); 23750ed361deSNick Piggin } else 23769de455b2SAtsushi Nemoto copy_user_highpage(dst, src, va, vma); 23776aab341eSLinus Torvalds } 23786aab341eSLinus Torvalds 2379c20cd45eSMichal Hocko static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) 2380c20cd45eSMichal Hocko { 2381c20cd45eSMichal Hocko struct file *vm_file = vma->vm_file; 2382c20cd45eSMichal Hocko 2383c20cd45eSMichal Hocko if (vm_file) 2384c20cd45eSMichal Hocko return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; 2385c20cd45eSMichal Hocko 2386c20cd45eSMichal Hocko /* 2387c20cd45eSMichal Hocko * Special mappings (e.g. VDSO) do not have any file so fake 2388c20cd45eSMichal Hocko * a default GFP_KERNEL for them. 2389c20cd45eSMichal Hocko */ 2390c20cd45eSMichal Hocko return GFP_KERNEL; 2391c20cd45eSMichal Hocko } 2392c20cd45eSMichal Hocko 23931da177e4SLinus Torvalds /* 2394fb09a464SKirill A. Shutemov * Notify the address space that the page is about to become writable so that 2395fb09a464SKirill A. Shutemov * it can prohibit this or wait for the page to get into an appropriate state. 2396fb09a464SKirill A. Shutemov * 2397fb09a464SKirill A. Shutemov * We do this without the lock held, so that it can sleep if it needs to. 2398fb09a464SKirill A. 
Shutemov */ 239938b8cb7fSJan Kara static int do_page_mkwrite(struct vm_fault *vmf) 2400fb09a464SKirill A. Shutemov { 2401fb09a464SKirill A. Shutemov int ret; 240238b8cb7fSJan Kara struct page *page = vmf->page; 240338b8cb7fSJan Kara unsigned int old_flags = vmf->flags; 2404fb09a464SKirill A. Shutemov 240538b8cb7fSJan Kara vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 2406fb09a464SKirill A. Shutemov 240711bac800SDave Jiang ret = vmf->vma->vm_ops->page_mkwrite(vmf); 240838b8cb7fSJan Kara /* Restore original flags so that caller is not surprised */ 240938b8cb7fSJan Kara vmf->flags = old_flags; 2410fb09a464SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 2411fb09a464SKirill A. Shutemov return ret; 2412fb09a464SKirill A. Shutemov if (unlikely(!(ret & VM_FAULT_LOCKED))) { 2413fb09a464SKirill A. Shutemov lock_page(page); 2414fb09a464SKirill A. Shutemov if (!page->mapping) { 2415fb09a464SKirill A. Shutemov unlock_page(page); 2416fb09a464SKirill A. Shutemov return 0; /* retry */ 2417fb09a464SKirill A. Shutemov } 2418fb09a464SKirill A. Shutemov ret |= VM_FAULT_LOCKED; 2419fb09a464SKirill A. Shutemov } else 2420fb09a464SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 2421fb09a464SKirill A. Shutemov return ret; 2422fb09a464SKirill A. Shutemov } 2423fb09a464SKirill A. Shutemov 2424fb09a464SKirill A. Shutemov /* 242597ba0c2bSJan Kara * Handle dirtying of a page in shared file mapping on a write fault. 24264e047f89SShachar Raindel * 242797ba0c2bSJan Kara * The function expects the page to be locked and unlocks it. 24284e047f89SShachar Raindel */ 242997ba0c2bSJan Kara static void fault_dirty_shared_page(struct vm_area_struct *vma, 243097ba0c2bSJan Kara struct page *page) 24314e047f89SShachar Raindel { 24324e047f89SShachar Raindel struct address_space *mapping; 243397ba0c2bSJan Kara bool dirtied; 243497ba0c2bSJan Kara bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; 24354e047f89SShachar Raindel 24364e047f89SShachar Raindel dirtied = set_page_dirty(page); 24374e047f89SShachar Raindel VM_BUG_ON_PAGE(PageAnon(page), page); 243897ba0c2bSJan Kara /* 243997ba0c2bSJan Kara * Take a local copy of the address_space - page.mapping may be zeroed 244097ba0c2bSJan Kara * by truncate after unlock_page(). The address_space itself remains 244197ba0c2bSJan Kara * pinned by vma->vm_file's reference. We rely on unlock_page()'s 244297ba0c2bSJan Kara * release semantics to prevent the compiler from undoing this copying. 
244397ba0c2bSJan Kara */ 244497ba0c2bSJan Kara mapping = page_rmapping(page); 24454e047f89SShachar Raindel unlock_page(page); 24464e047f89SShachar Raindel 24474e047f89SShachar Raindel if ((dirtied || page_mkwrite) && mapping) { 24484e047f89SShachar Raindel /* 24494e047f89SShachar Raindel * Some device drivers do not set page.mapping 24504e047f89SShachar Raindel * but still dirty their pages 24514e047f89SShachar Raindel */ 24524e047f89SShachar Raindel balance_dirty_pages_ratelimited(mapping); 24534e047f89SShachar Raindel } 24544e047f89SShachar Raindel 24554e047f89SShachar Raindel if (!page_mkwrite) 24564e047f89SShachar Raindel file_update_time(vma->vm_file); 24574e047f89SShachar Raindel } 24584e047f89SShachar Raindel 245997ba0c2bSJan Kara /* 24604e047f89SShachar Raindel * Handle write page faults for pages that can be reused in the current vma 24614e047f89SShachar Raindel * 24624e047f89SShachar Raindel * This can happen either due to the mapping being with the VM_SHARED flag, 24634e047f89SShachar Raindel * or due to us being the last reference standing to the page. In either 24644e047f89SShachar Raindel * case, all we need to do here is to mark the page as writable and update 24654e047f89SShachar Raindel * any related book-keeping. 24664e047f89SShachar Raindel */ 2467997dd98dSJan Kara static inline void wp_page_reuse(struct vm_fault *vmf) 246882b0f8c3SJan Kara __releases(vmf->ptl) 24694e047f89SShachar Raindel { 247082b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 2471a41b70d6SJan Kara struct page *page = vmf->page; 24724e047f89SShachar Raindel pte_t entry; 24734e047f89SShachar Raindel /* 24744e047f89SShachar Raindel * Clear the pages cpupid information as the existing 24754e047f89SShachar Raindel * information potentially belongs to a now completely 24764e047f89SShachar Raindel * unrelated process. 24774e047f89SShachar Raindel */ 24784e047f89SShachar Raindel if (page) 24794e047f89SShachar Raindel page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); 24804e047f89SShachar Raindel 24812994302bSJan Kara flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 24822994302bSJan Kara entry = pte_mkyoung(vmf->orig_pte); 24834e047f89SShachar Raindel entry = maybe_mkwrite(pte_mkdirty(entry), vma); 248482b0f8c3SJan Kara if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) 248582b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 248682b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 24874e047f89SShachar Raindel } 24884e047f89SShachar Raindel 24894e047f89SShachar Raindel /* 24902f38ab2cSShachar Raindel * Handle the case of a page which we actually need to copy to a new page. 24912f38ab2cSShachar Raindel * 24922f38ab2cSShachar Raindel * Called with mmap_sem locked and the old page referenced, but 24932f38ab2cSShachar Raindel * without the ptl held. 24942f38ab2cSShachar Raindel * 24952f38ab2cSShachar Raindel * High level logic flow: 24962f38ab2cSShachar Raindel * 24972f38ab2cSShachar Raindel * - Allocate a page, copy the content of the old page to the new one. 24982f38ab2cSShachar Raindel * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. 24992f38ab2cSShachar Raindel * - Take the PTL. If the pte changed, bail out and release the allocated page 25002f38ab2cSShachar Raindel * - If the pte is still the way we remember it, update the page table and all 25012f38ab2cSShachar Raindel * relevant references. This includes dropping the reference the page-table 25022f38ab2cSShachar Raindel * held to the old page, as well as updating the rmap. 
25032f38ab2cSShachar Raindel * - In any case, unlock the PTL and drop the reference we took to the old page. 25042f38ab2cSShachar Raindel */ 2505a41b70d6SJan Kara static int wp_page_copy(struct vm_fault *vmf) 25062f38ab2cSShachar Raindel { 250782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 2508bae473a4SKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2509a41b70d6SJan Kara struct page *old_page = vmf->page; 25102f38ab2cSShachar Raindel struct page *new_page = NULL; 25112f38ab2cSShachar Raindel pte_t entry; 25122f38ab2cSShachar Raindel int page_copied = 0; 251382b0f8c3SJan Kara const unsigned long mmun_start = vmf->address & PAGE_MASK; 2514bae473a4SKirill A. Shutemov const unsigned long mmun_end = mmun_start + PAGE_SIZE; 25152f38ab2cSShachar Raindel struct mem_cgroup *memcg; 25162f38ab2cSShachar Raindel 25172f38ab2cSShachar Raindel if (unlikely(anon_vma_prepare(vma))) 25182f38ab2cSShachar Raindel goto oom; 25192f38ab2cSShachar Raindel 25202994302bSJan Kara if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { 252182b0f8c3SJan Kara new_page = alloc_zeroed_user_highpage_movable(vma, 252282b0f8c3SJan Kara vmf->address); 25232f38ab2cSShachar Raindel if (!new_page) 25242f38ab2cSShachar Raindel goto oom; 25252f38ab2cSShachar Raindel } else { 2526bae473a4SKirill A. Shutemov new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, 252782b0f8c3SJan Kara vmf->address); 25282f38ab2cSShachar Raindel if (!new_page) 25292f38ab2cSShachar Raindel goto oom; 253082b0f8c3SJan Kara cow_user_page(new_page, old_page, vmf->address, vma); 25312f38ab2cSShachar Raindel } 25322f38ab2cSShachar Raindel 25332cf85583STejun Heo if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false)) 25342f38ab2cSShachar Raindel goto oom_free_new; 25352f38ab2cSShachar Raindel 2536eb3c24f3SMel Gorman __SetPageUptodate(new_page); 2537eb3c24f3SMel Gorman 25382f38ab2cSShachar Raindel mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 25392f38ab2cSShachar Raindel 25402f38ab2cSShachar Raindel /* 25412f38ab2cSShachar Raindel * Re-check the pte - we dropped the lock 25422f38ab2cSShachar Raindel */ 254382b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); 25442994302bSJan Kara if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { 25452f38ab2cSShachar Raindel if (old_page) { 25462f38ab2cSShachar Raindel if (!PageAnon(old_page)) { 2547eca56ff9SJerome Marchand dec_mm_counter_fast(mm, 2548eca56ff9SJerome Marchand mm_counter_file(old_page)); 25492f38ab2cSShachar Raindel inc_mm_counter_fast(mm, MM_ANONPAGES); 25502f38ab2cSShachar Raindel } 25512f38ab2cSShachar Raindel } else { 25522f38ab2cSShachar Raindel inc_mm_counter_fast(mm, MM_ANONPAGES); 25532f38ab2cSShachar Raindel } 25542994302bSJan Kara flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 25552f38ab2cSShachar Raindel entry = mk_pte(new_page, vma->vm_page_prot); 25562f38ab2cSShachar Raindel entry = maybe_mkwrite(pte_mkdirty(entry), vma); 25572f38ab2cSShachar Raindel /* 25582f38ab2cSShachar Raindel * Clear the pte entry and flush it first, before updating the 25592f38ab2cSShachar Raindel * pte with the new entry. This will avoid a race condition 25602f38ab2cSShachar Raindel * seen in the presence of one thread doing SMC and another 25612f38ab2cSShachar Raindel * thread doing COW. 25622f38ab2cSShachar Raindel */ 256382b0f8c3SJan Kara ptep_clear_flush_notify(vma, vmf->address, vmf->pte); 256482b0f8c3SJan Kara page_add_new_anon_rmap(new_page, vma, vmf->address, false); 2565f627c2f5SKirill A. 
Shutemov mem_cgroup_commit_charge(new_page, memcg, false, false); 25662f38ab2cSShachar Raindel lru_cache_add_active_or_unevictable(new_page, vma); 25672f38ab2cSShachar Raindel /* 25682f38ab2cSShachar Raindel * We call the notify macro here because, when using secondary 25692f38ab2cSShachar Raindel * mmu page tables (such as kvm shadow page tables), we want the 25702f38ab2cSShachar Raindel * new page to be mapped directly into the secondary page table. 25712f38ab2cSShachar Raindel */ 257282b0f8c3SJan Kara set_pte_at_notify(mm, vmf->address, vmf->pte, entry); 257382b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 25742f38ab2cSShachar Raindel if (old_page) { 25752f38ab2cSShachar Raindel /* 25762f38ab2cSShachar Raindel * Only after switching the pte to the new page may 25772f38ab2cSShachar Raindel * we remove the mapcount here. Otherwise another 25782f38ab2cSShachar Raindel * process may come and find the rmap count decremented 25792f38ab2cSShachar Raindel * before the pte is switched to the new page, and 25802f38ab2cSShachar Raindel * "reuse" the old page writing into it while our pte 25812f38ab2cSShachar Raindel * here still points into it and can be read by other 25822f38ab2cSShachar Raindel * threads. 25832f38ab2cSShachar Raindel * 25842f38ab2cSShachar Raindel * The critical issue is to order this 25852f38ab2cSShachar Raindel * page_remove_rmap with the ptp_clear_flush above. 25862f38ab2cSShachar Raindel * Those stores are ordered by (if nothing else,) 25872f38ab2cSShachar Raindel * the barrier present in the atomic_add_negative 25882f38ab2cSShachar Raindel * in page_remove_rmap. 25892f38ab2cSShachar Raindel * 25902f38ab2cSShachar Raindel * Then the TLB flush in ptep_clear_flush ensures that 25912f38ab2cSShachar Raindel * no process can access the old page before the 25922f38ab2cSShachar Raindel * decremented mapcount is visible. And the old page 25932f38ab2cSShachar Raindel * cannot be reused until after the decremented 25942f38ab2cSShachar Raindel * mapcount is visible. So transitively, TLBs to 25952f38ab2cSShachar Raindel * old page will be flushed before it can be reused. 25962f38ab2cSShachar Raindel */ 2597d281ee61SKirill A. Shutemov page_remove_rmap(old_page, false); 25982f38ab2cSShachar Raindel } 25992f38ab2cSShachar Raindel 26002f38ab2cSShachar Raindel /* Free the old page.. */ 26012f38ab2cSShachar Raindel new_page = old_page; 26022f38ab2cSShachar Raindel page_copied = 1; 26032f38ab2cSShachar Raindel } else { 2604f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(new_page, memcg, false); 26052f38ab2cSShachar Raindel } 26062f38ab2cSShachar Raindel 26072f38ab2cSShachar Raindel if (new_page) 260809cbfeafSKirill A. Shutemov put_page(new_page); 26092f38ab2cSShachar Raindel 261082b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 26114645b9feSJérôme Glisse /* 26124645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 26134645b9feSJérôme Glisse * the above ptep_clear_flush_notify() did already call it. 26144645b9feSJérôme Glisse */ 26154645b9feSJérôme Glisse mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end); 26162f38ab2cSShachar Raindel if (old_page) { 26172f38ab2cSShachar Raindel /* 26182f38ab2cSShachar Raindel * Don't let another task, with possibly unlocked vma, 26192f38ab2cSShachar Raindel * keep the mlocked page. 
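 *
 * (If we did copy, munlock the old page: it is no longer mapped by this
 * VM_LOCKED vma, and leaving it PageMlocked would keep it unreclaimable.)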
26202f38ab2cSShachar Raindel */ 26212f38ab2cSShachar Raindel if (page_copied && (vma->vm_flags & VM_LOCKED)) { 26222f38ab2cSShachar Raindel lock_page(old_page); /* LRU manipulation */ 2623e90309c9SKirill A. Shutemov if (PageMlocked(old_page)) 26242f38ab2cSShachar Raindel munlock_vma_page(old_page); 26252f38ab2cSShachar Raindel unlock_page(old_page); 26262f38ab2cSShachar Raindel } 262709cbfeafSKirill A. Shutemov put_page(old_page); 26282f38ab2cSShachar Raindel } 26292f38ab2cSShachar Raindel return page_copied ? VM_FAULT_WRITE : 0; 26302f38ab2cSShachar Raindel oom_free_new: 263109cbfeafSKirill A. Shutemov put_page(new_page); 26322f38ab2cSShachar Raindel oom: 26332f38ab2cSShachar Raindel if (old_page) 263409cbfeafSKirill A. Shutemov put_page(old_page); 26352f38ab2cSShachar Raindel return VM_FAULT_OOM; 26362f38ab2cSShachar Raindel } 26372f38ab2cSShachar Raindel 263866a6197cSJan Kara /** 263966a6197cSJan Kara * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE 264066a6197cSJan Kara * writeable once the page is prepared 264166a6197cSJan Kara * 264266a6197cSJan Kara * @vmf: structure describing the fault 264366a6197cSJan Kara * 264466a6197cSJan Kara * This function handles all that is needed to finish a write page fault in a 264566a6197cSJan Kara * shared mapping due to PTE being read-only once the mapped page is prepared. 264666a6197cSJan Kara * It handles locking of PTE and modifying it. The function returns 264766a6197cSJan Kara * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE 264866a6197cSJan Kara * lock. 264966a6197cSJan Kara * 265066a6197cSJan Kara * The function expects the page to be locked or other protection against 265166a6197cSJan Kara * concurrent faults / writeback (such as DAX radix tree locks). 265266a6197cSJan Kara */ 265366a6197cSJan Kara int finish_mkwrite_fault(struct vm_fault *vmf) 265466a6197cSJan Kara { 265566a6197cSJan Kara WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); 265666a6197cSJan Kara vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, 265766a6197cSJan Kara &vmf->ptl); 265866a6197cSJan Kara /* 265966a6197cSJan Kara * We might have raced with another page fault while we released the 266066a6197cSJan Kara * pte_offset_map_lock. 266166a6197cSJan Kara */ 266266a6197cSJan Kara if (!pte_same(*vmf->pte, vmf->orig_pte)) { 266366a6197cSJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2664a19e2553SJan Kara return VM_FAULT_NOPAGE; 266566a6197cSJan Kara } 266666a6197cSJan Kara wp_page_reuse(vmf); 2667a19e2553SJan Kara return 0; 266866a6197cSJan Kara } 266966a6197cSJan Kara 2670dd906184SBoaz Harrosh /* 2671dd906184SBoaz Harrosh * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED 2672dd906184SBoaz Harrosh * mapping 2673dd906184SBoaz Harrosh */ 26742994302bSJan Kara static int wp_pfn_shared(struct vm_fault *vmf) 2675dd906184SBoaz Harrosh { 267682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 2677bae473a4SKirill A. 
Shutemov 2678dd906184SBoaz Harrosh if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { 2679dd906184SBoaz Harrosh int ret; 2680dd906184SBoaz Harrosh 268182b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2682fe82221fSJan Kara vmf->flags |= FAULT_FLAG_MKWRITE; 268311bac800SDave Jiang ret = vma->vm_ops->pfn_mkwrite(vmf); 26842f89dc12SJan Kara if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) 2685dd906184SBoaz Harrosh return ret; 268666a6197cSJan Kara return finish_mkwrite_fault(vmf); 2687dd906184SBoaz Harrosh } 2688997dd98dSJan Kara wp_page_reuse(vmf); 2689997dd98dSJan Kara return VM_FAULT_WRITE; 2690dd906184SBoaz Harrosh } 2691dd906184SBoaz Harrosh 2692a41b70d6SJan Kara static int wp_page_shared(struct vm_fault *vmf) 269382b0f8c3SJan Kara __releases(vmf->ptl) 269493e478d4SShachar Raindel { 269582b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 269693e478d4SShachar Raindel 2697a41b70d6SJan Kara get_page(vmf->page); 269893e478d4SShachar Raindel 269993e478d4SShachar Raindel if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 270093e478d4SShachar Raindel int tmp; 270193e478d4SShachar Raindel 270282b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 270338b8cb7fSJan Kara tmp = do_page_mkwrite(vmf); 270493e478d4SShachar Raindel if (unlikely(!tmp || (tmp & 270593e478d4SShachar Raindel (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 2706a41b70d6SJan Kara put_page(vmf->page); 270793e478d4SShachar Raindel return tmp; 270893e478d4SShachar Raindel } 270966a6197cSJan Kara tmp = finish_mkwrite_fault(vmf); 2710a19e2553SJan Kara if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 2711a41b70d6SJan Kara unlock_page(vmf->page); 2712a41b70d6SJan Kara put_page(vmf->page); 271366a6197cSJan Kara return tmp; 271493e478d4SShachar Raindel } 271566a6197cSJan Kara } else { 2716997dd98dSJan Kara wp_page_reuse(vmf); 2717997dd98dSJan Kara lock_page(vmf->page); 271893e478d4SShachar Raindel } 2719997dd98dSJan Kara fault_dirty_shared_page(vma, vmf->page); 2720997dd98dSJan Kara put_page(vmf->page); 272193e478d4SShachar Raindel 2722997dd98dSJan Kara return VM_FAULT_WRITE; 272393e478d4SShachar Raindel } 272493e478d4SShachar Raindel 27252f38ab2cSShachar Raindel /* 27261da177e4SLinus Torvalds * This routine handles present pages, when users try to write 27271da177e4SLinus Torvalds * to a shared page. It is done by copying the page to a new address 27281da177e4SLinus Torvalds * and decrementing the shared-page counter for the old page. 27291da177e4SLinus Torvalds * 27301da177e4SLinus Torvalds * Note that this routine assumes that the protection checks have been 27311da177e4SLinus Torvalds * done by the caller (the low-level page fault routine in most cases). 27321da177e4SLinus Torvalds * Thus we can safely just mark it writable once we've done any necessary 27331da177e4SLinus Torvalds * COW. 27341da177e4SLinus Torvalds * 27351da177e4SLinus Torvalds * We also mark the page dirty at this point even though the page will 27361da177e4SLinus Torvalds * change only once the write actually happens. This avoids a few races, 27371da177e4SLinus Torvalds * and potentially makes it more efficient. 27381da177e4SLinus Torvalds * 27398f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 27408f4e2101SHugh Dickins * but allow concurrent faults), with pte both mapped and locked. 27418f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 
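 *
 * Broadly, three outcomes are possible: the existing page is simply made
 * writable again (wp_page_reuse), the write is handled via the shared or
 * pfn-mapped paths (wp_page_shared / wp_pfn_shared), or a fresh anonymous
 * copy is made (wp_page_copy).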
27421da177e4SLinus Torvalds */ 27432994302bSJan Kara static int do_wp_page(struct vm_fault *vmf) 274482b0f8c3SJan Kara __releases(vmf->ptl) 27451da177e4SLinus Torvalds { 274682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 27471da177e4SLinus Torvalds 2748a41b70d6SJan Kara vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); 2749a41b70d6SJan Kara if (!vmf->page) { 2750251b97f5SPeter Zijlstra /* 275164e45507SPeter Feiner * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a 275264e45507SPeter Feiner * VM_PFNMAP VMA. 2753251b97f5SPeter Zijlstra * 2754251b97f5SPeter Zijlstra * We should not cow pages in a shared writeable mapping. 2755dd906184SBoaz Harrosh * Just mark the pages writable and/or call ops->pfn_mkwrite. 2756251b97f5SPeter Zijlstra */ 2757251b97f5SPeter Zijlstra if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 2758251b97f5SPeter Zijlstra (VM_WRITE|VM_SHARED)) 27592994302bSJan Kara return wp_pfn_shared(vmf); 27602f38ab2cSShachar Raindel 276182b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2762a41b70d6SJan Kara return wp_page_copy(vmf); 2763251b97f5SPeter Zijlstra } 27641da177e4SLinus Torvalds 2765d08b3851SPeter Zijlstra /* 2766ee6a6457SPeter Zijlstra * Take out anonymous pages first, anonymous shared vmas are 2767ee6a6457SPeter Zijlstra * not dirty accountable. 2768d08b3851SPeter Zijlstra */ 2769a41b70d6SJan Kara if (PageAnon(vmf->page) && !PageKsm(vmf->page)) { 2770ba3c4ce6SHuang Ying int total_map_swapcount; 2771a41b70d6SJan Kara if (!trylock_page(vmf->page)) { 2772a41b70d6SJan Kara get_page(vmf->page); 277382b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2774a41b70d6SJan Kara lock_page(vmf->page); 277582b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 277682b0f8c3SJan Kara vmf->address, &vmf->ptl); 27772994302bSJan Kara if (!pte_same(*vmf->pte, vmf->orig_pte)) { 2778a41b70d6SJan Kara unlock_page(vmf->page); 277982b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2780a41b70d6SJan Kara put_page(vmf->page); 278128766805SShachar Raindel return 0; 2782ab967d86SHugh Dickins } 2783a41b70d6SJan Kara put_page(vmf->page); 2784ab967d86SHugh Dickins } 2785ba3c4ce6SHuang Ying if (reuse_swap_page(vmf->page, &total_map_swapcount)) { 2786ba3c4ce6SHuang Ying if (total_map_swapcount == 1) { 2787c44b6743SRik van Riel /* 27886d0a07edSAndrea Arcangeli * The page is all ours. Move it to 27896d0a07edSAndrea Arcangeli * our anon_vma so the rmap code will 27906d0a07edSAndrea Arcangeli * not search our parent or siblings. 27916d0a07edSAndrea Arcangeli * Protected against the rmap code by 27926d0a07edSAndrea Arcangeli * the page lock. 2793c44b6743SRik van Riel */ 2794a41b70d6SJan Kara page_move_anon_rmap(vmf->page, vma); 27956d0a07edSAndrea Arcangeli } 2796a41b70d6SJan Kara unlock_page(vmf->page); 2797997dd98dSJan Kara wp_page_reuse(vmf); 2798997dd98dSJan Kara return VM_FAULT_WRITE; 2799b009c024SMichel Lespinasse } 2800a41b70d6SJan Kara unlock_page(vmf->page); 2801ee6a6457SPeter Zijlstra } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 2802d08b3851SPeter Zijlstra (VM_WRITE|VM_SHARED))) { 2803a41b70d6SJan Kara return wp_page_shared(vmf); 28041da177e4SLinus Torvalds } 28051da177e4SLinus Torvalds 28061da177e4SLinus Torvalds /* 28071da177e4SLinus Torvalds * Ok, we need to copy. Oh, well.. 
28081da177e4SLinus Torvalds */ 2809a41b70d6SJan Kara get_page(vmf->page); 281028766805SShachar Raindel 281182b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 2812a41b70d6SJan Kara return wp_page_copy(vmf); 28131da177e4SLinus Torvalds } 28141da177e4SLinus Torvalds 281597a89413SPeter Zijlstra static void unmap_mapping_range_vma(struct vm_area_struct *vma, 28161da177e4SLinus Torvalds unsigned long start_addr, unsigned long end_addr, 28171da177e4SLinus Torvalds struct zap_details *details) 28181da177e4SLinus Torvalds { 2819f5cc4eefSAl Viro zap_page_range_single(vma, start_addr, end_addr - start_addr, details); 28201da177e4SLinus Torvalds } 28211da177e4SLinus Torvalds 2822f808c13fSDavidlohr Bueso static inline void unmap_mapping_range_tree(struct rb_root_cached *root, 28231da177e4SLinus Torvalds struct zap_details *details) 28241da177e4SLinus Torvalds { 28251da177e4SLinus Torvalds struct vm_area_struct *vma; 28261da177e4SLinus Torvalds pgoff_t vba, vea, zba, zea; 28271da177e4SLinus Torvalds 28286b2dbba8SMichel Lespinasse vma_interval_tree_foreach(vma, root, 28291da177e4SLinus Torvalds details->first_index, details->last_index) { 28301da177e4SLinus Torvalds 28311da177e4SLinus Torvalds vba = vma->vm_pgoff; 2832d6e93217SLibin vea = vba + vma_pages(vma) - 1; 28331da177e4SLinus Torvalds zba = details->first_index; 28341da177e4SLinus Torvalds if (zba < vba) 28351da177e4SLinus Torvalds zba = vba; 28361da177e4SLinus Torvalds zea = details->last_index; 28371da177e4SLinus Torvalds if (zea > vea) 28381da177e4SLinus Torvalds zea = vea; 28391da177e4SLinus Torvalds 284097a89413SPeter Zijlstra unmap_mapping_range_vma(vma, 28411da177e4SLinus Torvalds ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 28421da177e4SLinus Torvalds ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 284397a89413SPeter Zijlstra details); 28441da177e4SLinus Torvalds } 28451da177e4SLinus Torvalds } 28461da177e4SLinus Torvalds 28471da177e4SLinus Torvalds /** 2848977fbdcdSMatthew Wilcox * unmap_mapping_pages() - Unmap pages from processes. 2849977fbdcdSMatthew Wilcox * @mapping: The address space containing pages to be unmapped. 2850977fbdcdSMatthew Wilcox * @start: Index of first page to be unmapped. 2851977fbdcdSMatthew Wilcox * @nr: Number of pages to be unmapped. 0 to unmap to end of file. 2852977fbdcdSMatthew Wilcox * @even_cows: Whether to unmap even private COWed pages. 2853977fbdcdSMatthew Wilcox * 2854977fbdcdSMatthew Wilcox * Unmap the pages in this address space from any userspace process which 2855977fbdcdSMatthew Wilcox * has them mmaped. Generally, you want to remove COWed pages as well when 2856977fbdcdSMatthew Wilcox * a file is being truncated, but not when invalidating pages from the page 2857977fbdcdSMatthew Wilcox * cache. 2858977fbdcdSMatthew Wilcox */ 2859977fbdcdSMatthew Wilcox void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, 2860977fbdcdSMatthew Wilcox pgoff_t nr, bool even_cows) 2861977fbdcdSMatthew Wilcox { 2862977fbdcdSMatthew Wilcox struct zap_details details = { }; 2863977fbdcdSMatthew Wilcox 2864977fbdcdSMatthew Wilcox details.check_mapping = even_cows ? 
NULL : mapping; 2865977fbdcdSMatthew Wilcox details.first_index = start; 2866977fbdcdSMatthew Wilcox details.last_index = start + nr - 1; 2867977fbdcdSMatthew Wilcox if (details.last_index < details.first_index) 2868977fbdcdSMatthew Wilcox details.last_index = ULONG_MAX; 2869977fbdcdSMatthew Wilcox 2870977fbdcdSMatthew Wilcox i_mmap_lock_write(mapping); 2871977fbdcdSMatthew Wilcox if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 2872977fbdcdSMatthew Wilcox unmap_mapping_range_tree(&mapping->i_mmap, &details); 2873977fbdcdSMatthew Wilcox i_mmap_unlock_write(mapping); 2874977fbdcdSMatthew Wilcox } 2875977fbdcdSMatthew Wilcox 2876977fbdcdSMatthew Wilcox /** 28778a5f14a2SKirill A. Shutemov * unmap_mapping_range - unmap the portion of all mmaps in the specified 2878977fbdcdSMatthew Wilcox * address_space corresponding to the specified byte range in the underlying 28798a5f14a2SKirill A. Shutemov * file. 28808a5f14a2SKirill A. Shutemov * 28813d41088fSMartin Waitz * @mapping: the address space containing mmaps to be unmapped. 28821da177e4SLinus Torvalds * @holebegin: byte in first page to unmap, relative to the start of 28831da177e4SLinus Torvalds * the underlying file. This will be rounded down to a PAGE_SIZE 288425d9e2d1Snpiggin@suse.de * boundary. Note that this is different from truncate_pagecache(), which 28851da177e4SLinus Torvalds * must keep the partial page. In contrast, we must get rid of 28861da177e4SLinus Torvalds * partial pages. 28871da177e4SLinus Torvalds * @holelen: size of prospective hole in bytes. This will be rounded 28881da177e4SLinus Torvalds * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 28891da177e4SLinus Torvalds * end of the file. 28901da177e4SLinus Torvalds * @even_cows: 1 when truncating a file, unmap even private COWed pages; 28911da177e4SLinus Torvalds * but 0 when invalidating pagecache, don't throw away private data. 28921da177e4SLinus Torvalds */ 28931da177e4SLinus Torvalds void unmap_mapping_range(struct address_space *mapping, 28941da177e4SLinus Torvalds loff_t const holebegin, loff_t const holelen, int even_cows) 28951da177e4SLinus Torvalds { 28961da177e4SLinus Torvalds pgoff_t hba = holebegin >> PAGE_SHIFT; 28971da177e4SLinus Torvalds pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 28981da177e4SLinus Torvalds 28991da177e4SLinus Torvalds /* Check for overflow. */ 29001da177e4SLinus Torvalds if (sizeof(holelen) > sizeof(hlen)) { 29011da177e4SLinus Torvalds long long holeend = 29021da177e4SLinus Torvalds (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 29031da177e4SLinus Torvalds if (holeend & ~(long long)ULONG_MAX) 29041da177e4SLinus Torvalds hlen = ULONG_MAX - hba + 1; 29051da177e4SLinus Torvalds } 29061da177e4SLinus Torvalds 2907977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, hba, hlen, even_cows); 29081da177e4SLinus Torvalds } 29091da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_mapping_range); 29101da177e4SLinus Torvalds 29111da177e4SLinus Torvalds /* 29128f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 29138f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 29149a95f3cfSPaul Cassella * We return with pte unmapped and unlocked. 29159a95f3cfSPaul Cassella * 29169a95f3cfSPaul Cassella * We return with the mmap_sem locked or unlocked in the same cases 29179a95f3cfSPaul Cassella * as does filemap_fault(). 
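 *
 * In outline: look the page up in the swap cache (or read it in from swap),
 * lock it, re-take the pte lock and confirm the pte still holds the same
 * swap entry, then install a present pte and free the swap slot.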
29181da177e4SLinus Torvalds */ 29192994302bSJan Kara int do_swap_page(struct vm_fault *vmf) 29201da177e4SLinus Torvalds { 292182b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 2922eaf649ebSMinchan Kim struct page *page = NULL, *swapcache; 292300501b53SJohannes Weiner struct mem_cgroup *memcg; 292465500d23SHugh Dickins swp_entry_t entry; 29251da177e4SLinus Torvalds pte_t pte; 2926d065bd81SMichel Lespinasse int locked; 2927ad8c2ee8SRik van Riel int exclusive = 0; 292883c54070SNick Piggin int ret = 0; 29291da177e4SLinus Torvalds 2930eaf649ebSMinchan Kim if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) 29318f4e2101SHugh Dickins goto out; 293265500d23SHugh Dickins 29332994302bSJan Kara entry = pte_to_swp_entry(vmf->orig_pte); 2934d1737fdbSAndi Kleen if (unlikely(non_swap_entry(entry))) { 29350697212aSChristoph Lameter if (is_migration_entry(entry)) { 293682b0f8c3SJan Kara migration_entry_wait(vma->vm_mm, vmf->pmd, 293782b0f8c3SJan Kara vmf->address); 29385042db43SJérôme Glisse } else if (is_device_private_entry(entry)) { 29395042db43SJérôme Glisse /* 29405042db43SJérôme Glisse * For un-addressable device memory we call the pgmap 29415042db43SJérôme Glisse * fault handler callback. The callback must migrate 29425042db43SJérôme Glisse * the page back to some CPU accessible page. 29435042db43SJérôme Glisse */ 29445042db43SJérôme Glisse ret = device_private_entry_fault(vma, vmf->address, entry, 29455042db43SJérôme Glisse vmf->flags, vmf->pmd); 2946d1737fdbSAndi Kleen } else if (is_hwpoison_entry(entry)) { 2947d1737fdbSAndi Kleen ret = VM_FAULT_HWPOISON; 2948d1737fdbSAndi Kleen } else { 29492994302bSJan Kara print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); 2950d99be1a8SHugh Dickins ret = VM_FAULT_SIGBUS; 2951d1737fdbSAndi Kleen } 29520697212aSChristoph Lameter goto out; 29530697212aSChristoph Lameter } 29540bcac06fSMinchan Kim 29550bcac06fSMinchan Kim 29560ff92245SShailabh Nagar delayacct_set_flag(DELAYACCT_PF_SWAPIN); 2957eaf649ebSMinchan Kim page = lookup_swap_cache(entry, vma, vmf->address); 2958f8020772SMinchan Kim swapcache = page; 2959f8020772SMinchan Kim 29601da177e4SLinus Torvalds if (!page) { 29610bcac06fSMinchan Kim struct swap_info_struct *si = swp_swap_info(entry); 29620bcac06fSMinchan Kim 2963aa8d22a1SMinchan Kim if (si->flags & SWP_SYNCHRONOUS_IO && 2964aa8d22a1SMinchan Kim __swap_count(si, entry) == 1) { 29650bcac06fSMinchan Kim /* skip swapcache */ 2966e9e9b7ecSMinchan Kim page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, 2967e9e9b7ecSMinchan Kim vmf->address); 29680bcac06fSMinchan Kim if (page) { 29690bcac06fSMinchan Kim __SetPageLocked(page); 29700bcac06fSMinchan Kim __SetPageSwapBacked(page); 29710bcac06fSMinchan Kim set_page_private(page, entry.val); 29720bcac06fSMinchan Kim lru_cache_add_anon(page); 29730bcac06fSMinchan Kim swap_readpage(page, true); 29740bcac06fSMinchan Kim } 2975aa8d22a1SMinchan Kim } else { 2976e9e9b7ecSMinchan Kim page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, 2977e9e9b7ecSMinchan Kim vmf); 2978aa8d22a1SMinchan Kim swapcache = page; 29790bcac06fSMinchan Kim } 29800bcac06fSMinchan Kim 2981ec560175SHuang Ying if (!page) { 29821da177e4SLinus Torvalds /* 29838f4e2101SHugh Dickins * Back out if somebody else faulted in this pte 29848f4e2101SHugh Dickins * while we released the pte lock. 
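 *
 * (If the pte is unchanged we really are out of memory and report
 * VM_FAULT_OOM; if it changed, another thread already serviced this fault
 * and we can simply unlock and return.)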
29851da177e4SLinus Torvalds */ 298682b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 298782b0f8c3SJan Kara vmf->address, &vmf->ptl); 29882994302bSJan Kara if (likely(pte_same(*vmf->pte, vmf->orig_pte))) 29891da177e4SLinus Torvalds ret = VM_FAULT_OOM; 29900ff92245SShailabh Nagar delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 299165500d23SHugh Dickins goto unlock; 29921da177e4SLinus Torvalds } 29931da177e4SLinus Torvalds 29941da177e4SLinus Torvalds /* Had to read the page from swap area: Major fault */ 29951da177e4SLinus Torvalds ret = VM_FAULT_MAJOR; 2996f8891e5eSChristoph Lameter count_vm_event(PGMAJFAULT); 29972262185cSRoman Gushchin count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 2998d1737fdbSAndi Kleen } else if (PageHWPoison(page)) { 299971f72525SWu Fengguang /* 300071f72525SWu Fengguang * hwpoisoned dirty swapcache pages are kept for killing 300171f72525SWu Fengguang * owner processes (which may be unknown at hwpoison time) 300271f72525SWu Fengguang */ 3003d1737fdbSAndi Kleen ret = VM_FAULT_HWPOISON; 3004d1737fdbSAndi Kleen delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 30054779cb31SAndi Kleen goto out_release; 30061da177e4SLinus Torvalds } 30071da177e4SLinus Torvalds 300882b0f8c3SJan Kara locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); 3009e709ffd6SRik van Riel 301020a1022dSBalbir Singh delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 3011d065bd81SMichel Lespinasse if (!locked) { 3012d065bd81SMichel Lespinasse ret |= VM_FAULT_RETRY; 3013d065bd81SMichel Lespinasse goto out_release; 3014d065bd81SMichel Lespinasse } 30151da177e4SLinus Torvalds 30164969c119SAndrea Arcangeli /* 301731c4a3d3SHugh Dickins * Make sure try_to_free_swap or reuse_swap_page or swapoff did not 301831c4a3d3SHugh Dickins * release the swapcache from under us. The page pin, and pte_same 301931c4a3d3SHugh Dickins * test below, are not enough to exclude that. Even if it is still 302031c4a3d3SHugh Dickins * swapcache, we need to check that the page's swap has not changed. 30214969c119SAndrea Arcangeli */ 30220bcac06fSMinchan Kim if (unlikely((!PageSwapCache(page) || 30230bcac06fSMinchan Kim page_private(page) != entry.val)) && swapcache) 30244969c119SAndrea Arcangeli goto out_page; 30254969c119SAndrea Arcangeli 302682b0f8c3SJan Kara page = ksm_might_need_to_copy(page, vma, vmf->address); 30274969c119SAndrea Arcangeli if (unlikely(!page)) { 30285ad64688SHugh Dickins ret = VM_FAULT_OOM; 30294969c119SAndrea Arcangeli page = swapcache; 30304969c119SAndrea Arcangeli goto out_page; 30314969c119SAndrea Arcangeli } 30325ad64688SHugh Dickins 30332cf85583STejun Heo if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, 3034bae473a4SKirill A. Shutemov &memcg, false)) { 3035073e587eSKAMEZAWA Hiroyuki ret = VM_FAULT_OOM; 3036bc43f75cSJohannes Weiner goto out_page; 3037073e587eSKAMEZAWA Hiroyuki } 3038073e587eSKAMEZAWA Hiroyuki 30391da177e4SLinus Torvalds /* 30408f4e2101SHugh Dickins * Back out if somebody else already faulted in this pte. 
30411da177e4SLinus Torvalds */ 304282b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 304382b0f8c3SJan Kara &vmf->ptl); 30442994302bSJan Kara if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) 3045b8107480SKirill Korotaev goto out_nomap; 3046b8107480SKirill Korotaev 3047b8107480SKirill Korotaev if (unlikely(!PageUptodate(page))) { 3048b8107480SKirill Korotaev ret = VM_FAULT_SIGBUS; 3049b8107480SKirill Korotaev goto out_nomap; 30501da177e4SLinus Torvalds } 30511da177e4SLinus Torvalds 30528c7c6e34SKAMEZAWA Hiroyuki /* 30538c7c6e34SKAMEZAWA Hiroyuki * The page isn't present yet, go ahead with the fault. 30548c7c6e34SKAMEZAWA Hiroyuki * 30558c7c6e34SKAMEZAWA Hiroyuki * Be careful about the sequence of operations here. 30568c7c6e34SKAMEZAWA Hiroyuki * To get its accounting right, reuse_swap_page() must be called 30578c7c6e34SKAMEZAWA Hiroyuki * while the page is counted on swap but not yet in mapcount i.e. 30588c7c6e34SKAMEZAWA Hiroyuki * before page_add_anon_rmap() and swap_free(); try_to_free_swap() 30598c7c6e34SKAMEZAWA Hiroyuki * must be called after the swap_free(), or it will never succeed. 30608c7c6e34SKAMEZAWA Hiroyuki */ 30611da177e4SLinus Torvalds 3062bae473a4SKirill A. Shutemov inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 3063bae473a4SKirill A. Shutemov dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); 30641da177e4SLinus Torvalds pte = mk_pte(page, vma->vm_page_prot); 306582b0f8c3SJan Kara if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { 30661da177e4SLinus Torvalds pte = maybe_mkwrite(pte_mkdirty(pte), vma); 306782b0f8c3SJan Kara vmf->flags &= ~FAULT_FLAG_WRITE; 30689a5b489bSAndrea Arcangeli ret |= VM_FAULT_WRITE; 3069d281ee61SKirill A. Shutemov exclusive = RMAP_EXCLUSIVE; 30701da177e4SLinus Torvalds } 30711da177e4SLinus Torvalds flush_icache_page(vma, page); 30722994302bSJan Kara if (pte_swp_soft_dirty(vmf->orig_pte)) 3073179ef71cSCyrill Gorcunov pte = pte_mksoft_dirty(pte); 307482b0f8c3SJan Kara set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); 3075ca827d55SKhalid Aziz arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); 30762994302bSJan Kara vmf->orig_pte = pte; 30770bcac06fSMinchan Kim 30780bcac06fSMinchan Kim /* ksm created a completely new copy */ 30790bcac06fSMinchan Kim if (unlikely(page != swapcache && swapcache)) { 308082b0f8c3SJan Kara page_add_new_anon_rmap(page, vma, vmf->address, false); 3081f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, false); 308200501b53SJohannes Weiner lru_cache_add_active_or_unevictable(page, vma); 30830bcac06fSMinchan Kim } else { 30840bcac06fSMinchan Kim do_page_add_anon_rmap(page, vma, vmf->address, exclusive); 30850bcac06fSMinchan Kim mem_cgroup_commit_charge(page, memcg, true, false); 30860bcac06fSMinchan Kim activate_page(page); 308700501b53SJohannes Weiner } 30881da177e4SLinus Torvalds 3089c475a8abSHugh Dickins swap_free(entry); 30905ccc5abaSVladimir Davydov if (mem_cgroup_swap_full(page) || 30915ccc5abaSVladimir Davydov (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) 3092a2c43eedSHugh Dickins try_to_free_swap(page); 3093c475a8abSHugh Dickins unlock_page(page); 30940bcac06fSMinchan Kim if (page != swapcache && swapcache) { 30954969c119SAndrea Arcangeli /* 30964969c119SAndrea Arcangeli * Hold the lock to avoid the swap entry to be reused 30974969c119SAndrea Arcangeli * until we take the PT lock for the pte_same() check 30984969c119SAndrea Arcangeli * (to avoid false positives from pte_same). 
For 30994969c119SAndrea Arcangeli * further safety release the lock after the swap_free 31004969c119SAndrea Arcangeli * so that the swap count won't change under a 31014969c119SAndrea Arcangeli * parallel locked swapcache. 31024969c119SAndrea Arcangeli */ 31034969c119SAndrea Arcangeli unlock_page(swapcache); 310409cbfeafSKirill A. Shutemov put_page(swapcache); 31054969c119SAndrea Arcangeli } 3106c475a8abSHugh Dickins 310782b0f8c3SJan Kara if (vmf->flags & FAULT_FLAG_WRITE) { 31082994302bSJan Kara ret |= do_wp_page(vmf); 310961469f1dSHugh Dickins if (ret & VM_FAULT_ERROR) 311061469f1dSHugh Dickins ret &= VM_FAULT_ERROR; 31111da177e4SLinus Torvalds goto out; 31121da177e4SLinus Torvalds } 31131da177e4SLinus Torvalds 31141da177e4SLinus Torvalds /* No need to invalidate - it was non-present before */ 311582b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 311665500d23SHugh Dickins unlock: 311782b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 31181da177e4SLinus Torvalds out: 31191da177e4SLinus Torvalds return ret; 3120b8107480SKirill Korotaev out_nomap: 3121f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 312282b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 3123bc43f75cSJohannes Weiner out_page: 3124b8107480SKirill Korotaev unlock_page(page); 31254779cb31SAndi Kleen out_release: 312609cbfeafSKirill A. Shutemov put_page(page); 31270bcac06fSMinchan Kim if (page != swapcache && swapcache) { 31284969c119SAndrea Arcangeli unlock_page(swapcache); 312909cbfeafSKirill A. Shutemov put_page(swapcache); 31304969c119SAndrea Arcangeli } 313165500d23SHugh Dickins return ret; 31321da177e4SLinus Torvalds } 31331da177e4SLinus Torvalds 31341da177e4SLinus Torvalds /* 31358f4e2101SHugh Dickins * We enter with non-exclusive mmap_sem (to exclude vma changes, 31368f4e2101SHugh Dickins * but allow concurrent faults), and pte mapped but not yet locked. 31378f4e2101SHugh Dickins * We return with mmap_sem still held, but pte unmapped and unlocked. 31381da177e4SLinus Torvalds */ 313982b0f8c3SJan Kara static int do_anonymous_page(struct vm_fault *vmf) 31401da177e4SLinus Torvalds { 314182b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 314200501b53SJohannes Weiner struct mem_cgroup *memcg; 31438f4e2101SHugh Dickins struct page *page; 31446b31d595SMichal Hocko int ret = 0; 31451da177e4SLinus Torvalds pte_t entry; 31461da177e4SLinus Torvalds 31476b7339f4SKirill A. Shutemov /* File mapping without ->vm_ops ? */ 31486b7339f4SKirill A. Shutemov if (vma->vm_flags & VM_SHARED) 31496b7339f4SKirill A. Shutemov return VM_FAULT_SIGBUS; 31506b7339f4SKirill A. Shutemov 31517267ec00SKirill A. Shutemov /* 31527267ec00SKirill A. Shutemov * Use pte_alloc() instead of pte_alloc_map(). We can't run 31537267ec00SKirill A. Shutemov * pte_offset_map() on pmds where a huge pmd might be created 31547267ec00SKirill A. Shutemov * from a different thread. 31557267ec00SKirill A. Shutemov * 31567267ec00SKirill A. Shutemov * pte_alloc_map() is safe to use under down_write(mmap_sem) or when 31577267ec00SKirill A. Shutemov * parallel threads are excluded by other means. 31587267ec00SKirill A. Shutemov * 31597267ec00SKirill A. Shutemov * Here we only have down_read(mmap_sem). 31607267ec00SKirill A. Shutemov */ 316182b0f8c3SJan Kara if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address)) 31627267ec00SKirill A. Shutemov return VM_FAULT_OOM; 31637267ec00SKirill A. Shutemov 31647267ec00SKirill A. 
Shutemov /* See the comment in pte_alloc_one_map() */ 316582b0f8c3SJan Kara if (unlikely(pmd_trans_unstable(vmf->pmd))) 31667267ec00SKirill A. Shutemov return 0; 31677267ec00SKirill A. Shutemov 316811ac5524SLinus Torvalds /* Use the zero-page for reads */ 316982b0f8c3SJan Kara if (!(vmf->flags & FAULT_FLAG_WRITE) && 3170bae473a4SKirill A. Shutemov !mm_forbids_zeropage(vma->vm_mm)) { 317182b0f8c3SJan Kara entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), 317262eede62SHugh Dickins vma->vm_page_prot)); 317382b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 317482b0f8c3SJan Kara vmf->address, &vmf->ptl); 317582b0f8c3SJan Kara if (!pte_none(*vmf->pte)) 3176a13ea5b7SHugh Dickins goto unlock; 31776b31d595SMichal Hocko ret = check_stable_address_space(vma->vm_mm); 31786b31d595SMichal Hocko if (ret) 31796b31d595SMichal Hocko goto unlock; 31806b251fc9SAndrea Arcangeli /* Deliver the page fault to userland, check inside PT lock */ 31816b251fc9SAndrea Arcangeli if (userfaultfd_missing(vma)) { 318282b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 318382b0f8c3SJan Kara return handle_userfault(vmf, VM_UFFD_MISSING); 31846b251fc9SAndrea Arcangeli } 3185a13ea5b7SHugh Dickins goto setpte; 3186a13ea5b7SHugh Dickins } 3187a13ea5b7SHugh Dickins 31881da177e4SLinus Torvalds /* Allocate our own private page. */ 31891da177e4SLinus Torvalds if (unlikely(anon_vma_prepare(vma))) 319065500d23SHugh Dickins goto oom; 319182b0f8c3SJan Kara page = alloc_zeroed_user_highpage_movable(vma, vmf->address); 31921da177e4SLinus Torvalds if (!page) 319365500d23SHugh Dickins goto oom; 3194eb3c24f3SMel Gorman 31952cf85583STejun Heo if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg, 31962cf85583STejun Heo false)) 3197eb3c24f3SMel Gorman goto oom_free_page; 3198eb3c24f3SMel Gorman 319952f37629SMinchan Kim /* 320052f37629SMinchan Kim * The memory barrier inside __SetPageUptodate makes sure that 320152f37629SMinchan Kim * preceeding stores to the page contents become visible before 320252f37629SMinchan Kim * the set_pte_at() write. 320352f37629SMinchan Kim */ 32040ed361deSNick Piggin __SetPageUptodate(page); 32051da177e4SLinus Torvalds 320665500d23SHugh Dickins entry = mk_pte(page, vma->vm_page_prot); 32071ac0cb5dSHugh Dickins if (vma->vm_flags & VM_WRITE) 32081ac0cb5dSHugh Dickins entry = pte_mkwrite(pte_mkdirty(entry)); 32098f4e2101SHugh Dickins 321082b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 321182b0f8c3SJan Kara &vmf->ptl); 321282b0f8c3SJan Kara if (!pte_none(*vmf->pte)) 32138f4e2101SHugh Dickins goto release; 32149ba69294SHugh Dickins 32156b31d595SMichal Hocko ret = check_stable_address_space(vma->vm_mm); 32166b31d595SMichal Hocko if (ret) 32176b31d595SMichal Hocko goto release; 32186b31d595SMichal Hocko 32196b251fc9SAndrea Arcangeli /* Deliver the page fault to userland, check inside PT lock */ 32206b251fc9SAndrea Arcangeli if (userfaultfd_missing(vma)) { 322182b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 3222f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 322309cbfeafSKirill A. Shutemov put_page(page); 322482b0f8c3SJan Kara return handle_userfault(vmf, VM_UFFD_MISSING); 32256b251fc9SAndrea Arcangeli } 32266b251fc9SAndrea Arcangeli 3227bae473a4SKirill A. Shutemov inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 322882b0f8c3SJan Kara page_add_new_anon_rmap(page, vma, vmf->address, false); 3229f627c2f5SKirill A. 
Shutemov mem_cgroup_commit_charge(page, memcg, false, false); 323000501b53SJohannes Weiner lru_cache_add_active_or_unevictable(page, vma); 3231a13ea5b7SHugh Dickins setpte: 323282b0f8c3SJan Kara set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); 32331da177e4SLinus Torvalds 32341da177e4SLinus Torvalds /* No need to invalidate - it was non-present before */ 323582b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 323665500d23SHugh Dickins unlock: 323782b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 32386b31d595SMichal Hocko return ret; 32398f4e2101SHugh Dickins release: 3240f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 324109cbfeafSKirill A. Shutemov put_page(page); 32428f4e2101SHugh Dickins goto unlock; 32438a9f3ccdSBalbir Singh oom_free_page: 324409cbfeafSKirill A. Shutemov put_page(page); 324565500d23SHugh Dickins oom: 32461da177e4SLinus Torvalds return VM_FAULT_OOM; 32471da177e4SLinus Torvalds } 32481da177e4SLinus Torvalds 32499a95f3cfSPaul Cassella /* 32509a95f3cfSPaul Cassella * The mmap_sem must have been held on entry, and may have been 32519a95f3cfSPaul Cassella * released depending on flags and vma->vm_ops->fault() return value. 32529a95f3cfSPaul Cassella * See filemap_fault() and __lock_page_retry(). 32539a95f3cfSPaul Cassella */ 3254936ca80dSJan Kara static int __do_fault(struct vm_fault *vmf) 32557eae74afSKirill A. Shutemov { 325682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 32577eae74afSKirill A. Shutemov int ret; 32587eae74afSKirill A. Shutemov 325911bac800SDave Jiang ret = vma->vm_ops->fault(vmf); 32603917048dSJan Kara if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 3261b1aa812bSJan Kara VM_FAULT_DONE_COW))) 32627eae74afSKirill A. Shutemov return ret; 32637eae74afSKirill A. Shutemov 3264667240e0SJan Kara if (unlikely(PageHWPoison(vmf->page))) { 32657eae74afSKirill A. Shutemov if (ret & VM_FAULT_LOCKED) 3266667240e0SJan Kara unlock_page(vmf->page); 3267667240e0SJan Kara put_page(vmf->page); 3268936ca80dSJan Kara vmf->page = NULL; 32697eae74afSKirill A. Shutemov return VM_FAULT_HWPOISON; 32707eae74afSKirill A. Shutemov } 32717eae74afSKirill A. Shutemov 32727eae74afSKirill A. Shutemov if (unlikely(!(ret & VM_FAULT_LOCKED))) 3273667240e0SJan Kara lock_page(vmf->page); 32747eae74afSKirill A. Shutemov else 3275667240e0SJan Kara VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); 32767eae74afSKirill A. Shutemov 32777eae74afSKirill A. Shutemov return ret; 32787eae74afSKirill A. Shutemov } 32797eae74afSKirill A. Shutemov 3280d0f0931dSRoss Zwisler /* 3281d0f0931dSRoss Zwisler * The ordering of these checks is important for pmds with _PAGE_DEVMAP set. 3282d0f0931dSRoss Zwisler * If we check pmd_trans_unstable() first we will trip the bad_pmd() check 3283d0f0931dSRoss Zwisler * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly 3284d0f0931dSRoss Zwisler * returning 1 but not before it spams dmesg with the pmd_clear_bad() output. 3285d0f0931dSRoss Zwisler */ 3286d0f0931dSRoss Zwisler static int pmd_devmap_trans_unstable(pmd_t *pmd) 3287d0f0931dSRoss Zwisler { 3288d0f0931dSRoss Zwisler return pmd_devmap(*pmd) || pmd_trans_unstable(pmd); 3289d0f0931dSRoss Zwisler } 3290d0f0931dSRoss Zwisler 329182b0f8c3SJan Kara static int pte_alloc_one_map(struct vm_fault *vmf) 32927267ec00SKirill A. Shutemov { 329382b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 32947267ec00SKirill A. Shutemov 329582b0f8c3SJan Kara if (!pmd_none(*vmf->pmd)) 32967267ec00SKirill A. 
Shutemov goto map_pte; 329782b0f8c3SJan Kara if (vmf->prealloc_pte) { 329882b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 329982b0f8c3SJan Kara if (unlikely(!pmd_none(*vmf->pmd))) { 330082b0f8c3SJan Kara spin_unlock(vmf->ptl); 33017267ec00SKirill A. Shutemov goto map_pte; 33027267ec00SKirill A. Shutemov } 33037267ec00SKirill A. Shutemov 3304c4812909SKirill A. Shutemov mm_inc_nr_ptes(vma->vm_mm); 330582b0f8c3SJan Kara pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 330682b0f8c3SJan Kara spin_unlock(vmf->ptl); 33077f2b6ce8STobin C Harding vmf->prealloc_pte = NULL; 330882b0f8c3SJan Kara } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { 33097267ec00SKirill A. Shutemov return VM_FAULT_OOM; 33107267ec00SKirill A. Shutemov } 33117267ec00SKirill A. Shutemov map_pte: 33127267ec00SKirill A. Shutemov /* 33137267ec00SKirill A. Shutemov * If a huge pmd materialized under us just retry later. Use 3314d0f0931dSRoss Zwisler * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of 3315d0f0931dSRoss Zwisler * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge 3316d0f0931dSRoss Zwisler * under us and then back to pmd_none, as a result of MADV_DONTNEED 3317d0f0931dSRoss Zwisler * running immediately after a huge pmd fault in a different thread of 3318d0f0931dSRoss Zwisler * this mm, in turn leading to a misleading pmd_trans_huge() retval. 3319d0f0931dSRoss Zwisler * All we have to ensure is that it is a regular pmd that we can walk 3320d0f0931dSRoss Zwisler * with pte_offset_map() and we can do that through an atomic read in 3321d0f0931dSRoss Zwisler * C, which is what pmd_trans_unstable() provides. 33227267ec00SKirill A. Shutemov */ 3323d0f0931dSRoss Zwisler if (pmd_devmap_trans_unstable(vmf->pmd)) 33247267ec00SKirill A. Shutemov return VM_FAULT_NOPAGE; 33257267ec00SKirill A. Shutemov 3326d0f0931dSRoss Zwisler /* 3327d0f0931dSRoss Zwisler * At this point we know that our vmf->pmd points to a page of ptes 3328d0f0931dSRoss Zwisler * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge() 3329d0f0931dSRoss Zwisler * for the duration of the fault. If a racing MADV_DONTNEED runs and 3330d0f0931dSRoss Zwisler * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still 3331d0f0931dSRoss Zwisler * be valid and we will re-check to make sure the vmf->pte isn't 3332d0f0931dSRoss Zwisler * pte_none() under vmf->ptl protection when we return to 3333d0f0931dSRoss Zwisler * alloc_set_pte(). 3334d0f0931dSRoss Zwisler */ 333582b0f8c3SJan Kara vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 333682b0f8c3SJan Kara &vmf->ptl); 33377267ec00SKirill A. Shutemov return 0; 33387267ec00SKirill A. Shutemov } 33397267ec00SKirill A. Shutemov 3340e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 334110102459SKirill A. Shutemov 334210102459SKirill A. Shutemov #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1) 334310102459SKirill A. Shutemov static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, 334410102459SKirill A. Shutemov unsigned long haddr) 334510102459SKirill A. Shutemov { 334610102459SKirill A. Shutemov if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != 334710102459SKirill A. Shutemov (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) 334810102459SKirill A. Shutemov return false; 334910102459SKirill A. Shutemov if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) 335010102459SKirill A. Shutemov return false; 335110102459SKirill A. Shutemov return true; 335210102459SKirill A. 
Shutemov } 335310102459SKirill A. Shutemov 335482b0f8c3SJan Kara static void deposit_prealloc_pte(struct vm_fault *vmf) 3355953c66c2SAneesh Kumar K.V { 335682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 3357953c66c2SAneesh Kumar K.V 335882b0f8c3SJan Kara pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 3359953c66c2SAneesh Kumar K.V /* 3360953c66c2SAneesh Kumar K.V * We are going to consume the prealloc table, 3361953c66c2SAneesh Kumar K.V * count that as nr_ptes. 3362953c66c2SAneesh Kumar K.V */ 3363c4812909SKirill A. Shutemov mm_inc_nr_ptes(vma->vm_mm); 33647f2b6ce8STobin C Harding vmf->prealloc_pte = NULL; 3365953c66c2SAneesh Kumar K.V } 3366953c66c2SAneesh Kumar K.V 336782b0f8c3SJan Kara static int do_set_pmd(struct vm_fault *vmf, struct page *page) 336810102459SKirill A. Shutemov { 336982b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 337082b0f8c3SJan Kara bool write = vmf->flags & FAULT_FLAG_WRITE; 337182b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 337210102459SKirill A. Shutemov pmd_t entry; 337310102459SKirill A. Shutemov int i, ret; 337410102459SKirill A. Shutemov 337510102459SKirill A. Shutemov if (!transhuge_vma_suitable(vma, haddr)) 337610102459SKirill A. Shutemov return VM_FAULT_FALLBACK; 337710102459SKirill A. Shutemov 337810102459SKirill A. Shutemov ret = VM_FAULT_FALLBACK; 337910102459SKirill A. Shutemov page = compound_head(page); 338010102459SKirill A. Shutemov 3381953c66c2SAneesh Kumar K.V /* 3382953c66c2SAneesh Kumar K.V * Archs like ppc64 need additonal space to store information 3383953c66c2SAneesh Kumar K.V * related to pte entry. Use the preallocated table for that. 3384953c66c2SAneesh Kumar K.V */ 338582b0f8c3SJan Kara if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { 338682b0f8c3SJan Kara vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address); 338782b0f8c3SJan Kara if (!vmf->prealloc_pte) 3388953c66c2SAneesh Kumar K.V return VM_FAULT_OOM; 3389953c66c2SAneesh Kumar K.V smp_wmb(); /* See comment in __pte_alloc() */ 3390953c66c2SAneesh Kumar K.V } 3391953c66c2SAneesh Kumar K.V 339282b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 339382b0f8c3SJan Kara if (unlikely(!pmd_none(*vmf->pmd))) 339410102459SKirill A. Shutemov goto out; 339510102459SKirill A. Shutemov 339610102459SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 339710102459SKirill A. Shutemov flush_icache_page(vma, page + i); 339810102459SKirill A. Shutemov 339910102459SKirill A. Shutemov entry = mk_huge_pmd(page, vma->vm_page_prot); 340010102459SKirill A. Shutemov if (write) 3401f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 340210102459SKirill A. Shutemov 340310102459SKirill A. Shutemov add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR); 340410102459SKirill A. Shutemov page_add_file_rmap(page, true); 3405953c66c2SAneesh Kumar K.V /* 3406953c66c2SAneesh Kumar K.V * deposit and withdraw with pmd lock held 3407953c66c2SAneesh Kumar K.V */ 3408953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 340982b0f8c3SJan Kara deposit_prealloc_pte(vmf); 341010102459SKirill A. Shutemov 341182b0f8c3SJan Kara set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 341210102459SKirill A. Shutemov 341382b0f8c3SJan Kara update_mmu_cache_pmd(vma, haddr, vmf->pmd); 341410102459SKirill A. Shutemov 341510102459SKirill A. Shutemov /* fault is handled */ 341610102459SKirill A. Shutemov ret = 0; 341795ecedcdSKirill A. Shutemov count_vm_event(THP_FILE_MAPPED); 341810102459SKirill A. 
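/*
 * Worked example (illustrative; assumes 4K pages and HPAGE_PMD_NR == 512,
 * i.e. 2M huge pages) for the transhuge_vma_suitable() check above: a file
 * huge page can only be mapped with a pmd when the vma's starting virtual
 * page number and its starting file offset agree in the low 9 bits, and the
 * whole 2M range lies inside the vma.
 *
 *   vm_start = 0x00200000, vm_pgoff = 0x000 -> both masks are 0  -> suitable
 *   vm_start = 0x00201000, vm_pgoff = 0x000 -> low bits differ   -> fall back
 *                                              to pte mappings
 */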
Shutemov out: 341982b0f8c3SJan Kara spin_unlock(vmf->ptl); 342010102459SKirill A. Shutemov return ret; 342110102459SKirill A. Shutemov } 342210102459SKirill A. Shutemov #else 342382b0f8c3SJan Kara static int do_set_pmd(struct vm_fault *vmf, struct page *page) 342410102459SKirill A. Shutemov { 342510102459SKirill A. Shutemov BUILD_BUG(); 342610102459SKirill A. Shutemov return 0; 342710102459SKirill A. Shutemov } 342810102459SKirill A. Shutemov #endif 342910102459SKirill A. Shutemov 34308c6e50b0SKirill A. Shutemov /** 34317267ec00SKirill A. Shutemov * alloc_set_pte - setup new PTE entry for given page and add reverse page 34327267ec00SKirill A. Shutemov * mapping. If needed, the function allocates a page table or uses the pre-allocated one. 34338c6e50b0SKirill A. Shutemov * 343482b0f8c3SJan Kara * @vmf: fault environment 34357267ec00SKirill A. Shutemov * @memcg: memcg to charge page (only for private mappings) 34368c6e50b0SKirill A. Shutemov * @page: page to map 34378c6e50b0SKirill A. Shutemov * 343882b0f8c3SJan Kara * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on 343982b0f8c3SJan Kara * return. 34408c6e50b0SKirill A. Shutemov * 34418c6e50b0SKirill A. Shutemov * Target users are the page fault handler itself and implementations of 34428c6e50b0SKirill A. Shutemov * vm_ops->map_pages. 34438c6e50b0SKirill A. Shutemov */ 344482b0f8c3SJan Kara int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, 34457267ec00SKirill A. Shutemov struct page *page) 34463bb97794SKirill A. Shutemov { 344782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 344882b0f8c3SJan Kara bool write = vmf->flags & FAULT_FLAG_WRITE; 34493bb97794SKirill A. Shutemov pte_t entry; 345010102459SKirill A. Shutemov int ret; 345110102459SKirill A. Shutemov 345282b0f8c3SJan Kara if (pmd_none(*vmf->pmd) && PageTransCompound(page) && 3453e496cf3dSKirill A. Shutemov IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { 345410102459SKirill A. Shutemov /* THP on COW? */ 345510102459SKirill A. Shutemov VM_BUG_ON_PAGE(memcg, page); 345610102459SKirill A. Shutemov 345782b0f8c3SJan Kara ret = do_set_pmd(vmf, page); 345810102459SKirill A. Shutemov if (ret != VM_FAULT_FALLBACK) 3459b0b9b3dfSHugh Dickins return ret; 346010102459SKirill A. Shutemov } 34613bb97794SKirill A. Shutemov 346282b0f8c3SJan Kara if (!vmf->pte) { 346382b0f8c3SJan Kara ret = pte_alloc_one_map(vmf); 34647267ec00SKirill A. Shutemov if (ret) 3465b0b9b3dfSHugh Dickins return ret; 34667267ec00SKirill A. Shutemov } 34677267ec00SKirill A. Shutemov 34687267ec00SKirill A. Shutemov /* Re-check under ptl */ 3469b0b9b3dfSHugh Dickins if (unlikely(!pte_none(*vmf->pte))) 3470b0b9b3dfSHugh Dickins return VM_FAULT_NOPAGE; 34717267ec00SKirill A. Shutemov 34723bb97794SKirill A. Shutemov flush_icache_page(vma, page); 34733bb97794SKirill A. Shutemov entry = mk_pte(page, vma->vm_page_prot); 34743bb97794SKirill A. Shutemov if (write) 34753bb97794SKirill A. Shutemov entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3476bae473a4SKirill A. Shutemov /* copy-on-write page */ 3477bae473a4SKirill A. Shutemov if (write && !(vma->vm_flags & VM_SHARED)) { 34783bb97794SKirill A. Shutemov inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 347982b0f8c3SJan Kara page_add_new_anon_rmap(page, vma, vmf->address, false); 34807267ec00SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, false); 34817267ec00SKirill A. Shutemov lru_cache_add_active_or_unevictable(page, vma); 34823bb97794SKirill A.
Shutemov } else { 3483eca56ff9SJerome Marchand inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); 3484dd78feddSKirill A. Shutemov page_add_file_rmap(page, false); 34853bb97794SKirill A. Shutemov } 348682b0f8c3SJan Kara set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); 34873bb97794SKirill A. Shutemov 34883bb97794SKirill A. Shutemov /* no need to invalidate: a not-present page won't be cached */ 348982b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 34907267ec00SKirill A. Shutemov 3491b0b9b3dfSHugh Dickins return 0; 34923bb97794SKirill A. Shutemov } 34933bb97794SKirill A. Shutemov 34949118c0cbSJan Kara 34959118c0cbSJan Kara /** 34969118c0cbSJan Kara * finish_fault - finish page fault once we have prepared the page to fault 34979118c0cbSJan Kara * 34989118c0cbSJan Kara * @vmf: structure describing the fault 34999118c0cbSJan Kara * 35009118c0cbSJan Kara * This function handles all that is needed to finish a page fault once the 35019118c0cbSJan Kara * page to fault in is prepared. It handles locking of PTEs, inserts PTE for 35029118c0cbSJan Kara * given page, adds reverse page mapping, handles memcg charges and LRU 35039118c0cbSJan Kara * addition. The function returns 0 on success, VM_FAULT_ code in case of 35049118c0cbSJan Kara * error. 35059118c0cbSJan Kara * 35069118c0cbSJan Kara * The function expects the page to be locked and on success it consumes a 35079118c0cbSJan Kara * reference of a page being mapped (for the PTE which maps it). 35089118c0cbSJan Kara */ 35099118c0cbSJan Kara int finish_fault(struct vm_fault *vmf) 35109118c0cbSJan Kara { 35119118c0cbSJan Kara struct page *page; 35126b31d595SMichal Hocko int ret = 0; 35139118c0cbSJan Kara 35149118c0cbSJan Kara /* Did we COW the page? */ 35159118c0cbSJan Kara if ((vmf->flags & FAULT_FLAG_WRITE) && 35169118c0cbSJan Kara !(vmf->vma->vm_flags & VM_SHARED)) 35179118c0cbSJan Kara page = vmf->cow_page; 35189118c0cbSJan Kara else 35199118c0cbSJan Kara page = vmf->page; 35206b31d595SMichal Hocko 35216b31d595SMichal Hocko /* 35226b31d595SMichal Hocko * check even for read faults because we might have lost our CoWed 35236b31d595SMichal Hocko * page 35246b31d595SMichal Hocko */ 35256b31d595SMichal Hocko if (!(vmf->vma->vm_flags & VM_SHARED)) 35266b31d595SMichal Hocko ret = check_stable_address_space(vmf->vma->vm_mm); 35276b31d595SMichal Hocko if (!ret) 35289118c0cbSJan Kara ret = alloc_set_pte(vmf, vmf->memcg, page); 35299118c0cbSJan Kara if (vmf->pte) 35309118c0cbSJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 35319118c0cbSJan Kara return ret; 35329118c0cbSJan Kara } 35339118c0cbSJan Kara 35343a91053aSKirill A. Shutemov static unsigned long fault_around_bytes __read_mostly = 35353a91053aSKirill A. Shutemov rounddown_pow_of_two(65536); 3536a9b0f861SKirill A. Shutemov 35371592eef0SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 3538a9b0f861SKirill A. Shutemov static int fault_around_bytes_get(void *data, u64 *val) 35391592eef0SKirill A. Shutemov { 3540a9b0f861SKirill A. Shutemov *val = fault_around_bytes; 35411592eef0SKirill A. Shutemov return 0; 35421592eef0SKirill A. Shutemov } 35431592eef0SKirill A. Shutemov 3544b4903d6eSAndrey Ryabinin /* 3545da391d64SWilliam Kucharski * fault_around_bytes must be rounded down to the nearest page order as it's 3546da391d64SWilliam Kucharski * what do_fault_around() expects to see. 3547b4903d6eSAndrey Ryabinin */ 3548a9b0f861SKirill A. Shutemov static int fault_around_bytes_set(void *data, u64 val) 35491592eef0SKirill A. Shutemov { 3550a9b0f861SKirill A. 
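/*
 * Caller-side contract of alloc_set_pte()/finish_fault() above, written out
 * as pseudocode (illustrative only): whoever calls alloc_set_pte() owns the
 * unlock of the page table lock it may have taken.
 *
 *   ret = alloc_set_pte(vmf, memcg, page);
 *   if (vmf->pte)
 *           pte_unmap_unlock(vmf->pte, vmf->ptl);
 *   return ret;
 */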
Shutemov if (val / PAGE_SIZE > PTRS_PER_PTE) 35511592eef0SKirill A. Shutemov return -EINVAL; 3552b4903d6eSAndrey Ryabinin if (val > PAGE_SIZE) 3553b4903d6eSAndrey Ryabinin fault_around_bytes = rounddown_pow_of_two(val); 3554b4903d6eSAndrey Ryabinin else 3555b4903d6eSAndrey Ryabinin fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */ 35561592eef0SKirill A. Shutemov return 0; 35571592eef0SKirill A. Shutemov } 35580a1345f8SYevgen Pronenko DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, 3559a9b0f861SKirill A. Shutemov fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); 35601592eef0SKirill A. Shutemov 35611592eef0SKirill A. Shutemov static int __init fault_around_debugfs(void) 35621592eef0SKirill A. Shutemov { 35631592eef0SKirill A. Shutemov void *ret; 35641592eef0SKirill A. Shutemov 35650a1345f8SYevgen Pronenko ret = debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, 3566a9b0f861SKirill A. Shutemov &fault_around_bytes_fops); 35671592eef0SKirill A. Shutemov if (!ret) 3568a9b0f861SKirill A. Shutemov pr_warn("Failed to create fault_around_bytes in debugfs"); 35691592eef0SKirill A. Shutemov return 0; 35701592eef0SKirill A. Shutemov } 35711592eef0SKirill A. Shutemov late_initcall(fault_around_debugfs); 35721592eef0SKirill A. Shutemov #endif 35738c6e50b0SKirill A. Shutemov 35741fdb412bSKirill A. Shutemov /* 35751fdb412bSKirill A. Shutemov * do_fault_around() tries to map few pages around the fault address. The hope 35761fdb412bSKirill A. Shutemov * is that the pages will be needed soon and this will lower the number of 35771fdb412bSKirill A. Shutemov * faults to handle. 35781fdb412bSKirill A. Shutemov * 35791fdb412bSKirill A. Shutemov * It uses vm_ops->map_pages() to map the pages, which skips the page if it's 35801fdb412bSKirill A. Shutemov * not ready to be mapped: not up-to-date, locked, etc. 35811fdb412bSKirill A. Shutemov * 35821fdb412bSKirill A. Shutemov * This function is called with the page table lock taken. In the split ptlock 35831fdb412bSKirill A. Shutemov * case the page table lock only protects only those entries which belong to 35841fdb412bSKirill A. Shutemov * the page table corresponding to the fault address. 35851fdb412bSKirill A. Shutemov * 35861fdb412bSKirill A. Shutemov * This function doesn't cross the VMA boundaries, in order to call map_pages() 35871fdb412bSKirill A. Shutemov * only once. 35881fdb412bSKirill A. Shutemov * 3589da391d64SWilliam Kucharski * fault_around_bytes defines how many bytes we'll try to map. 3590da391d64SWilliam Kucharski * do_fault_around() expects it to be set to a power of two less than or equal 3591da391d64SWilliam Kucharski * to PTRS_PER_PTE. 35921fdb412bSKirill A. Shutemov * 3593da391d64SWilliam Kucharski * The virtual address of the area that we map is naturally aligned to 3594da391d64SWilliam Kucharski * fault_around_bytes rounded down to the machine page size 3595da391d64SWilliam Kucharski * (and therefore to page order). This way it's easier to guarantee 3596da391d64SWilliam Kucharski * that we don't cross page table boundaries. 35971fdb412bSKirill A. Shutemov */ 35980721ec8bSJan Kara static int do_fault_around(struct vm_fault *vmf) 35998c6e50b0SKirill A. Shutemov { 360082b0f8c3SJan Kara unsigned long address = vmf->address, nr_pages, mask; 36010721ec8bSJan Kara pgoff_t start_pgoff = vmf->pgoff; 3602bae473a4SKirill A. Shutemov pgoff_t end_pgoff; 36037267ec00SKirill A. Shutemov int off, ret = 0; 36048c6e50b0SKirill A. 
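/*
 * Worked example (illustrative; assumes 4K pages, PTRS_PER_PTE == 512, the
 * default fault_around_bytes of 65536 and debugfs mounted at
 * /sys/kernel/debug):
 *
 *   echo 20000 > /sys/kernel/debug/fault_around_bytes
 *           -> stored value is rounddown_pow_of_two(20000) = 16384 (4 pages)
 *
 *   With the 65536 default, nr_pages = 16 and mask = ~0xffff, so a read
 *   fault at 0x7f1234567890 makes the fault-around window below start at
 *   0x7f1234560000 (clamped to vma->vm_start) and cover at most 16 pages,
 *   never crossing the page table or the vma.
 */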
Shutemov 36054db0c3c2SJason Low nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; 3606aecd6f44SKirill A. Shutemov mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK; 3607aecd6f44SKirill A. Shutemov 360882b0f8c3SJan Kara vmf->address = max(address & mask, vmf->vma->vm_start); 360982b0f8c3SJan Kara off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); 3610bae473a4SKirill A. Shutemov start_pgoff -= off; 36118c6e50b0SKirill A. Shutemov 36128c6e50b0SKirill A. Shutemov /* 3613da391d64SWilliam Kucharski * end_pgoff is either the end of the page table, the end of 3614da391d64SWilliam Kucharski * the vma or nr_pages from start_pgoff, depending what is nearest. 36158c6e50b0SKirill A. Shutemov */ 3616bae473a4SKirill A. Shutemov end_pgoff = start_pgoff - 361782b0f8c3SJan Kara ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + 36188c6e50b0SKirill A. Shutemov PTRS_PER_PTE - 1; 361982b0f8c3SJan Kara end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, 3620bae473a4SKirill A. Shutemov start_pgoff + nr_pages - 1); 36218c6e50b0SKirill A. Shutemov 362282b0f8c3SJan Kara if (pmd_none(*vmf->pmd)) { 362382b0f8c3SJan Kara vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm, 362482b0f8c3SJan Kara vmf->address); 362582b0f8c3SJan Kara if (!vmf->prealloc_pte) 3626c5f88bd2SVegard Nossum goto out; 36277267ec00SKirill A. Shutemov smp_wmb(); /* See comment in __pte_alloc() */ 36288c6e50b0SKirill A. Shutemov } 36298c6e50b0SKirill A. Shutemov 363082b0f8c3SJan Kara vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); 36317267ec00SKirill A. Shutemov 36327267ec00SKirill A. Shutemov /* Huge page is mapped? Page fault is solved */ 363382b0f8c3SJan Kara if (pmd_trans_huge(*vmf->pmd)) { 36347267ec00SKirill A. Shutemov ret = VM_FAULT_NOPAGE; 36357267ec00SKirill A. Shutemov goto out; 36368c6e50b0SKirill A. Shutemov } 36378c6e50b0SKirill A. Shutemov 36387267ec00SKirill A. Shutemov /* ->map_pages() haven't done anything useful. Cold page cache? */ 363982b0f8c3SJan Kara if (!vmf->pte) 36407267ec00SKirill A. Shutemov goto out; 36417267ec00SKirill A. Shutemov 36427267ec00SKirill A. Shutemov /* check if the page fault is solved */ 364382b0f8c3SJan Kara vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT); 364482b0f8c3SJan Kara if (!pte_none(*vmf->pte)) 36457267ec00SKirill A. Shutemov ret = VM_FAULT_NOPAGE; 364682b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 36477267ec00SKirill A. Shutemov out: 364882b0f8c3SJan Kara vmf->address = address; 364982b0f8c3SJan Kara vmf->pte = NULL; 36507267ec00SKirill A. Shutemov return ret; 36517267ec00SKirill A. Shutemov } 36527267ec00SKirill A. Shutemov 36530721ec8bSJan Kara static int do_read_fault(struct vm_fault *vmf) 3654e655fb29SKirill A. Shutemov { 365582b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 36568c6e50b0SKirill A. Shutemov int ret = 0; 36578c6e50b0SKirill A. Shutemov 36588c6e50b0SKirill A. Shutemov /* 36598c6e50b0SKirill A. Shutemov * Let's call ->map_pages() first and use ->fault() as fallback 36608c6e50b0SKirill A. Shutemov * if page by the offset is not ready to be mapped (cold cache or 36618c6e50b0SKirill A. Shutemov * something). 36628c6e50b0SKirill A. Shutemov */ 36639b4bdd2fSKirill A. Shutemov if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { 36640721ec8bSJan Kara ret = do_fault_around(vmf); 36657267ec00SKirill A. Shutemov if (ret) 36667267ec00SKirill A. Shutemov return ret; 36678c6e50b0SKirill A. Shutemov } 3668e655fb29SKirill A. 
Shutemov 3669936ca80dSJan Kara ret = __do_fault(vmf); 3670e655fb29SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 3671e655fb29SKirill A. Shutemov return ret; 3672e655fb29SKirill A. Shutemov 36739118c0cbSJan Kara ret |= finish_fault(vmf); 3674936ca80dSJan Kara unlock_page(vmf->page); 36757267ec00SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 3676936ca80dSJan Kara put_page(vmf->page); 3677e655fb29SKirill A. Shutemov return ret; 3678e655fb29SKirill A. Shutemov } 3679e655fb29SKirill A. Shutemov 36800721ec8bSJan Kara static int do_cow_fault(struct vm_fault *vmf) 3681ec47c3b9SKirill A. Shutemov { 368282b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 3683ec47c3b9SKirill A. Shutemov int ret; 3684ec47c3b9SKirill A. Shutemov 3685ec47c3b9SKirill A. Shutemov if (unlikely(anon_vma_prepare(vma))) 3686ec47c3b9SKirill A. Shutemov return VM_FAULT_OOM; 3687ec47c3b9SKirill A. Shutemov 3688936ca80dSJan Kara vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); 3689936ca80dSJan Kara if (!vmf->cow_page) 3690ec47c3b9SKirill A. Shutemov return VM_FAULT_OOM; 3691ec47c3b9SKirill A. Shutemov 36922cf85583STejun Heo if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL, 36933917048dSJan Kara &vmf->memcg, false)) { 3694936ca80dSJan Kara put_page(vmf->cow_page); 3695ec47c3b9SKirill A. Shutemov return VM_FAULT_OOM; 3696ec47c3b9SKirill A. Shutemov } 3697ec47c3b9SKirill A. Shutemov 3698936ca80dSJan Kara ret = __do_fault(vmf); 3699ec47c3b9SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 3700ec47c3b9SKirill A. Shutemov goto uncharge_out; 37013917048dSJan Kara if (ret & VM_FAULT_DONE_COW) 37023917048dSJan Kara return ret; 3703ec47c3b9SKirill A. Shutemov 3704936ca80dSJan Kara copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); 3705936ca80dSJan Kara __SetPageUptodate(vmf->cow_page); 3706ec47c3b9SKirill A. Shutemov 37079118c0cbSJan Kara ret |= finish_fault(vmf); 3708936ca80dSJan Kara unlock_page(vmf->page); 3709936ca80dSJan Kara put_page(vmf->page); 37107267ec00SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 37117267ec00SKirill A. Shutemov goto uncharge_out; 3712ec47c3b9SKirill A. Shutemov return ret; 3713ec47c3b9SKirill A. Shutemov uncharge_out: 37143917048dSJan Kara mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false); 3715936ca80dSJan Kara put_page(vmf->cow_page); 3716ec47c3b9SKirill A. Shutemov return ret; 3717ec47c3b9SKirill A. Shutemov } 3718ec47c3b9SKirill A. Shutemov 37190721ec8bSJan Kara static int do_shared_fault(struct vm_fault *vmf) 37201da177e4SLinus Torvalds { 372182b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 3722f0c6d4d2SKirill A. Shutemov int ret, tmp; 37231d65f86dSKAMEZAWA Hiroyuki 3724936ca80dSJan Kara ret = __do_fault(vmf); 37257eae74afSKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 3726f0c6d4d2SKirill A. Shutemov return ret; 37271da177e4SLinus Torvalds 37281da177e4SLinus Torvalds /* 3729f0c6d4d2SKirill A. Shutemov * Check if the backing address space wants to know that the page is 3730f0c6d4d2SKirill A. Shutemov * about to become writable 37311da177e4SLinus Torvalds */ 3732fb09a464SKirill A. Shutemov if (vma->vm_ops->page_mkwrite) { 3733936ca80dSJan Kara unlock_page(vmf->page); 373438b8cb7fSJan Kara tmp = do_page_mkwrite(vmf); 3735fb09a464SKirill A. Shutemov if (unlikely(!tmp || 3736fb09a464SKirill A. 
Shutemov (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 3737936ca80dSJan Kara put_page(vmf->page); 3738f0c6d4d2SKirill A. Shutemov return tmp; 373969676147SMark Fasheh } 3740d0217ac0SNick Piggin } 3741fb09a464SKirill A. Shutemov 37429118c0cbSJan Kara ret |= finish_fault(vmf); 37437267ec00SKirill A. Shutemov if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | 37447267ec00SKirill A. Shutemov VM_FAULT_RETRY))) { 3745936ca80dSJan Kara unlock_page(vmf->page); 3746936ca80dSJan Kara put_page(vmf->page); 3747f0c6d4d2SKirill A. Shutemov return ret; 37489637a5efSDavid Howells } 3749d00806b1SNick Piggin 375097ba0c2bSJan Kara fault_dirty_shared_page(vma, vmf->page); 3751b827e496SNick Piggin return ret; 375254cb8821SNick Piggin } 3753d00806b1SNick Piggin 37549a95f3cfSPaul Cassella /* 37559a95f3cfSPaul Cassella * We enter with non-exclusive mmap_sem (to exclude vma changes, 37569a95f3cfSPaul Cassella * but allow concurrent faults). 37579a95f3cfSPaul Cassella * The mmap_sem may have been released depending on flags and our 37589a95f3cfSPaul Cassella * return value. See filemap_fault() and __lock_page_or_retry(). 37599a95f3cfSPaul Cassella */ 376082b0f8c3SJan Kara static int do_fault(struct vm_fault *vmf) 376154cb8821SNick Piggin { 376282b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 3763b0b9b3dfSHugh Dickins int ret; 376454cb8821SNick Piggin 37656b7339f4SKirill A. Shutemov /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ 37666b7339f4SKirill A. Shutemov if (!vma->vm_ops->fault) 3767b0b9b3dfSHugh Dickins ret = VM_FAULT_SIGBUS; 3768b0b9b3dfSHugh Dickins else if (!(vmf->flags & FAULT_FLAG_WRITE)) 3769b0b9b3dfSHugh Dickins ret = do_read_fault(vmf); 3770b0b9b3dfSHugh Dickins else if (!(vma->vm_flags & VM_SHARED)) 3771b0b9b3dfSHugh Dickins ret = do_cow_fault(vmf); 3772b0b9b3dfSHugh Dickins else 3773b0b9b3dfSHugh Dickins ret = do_shared_fault(vmf); 3774b0b9b3dfSHugh Dickins 3775b0b9b3dfSHugh Dickins /* preallocated pagetable is unused: free it */ 3776b0b9b3dfSHugh Dickins if (vmf->prealloc_pte) { 3777b0b9b3dfSHugh Dickins pte_free(vma->vm_mm, vmf->prealloc_pte); 37787f2b6ce8STobin C Harding vmf->prealloc_pte = NULL; 3779b0b9b3dfSHugh Dickins } 3780b0b9b3dfSHugh Dickins return ret; 378154cb8821SNick Piggin } 378254cb8821SNick Piggin 3783b19a9939SRashika Kheria static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, 378404bb2f94SRik van Riel unsigned long addr, int page_nid, 378504bb2f94SRik van Riel int *flags) 37869532fec1SMel Gorman { 37879532fec1SMel Gorman get_page(page); 37889532fec1SMel Gorman 37899532fec1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS); 379004bb2f94SRik van Riel if (page_nid == numa_node_id()) { 37919532fec1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 379204bb2f94SRik van Riel *flags |= TNF_FAULT_LOCAL; 379304bb2f94SRik van Riel } 37949532fec1SMel Gorman 37959532fec1SMel Gorman return mpol_misplaced(page, vma, addr); 37969532fec1SMel Gorman } 37979532fec1SMel Gorman 37982994302bSJan Kara static int do_numa_page(struct vm_fault *vmf) 3799d10e63f2SMel Gorman { 380082b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 38014daae3b4SMel Gorman struct page *page = NULL; 38028191acbdSMel Gorman int page_nid = -1; 380390572890SPeter Zijlstra int last_cpupid; 3804cbee9f88SPeter Zijlstra int target_nid; 3805b8593bfdSMel Gorman bool migrated = false; 3806cee216a6SAneesh Kumar K.V pte_t pte; 3807288bc549SAneesh Kumar K.V bool was_writable = pte_savedwrite(vmf->orig_pte); 38086688cc05SPeter Zijlstra int flags = 0; 3809d10e63f2SMel 
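/*
 * Dispatch summary for do_fault() above (illustrative recap, no new logic):
 *
 *   no vm_ops->fault              -> VM_FAULT_SIGBUS
 *   read fault                    -> do_read_fault(), may fault around via
 *                                    ->map_pages()
 *   write fault, !VM_SHARED vma   -> do_cow_fault(), private copy plus memcg
 *                                    charge
 *   write fault, VM_SHARED vma    -> do_shared_fault(), ->page_mkwrite() and
 *                                    dirty accounting
 *
 * In every case a left-over vmf->prealloc_pte is freed before returning.
 */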
Gorman 3810d10e63f2SMel Gorman /* 3811d10e63f2SMel Gorman * The "pte" at this point cannot be used safely without 3812d10e63f2SMel Gorman * validation through pte_unmap_same(). It's of NUMA type but 3813d10e63f2SMel Gorman * the pfn may be screwed if the read is non-atomic. 3814d10e63f2SMel Gorman */ 381582b0f8c3SJan Kara vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); 381682b0f8c3SJan Kara spin_lock(vmf->ptl); 3817cee216a6SAneesh Kumar K.V if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { 381882b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 38194daae3b4SMel Gorman goto out; 38204daae3b4SMel Gorman } 38214daae3b4SMel Gorman 3822cee216a6SAneesh Kumar K.V /* 3823cee216a6SAneesh Kumar K.V * Make it present again. Depending on how the arch implements 3824cee216a6SAneesh Kumar K.V * non-accessible ptes, some can allow access by kernel mode. 3825cee216a6SAneesh Kumar K.V */ 3826cee216a6SAneesh Kumar K.V pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte); 38274d942466SMel Gorman pte = pte_modify(pte, vma->vm_page_prot); 38284d942466SMel Gorman pte = pte_mkyoung(pte); 3829b191f9b1SMel Gorman if (was_writable) 3830b191f9b1SMel Gorman pte = pte_mkwrite(pte); 3831cee216a6SAneesh Kumar K.V ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte); 383282b0f8c3SJan Kara update_mmu_cache(vma, vmf->address, vmf->pte); 3833d10e63f2SMel Gorman 383482b0f8c3SJan Kara page = vm_normal_page(vma, vmf->address, pte); 3835d10e63f2SMel Gorman if (!page) { 383682b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 3837d10e63f2SMel Gorman return 0; 3838d10e63f2SMel Gorman } 3839d10e63f2SMel Gorman 3840e81c4802SKirill A. Shutemov /* TODO: handle PTE-mapped THP */ 3841e81c4802SKirill A. Shutemov if (PageCompound(page)) { 384282b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 3843e81c4802SKirill A. Shutemov return 0; 3844e81c4802SKirill A. Shutemov } 3845e81c4802SKirill A. Shutemov 38466688cc05SPeter Zijlstra /* 3847bea66fbdSMel Gorman * Avoid grouping on RO pages in general. RO pages shouldn't hurt as 3848bea66fbdSMel Gorman * much anyway since they can be in shared cache state. This misses 3849bea66fbdSMel Gorman * the case where a mapping is writable but the process never writes 3850bea66fbdSMel Gorman * to it but pte_write gets cleared during protection updates and 3851bea66fbdSMel Gorman * pte_dirty has unpredictable behaviour between PTE scan updates, 3852bea66fbdSMel Gorman * background writeback, dirty balancing and application behaviour. 38536688cc05SPeter Zijlstra */ 3854d59dc7bcSRik van Riel if (!pte_write(pte)) 38556688cc05SPeter Zijlstra flags |= TNF_NO_GROUP; 38566688cc05SPeter Zijlstra 3857dabe1d99SRik van Riel /* 3858dabe1d99SRik van Riel * Flag if the page is shared between multiple address spaces. This 3859dabe1d99SRik van Riel * is later used when determining whether to group tasks together 3860dabe1d99SRik van Riel */ 3861dabe1d99SRik van Riel if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) 3862dabe1d99SRik van Riel flags |= TNF_SHARED; 3863dabe1d99SRik van Riel 386490572890SPeter Zijlstra last_cpupid = page_cpupid_last(page); 38658191acbdSMel Gorman page_nid = page_to_nid(page); 386682b0f8c3SJan Kara target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, 3867bae473a4SKirill A.
Shutemov &flags); 386882b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 38694daae3b4SMel Gorman if (target_nid == -1) { 38704daae3b4SMel Gorman put_page(page); 38714daae3b4SMel Gorman goto out; 38724daae3b4SMel Gorman } 38734daae3b4SMel Gorman 38744daae3b4SMel Gorman /* Migrate to the requested node */ 38751bc115d8SMel Gorman migrated = migrate_misplaced_page(page, vma, target_nid); 38766688cc05SPeter Zijlstra if (migrated) { 38778191acbdSMel Gorman page_nid = target_nid; 38786688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 3879074c2381SMel Gorman } else 3880074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 38814daae3b4SMel Gorman 38824daae3b4SMel Gorman out: 38838191acbdSMel Gorman if (page_nid != -1) 38846688cc05SPeter Zijlstra task_numa_fault(last_cpupid, page_nid, 1, flags); 3885d10e63f2SMel Gorman return 0; 3886d10e63f2SMel Gorman } 3887d10e63f2SMel Gorman 388891a90140SGeert Uytterhoeven static inline int create_huge_pmd(struct vm_fault *vmf) 3889b96375f7SMatthew Wilcox { 3890f4200391SDave Jiang if (vma_is_anonymous(vmf->vma)) 389182b0f8c3SJan Kara return do_huge_pmd_anonymous_page(vmf); 3892a2d58167SDave Jiang if (vmf->vma->vm_ops->huge_fault) 3893c791ace1SDave Jiang return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 3894b96375f7SMatthew Wilcox return VM_FAULT_FALLBACK; 3895b96375f7SMatthew Wilcox } 3896b96375f7SMatthew Wilcox 3897183f24aaSGeert Uytterhoeven /* `inline' is required to avoid gcc 4.1.2 build error */ 3898183f24aaSGeert Uytterhoeven static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) 3899b96375f7SMatthew Wilcox { 390082b0f8c3SJan Kara if (vma_is_anonymous(vmf->vma)) 390182b0f8c3SJan Kara return do_huge_pmd_wp_page(vmf, orig_pmd); 3902a2d58167SDave Jiang if (vmf->vma->vm_ops->huge_fault) 3903c791ace1SDave Jiang return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 3904af9e4d5fSKirill A. Shutemov 3905af9e4d5fSKirill A. Shutemov /* COW handled on pte level: split pmd */ 390682b0f8c3SJan Kara VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); 390782b0f8c3SJan Kara __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); 3908af9e4d5fSKirill A. 
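/*
 * Worked example (illustrative) for the NUMA hinting fault path above: a
 * task running on node 1 touches a page that currently sits on node 0 and
 * whose pte was made protnone by the NUMA balancing scanner.
 *
 *   page_nid   = 0                        (where the page lives now)
 *   target_nid = numa_migrate_prep() = 1  (policy prefers the faulting node)
 *   migrate_misplaced_page() succeeds     -> page_nid = 1, TNF_MIGRATED set
 *   task_numa_fault(last_cpupid, 1, 1, flags) then feeds the placement
 *   statistics; on failure TNF_MIGRATE_FAIL is recorded instead.
 */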
Shutemov 3909b96375f7SMatthew Wilcox return VM_FAULT_FALLBACK; 3910b96375f7SMatthew Wilcox } 3911b96375f7SMatthew Wilcox 391238e08854SLorenzo Stoakes static inline bool vma_is_accessible(struct vm_area_struct *vma) 391338e08854SLorenzo Stoakes { 391438e08854SLorenzo Stoakes return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE); 391538e08854SLorenzo Stoakes } 391638e08854SLorenzo Stoakes 3917a00cc7d9SMatthew Wilcox static int create_huge_pud(struct vm_fault *vmf) 3918a00cc7d9SMatthew Wilcox { 3919a00cc7d9SMatthew Wilcox #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3920a00cc7d9SMatthew Wilcox /* No support for anonymous transparent PUD pages yet */ 3921a00cc7d9SMatthew Wilcox if (vma_is_anonymous(vmf->vma)) 3922a00cc7d9SMatthew Wilcox return VM_FAULT_FALLBACK; 3923a00cc7d9SMatthew Wilcox if (vmf->vma->vm_ops->huge_fault) 3924c791ace1SDave Jiang return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 3925a00cc7d9SMatthew Wilcox #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3926a00cc7d9SMatthew Wilcox return VM_FAULT_FALLBACK; 3927a00cc7d9SMatthew Wilcox } 3928a00cc7d9SMatthew Wilcox 3929a00cc7d9SMatthew Wilcox static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) 3930a00cc7d9SMatthew Wilcox { 3931a00cc7d9SMatthew Wilcox #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3932a00cc7d9SMatthew Wilcox /* No support for anonymous transparent PUD pages yet */ 3933a00cc7d9SMatthew Wilcox if (vma_is_anonymous(vmf->vma)) 3934a00cc7d9SMatthew Wilcox return VM_FAULT_FALLBACK; 3935a00cc7d9SMatthew Wilcox if (vmf->vma->vm_ops->huge_fault) 3936c791ace1SDave Jiang return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 3937a00cc7d9SMatthew Wilcox #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3938a00cc7d9SMatthew Wilcox return VM_FAULT_FALLBACK; 3939a00cc7d9SMatthew Wilcox } 3940a00cc7d9SMatthew Wilcox 39411da177e4SLinus Torvalds /* 39421da177e4SLinus Torvalds * These routines also need to handle stuff like marking pages dirty 39431da177e4SLinus Torvalds * and/or accessed for architectures that don't do it in hardware (most 39441da177e4SLinus Torvalds * RISC architectures). The early dirtying is also good on the i386. 39451da177e4SLinus Torvalds * 39461da177e4SLinus Torvalds * There is also a hook called "update_mmu_cache()" that architectures 39471da177e4SLinus Torvalds * with external mmu caches can use to update those (ie the Sparc or 39481da177e4SLinus Torvalds * PowerPC hashed page tables that act as extended TLBs). 39491da177e4SLinus Torvalds * 39507267ec00SKirill A. Shutemov * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow 39517267ec00SKirill A. Shutemov * concurrent faults). 39529a95f3cfSPaul Cassella * 39537267ec00SKirill A. Shutemov * The mmap_sem may have been released depending on flags and our return value. 39547267ec00SKirill A. Shutemov * See filemap_fault() and __lock_page_or_retry(). 39551da177e4SLinus Torvalds */ 395682b0f8c3SJan Kara static int handle_pte_fault(struct vm_fault *vmf) 39571da177e4SLinus Torvalds { 39581da177e4SLinus Torvalds pte_t entry; 39591da177e4SLinus Torvalds 396082b0f8c3SJan Kara if (unlikely(pmd_none(*vmf->pmd))) { 39617267ec00SKirill A. Shutemov /* 39627267ec00SKirill A. Shutemov * Leave __pte_alloc() until later: because vm_ops->fault may 39637267ec00SKirill A. Shutemov * want to allocate huge page, and if we expose page table 39647267ec00SKirill A. Shutemov * for an instant, it will be difficult to retract from 39657267ec00SKirill A. Shutemov * concurrent faults and from rmap lookups. 39667267ec00SKirill A. 
Shutemov */ 396782b0f8c3SJan Kara vmf->pte = NULL; 39687267ec00SKirill A. Shutemov } else { 39697267ec00SKirill A. Shutemov /* See comment in pte_alloc_one_map() */ 3970d0f0931dSRoss Zwisler if (pmd_devmap_trans_unstable(vmf->pmd)) 39717267ec00SKirill A. Shutemov return 0; 39727267ec00SKirill A. Shutemov /* 39737267ec00SKirill A. Shutemov * A regular pmd is established and it can't morph into a huge 39747267ec00SKirill A. Shutemov * pmd from under us anymore at this point because we hold the 39757267ec00SKirill A. Shutemov * mmap_sem read mode and khugepaged takes it in write mode. 39767267ec00SKirill A. Shutemov * So now it's safe to run pte_offset_map(). 39777267ec00SKirill A. Shutemov */ 397882b0f8c3SJan Kara vmf->pte = pte_offset_map(vmf->pmd, vmf->address); 39792994302bSJan Kara vmf->orig_pte = *vmf->pte; 39807267ec00SKirill A. Shutemov 3981e37c6982SChristian Borntraeger /* 3982e37c6982SChristian Borntraeger * some architectures can have larger ptes than wordsize, 39837267ec00SKirill A. Shutemov * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and 3984b03a0fe0SPaul E. McKenney * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic 3985b03a0fe0SPaul E. McKenney * accesses. The code below just needs a consistent view 3986b03a0fe0SPaul E. McKenney * for the ifs and we later double check anyway with the 39877267ec00SKirill A. Shutemov * ptl lock held. So here a barrier will do. 3988e37c6982SChristian Borntraeger */ 3989e37c6982SChristian Borntraeger barrier(); 39902994302bSJan Kara if (pte_none(vmf->orig_pte)) { 399182b0f8c3SJan Kara pte_unmap(vmf->pte); 399282b0f8c3SJan Kara vmf->pte = NULL; 39937267ec00SKirill A. Shutemov } 39947267ec00SKirill A. Shutemov } 39957267ec00SKirill A. Shutemov 399682b0f8c3SJan Kara if (!vmf->pte) { 399782b0f8c3SJan Kara if (vma_is_anonymous(vmf->vma)) 399882b0f8c3SJan Kara return do_anonymous_page(vmf); 3999b5330628SOleg Nesterov else 400082b0f8c3SJan Kara return do_fault(vmf); 400165500d23SHugh Dickins } 40027267ec00SKirill A. Shutemov 40032994302bSJan Kara if (!pte_present(vmf->orig_pte)) 40042994302bSJan Kara return do_swap_page(vmf); 40051da177e4SLinus Torvalds 40062994302bSJan Kara if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) 40072994302bSJan Kara return do_numa_page(vmf); 4008d10e63f2SMel Gorman 400982b0f8c3SJan Kara vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); 401082b0f8c3SJan Kara spin_lock(vmf->ptl); 40112994302bSJan Kara entry = vmf->orig_pte; 401282b0f8c3SJan Kara if (unlikely(!pte_same(*vmf->pte, entry))) 40138f4e2101SHugh Dickins goto unlock; 401482b0f8c3SJan Kara if (vmf->flags & FAULT_FLAG_WRITE) { 4015f6f37321SLinus Torvalds if (!pte_write(entry)) 40162994302bSJan Kara return do_wp_page(vmf); 40171da177e4SLinus Torvalds entry = pte_mkdirty(entry); 40181da177e4SLinus Torvalds } 40191da177e4SLinus Torvalds entry = pte_mkyoung(entry); 402082b0f8c3SJan Kara if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, 402182b0f8c3SJan Kara vmf->flags & FAULT_FLAG_WRITE)) { 402282b0f8c3SJan Kara update_mmu_cache(vmf->vma, vmf->address, vmf->pte); 40231a44e149SAndrea Arcangeli } else { 40241a44e149SAndrea Arcangeli /* 40251a44e149SAndrea Arcangeli * This is needed only for protection faults but the arch code 40261a44e149SAndrea Arcangeli * is not yet telling us if this is a protection fault or not. 40271a44e149SAndrea Arcangeli * This still avoids useless tlb flushes for .text page faults 40281a44e149SAndrea Arcangeli * with threads. 
40291a44e149SAndrea Arcangeli */ 403082b0f8c3SJan Kara if (vmf->flags & FAULT_FLAG_WRITE) 403182b0f8c3SJan Kara flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); 40321a44e149SAndrea Arcangeli } 40338f4e2101SHugh Dickins unlock: 403482b0f8c3SJan Kara pte_unmap_unlock(vmf->pte, vmf->ptl); 403583c54070SNick Piggin return 0; 40361da177e4SLinus Torvalds } 40371da177e4SLinus Torvalds 40381da177e4SLinus Torvalds /* 40391da177e4SLinus Torvalds * By the time we get here, we already hold the mm semaphore 40409a95f3cfSPaul Cassella * 40419a95f3cfSPaul Cassella * The mmap_sem may have been released depending on flags and our 40429a95f3cfSPaul Cassella * return value. See filemap_fault() and __lock_page_or_retry(). 40431da177e4SLinus Torvalds */ 4044dcddffd4SKirill A. Shutemov static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, 4045dcddffd4SKirill A. Shutemov unsigned int flags) 40461da177e4SLinus Torvalds { 404782b0f8c3SJan Kara struct vm_fault vmf = { 4048bae473a4SKirill A. Shutemov .vma = vma, 40491a29d85eSJan Kara .address = address & PAGE_MASK, 4050bae473a4SKirill A. Shutemov .flags = flags, 40510721ec8bSJan Kara .pgoff = linear_page_index(vma, address), 4052667240e0SJan Kara .gfp_mask = __get_fault_gfp_mask(vma), 4053bae473a4SKirill A. Shutemov }; 4054fde26bedSAnshuman Khandual unsigned int dirty = flags & FAULT_FLAG_WRITE; 4055dcddffd4SKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 40561da177e4SLinus Torvalds pgd_t *pgd; 4057c2febafcSKirill A. Shutemov p4d_t *p4d; 4058a2d58167SDave Jiang int ret; 40591da177e4SLinus Torvalds 40601da177e4SLinus Torvalds pgd = pgd_offset(mm, address); 4061c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, address); 4062c2febafcSKirill A. Shutemov if (!p4d) 4063c2febafcSKirill A. Shutemov return VM_FAULT_OOM; 4064a00cc7d9SMatthew Wilcox 4065c2febafcSKirill A. Shutemov vmf.pud = pud_alloc(mm, p4d, address); 4066a00cc7d9SMatthew Wilcox if (!vmf.pud) 4067c74df32cSHugh Dickins return VM_FAULT_OOM; 4068a00cc7d9SMatthew Wilcox if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { 4069a00cc7d9SMatthew Wilcox ret = create_huge_pud(&vmf); 4070a00cc7d9SMatthew Wilcox if (!(ret & VM_FAULT_FALLBACK)) 4071a00cc7d9SMatthew Wilcox return ret; 4072a00cc7d9SMatthew Wilcox } else { 4073a00cc7d9SMatthew Wilcox pud_t orig_pud = *vmf.pud; 4074a00cc7d9SMatthew Wilcox 4075a00cc7d9SMatthew Wilcox barrier(); 4076a00cc7d9SMatthew Wilcox if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { 4077a00cc7d9SMatthew Wilcox 4078a00cc7d9SMatthew Wilcox /* NUMA case for anonymous PUDs would go here */ 4079a00cc7d9SMatthew Wilcox 4080f6f37321SLinus Torvalds if (dirty && !pud_write(orig_pud)) { 4081a00cc7d9SMatthew Wilcox ret = wp_huge_pud(&vmf, orig_pud); 4082a00cc7d9SMatthew Wilcox if (!(ret & VM_FAULT_FALLBACK)) 4083a00cc7d9SMatthew Wilcox return ret; 4084a00cc7d9SMatthew Wilcox } else { 4085a00cc7d9SMatthew Wilcox huge_pud_set_accessed(&vmf, orig_pud); 4086a00cc7d9SMatthew Wilcox return 0; 4087a00cc7d9SMatthew Wilcox } 4088a00cc7d9SMatthew Wilcox } 4089a00cc7d9SMatthew Wilcox } 4090a00cc7d9SMatthew Wilcox 4091a00cc7d9SMatthew Wilcox vmf.pmd = pmd_alloc(mm, vmf.pud, address); 409282b0f8c3SJan Kara if (!vmf.pmd) 4093c74df32cSHugh Dickins return VM_FAULT_OOM; 409482b0f8c3SJan Kara if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { 4095a2d58167SDave Jiang ret = create_huge_pmd(&vmf); 4096c0292554SKirill A. Shutemov if (!(ret & VM_FAULT_FALLBACK)) 4097c0292554SKirill A. 
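/*
 * Level-by-level descent performed by __handle_mm_fault() above, shown as an
 * illustrative recap:
 *
 *   pgd_offset() -> p4d_alloc() -> pud_alloc() -> pmd_alloc() -> pte level
 *                                      |              |
 *                                      |              +-- create/wp huge pmd,
 *                                      |                  pmd migration wait
 *                                      +-- create/wp huge pud
 *
 * Only when every level resolves to a regular (non-huge, non-migration)
 * entry does the fault reach handle_pte_fault().
 */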
Shutemov return ret; 409871e3aac0SAndrea Arcangeli } else { 409982b0f8c3SJan Kara pmd_t orig_pmd = *vmf.pmd; 41001f1d06c3SDavid Rientjes 410171e3aac0SAndrea Arcangeli barrier(); 410284c3fc4eSZi Yan if (unlikely(is_swap_pmd(orig_pmd))) { 410384c3fc4eSZi Yan VM_BUG_ON(thp_migration_supported() && 410484c3fc4eSZi Yan !is_pmd_migration_entry(orig_pmd)); 410584c3fc4eSZi Yan if (is_pmd_migration_entry(orig_pmd)) 410684c3fc4eSZi Yan pmd_migration_entry_wait(mm, vmf.pmd); 410784c3fc4eSZi Yan return 0; 410884c3fc4eSZi Yan } 41095c7fb56eSDan Williams if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { 411038e08854SLorenzo Stoakes if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) 411182b0f8c3SJan Kara return do_huge_pmd_numa_page(&vmf, orig_pmd); 4112d10e63f2SMel Gorman 4113f6f37321SLinus Torvalds if (dirty && !pmd_write(orig_pmd)) { 411482b0f8c3SJan Kara ret = wp_huge_pmd(&vmf, orig_pmd); 41159845cbbdSKirill A. Shutemov if (!(ret & VM_FAULT_FALLBACK)) 41161f1d06c3SDavid Rientjes return ret; 4117a1dd450bSWill Deacon } else { 411882b0f8c3SJan Kara huge_pmd_set_accessed(&vmf, orig_pmd); 411971e3aac0SAndrea Arcangeli return 0; 412071e3aac0SAndrea Arcangeli } 412171e3aac0SAndrea Arcangeli } 41229845cbbdSKirill A. Shutemov } 412371e3aac0SAndrea Arcangeli 412482b0f8c3SJan Kara return handle_pte_fault(&vmf); 41251da177e4SLinus Torvalds } 41261da177e4SLinus Torvalds 41279a95f3cfSPaul Cassella /* 41289a95f3cfSPaul Cassella * By the time we get here, we already hold the mm semaphore 41299a95f3cfSPaul Cassella * 41309a95f3cfSPaul Cassella * The mmap_sem may have been released depending on flags and our 41319a95f3cfSPaul Cassella * return value. See filemap_fault() and __lock_page_or_retry(). 41329a95f3cfSPaul Cassella */ 4133dcddffd4SKirill A. Shutemov int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, 4134dcddffd4SKirill A. Shutemov unsigned int flags) 4135519e5247SJohannes Weiner { 4136519e5247SJohannes Weiner int ret; 4137519e5247SJohannes Weiner 4138519e5247SJohannes Weiner __set_current_state(TASK_RUNNING); 4139519e5247SJohannes Weiner 4140519e5247SJohannes Weiner count_vm_event(PGFAULT); 41412262185cSRoman Gushchin count_memcg_event_mm(vma->vm_mm, PGFAULT); 4142519e5247SJohannes Weiner 4143519e5247SJohannes Weiner /* do counter updates before entering really critical section. */ 4144519e5247SJohannes Weiner check_sync_rss_stat(current); 4145519e5247SJohannes Weiner 4146de0c799bSLaurent Dufour if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, 4147de0c799bSLaurent Dufour flags & FAULT_FLAG_INSTRUCTION, 4148de0c799bSLaurent Dufour flags & FAULT_FLAG_REMOTE)) 4149de0c799bSLaurent Dufour return VM_FAULT_SIGSEGV; 4150de0c799bSLaurent Dufour 4151519e5247SJohannes Weiner /* 4152519e5247SJohannes Weiner * Enable the memcg OOM handling for faults triggered in user 4153519e5247SJohannes Weiner * space. Kernel faults are handled more gracefully. 4154519e5247SJohannes Weiner */ 4155519e5247SJohannes Weiner if (flags & FAULT_FLAG_USER) 415649426420SJohannes Weiner mem_cgroup_oom_enable(); 4157519e5247SJohannes Weiner 4158bae473a4SKirill A. Shutemov if (unlikely(is_vm_hugetlb_page(vma))) 4159bae473a4SKirill A. Shutemov ret = hugetlb_fault(vma->vm_mm, vma, address, flags); 4160bae473a4SKirill A. Shutemov else 4161dcddffd4SKirill A. 
Shutemov ret = __handle_mm_fault(vma, address, flags); 4162519e5247SJohannes Weiner 416349426420SJohannes Weiner if (flags & FAULT_FLAG_USER) { 416449426420SJohannes Weiner mem_cgroup_oom_disable(); 416549426420SJohannes Weiner /* 416649426420SJohannes Weiner * The task may have entered a memcg OOM situation but 416749426420SJohannes Weiner * if the allocation error was handled gracefully (no 416849426420SJohannes Weiner * VM_FAULT_OOM), there is no need to kill anything. 416949426420SJohannes Weiner * Just clean up the OOM state peacefully. 417049426420SJohannes Weiner */ 417149426420SJohannes Weiner if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) 417249426420SJohannes Weiner mem_cgroup_oom_synchronize(false); 417349426420SJohannes Weiner } 41743812c8c8SJohannes Weiner 4175519e5247SJohannes Weiner return ret; 4176519e5247SJohannes Weiner } 4177e1d6d01aSJesse Barnes EXPORT_SYMBOL_GPL(handle_mm_fault); 4178519e5247SJohannes Weiner 417990eceff1SKirill A. Shutemov #ifndef __PAGETABLE_P4D_FOLDED 418090eceff1SKirill A. Shutemov /* 418190eceff1SKirill A. Shutemov * Allocate p4d page table. 418290eceff1SKirill A. Shutemov * We've already handled the fast-path in-line. 418390eceff1SKirill A. Shutemov */ 418490eceff1SKirill A. Shutemov int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 418590eceff1SKirill A. Shutemov { 418690eceff1SKirill A. Shutemov p4d_t *new = p4d_alloc_one(mm, address); 418790eceff1SKirill A. Shutemov if (!new) 418890eceff1SKirill A. Shutemov return -ENOMEM; 418990eceff1SKirill A. Shutemov 419090eceff1SKirill A. Shutemov smp_wmb(); /* See comment in __pte_alloc */ 419190eceff1SKirill A. Shutemov 419290eceff1SKirill A. Shutemov spin_lock(&mm->page_table_lock); 419390eceff1SKirill A. Shutemov if (pgd_present(*pgd)) /* Another has populated it */ 419490eceff1SKirill A. Shutemov p4d_free(mm, new); 419590eceff1SKirill A. Shutemov else 419690eceff1SKirill A. Shutemov pgd_populate(mm, pgd, new); 419790eceff1SKirill A. Shutemov spin_unlock(&mm->page_table_lock); 419890eceff1SKirill A. Shutemov return 0; 419990eceff1SKirill A. Shutemov } 420090eceff1SKirill A. Shutemov #endif /* __PAGETABLE_P4D_FOLDED */ 420190eceff1SKirill A. Shutemov 42021da177e4SLinus Torvalds #ifndef __PAGETABLE_PUD_FOLDED 42031da177e4SLinus Torvalds /* 42041da177e4SLinus Torvalds * Allocate page upper directory. 4205872fec16SHugh Dickins * We've already handled the fast-path in-line. 42061da177e4SLinus Torvalds */ 4207c2febafcSKirill A. Shutemov int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) 42081da177e4SLinus Torvalds { 4209c74df32cSHugh Dickins pud_t *new = pud_alloc_one(mm, address); 4210c74df32cSHugh Dickins if (!new) 42111bb3630eSHugh Dickins return -ENOMEM; 42121da177e4SLinus Torvalds 4213362a61adSNick Piggin smp_wmb(); /* See comment in __pte_alloc */ 4214362a61adSNick Piggin 4215872fec16SHugh Dickins spin_lock(&mm->page_table_lock); 4216c2febafcSKirill A. Shutemov #ifndef __ARCH_HAS_5LEVEL_HACK 4217b4e98d9aSKirill A. Shutemov if (!p4d_present(*p4d)) { 4218b4e98d9aSKirill A. Shutemov mm_inc_nr_puds(mm); 4219c2febafcSKirill A. Shutemov p4d_populate(mm, p4d, new); 4220b4e98d9aSKirill A. Shutemov } else /* Another has populated it */ 4221c2febafcSKirill A. Shutemov pud_free(mm, new); 4222b4e98d9aSKirill A. Shutemov #else 4223b4e98d9aSKirill A. Shutemov if (!pgd_present(*p4d)) { 4224b4e98d9aSKirill A. Shutemov mm_inc_nr_puds(mm); 4225c2febafcSKirill A. Shutemov pgd_populate(mm, p4d, new); 4226b4e98d9aSKirill A. 
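/*
 * Sketch (illustrative only and deliberately simplified: no stack expansion,
 * no retry loop, no signal handling) of how an architecture fault handler
 * typically drives handle_mm_fault() above.  When FAULT_FLAG_ALLOW_RETRY is
 * set and VM_FAULT_RETRY comes back, the fault path has already dropped
 * mmap_sem for us.
 */
static int example_arch_fault_path(struct mm_struct *mm, unsigned long address,
				   unsigned int flags)
{
	struct vm_area_struct *vma;
	int fault;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);
		return VM_FAULT_SIGSEGV;
	}
	fault = handle_mm_fault(vma, address, flags);
	if (!(fault & VM_FAULT_RETRY))
		up_read(&mm->mmap_sem);
	return fault;
}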
Shutemov } else /* Another has populated it */ 4227b4e98d9aSKirill A. Shutemov pud_free(mm, new); 4228c2febafcSKirill A. Shutemov #endif /* __ARCH_HAS_5LEVEL_HACK */ 4229872fec16SHugh Dickins spin_unlock(&mm->page_table_lock); 42301bb3630eSHugh Dickins return 0; 42311da177e4SLinus Torvalds } 42321da177e4SLinus Torvalds #endif /* __PAGETABLE_PUD_FOLDED */ 42331da177e4SLinus Torvalds 42341da177e4SLinus Torvalds #ifndef __PAGETABLE_PMD_FOLDED 42351da177e4SLinus Torvalds /* 42361da177e4SLinus Torvalds * Allocate page middle directory. 4237872fec16SHugh Dickins * We've already handled the fast-path in-line. 42381da177e4SLinus Torvalds */ 42391bb3630eSHugh Dickins int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 42401da177e4SLinus Torvalds { 4241a00cc7d9SMatthew Wilcox spinlock_t *ptl; 4242c74df32cSHugh Dickins pmd_t *new = pmd_alloc_one(mm, address); 4243c74df32cSHugh Dickins if (!new) 42441bb3630eSHugh Dickins return -ENOMEM; 42451da177e4SLinus Torvalds 4246362a61adSNick Piggin smp_wmb(); /* See comment in __pte_alloc */ 4247362a61adSNick Piggin 4248a00cc7d9SMatthew Wilcox ptl = pud_lock(mm, pud); 42491da177e4SLinus Torvalds #ifndef __ARCH_HAS_4LEVEL_HACK 4250dc6c9a35SKirill A. Shutemov if (!pud_present(*pud)) { 4251dc6c9a35SKirill A. Shutemov mm_inc_nr_pmds(mm); 42521da177e4SLinus Torvalds pud_populate(mm, pud, new); 4253dc6c9a35SKirill A. Shutemov } else /* Another has populated it */ 42545e541973SBenjamin Herrenschmidt pmd_free(mm, new); 4255dc6c9a35SKirill A. Shutemov #else 4256dc6c9a35SKirill A. Shutemov if (!pgd_present(*pud)) { 4257dc6c9a35SKirill A. Shutemov mm_inc_nr_pmds(mm); 42581da177e4SLinus Torvalds pgd_populate(mm, pud, new); 4259dc6c9a35SKirill A. Shutemov } else /* Another has populated it */ 4260dc6c9a35SKirill A. Shutemov pmd_free(mm, new); 42611da177e4SLinus Torvalds #endif /* __ARCH_HAS_4LEVEL_HACK */ 4262a00cc7d9SMatthew Wilcox spin_unlock(ptl); 42631bb3630eSHugh Dickins return 0; 42641da177e4SLinus Torvalds } 42651da177e4SLinus Torvalds #endif /* __PAGETABLE_PMD_FOLDED */ 42661da177e4SLinus Torvalds 426709796395SRoss Zwisler static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, 4268a4d1a885SJérôme Glisse unsigned long *start, unsigned long *end, 426909796395SRoss Zwisler pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) 4270f8ad0f49SJohannes Weiner { 4271f8ad0f49SJohannes Weiner pgd_t *pgd; 4272c2febafcSKirill A. Shutemov p4d_t *p4d; 4273f8ad0f49SJohannes Weiner pud_t *pud; 4274f8ad0f49SJohannes Weiner pmd_t *pmd; 4275f8ad0f49SJohannes Weiner pte_t *ptep; 4276f8ad0f49SJohannes Weiner 4277f8ad0f49SJohannes Weiner pgd = pgd_offset(mm, address); 4278f8ad0f49SJohannes Weiner if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 4279f8ad0f49SJohannes Weiner goto out; 4280f8ad0f49SJohannes Weiner 4281c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, address); 4282c2febafcSKirill A. Shutemov if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) 4283c2febafcSKirill A. Shutemov goto out; 4284c2febafcSKirill A. Shutemov 4285c2febafcSKirill A. 
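/*
 * The pattern shared by __p4d_alloc(), __pud_alloc() and __pmd_alloc() above,
 * reduced to pseudocode (illustrative only):
 *
 *   new = <level>_alloc_one(mm, address);       allocate outside the lock
 *   smp_wmb();                                  order table init before link
 *   take the page table lock;
 *   if (upper entry already present)            another thread won the race
 *           <level>_free(mm, new);
 *   else
 *           populate the upper entry with new;  publish the new table
 *   release the page table lock;
 */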
Shutemov pud = pud_offset(p4d, address); 4286f8ad0f49SJohannes Weiner if (pud_none(*pud) || unlikely(pud_bad(*pud))) 4287f8ad0f49SJohannes Weiner goto out; 4288f8ad0f49SJohannes Weiner 4289f8ad0f49SJohannes Weiner pmd = pmd_offset(pud, address); 4290f66055abSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*pmd)); 429109796395SRoss Zwisler 429209796395SRoss Zwisler if (pmd_huge(*pmd)) { 429309796395SRoss Zwisler if (!pmdpp) 4294f8ad0f49SJohannes Weiner goto out; 4295f8ad0f49SJohannes Weiner 4296a4d1a885SJérôme Glisse if (start && end) { 4297a4d1a885SJérôme Glisse *start = address & PMD_MASK; 4298a4d1a885SJérôme Glisse *end = *start + PMD_SIZE; 4299a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_start(mm, *start, *end); 4300a4d1a885SJérôme Glisse } 430109796395SRoss Zwisler *ptlp = pmd_lock(mm, pmd); 430209796395SRoss Zwisler if (pmd_huge(*pmd)) { 430309796395SRoss Zwisler *pmdpp = pmd; 430409796395SRoss Zwisler return 0; 430509796395SRoss Zwisler } 430609796395SRoss Zwisler spin_unlock(*ptlp); 4307a4d1a885SJérôme Glisse if (start && end) 4308a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_end(mm, *start, *end); 430909796395SRoss Zwisler } 431009796395SRoss Zwisler 431109796395SRoss Zwisler if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 4312f8ad0f49SJohannes Weiner goto out; 4313f8ad0f49SJohannes Weiner 4314a4d1a885SJérôme Glisse if (start && end) { 4315a4d1a885SJérôme Glisse *start = address & PAGE_MASK; 4316a4d1a885SJérôme Glisse *end = *start + PAGE_SIZE; 4317a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_start(mm, *start, *end); 4318a4d1a885SJérôme Glisse } 4319f8ad0f49SJohannes Weiner ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 4320f8ad0f49SJohannes Weiner if (!pte_present(*ptep)) 4321f8ad0f49SJohannes Weiner goto unlock; 4322f8ad0f49SJohannes Weiner *ptepp = ptep; 4323f8ad0f49SJohannes Weiner return 0; 4324f8ad0f49SJohannes Weiner unlock: 4325f8ad0f49SJohannes Weiner pte_unmap_unlock(ptep, *ptlp); 4326a4d1a885SJérôme Glisse if (start && end) 4327a4d1a885SJérôme Glisse mmu_notifier_invalidate_range_end(mm, *start, *end); 4328f8ad0f49SJohannes Weiner out: 4329f8ad0f49SJohannes Weiner return -EINVAL; 4330f8ad0f49SJohannes Weiner } 4331f8ad0f49SJohannes Weiner 4332f729c8c9SRoss Zwisler static inline int follow_pte(struct mm_struct *mm, unsigned long address, 4333f729c8c9SRoss Zwisler pte_t **ptepp, spinlock_t **ptlp) 43341b36ba81SNamhyung Kim { 43351b36ba81SNamhyung Kim int res; 43361b36ba81SNamhyung Kim 43371b36ba81SNamhyung Kim /* (void) is needed to make gcc happy */ 43381b36ba81SNamhyung Kim (void) __cond_lock(*ptlp, 4339a4d1a885SJérôme Glisse !(res = __follow_pte_pmd(mm, address, NULL, NULL, 4340a4d1a885SJérôme Glisse ptepp, NULL, ptlp))); 43411b36ba81SNamhyung Kim return res; 43421b36ba81SNamhyung Kim } 43431b36ba81SNamhyung Kim 434409796395SRoss Zwisler int follow_pte_pmd(struct mm_struct *mm, unsigned long address, 4345a4d1a885SJérôme Glisse unsigned long *start, unsigned long *end, 434609796395SRoss Zwisler pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) 434709796395SRoss Zwisler { 434809796395SRoss Zwisler int res; 434909796395SRoss Zwisler 435009796395SRoss Zwisler /* (void) is needed to make gcc happy */ 435109796395SRoss Zwisler (void) __cond_lock(*ptlp, 4352a4d1a885SJérôme Glisse !(res = __follow_pte_pmd(mm, address, start, end, 4353a4d1a885SJérôme Glisse ptepp, pmdpp, ptlp))); 435409796395SRoss Zwisler return res; 435509796395SRoss Zwisler } 435609796395SRoss Zwisler EXPORT_SYMBOL(follow_pte_pmd); 435709796395SRoss Zwisler 43583b6748e2SJohannes Weiner /** 
43593b6748e2SJohannes Weiner * follow_pfn - look up PFN at a user virtual address 43603b6748e2SJohannes Weiner * @vma: memory mapping 43613b6748e2SJohannes Weiner * @address: user virtual address 43623b6748e2SJohannes Weiner * @pfn: location to store found PFN 43633b6748e2SJohannes Weiner * 43643b6748e2SJohannes Weiner * Only IO mappings and raw PFN mappings are allowed. 43653b6748e2SJohannes Weiner * 43663b6748e2SJohannes Weiner * Returns zero and the pfn at @pfn on success, -ve otherwise. 43673b6748e2SJohannes Weiner */ 43683b6748e2SJohannes Weiner int follow_pfn(struct vm_area_struct *vma, unsigned long address, 43693b6748e2SJohannes Weiner unsigned long *pfn) 43703b6748e2SJohannes Weiner { 43713b6748e2SJohannes Weiner int ret = -EINVAL; 43723b6748e2SJohannes Weiner spinlock_t *ptl; 43733b6748e2SJohannes Weiner pte_t *ptep; 43743b6748e2SJohannes Weiner 43753b6748e2SJohannes Weiner if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 43763b6748e2SJohannes Weiner return ret; 43773b6748e2SJohannes Weiner 43783b6748e2SJohannes Weiner ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); 43793b6748e2SJohannes Weiner if (ret) 43803b6748e2SJohannes Weiner return ret; 43813b6748e2SJohannes Weiner *pfn = pte_pfn(*ptep); 43823b6748e2SJohannes Weiner pte_unmap_unlock(ptep, ptl); 43833b6748e2SJohannes Weiner return 0; 43843b6748e2SJohannes Weiner } 43853b6748e2SJohannes Weiner EXPORT_SYMBOL(follow_pfn); 43863b6748e2SJohannes Weiner 438728b2ee20SRik van Riel #ifdef CONFIG_HAVE_IOREMAP_PROT 4388d87fe660Svenkatesh.pallipadi@intel.com int follow_phys(struct vm_area_struct *vma, 438928b2ee20SRik van Riel unsigned long address, unsigned int flags, 4390d87fe660Svenkatesh.pallipadi@intel.com unsigned long *prot, resource_size_t *phys) 439128b2ee20SRik van Riel { 439203668a4dSJohannes Weiner int ret = -EINVAL; 439328b2ee20SRik van Riel pte_t *ptep, pte; 439428b2ee20SRik van Riel spinlock_t *ptl; 439528b2ee20SRik van Riel 4396d87fe660Svenkatesh.pallipadi@intel.com if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 4397d87fe660Svenkatesh.pallipadi@intel.com goto out; 439828b2ee20SRik van Riel 439903668a4dSJohannes Weiner if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) 4400d87fe660Svenkatesh.pallipadi@intel.com goto out; 440128b2ee20SRik van Riel pte = *ptep; 440203668a4dSJohannes Weiner 4403f6f37321SLinus Torvalds if ((flags & FOLL_WRITE) && !pte_write(pte)) 440428b2ee20SRik van Riel goto unlock; 440528b2ee20SRik van Riel 440628b2ee20SRik van Riel *prot = pgprot_val(pte_pgprot(pte)); 440703668a4dSJohannes Weiner *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 440828b2ee20SRik van Riel 440903668a4dSJohannes Weiner ret = 0; 441028b2ee20SRik van Riel unlock: 441128b2ee20SRik van Riel pte_unmap_unlock(ptep, ptl); 441228b2ee20SRik van Riel out: 4413d87fe660Svenkatesh.pallipadi@intel.com return ret; 441428b2ee20SRik van Riel } 441528b2ee20SRik van Riel 441628b2ee20SRik van Riel int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 441728b2ee20SRik van Riel void *buf, int len, int write) 441828b2ee20SRik van Riel { 441928b2ee20SRik van Riel resource_size_t phys_addr; 442028b2ee20SRik van Riel unsigned long prot = 0; 44212bc7273bSKOSAKI Motohiro void __iomem *maddr; 442228b2ee20SRik van Riel int offset = addr & (PAGE_SIZE-1); 442328b2ee20SRik van Riel 4424d87fe660Svenkatesh.pallipadi@intel.com if (follow_phys(vma, addr, write, &prot, &phys_addr)) 442528b2ee20SRik van Riel return -EINVAL; 442628b2ee20SRik van Riel 44279cb12d7bSGrazvydas Ignotas maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 
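	/*
	 * Descriptive note (added, not from the original source): follow_phys()
	 * has resolved the physical address and the PTE's protection bits, and
	 * maddr (if non-NULL) now points at a kernel virtual mapping of that
	 * physical range, so VM_IO / VM_PFNMAP memory can be copied below even
	 * though it has no struct page backing.
	 */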
442824eee1e4Sjie@chenjie6@huwei.com if (!maddr) 442924eee1e4Sjie@chenjie6@huwei.com return -ENOMEM; 443024eee1e4Sjie@chenjie6@huwei.com 443128b2ee20SRik van Riel if (write) 443228b2ee20SRik van Riel memcpy_toio(maddr + offset, buf, len); 443328b2ee20SRik van Riel else 443428b2ee20SRik van Riel memcpy_fromio(buf, maddr + offset, len); 443528b2ee20SRik van Riel iounmap(maddr); 443628b2ee20SRik van Riel 443728b2ee20SRik van Riel return len; 443828b2ee20SRik van Riel } 44395a73633eSUwe Kleine-König EXPORT_SYMBOL_GPL(generic_access_phys); 444028b2ee20SRik van Riel #endif 444128b2ee20SRik van Riel 44420ec76a11SDavid Howells /* 4443206cb636SStephen Wilson * Access another process' address space as given in mm. If non-NULL, use the 4444206cb636SStephen Wilson * given task for page fault accounting. 44450ec76a11SDavid Howells */ 444684d77d3fSEric W. Biederman int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, 4447442486ecSLorenzo Stoakes unsigned long addr, void *buf, int len, unsigned int gup_flags) 44480ec76a11SDavid Howells { 44490ec76a11SDavid Howells struct vm_area_struct *vma; 44500ec76a11SDavid Howells void *old_buf = buf; 4451442486ecSLorenzo Stoakes int write = gup_flags & FOLL_WRITE; 44520ec76a11SDavid Howells 44530ec76a11SDavid Howells down_read(&mm->mmap_sem); 4454183ff22bSSimon Arlott /* ignore errors, just check how much was successfully transferred */ 44550ec76a11SDavid Howells while (len) { 44560ec76a11SDavid Howells int bytes, ret, offset; 44570ec76a11SDavid Howells void *maddr; 445828b2ee20SRik van Riel struct page *page = NULL; 44590ec76a11SDavid Howells 44601e987790SDave Hansen ret = get_user_pages_remote(tsk, mm, addr, 1, 44615b56d49fSLorenzo Stoakes gup_flags, &page, &vma, NULL); 446228b2ee20SRik van Riel if (ret <= 0) { 4463dbffcd03SRik van Riel #ifndef CONFIG_HAVE_IOREMAP_PROT 4464dbffcd03SRik van Riel break; 4465dbffcd03SRik van Riel #else 446628b2ee20SRik van Riel /* 446728b2ee20SRik van Riel * Check if this is a VM_IO | VM_PFNMAP VMA, which 446828b2ee20SRik van Riel * we can access using slightly different code. 446928b2ee20SRik van Riel */ 447028b2ee20SRik van Riel vma = find_vma(mm, addr); 4471fe936dfcSMichael Ellerman if (!vma || vma->vm_start > addr) 44720ec76a11SDavid Howells break; 447328b2ee20SRik van Riel if (vma->vm_ops && vma->vm_ops->access) 447428b2ee20SRik van Riel ret = vma->vm_ops->access(vma, addr, buf, 447528b2ee20SRik van Riel len, write); 447628b2ee20SRik van Riel if (ret <= 0) 447728b2ee20SRik van Riel break; 447828b2ee20SRik van Riel bytes = ret; 4479dbffcd03SRik van Riel #endif 448028b2ee20SRik van Riel } else { 44810ec76a11SDavid Howells bytes = len; 44820ec76a11SDavid Howells offset = addr & (PAGE_SIZE-1); 44830ec76a11SDavid Howells if (bytes > PAGE_SIZE-offset) 44840ec76a11SDavid Howells bytes = PAGE_SIZE-offset; 44850ec76a11SDavid Howells 44860ec76a11SDavid Howells maddr = kmap(page); 44870ec76a11SDavid Howells if (write) { 44880ec76a11SDavid Howells copy_to_user_page(vma, page, addr, 44890ec76a11SDavid Howells maddr + offset, buf, bytes); 44900ec76a11SDavid Howells set_page_dirty_lock(page); 44910ec76a11SDavid Howells } else { 44920ec76a11SDavid Howells copy_from_user_page(vma, page, addr, 44930ec76a11SDavid Howells buf, maddr + offset, bytes); 44940ec76a11SDavid Howells } 44950ec76a11SDavid Howells kunmap(page); 449609cbfeafSKirill A. 
Shutemov put_page(page); 449728b2ee20SRik van Riel } 44980ec76a11SDavid Howells len -= bytes; 44990ec76a11SDavid Howells buf += bytes; 45000ec76a11SDavid Howells addr += bytes; 45010ec76a11SDavid Howells } 45020ec76a11SDavid Howells up_read(&mm->mmap_sem); 45030ec76a11SDavid Howells 45040ec76a11SDavid Howells return buf - old_buf; 45050ec76a11SDavid Howells } 450603252919SAndi Kleen 45075ddd36b9SStephen Wilson /** 4508ae91dbfcSRandy Dunlap * access_remote_vm - access another process' address space 45095ddd36b9SStephen Wilson * @mm: the mm_struct of the target address space 45105ddd36b9SStephen Wilson * @addr: start address to access 45115ddd36b9SStephen Wilson * @buf: source or destination buffer 45125ddd36b9SStephen Wilson * @len: number of bytes to transfer 45136347e8d5SLorenzo Stoakes * @gup_flags: flags modifying lookup behaviour 45145ddd36b9SStephen Wilson * 45155ddd36b9SStephen Wilson * The caller must hold a reference on @mm. 45165ddd36b9SStephen Wilson */ 45175ddd36b9SStephen Wilson int access_remote_vm(struct mm_struct *mm, unsigned long addr, 45186347e8d5SLorenzo Stoakes void *buf, int len, unsigned int gup_flags) 45195ddd36b9SStephen Wilson { 45206347e8d5SLorenzo Stoakes return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags); 45215ddd36b9SStephen Wilson } 45225ddd36b9SStephen Wilson 452303252919SAndi Kleen /* 4524206cb636SStephen Wilson * Access another process' address space. 4525206cb636SStephen Wilson * Source/target buffer must be in kernel space. 4526206cb636SStephen Wilson * Do not walk the page table directly; use get_user_pages(). 4527206cb636SStephen Wilson */ 4528206cb636SStephen Wilson int access_process_vm(struct task_struct *tsk, unsigned long addr, 4529f307ab6dSLorenzo Stoakes void *buf, int len, unsigned int gup_flags) 4530206cb636SStephen Wilson { 4531206cb636SStephen Wilson struct mm_struct *mm; 4532206cb636SStephen Wilson int ret; 4533206cb636SStephen Wilson 4534206cb636SStephen Wilson mm = get_task_mm(tsk); 4535206cb636SStephen Wilson if (!mm) 4536206cb636SStephen Wilson return 0; 4537206cb636SStephen Wilson 4538f307ab6dSLorenzo Stoakes ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags); 4539442486ecSLorenzo Stoakes 4540206cb636SStephen Wilson mmput(mm); 4541206cb636SStephen Wilson 4542206cb636SStephen Wilson return ret; 4543206cb636SStephen Wilson } 4544fcd35857SCatalin Marinas EXPORT_SYMBOL_GPL(access_process_vm); 4545206cb636SStephen Wilson 454603252919SAndi Kleen /* 454703252919SAndi Kleen * Print the name of a VMA.
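 *
 * Illustrative usage (added note, not taken from this file): architecture
 * fault handlers typically call something like
 *	print_vma_addr(" in ", regs->ip);
 * when printing a signal message, so the report names the file-backed
 * mapping that contains the faulting instruction. The lookup is
 * best-effort: if mmap_sem cannot be taken without sleeping, nothing is
 * printed.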
454803252919SAndi Kleen */ 454903252919SAndi Kleen void print_vma_addr(char *prefix, unsigned long ip) 455003252919SAndi Kleen { 455103252919SAndi Kleen struct mm_struct *mm = current->mm; 455203252919SAndi Kleen struct vm_area_struct *vma; 455303252919SAndi Kleen 4554e8bff74aSIngo Molnar /* 45550a7f682dSMichal Hocko * We might be running from an atomic context, so we cannot sleep. 4556e8bff74aSIngo Molnar */ 45570a7f682dSMichal Hocko if (!down_read_trylock(&mm->mmap_sem)) 4558e8bff74aSIngo Molnar return; 4559e8bff74aSIngo Molnar 456003252919SAndi Kleen vma = find_vma(mm, ip); 456103252919SAndi Kleen if (vma && vma->vm_file) { 456203252919SAndi Kleen struct file *f = vma->vm_file; 45630a7f682dSMichal Hocko char *buf = (char *)__get_free_page(GFP_NOWAIT); 456403252919SAndi Kleen if (buf) { 45652fbc57c5SAndy Shevchenko char *p; 456603252919SAndi Kleen 45679bf39ab2SMiklos Szeredi p = file_path(f, buf, PAGE_SIZE); 456803252919SAndi Kleen if (IS_ERR(p)) 456903252919SAndi Kleen p = "?"; 45702fbc57c5SAndy Shevchenko printk("%s%s[%lx+%lx]", prefix, kbasename(p), 457103252919SAndi Kleen vma->vm_start, 457203252919SAndi Kleen vma->vm_end - vma->vm_start); 457303252919SAndi Kleen free_page((unsigned long)buf); 457403252919SAndi Kleen } 457503252919SAndi Kleen } 457651a07e50SJeff Liu up_read(&mm->mmap_sem); 457703252919SAndi Kleen } 45783ee1afa3SNick Piggin 4579662bbcb2SMichael S. Tsirkin #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) 45809ec23531SDavid Hildenbrand void __might_fault(const char *file, int line) 45813ee1afa3SNick Piggin { 458295156f00SPeter Zijlstra /* 458395156f00SPeter Zijlstra * Some code (nfs/sunrpc) uses socket ops on kernel memory while 458495156f00SPeter Zijlstra * holding the mmap_sem. This is safe because kernel memory doesn't 458595156f00SPeter Zijlstra * get paged out, so we'll never actually fault, and the annotations 458695156f00SPeter Zijlstra * below would only generate false positives. 458795156f00SPeter Zijlstra */ 4588db68ce10SAl Viro if (uaccess_kernel()) 458995156f00SPeter Zijlstra return; 45909ec23531SDavid Hildenbrand if (pagefault_disabled()) 4591662bbcb2SMichael S. Tsirkin return; 45929ec23531SDavid Hildenbrand __might_sleep(file, line, 0); 45939ec23531SDavid Hildenbrand #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) 4594662bbcb2SMichael S.
Tsirkin if (current->mm) 45953ee1afa3SNick Piggin might_lock_read(&current->mm->mmap_sem); 45969ec23531SDavid Hildenbrand #endif 45973ee1afa3SNick Piggin } 45989ec23531SDavid Hildenbrand EXPORT_SYMBOL(__might_fault); 45993ee1afa3SNick Piggin #endif 460047ad8475SAndrea Arcangeli 460147ad8475SAndrea Arcangeli #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 460247ad8475SAndrea Arcangeli static void clear_gigantic_page(struct page *page, 460347ad8475SAndrea Arcangeli unsigned long addr, 460447ad8475SAndrea Arcangeli unsigned int pages_per_huge_page) 460547ad8475SAndrea Arcangeli { 460647ad8475SAndrea Arcangeli int i; 460747ad8475SAndrea Arcangeli struct page *p = page; 460847ad8475SAndrea Arcangeli 460947ad8475SAndrea Arcangeli might_sleep(); 461047ad8475SAndrea Arcangeli for (i = 0; i < pages_per_huge_page; 461147ad8475SAndrea Arcangeli i++, p = mem_map_next(p, page, i)) { 461247ad8475SAndrea Arcangeli cond_resched(); 461347ad8475SAndrea Arcangeli clear_user_highpage(p, addr + i * PAGE_SIZE); 461447ad8475SAndrea Arcangeli } 461547ad8475SAndrea Arcangeli } 461647ad8475SAndrea Arcangeli void clear_huge_page(struct page *page, 4617c79b57e4SHuang Ying unsigned long addr_hint, unsigned int pages_per_huge_page) 461847ad8475SAndrea Arcangeli { 4619c79b57e4SHuang Ying int i, n, base, l; 4620c79b57e4SHuang Ying unsigned long addr = addr_hint & 4621c79b57e4SHuang Ying ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); 462247ad8475SAndrea Arcangeli 462347ad8475SAndrea Arcangeli if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { 462447ad8475SAndrea Arcangeli clear_gigantic_page(page, addr, pages_per_huge_page); 462547ad8475SAndrea Arcangeli return; 462647ad8475SAndrea Arcangeli } 462747ad8475SAndrea Arcangeli 4628c79b57e4SHuang Ying /* Clear the sub-page to be accessed last, to keep its cache lines hot */ 462947ad8475SAndrea Arcangeli might_sleep(); 4630c79b57e4SHuang Ying n = (addr_hint - addr) / PAGE_SIZE; 4631c79b57e4SHuang Ying if (2 * n <= pages_per_huge_page) { 4632c79b57e4SHuang Ying /* The sub-page to access is in the first half of the huge page */ 4633c79b57e4SHuang Ying base = 0; 4634c79b57e4SHuang Ying l = n; 4635c79b57e4SHuang Ying /* Clear sub-pages at the end of the huge page */ 4636c79b57e4SHuang Ying for (i = pages_per_huge_page - 1; i >= 2 * n; i--) { 463747ad8475SAndrea Arcangeli cond_resched(); 463847ad8475SAndrea Arcangeli clear_user_highpage(page + i, addr + i * PAGE_SIZE); 463947ad8475SAndrea Arcangeli } 4640c79b57e4SHuang Ying } else { 4641c79b57e4SHuang Ying /* The sub-page to access is in the second half of the huge page */ 4642c79b57e4SHuang Ying base = pages_per_huge_page - 2 * (pages_per_huge_page - n); 4643c79b57e4SHuang Ying l = pages_per_huge_page - n; 4644c79b57e4SHuang Ying /* Clear sub-pages at the beginning of the huge page */ 4645c79b57e4SHuang Ying for (i = 0; i < base; i++) { 4646c79b57e4SHuang Ying cond_resched(); 4647c79b57e4SHuang Ying clear_user_highpage(page + i, addr + i * PAGE_SIZE); 4648c79b57e4SHuang Ying } 4649c79b57e4SHuang Ying } 4650c79b57e4SHuang Ying /* 4651c79b57e4SHuang Ying * Clear the remaining sub-pages in a left-right-left-right pattern, 4652c79b57e4SHuang Ying * converging towards the sub-page to access. 4653c79b57e4SHuang Ying */ 4654c79b57e4SHuang Ying for (i = 0; i < l; i++) { 4655c79b57e4SHuang Ying int left_idx = base + i; 4656c79b57e4SHuang Ying int right_idx = base + 2 * l - 1 - i; 4657c79b57e4SHuang Ying 4658c79b57e4SHuang Ying cond_resched(); 4659c79b57e4SHuang Ying clear_user_highpage(page + left_idx, 4660c79b57e4SHuang Ying addr + left_idx * PAGE_SIZE);
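		/*
		 * Mirror step (descriptive note, added): now clear the matching
		 * sub-page on the right-hand side, so both ends converge on the
		 * sub-page that will actually be accessed; that sub-page is
		 * cleared last and keeps its cache lines hot.
		 */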
4661c79b57e4SHuang Ying cond_resched(); 4662c79b57e4SHuang Ying clear_user_highpage(page + right_idx, 4663c79b57e4SHuang Ying addr + right_idx * PAGE_SIZE); 4664c79b57e4SHuang Ying } 466547ad8475SAndrea Arcangeli } 466647ad8475SAndrea Arcangeli 466747ad8475SAndrea Arcangeli static void copy_user_gigantic_page(struct page *dst, struct page *src, 466847ad8475SAndrea Arcangeli unsigned long addr, 466947ad8475SAndrea Arcangeli struct vm_area_struct *vma, 467047ad8475SAndrea Arcangeli unsigned int pages_per_huge_page) 467147ad8475SAndrea Arcangeli { 467247ad8475SAndrea Arcangeli int i; 467347ad8475SAndrea Arcangeli struct page *dst_base = dst; 467447ad8475SAndrea Arcangeli struct page *src_base = src; 467547ad8475SAndrea Arcangeli 467647ad8475SAndrea Arcangeli for (i = 0; i < pages_per_huge_page; ) { 467747ad8475SAndrea Arcangeli cond_resched(); 467847ad8475SAndrea Arcangeli copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); 467947ad8475SAndrea Arcangeli 468047ad8475SAndrea Arcangeli i++; 468147ad8475SAndrea Arcangeli dst = mem_map_next(dst, dst_base, i); 468247ad8475SAndrea Arcangeli src = mem_map_next(src, src_base, i); 468347ad8475SAndrea Arcangeli } 468447ad8475SAndrea Arcangeli } 468547ad8475SAndrea Arcangeli 468647ad8475SAndrea Arcangeli void copy_user_huge_page(struct page *dst, struct page *src, 468747ad8475SAndrea Arcangeli unsigned long addr, struct vm_area_struct *vma, 468847ad8475SAndrea Arcangeli unsigned int pages_per_huge_page) 468947ad8475SAndrea Arcangeli { 469047ad8475SAndrea Arcangeli int i; 469147ad8475SAndrea Arcangeli 469247ad8475SAndrea Arcangeli if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { 469347ad8475SAndrea Arcangeli copy_user_gigantic_page(dst, src, addr, vma, 469447ad8475SAndrea Arcangeli pages_per_huge_page); 469547ad8475SAndrea Arcangeli return; 469647ad8475SAndrea Arcangeli } 469747ad8475SAndrea Arcangeli 469847ad8475SAndrea Arcangeli might_sleep(); 469947ad8475SAndrea Arcangeli for (i = 0; i < pages_per_huge_page; i++) { 470047ad8475SAndrea Arcangeli cond_resched(); 470147ad8475SAndrea Arcangeli copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); 470247ad8475SAndrea Arcangeli } 470347ad8475SAndrea Arcangeli } 4704fa4d75c1SMike Kravetz 4705fa4d75c1SMike Kravetz long copy_huge_page_from_user(struct page *dst_page, 4706fa4d75c1SMike Kravetz const void __user *usr_src, 4707810a56b9SMike Kravetz unsigned int pages_per_huge_page, 4708810a56b9SMike Kravetz bool allow_pagefault) 4709fa4d75c1SMike Kravetz { 4710fa4d75c1SMike Kravetz void *src = (void *)usr_src; 4711fa4d75c1SMike Kravetz void *page_kaddr; 4712fa4d75c1SMike Kravetz unsigned long i, rc = 0; 4713fa4d75c1SMike Kravetz unsigned long ret_val = pages_per_huge_page * PAGE_SIZE; 4714fa4d75c1SMike Kravetz 4715fa4d75c1SMike Kravetz for (i = 0; i < pages_per_huge_page; i++) { 4716810a56b9SMike Kravetz if (allow_pagefault) 4717810a56b9SMike Kravetz page_kaddr = kmap(dst_page + i); 4718810a56b9SMike Kravetz else 4719fa4d75c1SMike Kravetz page_kaddr = kmap_atomic(dst_page + i); 4720fa4d75c1SMike Kravetz rc = copy_from_user(page_kaddr, 4721fa4d75c1SMike Kravetz (const void __user *)(src + i * PAGE_SIZE), 4722fa4d75c1SMike Kravetz PAGE_SIZE); 4723810a56b9SMike Kravetz if (allow_pagefault) 4724810a56b9SMike Kravetz kunmap(dst_page + i); 4725810a56b9SMike Kravetz else 4726fa4d75c1SMike Kravetz kunmap_atomic(page_kaddr); 4727fa4d75c1SMike Kravetz 4728fa4d75c1SMike Kravetz ret_val -= (PAGE_SIZE - rc); 4729fa4d75c1SMike Kravetz if (rc) 4730fa4d75c1SMike Kravetz break; 4731fa4d75c1SMike Kravetz 
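		/*
		 * Descriptive note (added): the whole sub-page was copied
		 * cleanly, so give other tasks a chance to run before
		 * starting on the next one.
		 */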
4732fa4d75c1SMike Kravetz cond_resched(); 4733fa4d75c1SMike Kravetz } 4734fa4d75c1SMike Kravetz return ret_val; 4735fa4d75c1SMike Kravetz } 473647ad8475SAndrea Arcangeli #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 473749076ec2SKirill A. Shutemov 473840b64acdSOlof Johansson #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS 4739b35f1819SKirill A. Shutemov 4740b35f1819SKirill A. Shutemov static struct kmem_cache *page_ptl_cachep; 4741b35f1819SKirill A. Shutemov 4742b35f1819SKirill A. Shutemov void __init ptlock_cache_init(void) 4743b35f1819SKirill A. Shutemov { 4744b35f1819SKirill A. Shutemov page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, 4745b35f1819SKirill A. Shutemov SLAB_PANIC, NULL); 4746b35f1819SKirill A. Shutemov } 4747b35f1819SKirill A. Shutemov 4748539edb58SPeter Zijlstra bool ptlock_alloc(struct page *page) 474949076ec2SKirill A. Shutemov { 475049076ec2SKirill A. Shutemov spinlock_t *ptl; 475149076ec2SKirill A. Shutemov 4752b35f1819SKirill A. Shutemov ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); 475349076ec2SKirill A. Shutemov if (!ptl) 475449076ec2SKirill A. Shutemov return false; 4755539edb58SPeter Zijlstra page->ptl = ptl; 475649076ec2SKirill A. Shutemov return true; 475749076ec2SKirill A. Shutemov } 475849076ec2SKirill A. Shutemov 4759539edb58SPeter Zijlstra void ptlock_free(struct page *page) 476049076ec2SKirill A. Shutemov { 4761b35f1819SKirill A. Shutemov kmem_cache_free(page_ptl_cachep, page->ptl); 476249076ec2SKirill A. Shutemov } 476349076ec2SKirill A. Shutemov #endif 4764
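/*
 * Usage sketch (illustrative, not part of the original file): the split
 * PTE lock allocated by ptlock_alloc() above is normally reached through
 * the pte_offset_map_lock() helpers rather than touched directly, e.g.:
 *
 *	spinlock_t *ptl;
 *	pte_t *ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	... inspect or modify *ptep under the per-page lock ...
 *	pte_unmap_unlock(ptep, ptl);
 *
 * which is the same pattern __follow_pte_pmd() uses earlier in this file.
 */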