/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 *
 * (code doesn't rely on that order so it could be switched around)
 * ->tasklist_lock
 *   anon_vma->lock (memory_failure, collect_procs_anon)
 *     pte map lock
 */
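
/*
 * Editorial sketch, not kernel code: one path that takes several of the
 * locks above in the documented order is the file-backed cleaning walk
 * in page_mkclean() below.  Roughly (names as used in this file):
 *
 *	lock_page(page);				  PG_locked
 *	spin_lock(&mapping->i_mmap_lock);		  i_mmap_lock
 *	vma_prio_tree_foreach(vma, &iter, ...) {
 *		pte = page_check_address(..., &ptl, 1);	  pte lock
 *		...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 *	spin_unlock(&mapping->i_mmap_lock);
 *	unlock_page(page);
 */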

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
}

void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc();
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}

		spin_lock(&anon_vma->lock);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			avc->anon_vma = anon_vma;
			avc->vma = vma;
			list_add(&avc->same_vma, &vma->anon_vma_chain);
			list_add(&avc->same_anon_vma, &anon_vma->head);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		spin_unlock(&anon_vma->lock);

		if (unlikely(allocated))
			anon_vma_free(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
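
/*
 * Illustrative sketch, not part of this file: the typical caller is an
 * anonymous fault path such as do_anonymous_page() in mm/memory.c, which
 * prepares the anon_vma before installing a new page:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 */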

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);

	spin_lock(&anon_vma->lock);
	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
	spin_unlock(&anon_vma->lock);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		avc = anon_vma_chain_alloc();
		if (!avc)
			goto enomem_failure;
		anon_vma_chain_link(dst, avc, pavc->anon_vma);
	}
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc();
	if (!avc)
		goto out_error_free_anon_vma;
	anon_vma_chain_link(vma, avc, anon_vma);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;

	return 0;

 out_error_free_anon_vma:
	anon_vma_free(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
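
/*
 * Illustrative sketch, not part of this file: anon_vma_fork() is called
 * while duplicating a VMA at fork time (see dup_mmap() in kernel/fork.c),
 * roughly:
 *
 *	tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 *	*tmp = *mpnt;
 *	INIT_LIST_HEAD(&tmp->anon_vma_chain);
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 */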

static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
{
	struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
	int empty;

	/* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&anon_vma_chain->same_anon_vma);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;

	/* Unlink each anon_vma chained to the VMA. */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		anon_vma_unlink(avc);
		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	anonvma_external_refcount_init(anon_vma);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}
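
/*
 * Illustrative sketch, not part of this file: callers pair the two
 * functions above around a walk of the anon_vma's VMA list, as
 * page_referenced_anon() and try_to_unmap_anon() do below:
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (!anon_vma)
 *		return ret;
 *	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 *		...process avc->vma under anon_vma->lock...
 *	}
 *	page_unlock_anon_vma(anon_vma);
 */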

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped in @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	if (unlikely(is_vm_hugetlb_page(vma)))
		pgoff = page->index << huge_page_order(page_hstate(page));
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page))
		;
	else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
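
/*
 * Editorial worked example for vma_address(), with hypothetical numbers
 * and 4K pages: a page with ->index 100 in a vma whose vm_pgoff is 64
 * and whose vm_start is 0x08048000 maps at
 *
 *	0x08048000 + (100 - 64) * 4096 = 0x0806c000
 *
 * provided that address is still below vm_end; otherwise vma_address()
 * returns -EFAULT.
 */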

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(page))) {
		pte = huge_pte_offset(mm, address);
		ptl = &mm->page_table_lock;
		goto check;
	}

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
check:
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)		/* out of vma range */
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * Don't want to elevate referenced for mlocked page that gets this far,
	 * in order that it progresses to try_to_unmap and is moved to the
	 * unevictable list.
	 */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 1;	/* break early from loop */
		*vm_flags |= VM_LOCKED;
		goto out_unmap;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte)) {
		/*
		 * Don't treat a reference through a sequentially read
		 * mapping as such.  If the page has been used in
		 * another mapping, we will catch it; if this other
		 * mapping is already gone, the unmap path will have
		 * set PG_referenced or activated the page.
		 */
		if (likely(!VM_SequentialReadHint(vma)))
			referenced++;
	}

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

out_unmap:
	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);

	if (referenced)
		*vm_flags |= vma->vm_flags;
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 * @vm_flags: collect the vm_flags of the VMAs which actually referenced the page
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 * @vm_flags: collect the vm_flags of the VMAs which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *mem_cont,
		    unsigned long *vm_flags)
{
	int referenced = 0;
	int we_locked = 0;

	*vm_flags = 0;
	if (page_mapped(page) && page_rmapping(page)) {
		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
			we_locked = trylock_page(page);
			if (!we_locked) {
				referenced++;
				goto out;
			}
		}
		if (unlikely(PageKsm(page)))
			referenced += page_referenced_ksm(page, mem_cont,
								vm_flags);
		else if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont,
								vm_flags);
		else if (page->mapping)
			referenced += page_referenced_file(page, mem_cont,
								vm_flags);
		if (we_locked)
			unlock_page(page);
	}
out:
	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}
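
/*
 * Illustrative sketch, not part of this file: page reclaim is the main
 * consumer of page_referenced() (see page_check_references() and
 * shrink_page_list() in mm/vmscan.c), roughly:
 *
 *	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		...cull the page to the unevictable list...
 *	else if (referenced)
 *		...keep or activate the page...
 */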

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED) {
			unsigned long address = vma_address(page, vma);
			if (address == -EFAULT)
				continue;
			ret += page_mkclean_one(page, vma, address);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
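
/*
 * Illustrative sketch, not part of this file: page_mkclean() is used on
 * the writeback side, e.g. by clear_page_dirty_for_io() in
 * mm/page-writeback.c, to write-protect all ptes mapping a shared file
 * page so that a later write refaults and redirties it:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */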

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON(page->index != linear_page_index(vma, address));

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 *
	 * So take the last AVC chain entry in the vma, which is
	 * the deepest ancestor, and use the anon_vma from that.
	 */
	if (!exclusive) {
		struct anon_vma_chain *avc;
		avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
		anon_vma = avc->anon_vma;
	}

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__inc_zone_page_state(page, NR_ANON_PAGES);
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (first)
		__page_set_anon_rmap(page, vma, address, 0);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	__inc_zone_page_state(page, NR_ANON_PAGES);
	__page_set_anon_rmap(page, vma, address, 1);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_file_mapped(page, 1);
	}
}
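
/*
 * Illustrative sketch, not part of this file: the rmap add calls above
 * pair with pte installation under the pte lock, and page_remove_rmap()
 * below undoes them at unmap time.  A fault installing a file pte does,
 * roughly:
 *
 *	set_pte_at(mm, address, page_table, entry);
 *	page_add_file_rmap(page);
 *
 * while zap_pte_range() calls page_remove_rmap(page) after clearing
 * the pte.
 */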

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * Now that the last pte has gone, s390 must transfer dirty
	 * flag from storage key to struct page.  We can usually skip
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
	 */
	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
		page_clear_dirty(page);
		set_page_dirty(page);
	}
	/*
	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
	 * and not charged by memcg for now.
	 */
	if (unlikely(PageHuge(page)))
		return;
	if (PageAnon(page)) {
		mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page, NR_ANON_PAGES);
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_file_mapped(page, -1);
	}
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (TTU_ACTION(flags) == TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking here: otherwise the VM_LOCKED check is
	 * racy and gives an unstable result.  We can't wait for the
	 * semaphore either, because we now hold anon_vma->lock or
	 * mapping->i_mmap_lock.  If the trylock fails, the page stays on
	 * the evictable LRU, and vmscan can later retry moving it to the
	 * unevictable LRU if it really is mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}
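
/*
 * Illustrative sketch, not part of this file: vmscan drives the unmap
 * subfunctions via try_to_unmap() (see shrink_page_list() in
 * mm/vmscan.c), roughly:
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		;	(fall through and try to free the page)
 *	}
 */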

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * If we can acquire the mmap_sem for read, and the vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);	/* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now that the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}
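/*
 * Editor's illustration (not from the original source): with the 128KB
 * clusters of the example above, successive try_to_unmap_cluster()
 * calls against a 512KB nonlinear vma sweep it in four steps, scanning
 * vm_start + [0, 128KB), [128KB, 256KB), [256KB, 384KB) and finally
 * [384KB, 512KB); try_to_unmap_file() below advances, and eventually
 * resets, the per-vma cursor kept in ->vm_private_data.
 */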
static bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
 * rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address;

		/*
		 * During exec, a temporary VMA is set up and later moved.
		 * The VMA is moved under the anon_vma lock but not the
		 * page tables, leading to a race where migration cannot
		 * find the migration ptes. Rather than increasing the
		 * locking requirements of exec(), migration skips
		 * temporary VMAs until after exec() completes.
		 */
		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
				is_vma_temporary_stack(vma))
			continue;

		address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}
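/*
 * Editor's sketch of the linkage the walk above relies on (descriptive
 * only, not part of the original file): each vma that can map pages of
 * an anon_vma owns an anon_vma_chain whose ->vma points back at the vma
 * and whose ->same_anon_vma is linked into anon_vma->head, so
 *
 *	list_for_each_entry(avc, &anon_vma->head, same_anon_vma)
 *
 * visits every candidate vma for the page exactly once.
 */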
/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	/*
	 * We don't bother to try to find the munlocked page in nonlinears.
	 * It's costly. Instead, later, page reclaim logic may call
	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
	 */
	if (TTU_ACTION(flags) == TTU_MUNLOCK)
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway. Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try.
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
						vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas). Reset the cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
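/*
 * Editor's note (illustrative arithmetic, not original text): the
 * nonlinear pass above treats page_mapcount() as a scan budget.
 * try_to_unmap_cluster() decrements *mapcount once per pte it unmaps,
 * whether or not that pte mapped the target page, and the do-while
 * loop bails out once the budget reaches zero: e.g. a page that came
 * in with mapcount 2 stops the walk after any two ptes have been
 * cleared from the nonlinear vmas.
 */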
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		ret = try_to_unmap_ksm(page, flags);
	else if (PageAnon(page))
		ret = try_to_unmap_anon(page, flags);
	else
		ret = try_to_unmap_file(page, flags);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
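/*
 * Editor's sketch of the expected calling pattern (an assumption based
 * on the return-value comment above, not copied from a real caller):
 *
 *	lock_page(page);
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_SUCCESS:	proceed to pageout
 *	case SWAP_AGAIN:	keep the page on the LRU, retry later
 *	case SWAP_FAIL:		activate the page instead
 *	case SWAP_MLOCK:	move the page to the unevictable list
 *	}
 *	unlock_page(page);
 */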
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code. Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding the page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (unlikely(PageKsm(page)))
		return try_to_unmap_ksm(page, TTU_MUNLOCK);
	else if (PageAnon(page))
		return try_to_unmap_anon(page, TTU_MUNLOCK);
	else
		return try_to_unmap_file(page, TTU_MUNLOCK);
}
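/*
 * Editor's sketch (an assumption, not copied from the munlock path):
 * per the VM_BUG_ON above, the caller must hold the page lock and have
 * already isolated the page from the LRU, e.g.:
 *
 *	... page isolated from the LRU ...
 *	lock_page(page);
 *	if (try_to_munlock(page) == SWAP_MLOCK)
 *		... some other vma still has the page mlocked ...
 *	unlock_page(page);
 */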
#ifdef CONFIG_MIGRATION
/*
 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma from disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return ret;
	spin_lock(&anon_vma->lock);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;

	if (!mapping)
		return ret;
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	/*
	 * No nonlinear handling: being always shared, nonlinear vmas
	 * never contain migration ptes. Decide what to do about this
	 * limitation to linear when we need rmap_walk() on nonlinear.
	 */
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	VM_BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rmap_one, arg);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rmap_one, arg);
	else
		return rmap_walk_file(page, rmap_one, arg);
}
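/*
 * Editor's sketch of a caller (hypothetical callback name, modelled on
 * how migrate.c removes migration ptes; not part of the original file):
 *
 *	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *			unsigned long address, void *arg)
 *	{
 *		... inspect or fix up one mapping at address ...
 *		return SWAP_AGAIN;	// continue the walk
 *	}
 *
 *	rmap_walk(page, my_rmap_one, NULL);	// page must be locked
 */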
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_HUGETLBFS
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	if (!exclusive) {
		struct anon_vma_chain *avc;
		avc = list_entry(vma->anon_vma_chain.prev,
				struct anon_vma_chain, same_vma);
		anon_vma = avc->anon_vma;
	}
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!anon_vma);
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}
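/*
 * Editor's note: page->_mapcount is -1 while a page is unmapped, so the
 * atomic_inc_and_test() above is true exactly when the count goes
 * -1 -> 0, i.e. on the first mapping; hugepage_add_new_anon_rmap()
 * below can simply set the count to 0, because a freshly allocated
 * hugepage cannot be mapped anywhere else yet.
 */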
void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLBFS */