11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * mm/rmap.c - physical to virtual reverse mappings 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright 2001, Rik van Riel <riel@conectiva.com.br> 51da177e4SLinus Torvalds * Released under the General Public License (GPL). 61da177e4SLinus Torvalds * 71da177e4SLinus Torvalds * Simple, low overhead reverse mapping scheme. 81da177e4SLinus Torvalds * Please try to keep this thing as modular as possible. 91da177e4SLinus Torvalds * 101da177e4SLinus Torvalds * Provides methods for unmapping each kind of mapped page: 111da177e4SLinus Torvalds * the anon methods track anonymous pages, and 121da177e4SLinus Torvalds * the file methods track pages belonging to an inode. 131da177e4SLinus Torvalds * 141da177e4SLinus Torvalds * Original design by Rik van Riel <riel@conectiva.com.br> 2001 151da177e4SLinus Torvalds * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 161da177e4SLinus Torvalds * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 171da177e4SLinus Torvalds * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004 181da177e4SLinus Torvalds */ 191da177e4SLinus Torvalds 201da177e4SLinus Torvalds /* 211da177e4SLinus Torvalds * Lock ordering in mm: 221da177e4SLinus Torvalds * 231b1dcc1bSJes Sorensen * inode->i_mutex (while writing or truncating, not reading or faulting) 2482591e6eSNick Piggin * inode->i_alloc_sem (vmtruncate_range) 251da177e4SLinus Torvalds * mm->mmap_sem 261da177e4SLinus Torvalds * page->flags PG_locked (lock_page) 271da177e4SLinus Torvalds * mapping->i_mmap_lock 281da177e4SLinus Torvalds * anon_vma->lock 29b8072f09SHugh Dickins * mm->page_table_lock or pte_lock 30053837fcSNick Piggin * zone->lru_lock (in mark_page_accessed, isolate_lru_page) 315d337b91SHugh Dickins * swap_lock (in swap_duplicate, swap_info_get) 321da177e4SLinus Torvalds * mmlist_lock (in mmput, drain_mmlist and others) 331da177e4SLinus Torvalds * mapping->private_lock (in __set_page_dirty_buffers) 341da177e4SLinus Torvalds * inode_lock (in set_page_dirty's __mark_inode_dirty) 351da177e4SLinus Torvalds * sb_lock (within inode_lock in fs/fs-writeback.c) 361da177e4SLinus Torvalds * mapping->tree_lock (widely used, in set_page_dirty, 371da177e4SLinus Torvalds * in arch-dependent flush_dcache_mmap_lock, 381da177e4SLinus Torvalds * within inode_lock in __sync_single_inode) 391da177e4SLinus Torvalds */ 401da177e4SLinus Torvalds 411da177e4SLinus Torvalds #include <linux/mm.h> 421da177e4SLinus Torvalds #include <linux/pagemap.h> 431da177e4SLinus Torvalds #include <linux/swap.h> 441da177e4SLinus Torvalds #include <linux/swapops.h> 451da177e4SLinus Torvalds #include <linux/slab.h> 461da177e4SLinus Torvalds #include <linux/init.h> 471da177e4SLinus Torvalds #include <linux/rmap.h> 481da177e4SLinus Torvalds #include <linux/rcupdate.h> 49a48d07afSChristoph Lameter #include <linux/module.h> 507de6b805SNick Piggin #include <linux/kallsyms.h> 518a9f3ccdSBalbir Singh #include <linux/memcontrol.h> 52*cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h> 531da177e4SLinus Torvalds 541da177e4SLinus Torvalds #include <asm/tlbflush.h> 551da177e4SLinus Torvalds 56fcc234f8SPekka Enberg struct kmem_cache *anon_vma_cachep; 571da177e4SLinus Torvalds 581da177e4SLinus Torvalds /* This must be called under the mmap_sem. */ 591da177e4SLinus Torvalds int anon_vma_prepare(struct vm_area_struct *vma) 601da177e4SLinus Torvalds { 611da177e4SLinus Torvalds struct anon_vma *anon_vma = vma->anon_vma; 621da177e4SLinus Torvalds 631da177e4SLinus Torvalds might_sleep(); 641da177e4SLinus Torvalds if (unlikely(!anon_vma)) { 651da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 661da177e4SLinus Torvalds struct anon_vma *allocated, *locked; 671da177e4SLinus Torvalds 681da177e4SLinus Torvalds anon_vma = find_mergeable_anon_vma(vma); 691da177e4SLinus Torvalds if (anon_vma) { 701da177e4SLinus Torvalds allocated = NULL; 711da177e4SLinus Torvalds locked = anon_vma; 721da177e4SLinus Torvalds spin_lock(&locked->lock); 731da177e4SLinus Torvalds } else { 741da177e4SLinus Torvalds anon_vma = anon_vma_alloc(); 751da177e4SLinus Torvalds if (unlikely(!anon_vma)) 761da177e4SLinus Torvalds return -ENOMEM; 771da177e4SLinus Torvalds allocated = anon_vma; 781da177e4SLinus Torvalds locked = NULL; 791da177e4SLinus Torvalds } 801da177e4SLinus Torvalds 811da177e4SLinus Torvalds /* page_table_lock to protect against threads */ 821da177e4SLinus Torvalds spin_lock(&mm->page_table_lock); 831da177e4SLinus Torvalds if (likely(!vma->anon_vma)) { 841da177e4SLinus Torvalds vma->anon_vma = anon_vma; 850697212aSChristoph Lameter list_add_tail(&vma->anon_vma_node, &anon_vma->head); 861da177e4SLinus Torvalds allocated = NULL; 871da177e4SLinus Torvalds } 881da177e4SLinus Torvalds spin_unlock(&mm->page_table_lock); 891da177e4SLinus Torvalds 901da177e4SLinus Torvalds if (locked) 911da177e4SLinus Torvalds spin_unlock(&locked->lock); 921da177e4SLinus Torvalds if (unlikely(allocated)) 931da177e4SLinus Torvalds anon_vma_free(allocated); 941da177e4SLinus Torvalds } 951da177e4SLinus Torvalds return 0; 961da177e4SLinus Torvalds } 971da177e4SLinus Torvalds 981da177e4SLinus Torvalds void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) 991da177e4SLinus Torvalds { 1001da177e4SLinus Torvalds BUG_ON(vma->anon_vma != next->anon_vma); 1011da177e4SLinus Torvalds list_del(&next->anon_vma_node); 1021da177e4SLinus Torvalds } 1031da177e4SLinus Torvalds 1041da177e4SLinus Torvalds void __anon_vma_link(struct vm_area_struct *vma) 1051da177e4SLinus Torvalds { 1061da177e4SLinus Torvalds struct anon_vma *anon_vma = vma->anon_vma; 1071da177e4SLinus Torvalds 10830acbabaSHugh Dickins if (anon_vma) 1090697212aSChristoph Lameter list_add_tail(&vma->anon_vma_node, &anon_vma->head); 1101da177e4SLinus Torvalds } 1111da177e4SLinus Torvalds 1121da177e4SLinus Torvalds void anon_vma_link(struct vm_area_struct *vma) 1131da177e4SLinus Torvalds { 1141da177e4SLinus Torvalds struct anon_vma *anon_vma = vma->anon_vma; 1151da177e4SLinus Torvalds 1161da177e4SLinus Torvalds if (anon_vma) { 1171da177e4SLinus Torvalds spin_lock(&anon_vma->lock); 1180697212aSChristoph Lameter list_add_tail(&vma->anon_vma_node, &anon_vma->head); 1191da177e4SLinus Torvalds spin_unlock(&anon_vma->lock); 1201da177e4SLinus Torvalds } 1211da177e4SLinus Torvalds } 1221da177e4SLinus Torvalds 1231da177e4SLinus Torvalds void anon_vma_unlink(struct vm_area_struct *vma) 1241da177e4SLinus Torvalds { 1251da177e4SLinus Torvalds struct anon_vma *anon_vma = vma->anon_vma; 1261da177e4SLinus Torvalds int empty; 1271da177e4SLinus Torvalds 1281da177e4SLinus Torvalds if (!anon_vma) 1291da177e4SLinus Torvalds return; 1301da177e4SLinus Torvalds 1311da177e4SLinus Torvalds spin_lock(&anon_vma->lock); 1321da177e4SLinus Torvalds list_del(&vma->anon_vma_node); 1331da177e4SLinus Torvalds 1341da177e4SLinus Torvalds /* We must garbage collect the anon_vma if it's empty */ 1351da177e4SLinus Torvalds empty = list_empty(&anon_vma->head); 1361da177e4SLinus Torvalds spin_unlock(&anon_vma->lock); 1371da177e4SLinus Torvalds 1381da177e4SLinus Torvalds if (empty) 1391da177e4SLinus Torvalds anon_vma_free(anon_vma); 1401da177e4SLinus Torvalds } 1411da177e4SLinus Torvalds 14251cc5068SAlexey Dobriyan static void anon_vma_ctor(void *data) 1431da177e4SLinus Torvalds { 1441da177e4SLinus Torvalds struct anon_vma *anon_vma = data; 1451da177e4SLinus Torvalds 1461da177e4SLinus Torvalds spin_lock_init(&anon_vma->lock); 1471da177e4SLinus Torvalds INIT_LIST_HEAD(&anon_vma->head); 1481da177e4SLinus Torvalds } 1491da177e4SLinus Torvalds 1501da177e4SLinus Torvalds void __init anon_vma_init(void) 1511da177e4SLinus Torvalds { 1521da177e4SLinus Torvalds anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 15320c2df83SPaul Mundt 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); 1541da177e4SLinus Torvalds } 1551da177e4SLinus Torvalds 1561da177e4SLinus Torvalds /* 1571da177e4SLinus Torvalds * Getting a lock on a stable anon_vma from a page off the LRU is 1581da177e4SLinus Torvalds * tricky: page_lock_anon_vma rely on RCU to guard against the races. 1591da177e4SLinus Torvalds */ 1601da177e4SLinus Torvalds static struct anon_vma *page_lock_anon_vma(struct page *page) 1611da177e4SLinus Torvalds { 16234bbd704SOleg Nesterov struct anon_vma *anon_vma; 1631da177e4SLinus Torvalds unsigned long anon_mapping; 1641da177e4SLinus Torvalds 1651da177e4SLinus Torvalds rcu_read_lock(); 1661da177e4SLinus Torvalds anon_mapping = (unsigned long) page->mapping; 1671da177e4SLinus Torvalds if (!(anon_mapping & PAGE_MAPPING_ANON)) 1681da177e4SLinus Torvalds goto out; 1691da177e4SLinus Torvalds if (!page_mapped(page)) 1701da177e4SLinus Torvalds goto out; 1711da177e4SLinus Torvalds 1721da177e4SLinus Torvalds anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 1731da177e4SLinus Torvalds spin_lock(&anon_vma->lock); 17434bbd704SOleg Nesterov return anon_vma; 1751da177e4SLinus Torvalds out: 1761da177e4SLinus Torvalds rcu_read_unlock(); 17734bbd704SOleg Nesterov return NULL; 17834bbd704SOleg Nesterov } 17934bbd704SOleg Nesterov 18034bbd704SOleg Nesterov static void page_unlock_anon_vma(struct anon_vma *anon_vma) 18134bbd704SOleg Nesterov { 18234bbd704SOleg Nesterov spin_unlock(&anon_vma->lock); 18334bbd704SOleg Nesterov rcu_read_unlock(); 1841da177e4SLinus Torvalds } 1851da177e4SLinus Torvalds 1861da177e4SLinus Torvalds /* 1873ad33b24SLee Schermerhorn * At what user virtual address is page expected in @vma? 1883ad33b24SLee Schermerhorn * Returns virtual address or -EFAULT if page's index/offset is not 1893ad33b24SLee Schermerhorn * within the range mapped the @vma. 1901da177e4SLinus Torvalds */ 1911da177e4SLinus Torvalds static inline unsigned long 1921da177e4SLinus Torvalds vma_address(struct page *page, struct vm_area_struct *vma) 1931da177e4SLinus Torvalds { 1941da177e4SLinus Torvalds pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1951da177e4SLinus Torvalds unsigned long address; 1961da177e4SLinus Torvalds 1971da177e4SLinus Torvalds address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 1981da177e4SLinus Torvalds if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { 1993ad33b24SLee Schermerhorn /* page should be within @vma mapping range */ 2001da177e4SLinus Torvalds return -EFAULT; 2011da177e4SLinus Torvalds } 2021da177e4SLinus Torvalds return address; 2031da177e4SLinus Torvalds } 2041da177e4SLinus Torvalds 2051da177e4SLinus Torvalds /* 2061da177e4SLinus Torvalds * At what user virtual address is page expected in vma? checking that the 207ee498ed7SHugh Dickins * page matches the vma: currently only used on anon pages, by unuse_vma; 2081da177e4SLinus Torvalds */ 2091da177e4SLinus Torvalds unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 2101da177e4SLinus Torvalds { 2111da177e4SLinus Torvalds if (PageAnon(page)) { 2121da177e4SLinus Torvalds if ((void *)vma->anon_vma != 2131da177e4SLinus Torvalds (void *)page->mapping - PAGE_MAPPING_ANON) 2141da177e4SLinus Torvalds return -EFAULT; 2151da177e4SLinus Torvalds } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 216ee498ed7SHugh Dickins if (!vma->vm_file || 217ee498ed7SHugh Dickins vma->vm_file->f_mapping != page->mapping) 2181da177e4SLinus Torvalds return -EFAULT; 2191da177e4SLinus Torvalds } else 2201da177e4SLinus Torvalds return -EFAULT; 2211da177e4SLinus Torvalds return vma_address(page, vma); 2221da177e4SLinus Torvalds } 2231da177e4SLinus Torvalds 2241da177e4SLinus Torvalds /* 22581b4082dSNikita Danilov * Check that @page is mapped at @address into @mm. 22681b4082dSNikita Danilov * 227b8072f09SHugh Dickins * On success returns with pte mapped and locked. 22881b4082dSNikita Danilov */ 229ceffc078SCarsten Otte pte_t *page_check_address(struct page *page, struct mm_struct *mm, 230c0718806SHugh Dickins unsigned long address, spinlock_t **ptlp) 23181b4082dSNikita Danilov { 23281b4082dSNikita Danilov pgd_t *pgd; 23381b4082dSNikita Danilov pud_t *pud; 23481b4082dSNikita Danilov pmd_t *pmd; 23581b4082dSNikita Danilov pte_t *pte; 236c0718806SHugh Dickins spinlock_t *ptl; 23781b4082dSNikita Danilov 23881b4082dSNikita Danilov pgd = pgd_offset(mm, address); 239c0718806SHugh Dickins if (!pgd_present(*pgd)) 240c0718806SHugh Dickins return NULL; 241c0718806SHugh Dickins 24281b4082dSNikita Danilov pud = pud_offset(pgd, address); 243c0718806SHugh Dickins if (!pud_present(*pud)) 244c0718806SHugh Dickins return NULL; 245c0718806SHugh Dickins 24681b4082dSNikita Danilov pmd = pmd_offset(pud, address); 247c0718806SHugh Dickins if (!pmd_present(*pmd)) 248c0718806SHugh Dickins return NULL; 249c0718806SHugh Dickins 25081b4082dSNikita Danilov pte = pte_offset_map(pmd, address); 251c0718806SHugh Dickins /* Make a quick check before getting the lock */ 252c0718806SHugh Dickins if (!pte_present(*pte)) { 25381b4082dSNikita Danilov pte_unmap(pte); 254c0718806SHugh Dickins return NULL; 25581b4082dSNikita Danilov } 256c0718806SHugh Dickins 2574c21e2f2SHugh Dickins ptl = pte_lockptr(mm, pmd); 258c0718806SHugh Dickins spin_lock(ptl); 259c0718806SHugh Dickins if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { 260c0718806SHugh Dickins *ptlp = ptl; 261c0718806SHugh Dickins return pte; 26281b4082dSNikita Danilov } 263c0718806SHugh Dickins pte_unmap_unlock(pte, ptl); 264c0718806SHugh Dickins return NULL; 26581b4082dSNikita Danilov } 26681b4082dSNikita Danilov 26781b4082dSNikita Danilov /* 2681da177e4SLinus Torvalds * Subfunctions of page_referenced: page_referenced_one called 2691da177e4SLinus Torvalds * repeatedly from either page_referenced_anon or page_referenced_file. 2701da177e4SLinus Torvalds */ 2711da177e4SLinus Torvalds static int page_referenced_one(struct page *page, 272f7b7fd8fSRik van Riel struct vm_area_struct *vma, unsigned int *mapcount) 2731da177e4SLinus Torvalds { 2741da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 2751da177e4SLinus Torvalds unsigned long address; 2761da177e4SLinus Torvalds pte_t *pte; 277c0718806SHugh Dickins spinlock_t *ptl; 2781da177e4SLinus Torvalds int referenced = 0; 2791da177e4SLinus Torvalds 2801da177e4SLinus Torvalds address = vma_address(page, vma); 2811da177e4SLinus Torvalds if (address == -EFAULT) 2821da177e4SLinus Torvalds goto out; 2831da177e4SLinus Torvalds 284c0718806SHugh Dickins pte = page_check_address(page, mm, address, &ptl); 285c0718806SHugh Dickins if (!pte) 286c0718806SHugh Dickins goto out; 287c0718806SHugh Dickins 2885a9bbdcdSHugh Dickins if (vma->vm_flags & VM_LOCKED) { 2895a9bbdcdSHugh Dickins referenced++; 2905a9bbdcdSHugh Dickins *mapcount = 1; /* break early from loop */ 291*cddb8a5cSAndrea Arcangeli } else if (ptep_clear_flush_young_notify(vma, address, pte)) 2921da177e4SLinus Torvalds referenced++; 2931da177e4SLinus Torvalds 294fcdae29aSRik Van Riel /* Pretend the page is referenced if the task has the 295fcdae29aSRik Van Riel swap token and is in the middle of a page fault. */ 296f7b7fd8fSRik van Riel if (mm != current->mm && has_swap_token(mm) && 297fcdae29aSRik Van Riel rwsem_is_locked(&mm->mmap_sem)) 2981da177e4SLinus Torvalds referenced++; 2991da177e4SLinus Torvalds 3001da177e4SLinus Torvalds (*mapcount)--; 301c0718806SHugh Dickins pte_unmap_unlock(pte, ptl); 3021da177e4SLinus Torvalds out: 3031da177e4SLinus Torvalds return referenced; 3041da177e4SLinus Torvalds } 3051da177e4SLinus Torvalds 306bed7161aSBalbir Singh static int page_referenced_anon(struct page *page, 307bed7161aSBalbir Singh struct mem_cgroup *mem_cont) 3081da177e4SLinus Torvalds { 3091da177e4SLinus Torvalds unsigned int mapcount; 3101da177e4SLinus Torvalds struct anon_vma *anon_vma; 3111da177e4SLinus Torvalds struct vm_area_struct *vma; 3121da177e4SLinus Torvalds int referenced = 0; 3131da177e4SLinus Torvalds 3141da177e4SLinus Torvalds anon_vma = page_lock_anon_vma(page); 3151da177e4SLinus Torvalds if (!anon_vma) 3161da177e4SLinus Torvalds return referenced; 3171da177e4SLinus Torvalds 3181da177e4SLinus Torvalds mapcount = page_mapcount(page); 3191da177e4SLinus Torvalds list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { 320bed7161aSBalbir Singh /* 321bed7161aSBalbir Singh * If we are reclaiming on behalf of a cgroup, skip 322bed7161aSBalbir Singh * counting on behalf of references from different 323bed7161aSBalbir Singh * cgroups 324bed7161aSBalbir Singh */ 325bd845e38SHugh Dickins if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 326bed7161aSBalbir Singh continue; 327f7b7fd8fSRik van Riel referenced += page_referenced_one(page, vma, &mapcount); 3281da177e4SLinus Torvalds if (!mapcount) 3291da177e4SLinus Torvalds break; 3301da177e4SLinus Torvalds } 33134bbd704SOleg Nesterov 33234bbd704SOleg Nesterov page_unlock_anon_vma(anon_vma); 3331da177e4SLinus Torvalds return referenced; 3341da177e4SLinus Torvalds } 3351da177e4SLinus Torvalds 3361da177e4SLinus Torvalds /** 3371da177e4SLinus Torvalds * page_referenced_file - referenced check for object-based rmap 3381da177e4SLinus Torvalds * @page: the page we're checking references on. 33943d8eac4SRandy Dunlap * @mem_cont: target memory controller 3401da177e4SLinus Torvalds * 3411da177e4SLinus Torvalds * For an object-based mapped page, find all the places it is mapped and 3421da177e4SLinus Torvalds * check/clear the referenced flag. This is done by following the page->mapping 3431da177e4SLinus Torvalds * pointer, then walking the chain of vmas it holds. It returns the number 3441da177e4SLinus Torvalds * of references it found. 3451da177e4SLinus Torvalds * 3461da177e4SLinus Torvalds * This function is only called from page_referenced for object-based pages. 3471da177e4SLinus Torvalds */ 348bed7161aSBalbir Singh static int page_referenced_file(struct page *page, 349bed7161aSBalbir Singh struct mem_cgroup *mem_cont) 3501da177e4SLinus Torvalds { 3511da177e4SLinus Torvalds unsigned int mapcount; 3521da177e4SLinus Torvalds struct address_space *mapping = page->mapping; 3531da177e4SLinus Torvalds pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 3541da177e4SLinus Torvalds struct vm_area_struct *vma; 3551da177e4SLinus Torvalds struct prio_tree_iter iter; 3561da177e4SLinus Torvalds int referenced = 0; 3571da177e4SLinus Torvalds 3581da177e4SLinus Torvalds /* 3591da177e4SLinus Torvalds * The caller's checks on page->mapping and !PageAnon have made 3601da177e4SLinus Torvalds * sure that this is a file page: the check for page->mapping 3611da177e4SLinus Torvalds * excludes the case just before it gets set on an anon page. 3621da177e4SLinus Torvalds */ 3631da177e4SLinus Torvalds BUG_ON(PageAnon(page)); 3641da177e4SLinus Torvalds 3651da177e4SLinus Torvalds /* 3661da177e4SLinus Torvalds * The page lock not only makes sure that page->mapping cannot 3671da177e4SLinus Torvalds * suddenly be NULLified by truncation, it makes sure that the 3681da177e4SLinus Torvalds * structure at mapping cannot be freed and reused yet, 3691da177e4SLinus Torvalds * so we can safely take mapping->i_mmap_lock. 3701da177e4SLinus Torvalds */ 3711da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 3721da177e4SLinus Torvalds 3731da177e4SLinus Torvalds spin_lock(&mapping->i_mmap_lock); 3741da177e4SLinus Torvalds 3751da177e4SLinus Torvalds /* 3761da177e4SLinus Torvalds * i_mmap_lock does not stabilize mapcount at all, but mapcount 3771da177e4SLinus Torvalds * is more likely to be accurate if we note it after spinning. 3781da177e4SLinus Torvalds */ 3791da177e4SLinus Torvalds mapcount = page_mapcount(page); 3801da177e4SLinus Torvalds 3811da177e4SLinus Torvalds vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 382bed7161aSBalbir Singh /* 383bed7161aSBalbir Singh * If we are reclaiming on behalf of a cgroup, skip 384bed7161aSBalbir Singh * counting on behalf of references from different 385bed7161aSBalbir Singh * cgroups 386bed7161aSBalbir Singh */ 387bd845e38SHugh Dickins if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 388bed7161aSBalbir Singh continue; 3891da177e4SLinus Torvalds if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE)) 3901da177e4SLinus Torvalds == (VM_LOCKED|VM_MAYSHARE)) { 3911da177e4SLinus Torvalds referenced++; 3921da177e4SLinus Torvalds break; 3931da177e4SLinus Torvalds } 394f7b7fd8fSRik van Riel referenced += page_referenced_one(page, vma, &mapcount); 3951da177e4SLinus Torvalds if (!mapcount) 3961da177e4SLinus Torvalds break; 3971da177e4SLinus Torvalds } 3981da177e4SLinus Torvalds 3991da177e4SLinus Torvalds spin_unlock(&mapping->i_mmap_lock); 4001da177e4SLinus Torvalds return referenced; 4011da177e4SLinus Torvalds } 4021da177e4SLinus Torvalds 4031da177e4SLinus Torvalds /** 4041da177e4SLinus Torvalds * page_referenced - test if the page was referenced 4051da177e4SLinus Torvalds * @page: the page to test 4061da177e4SLinus Torvalds * @is_locked: caller holds lock on the page 40743d8eac4SRandy Dunlap * @mem_cont: target memory controller 4081da177e4SLinus Torvalds * 4091da177e4SLinus Torvalds * Quick test_and_clear_referenced for all mappings to a page, 4101da177e4SLinus Torvalds * returns the number of ptes which referenced the page. 4111da177e4SLinus Torvalds */ 412bed7161aSBalbir Singh int page_referenced(struct page *page, int is_locked, 413bed7161aSBalbir Singh struct mem_cgroup *mem_cont) 4141da177e4SLinus Torvalds { 4151da177e4SLinus Torvalds int referenced = 0; 4161da177e4SLinus Torvalds 4171da177e4SLinus Torvalds if (TestClearPageReferenced(page)) 4181da177e4SLinus Torvalds referenced++; 4191da177e4SLinus Torvalds 4201da177e4SLinus Torvalds if (page_mapped(page) && page->mapping) { 4211da177e4SLinus Torvalds if (PageAnon(page)) 422bed7161aSBalbir Singh referenced += page_referenced_anon(page, mem_cont); 4231da177e4SLinus Torvalds else if (is_locked) 424bed7161aSBalbir Singh referenced += page_referenced_file(page, mem_cont); 4251da177e4SLinus Torvalds else if (TestSetPageLocked(page)) 4261da177e4SLinus Torvalds referenced++; 4271da177e4SLinus Torvalds else { 4281da177e4SLinus Torvalds if (page->mapping) 429bed7161aSBalbir Singh referenced += 430bed7161aSBalbir Singh page_referenced_file(page, mem_cont); 4311da177e4SLinus Torvalds unlock_page(page); 4321da177e4SLinus Torvalds } 4331da177e4SLinus Torvalds } 4345b7baf05SChristian Borntraeger 4355b7baf05SChristian Borntraeger if (page_test_and_clear_young(page)) 4365b7baf05SChristian Borntraeger referenced++; 4375b7baf05SChristian Borntraeger 4381da177e4SLinus Torvalds return referenced; 4391da177e4SLinus Torvalds } 4401da177e4SLinus Torvalds 441d08b3851SPeter Zijlstra static int page_mkclean_one(struct page *page, struct vm_area_struct *vma) 442d08b3851SPeter Zijlstra { 443d08b3851SPeter Zijlstra struct mm_struct *mm = vma->vm_mm; 444d08b3851SPeter Zijlstra unsigned long address; 445c2fda5feSPeter Zijlstra pte_t *pte; 446d08b3851SPeter Zijlstra spinlock_t *ptl; 447d08b3851SPeter Zijlstra int ret = 0; 448d08b3851SPeter Zijlstra 449d08b3851SPeter Zijlstra address = vma_address(page, vma); 450d08b3851SPeter Zijlstra if (address == -EFAULT) 451d08b3851SPeter Zijlstra goto out; 452d08b3851SPeter Zijlstra 453d08b3851SPeter Zijlstra pte = page_check_address(page, mm, address, &ptl); 454d08b3851SPeter Zijlstra if (!pte) 455d08b3851SPeter Zijlstra goto out; 456d08b3851SPeter Zijlstra 457c2fda5feSPeter Zijlstra if (pte_dirty(*pte) || pte_write(*pte)) { 458c2fda5feSPeter Zijlstra pte_t entry; 459d08b3851SPeter Zijlstra 460c2fda5feSPeter Zijlstra flush_cache_page(vma, address, pte_pfn(*pte)); 461*cddb8a5cSAndrea Arcangeli entry = ptep_clear_flush_notify(vma, address, pte); 462d08b3851SPeter Zijlstra entry = pte_wrprotect(entry); 463c2fda5feSPeter Zijlstra entry = pte_mkclean(entry); 464d6e88e67SAl Viro set_pte_at(mm, address, pte, entry); 465d08b3851SPeter Zijlstra ret = 1; 466c2fda5feSPeter Zijlstra } 467d08b3851SPeter Zijlstra 468d08b3851SPeter Zijlstra pte_unmap_unlock(pte, ptl); 469d08b3851SPeter Zijlstra out: 470d08b3851SPeter Zijlstra return ret; 471d08b3851SPeter Zijlstra } 472d08b3851SPeter Zijlstra 473d08b3851SPeter Zijlstra static int page_mkclean_file(struct address_space *mapping, struct page *page) 474d08b3851SPeter Zijlstra { 475d08b3851SPeter Zijlstra pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 476d08b3851SPeter Zijlstra struct vm_area_struct *vma; 477d08b3851SPeter Zijlstra struct prio_tree_iter iter; 478d08b3851SPeter Zijlstra int ret = 0; 479d08b3851SPeter Zijlstra 480d08b3851SPeter Zijlstra BUG_ON(PageAnon(page)); 481d08b3851SPeter Zijlstra 482d08b3851SPeter Zijlstra spin_lock(&mapping->i_mmap_lock); 483d08b3851SPeter Zijlstra vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 484d08b3851SPeter Zijlstra if (vma->vm_flags & VM_SHARED) 485d08b3851SPeter Zijlstra ret += page_mkclean_one(page, vma); 486d08b3851SPeter Zijlstra } 487d08b3851SPeter Zijlstra spin_unlock(&mapping->i_mmap_lock); 488d08b3851SPeter Zijlstra return ret; 489d08b3851SPeter Zijlstra } 490d08b3851SPeter Zijlstra 491d08b3851SPeter Zijlstra int page_mkclean(struct page *page) 492d08b3851SPeter Zijlstra { 493d08b3851SPeter Zijlstra int ret = 0; 494d08b3851SPeter Zijlstra 495d08b3851SPeter Zijlstra BUG_ON(!PageLocked(page)); 496d08b3851SPeter Zijlstra 497d08b3851SPeter Zijlstra if (page_mapped(page)) { 498d08b3851SPeter Zijlstra struct address_space *mapping = page_mapping(page); 499ce7e9faeSChristian Borntraeger if (mapping) { 500d08b3851SPeter Zijlstra ret = page_mkclean_file(mapping, page); 5016c210482SMartin Schwidefsky if (page_test_dirty(page)) { 5026c210482SMartin Schwidefsky page_clear_dirty(page); 503c2fda5feSPeter Zijlstra ret = 1; 5046e1beb3cSMartin Schwidefsky } 5056c210482SMartin Schwidefsky } 506ce7e9faeSChristian Borntraeger } 507d08b3851SPeter Zijlstra 508d08b3851SPeter Zijlstra return ret; 509d08b3851SPeter Zijlstra } 51060b59beaSJaya Kumar EXPORT_SYMBOL_GPL(page_mkclean); 511d08b3851SPeter Zijlstra 5121da177e4SLinus Torvalds /** 51343d8eac4SRandy Dunlap * __page_set_anon_rmap - setup new anonymous rmap 5141da177e4SLinus Torvalds * @page: the page to add the mapping to 5151da177e4SLinus Torvalds * @vma: the vm area in which the mapping is added 5161da177e4SLinus Torvalds * @address: the user virtual address mapped 5171da177e4SLinus Torvalds */ 5189617d95eSNick Piggin static void __page_set_anon_rmap(struct page *page, 5191da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long address) 5201da177e4SLinus Torvalds { 5212822c1aaSNick Piggin struct anon_vma *anon_vma = vma->anon_vma; 5222822c1aaSNick Piggin 5232822c1aaSNick Piggin BUG_ON(!anon_vma); 5241da177e4SLinus Torvalds anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 5252822c1aaSNick Piggin page->mapping = (struct address_space *) anon_vma; 5262822c1aaSNick Piggin 5274d7670e0SNick Piggin page->index = linear_page_index(vma, address); 5282822c1aaSNick Piggin 529a74609faSNick Piggin /* 530a74609faSNick Piggin * nr_mapped state can be updated without turning off 531a74609faSNick Piggin * interrupts because it is not modified via interrupt. 532a74609faSNick Piggin */ 533f3dbd344SChristoph Lameter __inc_zone_page_state(page, NR_ANON_PAGES); 5341da177e4SLinus Torvalds } 5359617d95eSNick Piggin 5369617d95eSNick Piggin /** 53743d8eac4SRandy Dunlap * __page_check_anon_rmap - sanity check anonymous rmap addition 538c97a9e10SNick Piggin * @page: the page to add the mapping to 539c97a9e10SNick Piggin * @vma: the vm area in which the mapping is added 540c97a9e10SNick Piggin * @address: the user virtual address mapped 541c97a9e10SNick Piggin */ 542c97a9e10SNick Piggin static void __page_check_anon_rmap(struct page *page, 543c97a9e10SNick Piggin struct vm_area_struct *vma, unsigned long address) 544c97a9e10SNick Piggin { 545c97a9e10SNick Piggin #ifdef CONFIG_DEBUG_VM 546c97a9e10SNick Piggin /* 547c97a9e10SNick Piggin * The page's anon-rmap details (mapping and index) are guaranteed to 548c97a9e10SNick Piggin * be set up correctly at this point. 549c97a9e10SNick Piggin * 550c97a9e10SNick Piggin * We have exclusion against page_add_anon_rmap because the caller 551c97a9e10SNick Piggin * always holds the page locked, except if called from page_dup_rmap, 552c97a9e10SNick Piggin * in which case the page is already known to be setup. 553c97a9e10SNick Piggin * 554c97a9e10SNick Piggin * We have exclusion against page_add_new_anon_rmap because those pages 555c97a9e10SNick Piggin * are initially only visible via the pagetables, and the pte is locked 556c97a9e10SNick Piggin * over the call to page_add_new_anon_rmap. 557c97a9e10SNick Piggin */ 558c97a9e10SNick Piggin struct anon_vma *anon_vma = vma->anon_vma; 559c97a9e10SNick Piggin anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 560c97a9e10SNick Piggin BUG_ON(page->mapping != (struct address_space *)anon_vma); 561c97a9e10SNick Piggin BUG_ON(page->index != linear_page_index(vma, address)); 562c97a9e10SNick Piggin #endif 563c97a9e10SNick Piggin } 564c97a9e10SNick Piggin 565c97a9e10SNick Piggin /** 5669617d95eSNick Piggin * page_add_anon_rmap - add pte mapping to an anonymous page 5679617d95eSNick Piggin * @page: the page to add the mapping to 5689617d95eSNick Piggin * @vma: the vm area in which the mapping is added 5699617d95eSNick Piggin * @address: the user virtual address mapped 5709617d95eSNick Piggin * 571c97a9e10SNick Piggin * The caller needs to hold the pte lock and the page must be locked. 5729617d95eSNick Piggin */ 5739617d95eSNick Piggin void page_add_anon_rmap(struct page *page, 5749617d95eSNick Piggin struct vm_area_struct *vma, unsigned long address) 5759617d95eSNick Piggin { 576c97a9e10SNick Piggin VM_BUG_ON(!PageLocked(page)); 577c97a9e10SNick Piggin VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 5789617d95eSNick Piggin if (atomic_inc_and_test(&page->_mapcount)) 5799617d95eSNick Piggin __page_set_anon_rmap(page, vma, address); 58069029cd5SKAMEZAWA Hiroyuki else 581c97a9e10SNick Piggin __page_check_anon_rmap(page, vma, address); 5821da177e4SLinus Torvalds } 5831da177e4SLinus Torvalds 58443d8eac4SRandy Dunlap /** 5859617d95eSNick Piggin * page_add_new_anon_rmap - add pte mapping to a new anonymous page 5869617d95eSNick Piggin * @page: the page to add the mapping to 5879617d95eSNick Piggin * @vma: the vm area in which the mapping is added 5889617d95eSNick Piggin * @address: the user virtual address mapped 5899617d95eSNick Piggin * 5909617d95eSNick Piggin * Same as page_add_anon_rmap but must only be called on *new* pages. 5919617d95eSNick Piggin * This means the inc-and-test can be bypassed. 592c97a9e10SNick Piggin * Page does not have to be locked. 5939617d95eSNick Piggin */ 5949617d95eSNick Piggin void page_add_new_anon_rmap(struct page *page, 5959617d95eSNick Piggin struct vm_area_struct *vma, unsigned long address) 5969617d95eSNick Piggin { 597c97a9e10SNick Piggin BUG_ON(address < vma->vm_start || address >= vma->vm_end); 5989617d95eSNick Piggin atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ 5999617d95eSNick Piggin __page_set_anon_rmap(page, vma, address); 6009617d95eSNick Piggin } 6019617d95eSNick Piggin 6021da177e4SLinus Torvalds /** 6031da177e4SLinus Torvalds * page_add_file_rmap - add pte mapping to a file page 6041da177e4SLinus Torvalds * @page: the page to add the mapping to 6051da177e4SLinus Torvalds * 606b8072f09SHugh Dickins * The caller needs to hold the pte lock. 6071da177e4SLinus Torvalds */ 6081da177e4SLinus Torvalds void page_add_file_rmap(struct page *page) 6091da177e4SLinus Torvalds { 6101da177e4SLinus Torvalds if (atomic_inc_and_test(&page->_mapcount)) 61165ba55f5SChristoph Lameter __inc_zone_page_state(page, NR_FILE_MAPPED); 6121da177e4SLinus Torvalds } 6131da177e4SLinus Torvalds 614c97a9e10SNick Piggin #ifdef CONFIG_DEBUG_VM 615c97a9e10SNick Piggin /** 616c97a9e10SNick Piggin * page_dup_rmap - duplicate pte mapping to a page 617c97a9e10SNick Piggin * @page: the page to add the mapping to 61843d8eac4SRandy Dunlap * @vma: the vm area being duplicated 61943d8eac4SRandy Dunlap * @address: the user virtual address mapped 620c97a9e10SNick Piggin * 621c97a9e10SNick Piggin * For copy_page_range only: minimal extract from page_add_file_rmap / 622c97a9e10SNick Piggin * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's 623c97a9e10SNick Piggin * quicker. 624c97a9e10SNick Piggin * 625c97a9e10SNick Piggin * The caller needs to hold the pte lock. 626c97a9e10SNick Piggin */ 627c97a9e10SNick Piggin void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) 628c97a9e10SNick Piggin { 629c97a9e10SNick Piggin BUG_ON(page_mapcount(page) == 0); 630c97a9e10SNick Piggin if (PageAnon(page)) 631c97a9e10SNick Piggin __page_check_anon_rmap(page, vma, address); 632c97a9e10SNick Piggin atomic_inc(&page->_mapcount); 633c97a9e10SNick Piggin } 634c97a9e10SNick Piggin #endif 635c97a9e10SNick Piggin 6361da177e4SLinus Torvalds /** 6371da177e4SLinus Torvalds * page_remove_rmap - take down pte mapping from a page 6381da177e4SLinus Torvalds * @page: page to remove mapping from 63943d8eac4SRandy Dunlap * @vma: the vm area in which the mapping is removed 6401da177e4SLinus Torvalds * 641b8072f09SHugh Dickins * The caller needs to hold the pte lock. 6421da177e4SLinus Torvalds */ 6437de6b805SNick Piggin void page_remove_rmap(struct page *page, struct vm_area_struct *vma) 6441da177e4SLinus Torvalds { 6451da177e4SLinus Torvalds if (atomic_add_negative(-1, &page->_mapcount)) { 646b7ab795bSNick Piggin if (unlikely(page_mapcount(page) < 0)) { 647ef2bf0dcSDave Jones printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page)); 6487de6b805SNick Piggin printk (KERN_EMERG " page pfn = %lx\n", page_to_pfn(page)); 649ef2bf0dcSDave Jones printk (KERN_EMERG " page->flags = %lx\n", page->flags); 650ef2bf0dcSDave Jones printk (KERN_EMERG " page->count = %x\n", page_count(page)); 651ef2bf0dcSDave Jones printk (KERN_EMERG " page->mapping = %p\n", page->mapping); 6527de6b805SNick Piggin print_symbol (KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops); 65354cb8821SNick Piggin if (vma->vm_ops) { 65454cb8821SNick Piggin print_symbol (KERN_EMERG " vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault); 65554cb8821SNick Piggin } 6567de6b805SNick Piggin if (vma->vm_file && vma->vm_file->f_op) 6577de6b805SNick Piggin print_symbol (KERN_EMERG " vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap); 658b16bc64dSDave Jones BUG(); 659ef2bf0dcSDave Jones } 660b16bc64dSDave Jones 6611da177e4SLinus Torvalds /* 6621da177e4SLinus Torvalds * It would be tidy to reset the PageAnon mapping here, 6631da177e4SLinus Torvalds * but that might overwrite a racing page_add_anon_rmap 6641da177e4SLinus Torvalds * which increments mapcount after us but sets mapping 6651da177e4SLinus Torvalds * before us: so leave the reset to free_hot_cold_page, 6661da177e4SLinus Torvalds * and remember that it's only reliable while mapped. 6671da177e4SLinus Torvalds * Leaving it set also helps swapoff to reinstate ptes 6681da177e4SLinus Torvalds * faster for those pages still in swapcache. 6691da177e4SLinus Torvalds */ 6706c210482SMartin Schwidefsky if (page_test_dirty(page)) { 6716c210482SMartin Schwidefsky page_clear_dirty(page); 6721da177e4SLinus Torvalds set_page_dirty(page); 6736c210482SMartin Schwidefsky } 6748a9f3ccdSBalbir Singh mem_cgroup_uncharge_page(page); 6758a9f3ccdSBalbir Singh 676f3dbd344SChristoph Lameter __dec_zone_page_state(page, 677f3dbd344SChristoph Lameter PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); 6781da177e4SLinus Torvalds } 6791da177e4SLinus Torvalds } 6801da177e4SLinus Torvalds 6811da177e4SLinus Torvalds /* 6821da177e4SLinus Torvalds * Subfunctions of try_to_unmap: try_to_unmap_one called 6831da177e4SLinus Torvalds * repeatedly from either try_to_unmap_anon or try_to_unmap_file. 6841da177e4SLinus Torvalds */ 685a48d07afSChristoph Lameter static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 6867352349aSChristoph Lameter int migration) 6871da177e4SLinus Torvalds { 6881da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 6891da177e4SLinus Torvalds unsigned long address; 6901da177e4SLinus Torvalds pte_t *pte; 6911da177e4SLinus Torvalds pte_t pteval; 692c0718806SHugh Dickins spinlock_t *ptl; 6931da177e4SLinus Torvalds int ret = SWAP_AGAIN; 6941da177e4SLinus Torvalds 6951da177e4SLinus Torvalds address = vma_address(page, vma); 6961da177e4SLinus Torvalds if (address == -EFAULT) 6971da177e4SLinus Torvalds goto out; 6981da177e4SLinus Torvalds 699c0718806SHugh Dickins pte = page_check_address(page, mm, address, &ptl); 700c0718806SHugh Dickins if (!pte) 70181b4082dSNikita Danilov goto out; 7021da177e4SLinus Torvalds 7031da177e4SLinus Torvalds /* 7041da177e4SLinus Torvalds * If the page is mlock()d, we cannot swap it out. 7051da177e4SLinus Torvalds * If it's recently referenced (perhaps page_referenced 7061da177e4SLinus Torvalds * skipped over this mm) then we should reactivate it. 7071da177e4SLinus Torvalds */ 708e6a1530dSChristoph Lameter if (!migration && ((vma->vm_flags & VM_LOCKED) || 709*cddb8a5cSAndrea Arcangeli (ptep_clear_flush_young_notify(vma, address, pte)))) { 7101da177e4SLinus Torvalds ret = SWAP_FAIL; 7111da177e4SLinus Torvalds goto out_unmap; 7121da177e4SLinus Torvalds } 7131da177e4SLinus Torvalds 7141da177e4SLinus Torvalds /* Nuke the page table entry. */ 7151da177e4SLinus Torvalds flush_cache_page(vma, address, page_to_pfn(page)); 716*cddb8a5cSAndrea Arcangeli pteval = ptep_clear_flush_notify(vma, address, pte); 7171da177e4SLinus Torvalds 7181da177e4SLinus Torvalds /* Move the dirty bit to the physical page now the pte is gone. */ 7191da177e4SLinus Torvalds if (pte_dirty(pteval)) 7201da177e4SLinus Torvalds set_page_dirty(page); 7211da177e4SLinus Torvalds 722365e9c87SHugh Dickins /* Update high watermark before we lower rss */ 723365e9c87SHugh Dickins update_hiwater_rss(mm); 724365e9c87SHugh Dickins 7251da177e4SLinus Torvalds if (PageAnon(page)) { 7264c21e2f2SHugh Dickins swp_entry_t entry = { .val = page_private(page) }; 7270697212aSChristoph Lameter 7280697212aSChristoph Lameter if (PageSwapCache(page)) { 7291da177e4SLinus Torvalds /* 7301da177e4SLinus Torvalds * Store the swap location in the pte. 7311da177e4SLinus Torvalds * See handle_pte_fault() ... 7321da177e4SLinus Torvalds */ 7331da177e4SLinus Torvalds swap_duplicate(entry); 7341da177e4SLinus Torvalds if (list_empty(&mm->mmlist)) { 7351da177e4SLinus Torvalds spin_lock(&mmlist_lock); 736f412ac08SHugh Dickins if (list_empty(&mm->mmlist)) 7371da177e4SLinus Torvalds list_add(&mm->mmlist, &init_mm.mmlist); 7381da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 7391da177e4SLinus Torvalds } 740442c9137SChristoph Lameter dec_mm_counter(mm, anon_rss); 74104e62a29SChristoph Lameter #ifdef CONFIG_MIGRATION 7420697212aSChristoph Lameter } else { 7430697212aSChristoph Lameter /* 7440697212aSChristoph Lameter * Store the pfn of the page in a special migration 7450697212aSChristoph Lameter * pte. do_swap_page() will wait until the migration 7460697212aSChristoph Lameter * pte is removed and then restart fault handling. 7470697212aSChristoph Lameter */ 7480697212aSChristoph Lameter BUG_ON(!migration); 7490697212aSChristoph Lameter entry = make_migration_entry(page, pte_write(pteval)); 75004e62a29SChristoph Lameter #endif 7510697212aSChristoph Lameter } 7521da177e4SLinus Torvalds set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 7531da177e4SLinus Torvalds BUG_ON(pte_file(*pte)); 7544294621fSHugh Dickins } else 75504e62a29SChristoph Lameter #ifdef CONFIG_MIGRATION 75604e62a29SChristoph Lameter if (migration) { 75704e62a29SChristoph Lameter /* Establish migration entry for a file page */ 75804e62a29SChristoph Lameter swp_entry_t entry; 75904e62a29SChristoph Lameter entry = make_migration_entry(page, pte_write(pteval)); 76004e62a29SChristoph Lameter set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 76104e62a29SChristoph Lameter } else 76204e62a29SChristoph Lameter #endif 7634294621fSHugh Dickins dec_mm_counter(mm, file_rss); 7641da177e4SLinus Torvalds 76504e62a29SChristoph Lameter 7667de6b805SNick Piggin page_remove_rmap(page, vma); 7671da177e4SLinus Torvalds page_cache_release(page); 7681da177e4SLinus Torvalds 7691da177e4SLinus Torvalds out_unmap: 770c0718806SHugh Dickins pte_unmap_unlock(pte, ptl); 7711da177e4SLinus Torvalds out: 7721da177e4SLinus Torvalds return ret; 7731da177e4SLinus Torvalds } 7741da177e4SLinus Torvalds 7751da177e4SLinus Torvalds /* 7761da177e4SLinus Torvalds * objrmap doesn't work for nonlinear VMAs because the assumption that 7771da177e4SLinus Torvalds * offset-into-file correlates with offset-into-virtual-addresses does not hold. 7781da177e4SLinus Torvalds * Consequently, given a particular page and its ->index, we cannot locate the 7791da177e4SLinus Torvalds * ptes which are mapping that page without an exhaustive linear search. 7801da177e4SLinus Torvalds * 7811da177e4SLinus Torvalds * So what this code does is a mini "virtual scan" of each nonlinear VMA which 7821da177e4SLinus Torvalds * maps the file to which the target page belongs. The ->vm_private_data field 7831da177e4SLinus Torvalds * holds the current cursor into that scan. Successive searches will circulate 7841da177e4SLinus Torvalds * around the vma's virtual address space. 7851da177e4SLinus Torvalds * 7861da177e4SLinus Torvalds * So as more replacement pressure is applied to the pages in a nonlinear VMA, 7871da177e4SLinus Torvalds * more scanning pressure is placed against them as well. Eventually pages 7881da177e4SLinus Torvalds * will become fully unmapped and are eligible for eviction. 7891da177e4SLinus Torvalds * 7901da177e4SLinus Torvalds * For very sparsely populated VMAs this is a little inefficient - chances are 7911da177e4SLinus Torvalds * there there won't be many ptes located within the scan cluster. In this case 7921da177e4SLinus Torvalds * maybe we could scan further - to the end of the pte page, perhaps. 7931da177e4SLinus Torvalds */ 7941da177e4SLinus Torvalds #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE) 7951da177e4SLinus Torvalds #define CLUSTER_MASK (~(CLUSTER_SIZE - 1)) 7961da177e4SLinus Torvalds 7971da177e4SLinus Torvalds static void try_to_unmap_cluster(unsigned long cursor, 7981da177e4SLinus Torvalds unsigned int *mapcount, struct vm_area_struct *vma) 7991da177e4SLinus Torvalds { 8001da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 8011da177e4SLinus Torvalds pgd_t *pgd; 8021da177e4SLinus Torvalds pud_t *pud; 8031da177e4SLinus Torvalds pmd_t *pmd; 804c0718806SHugh Dickins pte_t *pte; 8051da177e4SLinus Torvalds pte_t pteval; 806c0718806SHugh Dickins spinlock_t *ptl; 8071da177e4SLinus Torvalds struct page *page; 8081da177e4SLinus Torvalds unsigned long address; 8091da177e4SLinus Torvalds unsigned long end; 8101da177e4SLinus Torvalds 8111da177e4SLinus Torvalds address = (vma->vm_start + cursor) & CLUSTER_MASK; 8121da177e4SLinus Torvalds end = address + CLUSTER_SIZE; 8131da177e4SLinus Torvalds if (address < vma->vm_start) 8141da177e4SLinus Torvalds address = vma->vm_start; 8151da177e4SLinus Torvalds if (end > vma->vm_end) 8161da177e4SLinus Torvalds end = vma->vm_end; 8171da177e4SLinus Torvalds 8181da177e4SLinus Torvalds pgd = pgd_offset(mm, address); 8191da177e4SLinus Torvalds if (!pgd_present(*pgd)) 820c0718806SHugh Dickins return; 8211da177e4SLinus Torvalds 8221da177e4SLinus Torvalds pud = pud_offset(pgd, address); 8231da177e4SLinus Torvalds if (!pud_present(*pud)) 824c0718806SHugh Dickins return; 8251da177e4SLinus Torvalds 8261da177e4SLinus Torvalds pmd = pmd_offset(pud, address); 8271da177e4SLinus Torvalds if (!pmd_present(*pmd)) 828c0718806SHugh Dickins return; 829c0718806SHugh Dickins 830c0718806SHugh Dickins pte = pte_offset_map_lock(mm, pmd, address, &ptl); 8311da177e4SLinus Torvalds 832365e9c87SHugh Dickins /* Update high watermark before we lower rss */ 833365e9c87SHugh Dickins update_hiwater_rss(mm); 834365e9c87SHugh Dickins 835c0718806SHugh Dickins for (; address < end; pte++, address += PAGE_SIZE) { 8361da177e4SLinus Torvalds if (!pte_present(*pte)) 8371da177e4SLinus Torvalds continue; 8386aab341eSLinus Torvalds page = vm_normal_page(vma, address, *pte); 8396aab341eSLinus Torvalds BUG_ON(!page || PageAnon(page)); 8401da177e4SLinus Torvalds 841*cddb8a5cSAndrea Arcangeli if (ptep_clear_flush_young_notify(vma, address, pte)) 8421da177e4SLinus Torvalds continue; 8431da177e4SLinus Torvalds 8441da177e4SLinus Torvalds /* Nuke the page table entry. */ 845eca35133SBen Collins flush_cache_page(vma, address, pte_pfn(*pte)); 846*cddb8a5cSAndrea Arcangeli pteval = ptep_clear_flush_notify(vma, address, pte); 8471da177e4SLinus Torvalds 8481da177e4SLinus Torvalds /* If nonlinear, store the file page offset in the pte. */ 8491da177e4SLinus Torvalds if (page->index != linear_page_index(vma, address)) 8501da177e4SLinus Torvalds set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); 8511da177e4SLinus Torvalds 8521da177e4SLinus Torvalds /* Move the dirty bit to the physical page now the pte is gone. */ 8531da177e4SLinus Torvalds if (pte_dirty(pteval)) 8541da177e4SLinus Torvalds set_page_dirty(page); 8551da177e4SLinus Torvalds 8567de6b805SNick Piggin page_remove_rmap(page, vma); 8571da177e4SLinus Torvalds page_cache_release(page); 8584294621fSHugh Dickins dec_mm_counter(mm, file_rss); 8591da177e4SLinus Torvalds (*mapcount)--; 8601da177e4SLinus Torvalds } 861c0718806SHugh Dickins pte_unmap_unlock(pte - 1, ptl); 8621da177e4SLinus Torvalds } 8631da177e4SLinus Torvalds 8647352349aSChristoph Lameter static int try_to_unmap_anon(struct page *page, int migration) 8651da177e4SLinus Torvalds { 8661da177e4SLinus Torvalds struct anon_vma *anon_vma; 8671da177e4SLinus Torvalds struct vm_area_struct *vma; 8681da177e4SLinus Torvalds int ret = SWAP_AGAIN; 8691da177e4SLinus Torvalds 8701da177e4SLinus Torvalds anon_vma = page_lock_anon_vma(page); 8711da177e4SLinus Torvalds if (!anon_vma) 8721da177e4SLinus Torvalds return ret; 8731da177e4SLinus Torvalds 8741da177e4SLinus Torvalds list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { 8757352349aSChristoph Lameter ret = try_to_unmap_one(page, vma, migration); 8761da177e4SLinus Torvalds if (ret == SWAP_FAIL || !page_mapped(page)) 8771da177e4SLinus Torvalds break; 8781da177e4SLinus Torvalds } 87934bbd704SOleg Nesterov 88034bbd704SOleg Nesterov page_unlock_anon_vma(anon_vma); 8811da177e4SLinus Torvalds return ret; 8821da177e4SLinus Torvalds } 8831da177e4SLinus Torvalds 8841da177e4SLinus Torvalds /** 8851da177e4SLinus Torvalds * try_to_unmap_file - unmap file page using the object-based rmap method 8861da177e4SLinus Torvalds * @page: the page to unmap 88743d8eac4SRandy Dunlap * @migration: migration flag 8881da177e4SLinus Torvalds * 8891da177e4SLinus Torvalds * Find all the mappings of a page using the mapping pointer and the vma chains 8901da177e4SLinus Torvalds * contained in the address_space struct it points to. 8911da177e4SLinus Torvalds * 8921da177e4SLinus Torvalds * This function is only called from try_to_unmap for object-based pages. 8931da177e4SLinus Torvalds */ 8947352349aSChristoph Lameter static int try_to_unmap_file(struct page *page, int migration) 8951da177e4SLinus Torvalds { 8961da177e4SLinus Torvalds struct address_space *mapping = page->mapping; 8971da177e4SLinus Torvalds pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 8981da177e4SLinus Torvalds struct vm_area_struct *vma; 8991da177e4SLinus Torvalds struct prio_tree_iter iter; 9001da177e4SLinus Torvalds int ret = SWAP_AGAIN; 9011da177e4SLinus Torvalds unsigned long cursor; 9021da177e4SLinus Torvalds unsigned long max_nl_cursor = 0; 9031da177e4SLinus Torvalds unsigned long max_nl_size = 0; 9041da177e4SLinus Torvalds unsigned int mapcount; 9051da177e4SLinus Torvalds 9061da177e4SLinus Torvalds spin_lock(&mapping->i_mmap_lock); 9071da177e4SLinus Torvalds vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 9087352349aSChristoph Lameter ret = try_to_unmap_one(page, vma, migration); 9091da177e4SLinus Torvalds if (ret == SWAP_FAIL || !page_mapped(page)) 9101da177e4SLinus Torvalds goto out; 9111da177e4SLinus Torvalds } 9121da177e4SLinus Torvalds 9131da177e4SLinus Torvalds if (list_empty(&mapping->i_mmap_nonlinear)) 9141da177e4SLinus Torvalds goto out; 9151da177e4SLinus Torvalds 9161da177e4SLinus Torvalds list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 9171da177e4SLinus Torvalds shared.vm_set.list) { 918e6a1530dSChristoph Lameter if ((vma->vm_flags & VM_LOCKED) && !migration) 9191da177e4SLinus Torvalds continue; 9201da177e4SLinus Torvalds cursor = (unsigned long) vma->vm_private_data; 9211da177e4SLinus Torvalds if (cursor > max_nl_cursor) 9221da177e4SLinus Torvalds max_nl_cursor = cursor; 9231da177e4SLinus Torvalds cursor = vma->vm_end - vma->vm_start; 9241da177e4SLinus Torvalds if (cursor > max_nl_size) 9251da177e4SLinus Torvalds max_nl_size = cursor; 9261da177e4SLinus Torvalds } 9271da177e4SLinus Torvalds 9281da177e4SLinus Torvalds if (max_nl_size == 0) { /* any nonlinears locked or reserved */ 9291da177e4SLinus Torvalds ret = SWAP_FAIL; 9301da177e4SLinus Torvalds goto out; 9311da177e4SLinus Torvalds } 9321da177e4SLinus Torvalds 9331da177e4SLinus Torvalds /* 9341da177e4SLinus Torvalds * We don't try to search for this page in the nonlinear vmas, 9351da177e4SLinus Torvalds * and page_referenced wouldn't have found it anyway. Instead 9361da177e4SLinus Torvalds * just walk the nonlinear vmas trying to age and unmap some. 9371da177e4SLinus Torvalds * The mapcount of the page we came in with is irrelevant, 9381da177e4SLinus Torvalds * but even so use it as a guide to how hard we should try? 9391da177e4SLinus Torvalds */ 9401da177e4SLinus Torvalds mapcount = page_mapcount(page); 9411da177e4SLinus Torvalds if (!mapcount) 9421da177e4SLinus Torvalds goto out; 9431da177e4SLinus Torvalds cond_resched_lock(&mapping->i_mmap_lock); 9441da177e4SLinus Torvalds 9451da177e4SLinus Torvalds max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; 9461da177e4SLinus Torvalds if (max_nl_cursor == 0) 9471da177e4SLinus Torvalds max_nl_cursor = CLUSTER_SIZE; 9481da177e4SLinus Torvalds 9491da177e4SLinus Torvalds do { 9501da177e4SLinus Torvalds list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 9511da177e4SLinus Torvalds shared.vm_set.list) { 952e6a1530dSChristoph Lameter if ((vma->vm_flags & VM_LOCKED) && !migration) 9531da177e4SLinus Torvalds continue; 9541da177e4SLinus Torvalds cursor = (unsigned long) vma->vm_private_data; 955839b9685SHugh Dickins while ( cursor < max_nl_cursor && 9561da177e4SLinus Torvalds cursor < vma->vm_end - vma->vm_start) { 9571da177e4SLinus Torvalds try_to_unmap_cluster(cursor, &mapcount, vma); 9581da177e4SLinus Torvalds cursor += CLUSTER_SIZE; 9591da177e4SLinus Torvalds vma->vm_private_data = (void *) cursor; 9601da177e4SLinus Torvalds if ((int)mapcount <= 0) 9611da177e4SLinus Torvalds goto out; 9621da177e4SLinus Torvalds } 9631da177e4SLinus Torvalds vma->vm_private_data = (void *) max_nl_cursor; 9641da177e4SLinus Torvalds } 9651da177e4SLinus Torvalds cond_resched_lock(&mapping->i_mmap_lock); 9661da177e4SLinus Torvalds max_nl_cursor += CLUSTER_SIZE; 9671da177e4SLinus Torvalds } while (max_nl_cursor <= max_nl_size); 9681da177e4SLinus Torvalds 9691da177e4SLinus Torvalds /* 9701da177e4SLinus Torvalds * Don't loop forever (perhaps all the remaining pages are 9711da177e4SLinus Torvalds * in locked vmas). Reset cursor on all unreserved nonlinear 9721da177e4SLinus Torvalds * vmas, now forgetting on which ones it had fallen behind. 9731da177e4SLinus Torvalds */ 974101d2be7SHugh Dickins list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 9751da177e4SLinus Torvalds vma->vm_private_data = NULL; 9761da177e4SLinus Torvalds out: 9771da177e4SLinus Torvalds spin_unlock(&mapping->i_mmap_lock); 9781da177e4SLinus Torvalds return ret; 9791da177e4SLinus Torvalds } 9801da177e4SLinus Torvalds 9811da177e4SLinus Torvalds /** 9821da177e4SLinus Torvalds * try_to_unmap - try to remove all page table mappings to a page 9831da177e4SLinus Torvalds * @page: the page to get unmapped 98443d8eac4SRandy Dunlap * @migration: migration flag 9851da177e4SLinus Torvalds * 9861da177e4SLinus Torvalds * Tries to remove all the page table entries which are mapping this 9871da177e4SLinus Torvalds * page, used in the pageout path. Caller must hold the page lock. 9881da177e4SLinus Torvalds * Return values are: 9891da177e4SLinus Torvalds * 9901da177e4SLinus Torvalds * SWAP_SUCCESS - we succeeded in removing all mappings 9911da177e4SLinus Torvalds * SWAP_AGAIN - we missed a mapping, try again later 9921da177e4SLinus Torvalds * SWAP_FAIL - the page is unswappable 9931da177e4SLinus Torvalds */ 9947352349aSChristoph Lameter int try_to_unmap(struct page *page, int migration) 9951da177e4SLinus Torvalds { 9961da177e4SLinus Torvalds int ret; 9971da177e4SLinus Torvalds 9981da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 9991da177e4SLinus Torvalds 10001da177e4SLinus Torvalds if (PageAnon(page)) 10017352349aSChristoph Lameter ret = try_to_unmap_anon(page, migration); 10021da177e4SLinus Torvalds else 10037352349aSChristoph Lameter ret = try_to_unmap_file(page, migration); 10041da177e4SLinus Torvalds 10051da177e4SLinus Torvalds if (!page_mapped(page)) 10061da177e4SLinus Torvalds ret = SWAP_SUCCESS; 10071da177e4SLinus Torvalds return ret; 10081da177e4SLinus Torvalds } 100981b4082dSNikita Danilov 1010