/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
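
/*
 * Zero out the part of a page that lies beyond the truncation point:
 * everything from @partial to the end of the page is cleared, and any
 * fs-private buffers past that offset are thrown away via
 * do_invalidatepage().
 */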
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
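
/*
 * Typical use within this file: truncate_complete_page() below calls
 * cancel_dirty_page(page, PAGE_CACHE_SIZE) just before removing the page
 * from the pagecache, so that the NR_FILE_DIRTY counter and the task's
 * cancelled-write accounting stay balanced for pages which will now never
 * be written back.
 */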

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	remove_from_page_cache(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}
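
/*
 * Note the asymmetry between the two helpers above: truncation must always
 * get rid of the page, whereas invalidate_complete_page() may legitimately
 * fail (returning 0) when the page cannot be dropped safely - for example
 * because its private buffers are busy or someone else still holds a
 * reference - in which case the page is simply left in the cache.
 */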

/**
 * truncate_inode_pages_range - truncate range of pages specified by start and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}
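
	/*
	 * Second pass: anything the nonblocking pass had to skip (locked
	 * pages, pages under writeback) is dealt with here, waiting as
	 * needed, and we keep rescanning from 'start' until nothing is
	 * left in the range.
	 */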
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
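
/*
 * Worker for invalidate_mapping_pages(): walk the range and drop every page
 * that is unlocked, clean, not under writeback and not mapped into
 * pagetables.  When @be_atomic is true the cond_resched() between pagevecs
 * is skipped, for callers that must not reschedule here.
 */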
unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
			cond_resched();
	}
	return ret;
}

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
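
/*
 * Illustrative use (not part of this file): a caller that just wants to
 * shed whatever clean pagecache it can for an inode might do
 *
 *	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *
 * and accept that dirty, locked, mapped or otherwise busy pages stay behind.
 */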

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
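
/*
 * do_launder_page() gives the filesystem (via ->launder_page) one last
 * chance to write a dirty page back; invalidate_inode_pages2_range() below
 * calls it immediately before invalidate_complete_page2(), which refuses
 * to drop a page that is still dirty.
 */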

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			ret = do_launder_page(mapping, page);
			if (ret == 0 && !invalidate_complete_page2(mapping, page))
				ret = -EIO;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
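
/*
 * Illustrative use (not part of this file; names are made up): a direct-I/O
 * style caller typically shoots down the cached range it is about to bypass,
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT, end >> PAGE_CACHE_SHIFT);
 *
 * and treats -EIO as "a page in the range could not be invalidated".
 */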

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);