/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	__radix_tree_replace(&mapping->page_tree, node, slot, NULL,
			     workingset_update_node, mapping);
	mapping->nrexceptional--;
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/*
 * Unconditionally remove exceptional entry. Usually called from truncate path.
 */
static void truncate_exceptional_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	if (dax_mapping(mapping)) {
		dax_delete_mapping_entry(mapping, index);
		return;
	}
	clear_shadow_entry(mapping, index, entry);
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
 * clean entries.
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
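
/*
 * A hedged sketch (not part of this file) of how a filesystem might wire
 * up its own ->invalidatepage handler; the "myfs" name is hypothetical.
 * A real implementation would also tear down any fs-private state (e.g.
 * journal bookkeeping) for the invalidated range before releasing buffers.
 */
static void myfs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	/* Let the generic buffer_head code discard buffers in the range. */
	block_invalidatepage(page, offset, length);
}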

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence the dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	loff_t holelen;
	VM_BUG_ON_PAGE(PageTail(page), page);

	holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_SHIFT,
				   holelen, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
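
/*
 * Filesystems typically hook this up through their address_space_operations
 * so that the memory-failure code can punch out poisoned pages.  A hedged
 * sketch, with a hypothetical "myfs":
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */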

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
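	/*
	 * Worked example (hedged, assuming 4K pages): lstart == 512 and
	 * lend == 8191 give partial_start == 512, partial_end == 0,
	 * start == 1 and end == 2, i.e. page 0 is zeroed from byte 512
	 * onwards and page 1 is truncated in full.
	 */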
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				truncate_exceptional_entry(mapping, index,
							   page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				truncate_exceptional_entry(mapping, index,
							   page);
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
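
/*
 * Hedged usage sketch showing the inclusive @lend convention: zeroing a
 * sub-page hole.  "myfs_zero_range_cache" is a hypothetical helper; with
 * start == 1024 and len == 512 the call below only zeroes bytes
 * 1024..1535 of page 0, and no whole page is removed.
 */
static void myfs_zero_range_cache(struct inode *inode, loff_t start,
				  loff_t len)
{
	truncate_inode_pages_range(inode->i_mapping, start, start + len - 1);
}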

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
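
/*
 * Hedged sketch of the intended call site: a filesystem's .evict_inode
 * calling truncate_inode_pages_final() so reclaim sees AS_EXITING.  The
 * "myfs" name is hypothetical.
 */
static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);	/* final inode teardown */
}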

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/* 'end' is in the middle of THP */
				if (index == round_down(end, HPAGE_PMD_NR))
					continue;
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
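
/*
 * Hedged example: dropping all clean, unmapped cache for a file, much as
 * a POSIX_FADV_DONTNEED-style hint would.  Passing -1 as the (unsigned)
 * end offset means "to the last possible page".  "myfs" is hypothetical.
 */
static void myfs_drop_clean_cache(struct address_space *mapping)
{
	invalidate_mapping_pages(mapping, 0, -1);
}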

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_SHIFT,
							 0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   PAGE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
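
/*
 * Hedged sketch: a direct-IO write path invalidating the cached pages
 * covering [pos, pos + count) so that later buffered reads observe the
 * data written around the page cache.  "myfs_dio_invalidate" is a
 * hypothetical helper.
 */
static int myfs_dio_invalidate(struct address_space *mapping,
			       loff_t pos, size_t count)
{
	return invalidate_inode_pages2_range(mapping,
			pos >> PAGE_SHIFT,
			(pos + count - 1) >> PAGE_SHIFT);
}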

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
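
/*
 * Hedged sketch of the typical caller: a filesystem's ->setattr handling
 * ATTR_SIZE.  "myfs_setattr" is hypothetical; error handling around the
 * filesystem's own block truncation is elided.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (attr->ia_valid & ATTR_SIZE)
		truncate_setsize(inode, attr->ia_size);

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}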

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way the filesystem can be sure that page_mkwrite() is called
 * on the page before user writes to the page via mmap after the i_size has
 * been changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the new
 * i_size value before we are prepared to store mmap writes at the new inode
 * size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
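
/*
 * Hedged sketch of the second kind of caller: a write path that extends
 * i_size directly (rather than through an extending truncate) pairs the
 * i_size update with this call.  "myfs_extend_isize" is hypothetical.
 */
static void myfs_extend_isize(struct inode *inode, loff_t new_size)
{
	loff_t old_size = inode->i_size;

	i_size_write(inode, new_size);
	pagecache_isize_extended(inode, old_size, new_size);
}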

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
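
/*
 * Hedged sketch: the page-cache step of a typical
 * fallocate(FALLOC_FL_PUNCH_HOLE) implementation, with @offset and @len
 * already validated and page-rounded by the caller, as noted above.
 * "myfs_punch_hole" is hypothetical; freeing the underlying on-disk
 * blocks would follow this call.
 */
static void myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	truncate_pagecache_range(inode, offset, offset + len - 1);
}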