/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	if (dax_mapping(mapping)) {
		dax_delete_mapping_entry(mapping, index);
		return;
	}
	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
				&slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	radix_tree_replace_slot(slot, NULL);
	mapping->nrexceptional--;
	if (!node)
		goto unlock;
	workingset_node_shadows_dec(node);
	/*
	 * Don't track nodes without shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already untracked.
	 * The list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_shadows(node) &&
	    !list_empty(&node->private_list))
		list_lru_del(&workingset_shadow_nodes,
				&node->private_list);
	__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}

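/*
 * Illustrative sketch (not part of this file): what a filesystem's own
 * ->invalidatepage handler might look like, honouring the contract
 * documented above.  "myfs" and myfs_forget_page_state() are hypothetical;
 * a buffer_head-based filesystem can usually just delegate to
 * block_invalidatepage(), which discards buffers in the given range.
 */
#if 0	/* example only */
static void myfs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	/* Whole-page invalidation: tear down fs-private tracking, if any. */
	if (offset == 0 && length == PAGE_SIZE)
		myfs_forget_page_state(page);		/* hypothetical */

	/* Discard (dirty) buffers inside the invalidated range. */
	block_invalidatepage(page, offset, length);
}
#endif
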
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence the dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	loff_t holelen;
	VM_BUG_ON_PAGE(PageTail(page), page);

	holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_SHIFT,
				   holelen, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

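/*
 * Illustrative sketch (not part of this file): filesystems typically make
 * hardware-poison handling work by wiring this helper into their
 * address_space_operations.  The myfs_* methods are hypothetical.
 */
#if 0	/* example only */
static const struct address_space_operations myfs_aops = {
	.readpage		= myfs_readpage,	/* hypothetical */
	.writepage		= myfs_writepage,	/* hypothetical */
	.invalidatepage		= myfs_invalidatepage,	/* hypothetical */
	.error_remove_page	= generic_error_remove_page,
};
#endif
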
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages between the specified
 * offsets (and zeroing out partial pages if lstart or lend + 1 is
 * not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is
 * not properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page, no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim cannot participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);

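/*
 * Illustrative sketch (not part of this file): a minimal .evict_inode
 * implementation for a hypothetical "myfs", showing where
 * truncate_inode_pages_final() fits as described above.
 */
#if 0	/* example only */
static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	/* release fs-private inode resources here */
}
#endif
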
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/* 'end' is in the middle of THP */
				if (index == round_down(end, HPAGE_PMD_NR))
					continue;
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

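/*
 * Illustrative sketch (not part of this file): dropping clean cached pages
 * for a byte range, roughly what the POSIX_FADV_DONTNEED path does with
 * this helper.  myfs_drop_cached_range() is a hypothetical wrapper.
 */
#if 0	/* example only */
static void myfs_drop_cached_range(struct inode *inode, loff_t pos, loff_t len)
{
	/* Round the start up: a partially-covered first page is kept. */
	pgoff_t first = (pos + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t last = (pos + len - 1) >> PAGE_SHIFT;

	if (first <= last)
		invalidate_mapping_pages(inode->i_mapping, first, last);
}
#endif
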
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_SHIFT,
							 0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   PAGE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

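/*
 * Illustrative sketch (not part of this file): a direct-I/O style user of
 * the API above.  After writing data past the page cache, any cached pages
 * covering the written range must be invalidated so later buffered reads
 * don't see stale data.  myfs_dio_complete() is hypothetical.
 */
#if 0	/* example only */
static int myfs_dio_complete(struct inode *inode, loff_t pos, size_t count)
{
	if (!count)
		return 0;
	return invalidate_inode_pages2_range(inode->i_mapping,
			pos >> PAGE_SHIFT,
			(pos + count - 1) >> PAGE_SHIFT);
}
#endif
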
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

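/*
 * Illustrative sketch (not part of this file): a typical .setattr method for
 * a hypothetical filesystem.  Per the comment above, truncate_setsize() runs
 * under i_mutex and before fs-specific block truncation;
 * myfs_truncate_blocks() is hypothetical.
 */
#if 0	/* example only */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (attr->ia_valid & ATTR_SIZE) {
		truncate_setsize(inode, attr->ia_size);
		myfs_truncate_blocks(inode, attr->ia_size);	/* hypothetical */
	}
	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
#endif
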
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of the inode size, caused either by an extending truncate
 * or by a write starting after the current i_size.  We mark the page
 * straddling the current i_size RO so that page_mkwrite() is called on the
 * first write access to the page.  This way the filesystem can be sure that
 * page_mkwrite() is called on the page before a user write via mmap after
 * the i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the new
 * i_size value before we are prepared to store mmap writes at the new size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = 1 << inode->i_blkbits;
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
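
/*
 * Illustrative sketch (not part of this file): a hole-punch helper in the
 * spirit of fallocate(FALLOC_FL_PUNCH_HOLE), invalidating the cache before
 * the (hypothetical) myfs_free_blocks_in_range() deallocates blocks.
 */
#if 0	/* example only */
static int myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t end = offset + len - 1;

	inode_lock(inode);
	truncate_pagecache_range(inode, offset, end);
	myfs_free_blocks_in_range(inode, offset, end);	/* hypothetical */
	inode_unlock(inode);
	return 0;
}
#endif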