xref: /linux/mm/truncate.c (revision 4e4d9c72c946b77f0278988d0bf1207fa1b2cd0f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/truncate.c - code for taking down pages from address_spaces
4  *
5  * Copyright (C) 2002, Linus Torvalds
6  *
7  * 10Sep2002	Andrew Morton
8  *		Initial version.
9  */
10 
11 #include <linux/kernel.h>
12 #include <linux/backing-dev.h>
13 #include <linux/dax.h>
14 #include <linux/gfp.h>
15 #include <linux/mm.h>
16 #include <linux/swap.h>
17 #include <linux/export.h>
18 #include <linux/pagemap.h>
19 #include <linux/highmem.h>
20 #include <linux/pagevec.h>
21 #include <linux/task_io_accounting_ops.h>
22 #include <linux/shmem_fs.h>
23 #include <linux/rmap.h>
24 #include "internal.h"
25 
26 static void clear_shadow_entries(struct address_space *mapping,
27 				 unsigned long start, unsigned long max)
28 {
29 	XA_STATE(xas, &mapping->i_pages, start);
30 	struct folio *folio;
31 
32 	/* Handled by shmem itself, or for DAX we do nothing. */
33 	if (shmem_mapping(mapping) || dax_mapping(mapping))
34 		return;
35 
36 	xas_set_update(&xas, workingset_update_node);
37 
38 	spin_lock(&mapping->host->i_lock);
39 	xas_lock_irq(&xas);
40 
41 	/* Clear all shadow entries from start to max */
42 	xas_for_each(&xas, folio, max) {
43 		if (xa_is_value(folio))
44 			xas_store(&xas, NULL);
45 	}
46 
47 	xas_unlock_irq(&xas);
48 	if (mapping_shrinkable(mapping))
49 		inode_add_lru(mapping->host);
50 	spin_unlock(&mapping->host->i_lock);
51 }
52 
53 /*
54  * Unconditionally remove exceptional entries. Usually called from truncate
55  * path. Note that this function may alter the folio_batch by removing
56  * exceptional entries, similar to what folio_batch_remove_exceptionals() does.
57  * The indices[] array holds entries in ascending order, as guaranteed by
58  * either find_get_entries() or find_lock_entries().
59  */
60 static void truncate_folio_batch_exceptionals(struct address_space *mapping,
61 				struct folio_batch *fbatch, pgoff_t *indices)
62 {
63 	XA_STATE(xas, &mapping->i_pages, indices[0]);
64 	int nr = folio_batch_count(fbatch);
65 	struct folio *folio;
66 	int i, j;
67 
68 	/* Handled by shmem itself */
69 	if (shmem_mapping(mapping))
70 		return;
71 
72 	for (j = 0; j < nr; j++)
73 		if (xa_is_value(fbatch->folios[j]))
74 			break;
75 
76 	if (j == nr)
77 		return;
78 
79 	if (dax_mapping(mapping)) {
80 		for (i = j; i < nr; i++) {
81 			if (xa_is_value(fbatch->folios[i]))
82 				dax_delete_mapping_entry(mapping, indices[i]);
83 		}
84 		goto out;
85 	}
86 
87 	xas_set(&xas, indices[j]);
88 	xas_set_update(&xas, workingset_update_node);
89 
90 	spin_lock(&mapping->host->i_lock);
91 	xas_lock_irq(&xas);
92 
93 	xas_for_each(&xas, folio, indices[nr-1]) {
94 		if (xa_is_value(folio))
95 			xas_store(&xas, NULL);
96 	}
97 
98 	xas_unlock_irq(&xas);
99 	if (mapping_shrinkable(mapping))
100 		inode_add_lru(mapping->host);
101 	spin_unlock(&mapping->host->i_lock);
102 out:
103 	folio_batch_remove_exceptionals(fbatch);
104 }
105 
106 /**
107  * folio_invalidate - Invalidate part or all of a folio.
108  * @folio: The folio which is affected.
109  * @offset: start of the range to invalidate
110  * @length: length of the range to invalidate
111  *
112  * folio_invalidate() is called when all or part of the folio has become
113  * invalidated by a truncate operation.
114  *
115  * folio_invalidate() does not have to release all buffers, but it must
116  * ensure that no dirty buffer is left outside @offset and that no I/O
117  * is underway against any of the blocks which are outside the truncation
118  * point, because the caller is about to free (and possibly reuse) those
119  * blocks on-disk.
120  */
121 void folio_invalidate(struct folio *folio, size_t offset, size_t length)
122 {
123 	const struct address_space_operations *aops = folio->mapping->a_ops;
124 
125 	if (aops->invalidate_folio)
126 		aops->invalidate_folio(folio, offset, length);
127 }
128 EXPORT_SYMBOL_GPL(folio_invalidate);
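/*
 * Illustrative sketch (not part of this file): how a filesystem might hook
 * into folio_invalidate() via its address_space_operations.  "myfs" and
 * myfs_invalidate_folio() are hypothetical; a block-based filesystem would
 * typically forward to block_invalidate_folio() to drop buffers in the range.
 *
 *	static void myfs_invalidate_folio(struct folio *folio, size_t offset,
 *					  size_t length)
 *	{
 *		// Drop private (buffer) state attached to the invalidated range.
 *		block_invalidate_folio(folio, offset, length);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.invalidate_folio	= myfs_invalidate_folio,
 *		// ... other operations ...
 *	};
 */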
129 
130 /*
131  * If truncate cannot remove the fs-private metadata from the page, the page
132  * becomes orphaned.  It will be left on the LRU and may even be mapped into
133  * user pagetables if we're racing with filemap_fault().
134  *
135  * We need to bail out if page->mapping is no longer equal to the original
136  * mapping.  This happens a) when the VM reclaimed the page while we waited on
137  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
138  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
139  */
140 static void truncate_cleanup_folio(struct folio *folio)
141 {
142 	if (folio_mapped(folio))
143 		unmap_mapping_folio(folio);
144 
145 	if (folio_needs_release(folio))
146 		folio_invalidate(folio, 0, folio_size(folio));
147 
148 	/*
149 	 * Some filesystems seem to re-dirty the page even after
150 	 * the VM has canceled the dirty bit (eg ext3 journaling).
151 	 * Hence dirty accounting check is placed after invalidation.
152 	 */
153 	folio_cancel_dirty(folio);
154 	folio_clear_mappedtodisk(folio);
155 }
156 
157 int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
158 {
159 	if (folio->mapping != mapping)
160 		return -EIO;
161 
162 	truncate_cleanup_folio(folio);
163 	filemap_remove_folio(folio);
164 	return 0;
165 }
166 
167 /*
168  * Handle partial folios.  The folio may be entirely within the
169  * range if a split has raced with us.  If not, we zero the part of the
170  * folio that's within the [start, end] range, and then split the folio if
171  * it's large.  split_folio() will discard pages which now lie beyond
172  * i_size, and we rely on the caller to discard pages which lie within a
173  * newly created hole.
174  *
175  * Returns false if splitting failed so the caller can avoid
176  * discarding the entire folio which is stubbornly unsplit.
177  */
178 bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
179 {
180 	loff_t pos = folio_pos(folio);
181 	unsigned int offset, length;
182 
183 	if (pos < start)
184 		offset = start - pos;
185 	else
186 		offset = 0;
187 	length = folio_size(folio);
188 	if (pos + length <= (u64)end)
189 		length = length - offset;
190 	else
191 		length = end + 1 - pos - offset;
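	/*
	 * Worked example (illustrative): a 64KiB folio at pos 0 with
	 * start == 512 and end == 12287 gives offset == 512 and, because
	 * pos + folio_size() exceeds end, length == 12288 - 0 - 512 == 11776,
	 * so bytes 512..12287 are zeroed below (unless the mapping is marked
	 * inaccessible) before the split is attempted.
	 */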
192 
193 	folio_wait_writeback(folio);
194 	if (length == folio_size(folio)) {
195 		truncate_inode_folio(folio->mapping, folio);
196 		return true;
197 	}
198 
199 	/*
200 	 * We may be zeroing pages we're about to discard, but it avoids
201 	 * doing a complex calculation here, and then doing the zeroing
202 	 * anyway if the page split fails.
203 	 */
204 	if (!mapping_inaccessible(folio->mapping))
205 		folio_zero_range(folio, offset, length);
206 
207 	if (folio_needs_release(folio))
208 		folio_invalidate(folio, offset, length);
209 	if (!folio_test_large(folio))
210 		return true;
211 	if (split_folio(folio) == 0)
212 		return true;
213 	if (folio_test_dirty(folio))
214 		return false;
215 	truncate_inode_folio(folio->mapping, folio);
216 	return true;
217 }
218 
219 /*
220  * Used to get rid of pages on hardware memory corruption.
221  */
222 int generic_error_remove_folio(struct address_space *mapping,
223 		struct folio *folio)
224 {
225 	if (!mapping)
226 		return -EINVAL;
227 	/*
228 	 * Only punch for normal data pages for now.
229 	 * Handling other types like directories would need more auditing.
230 	 */
231 	if (!S_ISREG(mapping->host->i_mode))
232 		return -EIO;
233 	return truncate_inode_folio(mapping, folio);
234 }
235 EXPORT_SYMBOL(generic_error_remove_folio);
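/*
 * Illustrative sketch (not part of this file): filesystems that keep regular
 * file data in the page cache can usually point their memory-failure hook
 * straight at this helper.  "myfs" is hypothetical, and the field name is
 * assumed to match the address_space_operations layout in trees that have
 * generic_error_remove_folio().
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.error_remove_folio	= generic_error_remove_folio,
 *		// ... other operations ...
 *	};
 */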
236 
237 /**
238  * mapping_evict_folio() - Remove an unused folio from the page-cache.
239  * @mapping: The mapping this folio belongs to.
240  * @folio: The folio to remove.
241  *
242  * Safely remove one folio from the page cache.
243  * It only drops clean, unused folios.
244  *
245  * Context: Folio must be locked.
246  * Return: The number of pages successfully removed.
247  */
248 long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
249 {
250 	/* The page may have been truncated before it was locked */
251 	if (!mapping)
252 		return 0;
253 	if (folio_test_dirty(folio) || folio_test_writeback(folio))
254 		return 0;
255 	/* The refcount will be elevated if any page in the folio is mapped */
256 	if (folio_ref_count(folio) >
257 			folio_nr_pages(folio) + folio_has_private(folio) + 1)
258 		return 0;
259 	if (!filemap_release_folio(folio, 0))
260 		return 0;
261 
262 	return remove_mapping(mapping, folio);
263 }
264 
265 /**
266  * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
267  * @mapping: mapping to truncate
268  * @lstart: offset from which to truncate
269  * @lend: offset to which to truncate (inclusive)
270  *
271  * Truncate the page cache, removing the pages that are between
272  * specified offsets (and zeroing out partial pages
273  * if lstart or lend + 1 is not page aligned).
274  *
275  * Truncate takes two passes - the first pass is nonblocking.  It will not
276  * block on page locks and it will not block on writeback.  The second pass
277  * will wait.  This is to prevent as much IO as possible in the affected region.
278  * The first pass will remove most pages, so the search cost of the second pass
279  * is low.
280  *
281  * We pass down the cache-hot hint to the page freeing code.  Even if the
282  * mapping is large, it is probably the case that the final pages are the most
283  * recently touched, and freeing happens in ascending file offset order.
284  *
285  * Note that since ->invalidate_folio() accepts a range to invalidate,
286  * truncate_inode_pages_range() is able to handle cases where lend + 1 is
287  * not properly page aligned.
288  */
289 void truncate_inode_pages_range(struct address_space *mapping,
290 				loff_t lstart, loff_t lend)
291 {
292 	pgoff_t		start;		/* inclusive */
293 	pgoff_t		end;		/* exclusive */
294 	struct folio_batch fbatch;
295 	pgoff_t		indices[PAGEVEC_SIZE];
296 	pgoff_t		index;
297 	int		i;
298 	struct folio	*folio;
299 	bool		same_folio;
300 
301 	if (mapping_empty(mapping))
302 		return;
303 
304 	/*
305 	 * 'start' and 'end' always cover the range of pages to be fully
306 	 * truncated. Partially truncated folios at the start and end of the
307 	 * range are handled separately by truncate_inode_partial_folio().
308 	 * Note that 'end' is exclusive while 'lend' is inclusive.
309 	 */
310 	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
311 	if (lend == -1)
312 		/*
313 		 * lend == -1 indicates end-of-file so we have to set 'end'
314 		 * to the highest possible pgoff_t and since the type is
315 		 * unsigned we're using -1.
316 		 */
317 		end = -1;
318 	else
319 		end = (lend + 1) >> PAGE_SHIFT;
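	/*
	 * Worked example (illustrative, assuming 4KiB pages and order-0
	 * folios): for lstart == 512 and lend == 12287, 'start' becomes
	 * (512 + 4095) >> 12 == 1 and 'end' becomes (12287 + 1) >> 12 == 3,
	 * so pages 1 and 2 are truncated in full.  Page 0 is the partial
	 * folio: bytes 512..4095 are zeroed by truncate_inode_partial_folio()
	 * while bytes 0..511 are kept.  Since lend + 1 is page aligned here,
	 * there is no partial folio at the end of the range.
	 */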
320 
321 	folio_batch_init(&fbatch);
322 	index = start;
323 	while (index < end && find_lock_entries(mapping, &index, end - 1,
324 			&fbatch, indices)) {
325 		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
326 		for (i = 0; i < folio_batch_count(&fbatch); i++)
327 			truncate_cleanup_folio(fbatch.folios[i]);
328 		delete_from_page_cache_batch(mapping, &fbatch);
329 		for (i = 0; i < folio_batch_count(&fbatch); i++)
330 			folio_unlock(fbatch.folios[i]);
331 		folio_batch_release(&fbatch);
332 		cond_resched();
333 	}
334 
335 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
336 	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
337 	if (!IS_ERR(folio)) {
338 		same_folio = lend < folio_pos(folio) + folio_size(folio);
339 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
340 			start = folio_next_index(folio);
341 			if (same_folio)
342 				end = folio->index;
343 		}
344 		folio_unlock(folio);
345 		folio_put(folio);
346 		folio = NULL;
347 	}
348 
349 	if (!same_folio) {
350 		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
351 						FGP_LOCK, 0);
352 		if (!IS_ERR(folio)) {
353 			if (!truncate_inode_partial_folio(folio, lstart, lend))
354 				end = folio->index;
355 			folio_unlock(folio);
356 			folio_put(folio);
357 		}
358 	}
359 
360 	index = start;
361 	while (index < end) {
362 		cond_resched();
363 		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
364 				indices)) {
365 			/* If all gone from start onwards, we're done */
366 			if (index == start)
367 				break;
368 			/* Otherwise restart to make sure all gone */
369 			index = start;
370 			continue;
371 		}
372 
373 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
374 			struct folio *folio = fbatch.folios[i];
375 
376 			/* We rely upon deletion not changing folio->index */
377 
378 			if (xa_is_value(folio))
379 				continue;
380 
381 			folio_lock(folio);
382 			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
383 			folio_wait_writeback(folio);
384 			truncate_inode_folio(mapping, folio);
385 			folio_unlock(folio);
386 		}
387 		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
388 		folio_batch_release(&fbatch);
389 	}
390 }
391 EXPORT_SYMBOL(truncate_inode_pages_range);
392 
393 /**
394  * truncate_inode_pages - truncate *all* the pages from an offset
395  * @mapping: mapping to truncate
396  * @lstart: offset from which to truncate
397  *
398  * Called under (and serialised by) inode->i_rwsem and
399  * mapping->invalidate_lock.
400  *
401  * Note: When this function returns, there can be a page in the process of
402  * deletion (inside __filemap_remove_folio()) in the specified range.  Thus
403  * mapping->nrpages can be non-zero when this function returns even after
404  * truncation of the whole mapping.
405  */
406 void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
407 {
408 	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
409 }
410 EXPORT_SYMBOL(truncate_inode_pages);
411 
412 /**
413  * truncate_inode_pages_final - truncate *all* pages before inode dies
414  * @mapping: mapping to truncate
415  *
416  * Called under (and serialized by) inode->i_rwsem.
417  *
418  * Filesystems have to use this in the .evict_inode path to inform the
419  * VM that this is the final truncate and the inode is going away.
420  */
421 void truncate_inode_pages_final(struct address_space *mapping)
422 {
423 	/*
424 	 * Page reclaim can not participate in regular inode lifetime
425 	 * management (can't call iput()) and thus can race with the
426 	 * inode teardown.  Tell it when the address space is exiting,
427 	 * so that it does not install eviction information after the
428 	 * final truncate has begun.
429 	 */
430 	mapping_set_exiting(mapping);
431 
432 	if (!mapping_empty(mapping)) {
433 		/*
434 		 * As truncation uses a lockless tree lookup, cycle
435 		 * the tree lock to make sure any ongoing tree
436 		 * modification that does not see AS_EXITING is
437 		 * completed before starting the final truncate.
438 		 */
439 		xa_lock_irq(&mapping->i_pages);
440 		xa_unlock_irq(&mapping->i_pages);
441 	}
442 
443 	truncate_inode_pages(mapping, 0);
444 }
445 EXPORT_SYMBOL(truncate_inode_pages_final);
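/*
 * Illustrative sketch (not part of this file): the usual shape of an
 * ->evict_inode() implementation for a simple filesystem.  "myfs" is
 * hypothetical; the point is that truncate_inode_pages_final() runs before
 * clear_inode(), so reclaim cannot install shadow entries afterwards.
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		// ... release filesystem-private resources here ...
 *		clear_inode(inode);
 *	}
 */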
446 
447 /**
448  * mapping_try_invalidate - Invalidate all the evictable folios of one inode
449  * @mapping: the address_space which holds the folios to invalidate
450  * @start: the offset 'from' which to invalidate
451  * @end: the offset 'to' which to invalidate (inclusive)
452  * @nr_failed: How many folio invalidations failed
453  *
454  * This function is similar to invalidate_mapping_pages(), except that it
455  * returns the number of folios which could not be evicted in @nr_failed.
456  */
457 unsigned long mapping_try_invalidate(struct address_space *mapping,
458 		pgoff_t start, pgoff_t end, unsigned long *nr_failed)
459 {
460 	pgoff_t indices[PAGEVEC_SIZE];
461 	struct folio_batch fbatch;
462 	pgoff_t index = start;
463 	unsigned long ret;
464 	unsigned long count = 0;
465 	int i;
466 
467 	folio_batch_init(&fbatch);
468 	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
469 		bool xa_has_values = false;
470 		int nr = folio_batch_count(&fbatch);
471 
472 		for (i = 0; i < nr; i++) {
473 			struct folio *folio = fbatch.folios[i];
474 
475 			/* We rely upon deletion not changing folio->index */
476 
477 			if (xa_is_value(folio)) {
478 				xa_has_values = true;
479 				count++;
480 				continue;
481 			}
482 
483 			ret = mapping_evict_folio(mapping, folio);
484 			folio_unlock(folio);
485 			/*
486 			 * Invalidation is a hint that the folio is no longer
487 			 * of interest, so try to speed up its reclaim.
488 			 */
489 			if (!ret) {
490 				deactivate_file_folio(folio);
491 				/* Likely in the lru cache of a remote CPU */
492 				if (nr_failed)
493 					(*nr_failed)++;
494 			}
495 			count += ret;
496 		}
497 
498 		if (xa_has_values)
499 			clear_shadow_entries(mapping, indices[0], indices[nr-1]);
500 
501 		folio_batch_remove_exceptionals(&fbatch);
502 		folio_batch_release(&fbatch);
503 		cond_resched();
504 	}
505 	return count;
506 }
507 
508 /**
509  * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
510  * @mapping: the address_space which holds the cache to invalidate
511  * @start: the offset 'from' which to invalidate
512  * @end: the offset 'to' which to invalidate (inclusive)
513  *
514  * This function removes pages that are clean, unmapped and unlocked,
515  * as well as shadow entries. It will not block on IO activity.
516  *
517  * If you want to remove all the pages of one inode, regardless of
518  * their use and writeback state, use truncate_inode_pages().
519  *
520  * Return: The number of indices that had their contents invalidated
521  */
522 unsigned long invalidate_mapping_pages(struct address_space *mapping,
523 		pgoff_t start, pgoff_t end)
524 {
525 	return mapping_try_invalidate(mapping, start, end, NULL);
526 }
527 EXPORT_SYMBOL(invalidate_mapping_pages);
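/*
 * Illustrative sketch (not part of this file): dropping the entire clean,
 * unmapped cache of one inode, roughly what a POSIX_FADV_DONTNEED-style
 * hint ends up doing.  "myfs_drop_clean_cache" is a hypothetical helper;
 * passing end == -1 covers the whole file.
 *
 *	static void myfs_drop_clean_cache(struct inode *inode)
 *	{
 *		invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *	}
 */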
528 
529 /*
530  * This is like mapping_evict_folio(), except it ignores the folio's
531  * refcount.  We do this because invalidate_inode_pages2() needs stronger
532  * invalidation guarantees, and cannot afford to leave folios behind because
533  * shrink_folio_list() has a temp ref on them, or because they're transiently
534  * sitting in the folio_add_lru() caches.
535  */
536 static int invalidate_complete_folio2(struct address_space *mapping,
537 					struct folio *folio)
538 {
539 	if (folio->mapping != mapping)
540 		return 0;
541 
542 	if (!filemap_release_folio(folio, GFP_KERNEL))
543 		return 0;
544 
545 	spin_lock(&mapping->host->i_lock);
546 	xa_lock_irq(&mapping->i_pages);
547 	if (folio_test_dirty(folio))
548 		goto failed;
549 
550 	BUG_ON(folio_has_private(folio));
551 	__filemap_remove_folio(folio, NULL);
552 	xa_unlock_irq(&mapping->i_pages);
553 	if (mapping_shrinkable(mapping))
554 		inode_add_lru(mapping->host);
555 	spin_unlock(&mapping->host->i_lock);
556 
557 	filemap_free_folio(mapping, folio);
558 	return 1;
559 failed:
560 	xa_unlock_irq(&mapping->i_pages);
561 	spin_unlock(&mapping->host->i_lock);
562 	return 0;
563 }
564 
565 static int folio_launder(struct address_space *mapping, struct folio *folio)
566 {
567 	if (!folio_test_dirty(folio))
568 		return 0;
569 	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
570 		return 0;
571 	return mapping->a_ops->launder_folio(folio);
572 }
573 
574 /**
575  * invalidate_inode_pages2_range - remove range of pages from an address_space
576  * @mapping: the address_space
577  * @start: the page offset 'from' which to invalidate
578  * @end: the page offset 'to' which to invalidate (inclusive)
579  *
580  * Any pages which are found to be mapped into pagetables are unmapped prior to
581  * invalidation.
582  *
583  * Return: -EBUSY if any pages could not be invalidated.
584  */
585 int invalidate_inode_pages2_range(struct address_space *mapping,
586 				  pgoff_t start, pgoff_t end)
587 {
588 	pgoff_t indices[PAGEVEC_SIZE];
589 	struct folio_batch fbatch;
590 	pgoff_t index;
591 	int i;
592 	int ret = 0;
593 	int ret2 = 0;
594 	int did_range_unmap = 0;
595 
596 	if (mapping_empty(mapping))
597 		return 0;
598 
599 	folio_batch_init(&fbatch);
600 	index = start;
601 	while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
602 		bool xa_has_values = false;
603 		int nr = folio_batch_count(&fbatch);
604 
605 		for (i = 0; i < nr; i++) {
606 			struct folio *folio = fbatch.folios[i];
607 
608 			/* We rely upon deletion not changing folio->index */
609 
610 			if (xa_is_value(folio)) {
611 				xa_has_values = true;
612 				if (dax_mapping(mapping) &&
613 				    !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
614 					ret = -EBUSY;
615 				continue;
616 			}
617 
618 			if (!did_range_unmap && folio_mapped(folio)) {
619 				/*
620 				 * If folio is mapped, before taking its lock,
621 				 * zap the rest of the file in one hit.
622 				 */
623 				unmap_mapping_pages(mapping, indices[i],
624 						(1 + end - indices[i]), false);
625 				did_range_unmap = 1;
626 			}
627 
628 			folio_lock(folio);
629 			if (unlikely(folio->mapping != mapping)) {
630 				folio_unlock(folio);
631 				continue;
632 			}
633 			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
634 			folio_wait_writeback(folio);
635 
636 			if (folio_mapped(folio))
637 				unmap_mapping_folio(folio);
638 			BUG_ON(folio_mapped(folio));
639 
640 			ret2 = folio_launder(mapping, folio);
641 			if (ret2 == 0) {
642 				if (!invalidate_complete_folio2(mapping, folio))
643 					ret2 = -EBUSY;
644 			}
645 			if (ret2 < 0)
646 				ret = ret2;
647 			folio_unlock(folio);
648 		}
649 
650 		if (xa_has_values)
651 			clear_shadow_entries(mapping, indices[0], indices[nr-1]);
652 
653 		folio_batch_remove_exceptionals(&fbatch);
654 		folio_batch_release(&fbatch);
655 		cond_resched();
656 	}
657 	/*
658 	 * For DAX we invalidate page tables after invalidating page cache.  We
659 	 * could invalidate page tables while invalidating each entry however
660 	 * that would be expensive. And doing the range unmapping up front doesn't
661 	 * work either, as we have no cheap way to tell whether a page cache entry
662 	 * got mapped again later.
663 	 */
664 	if (dax_mapping(mapping)) {
665 		unmap_mapping_pages(mapping, start, end - start + 1, false);
666 	}
667 	return ret;
668 }
669 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
670 
671 /**
672  * invalidate_inode_pages2 - remove all pages from an address_space
673  * @mapping: the address_space
674  *
675  * Any pages which are found to be mapped into pagetables are unmapped prior to
676  * invalidation.
677  *
678  * Return: -EBUSY if any pages could not be invalidated.
679  */
680 int invalidate_inode_pages2(struct address_space *mapping)
681 {
682 	return invalidate_inode_pages2_range(mapping, 0, -1);
683 }
684 EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
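/*
 * Illustrative sketch (not part of this file): a typical "write back, then
 * drop" sequence around invalidate_inode_pages2(), for cases where cached
 * data must not survive (e.g. before re-reading from a remote server).
 * "myfs_sync_and_invalidate" is a hypothetical helper.
 *
 *	static int myfs_sync_and_invalidate(struct inode *inode)
 *	{
 *		int error = filemap_write_and_wait(inode->i_mapping);
 *
 *		if (error)
 *			return error;
 *		// Returns -EBUSY if any folio could not be invalidated.
 *		return invalidate_inode_pages2(inode->i_mapping);
 *	}
 */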
685 
686 /**
687  * truncate_pagecache - unmap and remove pagecache that has been truncated
688  * @inode: inode
689  * @newsize: new file size
690  *
691  * inode's new i_size must already be written before truncate_pagecache
692  * is called.
693  *
694  * This function should typically be called before the filesystem
695  * releases resources associated with the freed range (eg. deallocates
696  * blocks). This way, pagecache will always stay logically coherent
697  * with on-disk format, and the filesystem would not have to deal with
698  * situations such as writepage being called for a page that has already
699  * had its underlying blocks deallocated.
700  */
701 void truncate_pagecache(struct inode *inode, loff_t newsize)
702 {
703 	struct address_space *mapping = inode->i_mapping;
704 	loff_t holebegin = round_up(newsize, PAGE_SIZE);
705 
706 	/*
707 	 * unmap_mapping_range is called twice, first simply for
708 	 * efficiency so that truncate_inode_pages does fewer
709 	 * single-page unmaps.  However after this first call, and
710 	 * before truncate_inode_pages finishes, it is possible for
711 	 * private pages to be COWed, which remain after
712 	 * truncate_inode_pages finishes, hence the second
713 	 * unmap_mapping_range call must be made for correctness.
714 	 */
715 	unmap_mapping_range(mapping, holebegin, 0, 1);
716 	truncate_inode_pages(mapping, newsize);
717 	unmap_mapping_range(mapping, holebegin, 0, 1);
718 }
719 EXPORT_SYMBOL(truncate_pagecache);
720 
721 /**
722  * truncate_setsize - update inode and pagecache for a new file size
723  * @inode: inode
724  * @newsize: new file size
725  *
726  * truncate_setsize updates i_size and performs pagecache truncation (if
727  * necessary) to @newsize. It will typically be called from the filesystem's
728  * setattr function when ATTR_SIZE is passed in.
729  *
730  * Must be called with a lock serializing truncates and writes (generally
731  * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
732  * specific block truncation has been performed.
733  */
734 void truncate_setsize(struct inode *inode, loff_t newsize)
735 {
736 	loff_t oldsize = inode->i_size;
737 
738 	i_size_write(inode, newsize);
739 	if (newsize > oldsize)
740 		pagecache_isize_extended(inode, oldsize, newsize);
741 	truncate_pagecache(inode, newsize);
742 }
743 EXPORT_SYMBOL(truncate_setsize);
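/*
 * Illustrative sketch (not part of this file): how a simple filesystem's
 * ->setattr() might use truncate_setsize().  "myfs" is hypothetical, and the
 * sketch omits the on-disk block truncation a real filesystem would perform
 * after the pagecache has been truncated.
 *
 *	static int myfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
 *				struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int error;
 *
 *		error = setattr_prepare(idmap, dentry, attr);
 *		if (error)
 *			return error;
 *
 *		if (attr->ia_valid & ATTR_SIZE)
 *			truncate_setsize(inode, attr->ia_size);
 *
 *		setattr_copy(idmap, inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */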
744 
745 /**
746  * pagecache_isize_extended - update pagecache after extension of i_size
747  * @inode:	inode for which i_size was extended
748  * @from:	original inode size
749  * @to:		new inode size
750  *
751  * Handle extension of inode size either caused by extending truncate or
752  * by write starting after current i_size.  We mark the page straddling
753  * current i_size RO so that page_mkwrite() is called on the first
754  * write access to the page.  The filesystem will update its per-block
755  * information before user writes to the page via mmap after the i_size
756  * has been changed.
757  *
758  * The function must be called after i_size is updated so that a page fault
759  * coming after we unlock the folio will already see the new i_size.
760  * The function must be called while we still hold i_rwsem - this not only
761  * makes sure i_size is stable but also that userspace cannot observe new
762  * i_size value before we are prepared to store mmap writes at new inode size.
763  */
764 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
765 {
766 	int bsize = i_blocksize(inode);
767 	loff_t rounded_from;
768 	struct folio *folio;
769 
770 	WARN_ON(to > inode->i_size);
771 
772 	if (from >= to || bsize >= PAGE_SIZE)
773 		return;
774 	/* Will the page straddling @from have any newly created hole blocks? */
775 	rounded_from = round_up(from, bsize);
776 	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
777 		return;
778 
779 	folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
780 	/* Folio not cached? Nothing to do */
781 	if (IS_ERR(folio))
782 		return;
783 	/*
784 	 * See folio_clear_dirty_for_io() for details why folio_mark_dirty()
785 	 * is needed.
786 	 */
787 	if (folio_mkclean(folio))
788 		folio_mark_dirty(folio);
789 	folio_unlock(folio);
790 	folio_put(folio);
791 }
792 EXPORT_SYMBOL(pagecache_isize_extended);
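/*
 * Worked example (illustrative): with a 1KiB block size, a 4KiB page size,
 * from == 2500 and to == 5000, rounded_from becomes 3072.  Since to > 3072
 * and 3072 is not page aligned, the page at index 0 straddles the old EOF
 * and gains newly created blocks, so it is write-protected above to force
 * page_mkwrite() on the next write through a mapping.
 */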
793 
794 /**
795  * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
796  * @inode: inode
797  * @lstart: offset of beginning of hole
798  * @lend: offset of last byte of hole
799  *
800  * This function should typically be called before the filesystem
801  * releases resources associated with the freed range (eg. deallocates
802  * blocks). This way, pagecache will always stay logically coherent
803  * with on-disk format, and the filesystem would not have to deal with
804  * situations such as writepage being called for a page that has already
805  * had its underlying blocks deallocated.
806  */
807 void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
808 {
809 	struct address_space *mapping = inode->i_mapping;
810 	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
811 	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
812 	/*
813 	 * This rounding is currently just for example: unmap_mapping_range
814 	 * expands its hole outwards, whereas we want it to contract the hole
815 	 * inwards.  However, existing callers of truncate_pagecache_range are
816 	 * doing their own page rounding first.  Note that unmap_mapping_range
817 	 * allows holelen 0 for all, and we allow lend -1 for end of file.
818 	 */
819 
820 	/*
821 	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
822 	 * once (before truncating pagecache), and without "even_cows" flag:
823 	 * hole-punching should not remove private COWed pages from the hole.
824 	 */
825 	if ((u64)unmap_end > (u64)unmap_start)
826 		unmap_mapping_range(mapping, unmap_start,
827 				    1 + unmap_end - unmap_start, 0);
828 	truncate_inode_pages_range(mapping, lstart, lend);
829 }
830 EXPORT_SYMBOL(truncate_pagecache_range);
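/*
 * Illustrative sketch (not part of this file): the usual ordering in a
 * hole-punching fallocate path.  "myfs_punch_hole" and the block
 * deallocation step are hypothetical; the key points are holding the
 * invalidate_lock and truncating the pagecache before freeing blocks.
 *
 *	static int myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 *	{
 *		struct address_space *mapping = inode->i_mapping;
 *		loff_t end = offset + len - 1;
 *		int error;
 *
 *		filemap_invalidate_lock(mapping);
 *		error = filemap_write_and_wait_range(mapping, offset, end);
 *		if (!error) {
 *			truncate_pagecache_range(inode, offset, end);
 *			// ... deallocate the underlying blocks here ...
 *		}
 *		filemap_invalidate_unlock(mapping);
 *		return error;
 *	}
 */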
831