xref: /linux/mm/truncate.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/truncate.c - code for taking down pages from address_spaces
4  *
5  * Copyright (C) 2002, Linus Torvalds
6  *
7  * 10Sep2002	Andrew Morton
8  *		Initial version.
9  */
10 
11 #include <linux/kernel.h>
12 #include <linux/backing-dev.h>
13 #include <linux/dax.h>
14 #include <linux/gfp.h>
15 #include <linux/mm.h>
16 #include <linux/swap.h>
17 #include <linux/export.h>
18 #include <linux/pagemap.h>
19 #include <linux/highmem.h>
20 #include <linux/pagevec.h>
21 #include <linux/task_io_accounting_ops.h>
22 #include <linux/shmem_fs.h>
23 #include <linux/rmap.h>
24 #include "internal.h"
25 
26 static void clear_shadow_entries(struct address_space *mapping,
27 				 unsigned long start, unsigned long max)
28 {
29 	XA_STATE(xas, &mapping->i_pages, start);
30 	struct folio *folio;
31 
32 	/* Handled by shmem itself, or for DAX we do nothing. */
33 	if (shmem_mapping(mapping) || dax_mapping(mapping))
34 		return;
35 
36 	xas_set_update(&xas, workingset_update_node);
37 
38 	spin_lock(&mapping->host->i_lock);
39 	xas_lock_irq(&xas);
40 
41 	/* Clear all shadow entries from start to max */
42 	xas_for_each(&xas, folio, max) {
43 		if (xa_is_value(folio))
44 			xas_store(&xas, NULL);
45 	}
46 
47 	xas_unlock_irq(&xas);
48 	if (mapping_shrinkable(mapping))
49 		inode_add_lru(mapping->host);
50 	spin_unlock(&mapping->host->i_lock);
51 }
52 
53 /*
54  * Unconditionally remove exceptional entries. Usually called from truncate
55  * path. Note that the folio_batch may be altered by this function by removing
56  * exceptional entries similar to what folio_batch_remove_exceptionals() does.
57  * Please note that indices[] has entries in ascending order as guaranteed by
58  * either find_get_entries() or find_lock_entries().
59  */
60 static void truncate_folio_batch_exceptionals(struct address_space *mapping,
61 				struct folio_batch *fbatch, pgoff_t *indices)
62 {
63 	XA_STATE(xas, &mapping->i_pages, indices[0]);
64 	int nr = folio_batch_count(fbatch);
65 	struct folio *folio;
66 	int i, j;
67 
68 	/* Handled by shmem itself */
69 	if (shmem_mapping(mapping))
70 		return;
71 
72 	for (j = 0; j < nr; j++)
73 		if (xa_is_value(fbatch->folios[j]))
74 			break;
75 
76 	if (j == nr)
77 		return;
78 
79 	if (dax_mapping(mapping)) {
80 		for (i = j; i < nr; i++) {
81 			if (xa_is_value(fbatch->folios[i])) {
82 				/*
83 				 * File systems should already have called
84 				 * dax_break_layout_entry() to remove all DAX
85 				 * entries while holding a lock to prevent
86 				 * establishing new entries. Therefore we
87 				 * shouldn't find any here.
88 				 */
89 				WARN_ON_ONCE(1);
90 
91 				/*
92 				 * Delete the mapping so truncate_pagecache()
93 				 * doesn't loop forever.
94 				 */
95 				dax_delete_mapping_entry(mapping, indices[i]);
96 			}
97 		}
98 		goto out;
99 	}
100 
101 	xas_set(&xas, indices[j]);
102 	xas_set_update(&xas, workingset_update_node);
103 
104 	spin_lock(&mapping->host->i_lock);
105 	xas_lock_irq(&xas);
106 
107 	xas_for_each(&xas, folio, indices[nr-1]) {
108 		if (xa_is_value(folio))
109 			xas_store(&xas, NULL);
110 	}
111 
112 	xas_unlock_irq(&xas);
113 	if (mapping_shrinkable(mapping))
114 		inode_add_lru(mapping->host);
115 	spin_unlock(&mapping->host->i_lock);
116 out:
117 	folio_batch_remove_exceptionals(fbatch);
118 }
119 
120 /**
121  * folio_invalidate - Invalidate part or all of a folio.
122  * @folio: The folio which is affected.
123  * @offset: start of the range to invalidate
124  * @length: length of the range to invalidate
125  *
126  * folio_invalidate() is called when all or part of the folio has become
127  * invalidated by a truncate operation.
128  *
129  * folio_invalidate() does not have to release all buffers, but it must
130  * ensure that no dirty buffer is left outside @offset and that no I/O
131  * is underway against any of the blocks which are outside the truncation
132  * point, because the caller is about to free (and possibly reuse) those
133  * blocks on-disk.
134  */
135 void folio_invalidate(struct folio *folio, size_t offset, size_t length)
136 {
137 	const struct address_space_operations *aops = folio->mapping->a_ops;
138 
139 	if (aops->invalidate_folio)
140 		aops->invalidate_folio(folio, offset, length);
141 }
142 EXPORT_SYMBOL_GPL(folio_invalidate);
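
/*
 * Illustrative sketch (not code from this file): block-based filesystems
 * commonly satisfy the contract above by pointing ->invalidate_folio at
 * block_invalidate_folio(), which discards buffer_heads over the
 * invalidated byte range.  "myfs_aops" is a hypothetical name:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.invalidate_folio	= block_invalidate_folio,
 *		...
 *	};
 */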
143 
144 /*
145  * If truncate cannot remove the fs-private metadata from the page, the page
146  * becomes orphaned.  It will be left on the LRU and may even be mapped into
147  * user pagetables if we're racing with filemap_fault().
148  *
149  * We need to bail out if page->mapping is no longer equal to the original
150  * mapping.  This happens a) when the VM reclaimed the page while we waited on
151  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
152  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
153  */
154 static void truncate_cleanup_folio(struct folio *folio)
155 {
156 	if (folio_mapped(folio))
157 		unmap_mapping_folio(folio);
158 
159 	if (folio_needs_release(folio))
160 		folio_invalidate(folio, 0, folio_size(folio));
161 
162 	/*
163 	 * Some filesystems seem to re-dirty the page even after
164 	 * the VM has canceled the dirty bit (eg ext3 journaling).
165 	 * Hence dirty accounting check is placed after invalidation.
166 	 */
167 	folio_cancel_dirty(folio);
168 }
169 
170 int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
171 {
172 	if (folio->mapping != mapping)
173 		return -EIO;
174 
175 	truncate_cleanup_folio(folio);
176 	filemap_remove_folio(folio);
177 	return 0;
178 }
179 
180 static int try_folio_split_or_unmap(struct folio *folio, struct page *split_at,
181 				    unsigned long min_order)
182 {
183 	enum ttu_flags ttu_flags =
184 		TTU_SYNC |
185 		TTU_SPLIT_HUGE_PMD |
186 		TTU_IGNORE_MLOCK;
187 	int ret;
188 
189 	ret = try_folio_split_to_order(folio, split_at, min_order);
190 
191 	/*
192 	 * If the split fails, unmap the folio, so it will be refaulted
193 	 * with PTEs to respect SIGBUS semantics.
194 	 *
195 	 * Make an exception for shmem/tmpfs, which has long been
196 	 * intentionally mapped with PMDs across i_size.
197 	 */
198 	if (ret && !shmem_mapping(folio->mapping)) {
199 		try_to_unmap(folio, ttu_flags);
200 		WARN_ON(folio_mapped(folio));
201 	}
202 
203 	return ret;
204 }
205 
206 /*
207  * Handle partial folios.  The folio may be entirely within the
208  * range if a split has raced with us.  If not, we zero the part of the
209  * folio that's within the [start, end] range, and then split the folio if
210  * it's large.  The split will discard pages which now lie beyond
211  * i_size, and we rely on the caller to discard pages which lie within a
212  * newly created hole.
213  *
214  * Returns false if splitting failed so the caller can avoid
215  * discarding the entire folio which is stubbornly unsplit.
216  */
217 bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
218 {
219 	loff_t pos = folio_pos(folio);
220 	size_t size = folio_size(folio);
221 	unsigned int offset, length;
222 	struct page *split_at, *split_at2;
223 	unsigned int min_order;
224 
225 	if (pos < start)
226 		offset = start - pos;
227 	else
228 		offset = 0;
229 	if (pos + size <= (u64)end)
230 		length = size - offset;
231 	else
232 		length = end + 1 - pos - offset;
233 
234 	folio_wait_writeback(folio);
235 	if (length == size) {
236 		truncate_inode_folio(folio->mapping, folio);
237 		return true;
238 	}
239 
240 	/*
241 	 * We may be zeroing pages we're about to discard, but it avoids
242 	 * doing a complex calculation here, and then doing the zeroing
243 	 * anyway if the page split fails.
244 	 */
245 	if (!mapping_inaccessible(folio->mapping))
246 		folio_zero_range(folio, offset, length);
247 
248 	if (folio_needs_release(folio))
249 		folio_invalidate(folio, offset, length);
250 	if (!folio_test_large(folio))
251 		return true;
252 
253 	min_order = mapping_min_folio_order(folio->mapping);
254 	split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
255 	if (!try_folio_split_or_unmap(folio, split_at, min_order)) {
256 		/*
257 		 * try to split at offset + length to make sure folios within
258 		 * the range can be dropped, especially to avoid memory waste
259 		 * for shmem truncate
260 		 */
261 		struct folio *folio2;
262 
263 		if (offset + length == size)
264 			goto no_split;
265 
266 		split_at2 = folio_page(folio,
267 				PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
268 		folio2 = page_folio(split_at2);
269 
270 		if (!folio_try_get(folio2))
271 			goto no_split;
272 
273 		if (!folio_test_large(folio2))
274 			goto out;
275 
276 		if (!folio_trylock(folio2))
277 			goto out;
278 
279 		/* make sure folio2 is large and does not change its mapping */
280 		if (folio_test_large(folio2) &&
281 		    folio2->mapping == folio->mapping)
282 			try_folio_split_or_unmap(folio2, split_at2, min_order);
283 
284 		folio_unlock(folio2);
285 out:
286 		folio_put(folio2);
287 no_split:
288 		return true;
289 	}
290 	if (folio_test_dirty(folio))
291 		return false;
292 	truncate_inode_folio(folio->mapping, folio);
293 	return true;
294 }
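
/*
 * Worked example of the offset/length arithmetic above (a sketch assuming
 * PAGE_SIZE == 4096): a 2MiB folio at pos 0, truncated with start == 4096
 * and end == 1048575, gets offset = 4096 and, since pos + size > end,
 * length = end + 1 - pos - offset = 1044480.  Bytes [4096, 1048576) are
 * zeroed and the split is attempted at page index 1 of the folio
 * (PAGE_ALIGN_DOWN(4096) / PAGE_SIZE), so the pages inside the hole can
 * be dropped if the split succeeds.
 */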
295 
296 /*
297  * Used to get rid of pages on hardware memory corruption.
298  */
299 int generic_error_remove_folio(struct address_space *mapping,
300 		struct folio *folio)
301 {
302 	if (!mapping)
303 		return -EINVAL;
304 	/*
305 	 * Only punch for normal data pages for now.
306 	 * Handling other types like directories would need more auditing.
307 	 */
308 	if (!S_ISREG(mapping->host->i_mode))
309 		return -EIO;
310 	return truncate_inode_folio(mapping, folio);
311 }
312 EXPORT_SYMBOL(generic_error_remove_folio);
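
/*
 * Illustrative sketch (an assumption about typical usage, not code from
 * this file): filesystems opt in to having poisoned pagecache dropped by
 * the memory-failure code through the ->error_remove_folio() operation,
 * usually by pointing it straight at the helper above:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.error_remove_folio	= generic_error_remove_folio,
 *		...
 *	};
 */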
313 
314 /**
315  * mapping_evict_folio() - Remove an unused folio from the page-cache.
316  * @mapping: The mapping this folio belongs to.
317  * @folio: The folio to remove.
318  *
319  * Safely remove one folio from the page cache.
320  * It only drops clean, unused folios.
321  *
322  * Context: Folio must be locked.
323  * Return: The number of pages successfully removed.
324  */
325 long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
326 {
327 	/* The page may have been truncated before it was locked */
328 	if (!mapping)
329 		return 0;
330 	if (folio_test_dirty(folio) || folio_test_writeback(folio))
331 		return 0;
332 	/* The refcount will be elevated if any page in the folio is mapped */
333 	if (folio_ref_count(folio) >
334 			folio_nr_pages(folio) + folio_has_private(folio) + 1)
335 		return 0;
336 	if (!filemap_release_folio(folio, 0))
337 		return 0;
338 
339 	return remove_mapping(mapping, folio);
340 }
341 
342 /**
343  * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
344  * @mapping: mapping to truncate
345  * @lstart: offset from which to truncate
346  * @lend: offset to which to truncate (inclusive)
347  *
348  * Truncate the page cache, removing the pages that are between
349  * specified offsets (and zeroing out partial pages
350  * if lstart or lend + 1 is not page aligned).
351  *
352  * Truncate takes two passes - the first pass is nonblocking.  It will not
353  * block on page locks and it will not block on writeback.  The second pass
354  * will wait.  This is to prevent as much IO as possible in the affected region.
355  * The first pass will remove most pages, so the search cost of the second pass
356  * is low.
357  *
358  * We pass down the cache-hot hint to the page freeing code.  Even if the
359  * mapping is large, it is probably the case that the final pages are the most
360  * recently touched, and freeing happens in ascending file offset order.
361  *
362  * Note that since ->invalidate_folio() accepts a range to invalidate,
363  * truncate_inode_pages_range() is able to handle cases where lend + 1 is
364  * not page aligned.
365  */
366 void truncate_inode_pages_range(struct address_space *mapping,
367 				loff_t lstart, loff_t lend)
368 {
369 	pgoff_t		start;		/* inclusive */
370 	pgoff_t		end;		/* exclusive */
371 	struct folio_batch fbatch;
372 	pgoff_t		indices[PAGEVEC_SIZE];
373 	pgoff_t		index;
374 	int		i;
375 	struct folio	*folio;
376 	bool		same_folio;
377 
378 	if (mapping_empty(mapping))
379 		return;
380 
381 	/*
382 	 * 'start' and 'end' always cover the range of pages to be fully
383 	 * truncated.  Partial folios at either end of the range are handled
384 	 * separately by truncate_inode_partial_folio() below.
385 	 * Note that 'end' is exclusive while 'lend' is inclusive.
386 	 */
387 	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
388 	if (lend == -1)
389 		/*
390 		 * lend == -1 indicates end-of-file so we have to set 'end'
391 		 * to the highest possible pgoff_t and since the type is
392 		 * unsigned we're using -1.
393 		 */
394 		end = -1;
395 	else
396 		end = (lend + 1) >> PAGE_SHIFT;
397 
398 	folio_batch_init(&fbatch);
399 	index = start;
400 	while (index < end && find_lock_entries(mapping, &index, end - 1,
401 			&fbatch, indices)) {
402 		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
403 		for (i = 0; i < folio_batch_count(&fbatch); i++)
404 			truncate_cleanup_folio(fbatch.folios[i]);
405 		delete_from_page_cache_batch(mapping, &fbatch);
406 		for (i = 0; i < folio_batch_count(&fbatch); i++)
407 			folio_unlock(fbatch.folios[i]);
408 		folio_batch_release(&fbatch);
409 		cond_resched();
410 	}
411 
412 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
413 	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
414 	if (!IS_ERR(folio)) {
415 		same_folio = lend < folio_pos(folio) + folio_size(folio);
416 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
417 			start = folio_next_index(folio);
418 			if (same_folio)
419 				end = folio->index;
420 		}
421 		folio_unlock(folio);
422 		folio_put(folio);
423 		folio = NULL;
424 	}
425 
426 	if (!same_folio) {
427 		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
428 						FGP_LOCK, 0);
429 		if (!IS_ERR(folio)) {
430 			if (!truncate_inode_partial_folio(folio, lstart, lend))
431 				end = folio->index;
432 			folio_unlock(folio);
433 			folio_put(folio);
434 		}
435 	}
436 
437 	index = start;
438 	while (index < end) {
439 		cond_resched();
440 		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
441 				indices)) {
442 			/* If all gone from start onwards, we're done */
443 			if (index == start)
444 				break;
445 			/* Otherwise restart to make sure all gone */
446 			index = start;
447 			continue;
448 		}
449 
450 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
451 			struct folio *folio = fbatch.folios[i];
452 
453 			/* We rely upon deletion not changing folio->index */
454 
455 			if (xa_is_value(folio))
456 				continue;
457 
458 			folio_lock(folio);
459 			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
460 			folio_wait_writeback(folio);
461 			truncate_inode_folio(mapping, folio);
462 			folio_unlock(folio);
463 		}
464 		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
465 		folio_batch_release(&fbatch);
466 	}
467 }
468 EXPORT_SYMBOL(truncate_inode_pages_range);
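
/*
 * Worked example of the index arithmetic above (a sketch assuming
 * PAGE_SIZE == 4096): truncate_inode_pages_range(mapping, 6144, 20479)
 * computes start = (6144 + 4095) >> 12 = 2 and end = 20480 >> 12 = 5, so
 * pages 2..4 are removed outright, the tail of page 1 (bytes 2048..4095
 * within that page) is handled by truncate_inode_partial_folio(), and
 * there is no partial page at the end because lend + 1 is page aligned.
 */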
469 
470 /**
471  * truncate_inode_pages - truncate *all* the pages from an offset
472  * @mapping: mapping to truncate
473  * @lstart: offset from which to truncate
474  *
475  * Called under (and serialised by) inode->i_rwsem and
476  * mapping->invalidate_lock.
477  *
478  * Note: When this function returns, there can be a page in the process of
479  * deletion (inside __filemap_remove_folio()) in the specified range.  Thus
480  * mapping->nrpages can be non-zero when this function returns even after
481  * truncation of the whole mapping.
482  */
483 void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
484 {
485 	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
486 }
487 EXPORT_SYMBOL(truncate_inode_pages);
488 
489 /**
490  * truncate_inode_pages_final - truncate *all* pages before inode dies
491  * @mapping: mapping to truncate
492  *
493  * Called under (and serialized by) inode->i_rwsem.
494  *
495  * Filesystems have to use this in the .evict_inode path to inform the
496  * VM that this is the final truncate and the inode is going away.
497  */
498 void truncate_inode_pages_final(struct address_space *mapping)
499 {
500 	/*
501 	 * Page reclaim can not participate in regular inode lifetime
502 	 * management (can't call iput()) and thus can race with the
503 	 * inode teardown.  Tell it when the address space is exiting,
504 	 * so that it does not install eviction information after the
505 	 * final truncate has begun.
506 	 */
507 	mapping_set_exiting(mapping);
508 
509 	if (!mapping_empty(mapping)) {
510 		/*
511 		 * As truncation uses a lockless tree lookup, cycle
512 		 * the tree lock to make sure any ongoing tree
513 		 * modification that does not see AS_EXITING is
514 		 * completed before starting the final truncate.
515 		 */
516 		xa_lock_irq(&mapping->i_pages);
517 		xa_unlock_irq(&mapping->i_pages);
518 	}
519 
520 	truncate_inode_pages(mapping, 0);
521 }
522 EXPORT_SYMBOL(truncate_inode_pages_final);
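
/*
 * Illustrative sketch of the calling convention described above (the
 * "myfs_" name is hypothetical): a filesystem's ->evict_inode() makes
 * this its first step so reclaim stops installing shadow entries before
 * the inode is torn down:
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 *
 * with the filesystem releasing its on-disk resources for the inode
 * around those calls as it sees fit.
 */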
523 
524 /**
525  * mapping_try_invalidate - Invalidate all the evictable folios of one inode
526  * @mapping: the address_space which holds the folios to invalidate
527  * @start: the offset 'from' which to invalidate
528  * @end: the offset 'to' which to invalidate (inclusive)
529  * @nr_failed: How many folio invalidations failed
530  *
531  * This function is similar to invalidate_mapping_pages(), except that it
532  * returns the number of folios which could not be evicted in @nr_failed.
533  */
534 unsigned long mapping_try_invalidate(struct address_space *mapping,
535 		pgoff_t start, pgoff_t end, unsigned long *nr_failed)
536 {
537 	pgoff_t indices[PAGEVEC_SIZE];
538 	struct folio_batch fbatch;
539 	pgoff_t index = start;
540 	unsigned long ret;
541 	unsigned long count = 0;
542 	int i;
543 
544 	folio_batch_init(&fbatch);
545 	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
546 		bool xa_has_values = false;
547 		int nr = folio_batch_count(&fbatch);
548 
549 		for (i = 0; i < nr; i++) {
550 			struct folio *folio = fbatch.folios[i];
551 
552 			/* We rely upon deletion not changing folio->index */
553 
554 			if (xa_is_value(folio)) {
555 				xa_has_values = true;
556 				count++;
557 				continue;
558 			}
559 
560 			ret = mapping_evict_folio(mapping, folio);
561 			folio_unlock(folio);
562 			/*
563 			 * Invalidation is a hint that the folio is no longer
564 			 * of interest, so try to speed up its reclaim.
565 			 */
566 			if (!ret) {
567 				deactivate_file_folio(folio);
568 				/* Likely in the lru cache of a remote CPU */
569 				if (nr_failed)
570 					(*nr_failed)++;
571 			}
572 			count += ret;
573 		}
574 
575 		if (xa_has_values)
576 			clear_shadow_entries(mapping, indices[0], indices[nr-1]);
577 
578 		folio_batch_remove_exceptionals(&fbatch);
579 		folio_batch_release(&fbatch);
580 		cond_resched();
581 	}
582 	return count;
583 }
584 
585 /**
586  * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
587  * @mapping: the address_space which holds the cache to invalidate
588  * @start: the offset 'from' which to invalidate
589  * @end: the offset 'to' which to invalidate (inclusive)
590  *
591  * This function removes pages that are clean, unmapped and unlocked,
592  * as well as shadow entries. It will not block on IO activity.
593  *
594  * If you want to remove all the pages of one inode, regardless of
595  * their use and writeback state, use truncate_inode_pages().
596  *
597  * Return: The number of indices that had their contents invalidated
598  */
599 unsigned long invalidate_mapping_pages(struct address_space *mapping,
600 		pgoff_t start, pgoff_t end)
601 {
602 	return mapping_try_invalidate(mapping, start, end, NULL);
603 }
604 EXPORT_SYMBOL(invalidate_mapping_pages);
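
/*
 * Usage sketch: "echo 1 > /proc/sys/vm/drop_caches" ends up doing roughly
 * this for each inode it visits (see fs/drop_caches.c), dropping whatever
 * clean, unmapped cache it can without ever blocking on I/O:
 *
 *	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 */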
605 
606 static int folio_launder(struct address_space *mapping, struct folio *folio)
607 {
608 	if (!folio_test_dirty(folio))
609 		return 0;
610 	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
611 		return 0;
612 	return mapping->a_ops->launder_folio(folio);
613 }
614 
615 /*
616  * This is like mapping_evict_folio(), except it ignores the folio's
617  * refcount.  We do this because invalidate_inode_pages2() needs stronger
618  * invalidation guarantees, and cannot afford to leave folios behind because
619  * shrink_folio_list() has a temp ref on them, or because they're transiently
620  * sitting in the folio_add_lru() caches.
621  */
622 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
623 			   gfp_t gfp)
624 {
625 	void (*free_folio)(struct folio *);
626 	int ret;
627 
628 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
629 
630 	if (folio_mapped(folio))
631 		unmap_mapping_folio(folio);
632 	BUG_ON(folio_mapped(folio));
633 
634 	ret = folio_launder(mapping, folio);
635 	if (ret)
636 		return ret;
637 	if (folio->mapping != mapping)
638 		return -EBUSY;
639 	if (!filemap_release_folio(folio, gfp))
640 		return -EBUSY;
641 
642 	spin_lock(&mapping->host->i_lock);
643 	xa_lock_irq(&mapping->i_pages);
644 	if (folio_test_dirty(folio))
645 		goto failed;
646 
647 	BUG_ON(folio_has_private(folio));
648 	__filemap_remove_folio(folio, NULL);
649 	xa_unlock_irq(&mapping->i_pages);
650 	if (mapping_shrinkable(mapping))
651 		inode_add_lru(mapping->host);
652 	free_folio = mapping->a_ops->free_folio;
653 	spin_unlock(&mapping->host->i_lock);
654 
655 	if (free_folio)
656 		free_folio(folio);
657 	folio_put_refs(folio, folio_nr_pages(folio));
658 	return 1;
659 failed:
660 	xa_unlock_irq(&mapping->i_pages);
661 	spin_unlock(&mapping->host->i_lock);
662 	return -EBUSY;
663 }
664 
665 /**
666  * invalidate_inode_pages2_range - remove range of pages from an address_space
667  * @mapping: the address_space
668  * @start: the page offset 'from' which to invalidate
669  * @end: the page offset 'to' which to invalidate (inclusive)
670  *
671  * Any pages which are found to be mapped into pagetables are unmapped prior to
672  * invalidation.
673  *
674  * Return: -EBUSY if any pages could not be invalidated.
675  */
676 int invalidate_inode_pages2_range(struct address_space *mapping,
677 				  pgoff_t start, pgoff_t end)
678 {
679 	pgoff_t indices[PAGEVEC_SIZE];
680 	struct folio_batch fbatch;
681 	pgoff_t index;
682 	int i;
683 	int ret = 0;
684 	int ret2 = 0;
685 	int did_range_unmap = 0;
686 
687 	if (mapping_empty(mapping))
688 		return 0;
689 
690 	folio_batch_init(&fbatch);
691 	index = start;
692 	while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
693 		bool xa_has_values = false;
694 		int nr = folio_batch_count(&fbatch);
695 
696 		for (i = 0; i < nr; i++) {
697 			struct folio *folio = fbatch.folios[i];
698 
699 			/* We rely upon deletion not changing folio->index */
700 
701 			if (xa_is_value(folio)) {
702 				xa_has_values = true;
703 				if (dax_mapping(mapping) &&
704 				    !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
705 					ret = -EBUSY;
706 				continue;
707 			}
708 
709 			if (!did_range_unmap && folio_mapped(folio)) {
710 				/*
711 				 * If folio is mapped, before taking its lock,
712 				 * zap the rest of the file in one hit.
713 				 */
714 				unmap_mapping_pages(mapping, indices[i],
715 						(1 + end - indices[i]), false);
716 				did_range_unmap = 1;
717 			}
718 
719 			folio_lock(folio);
720 			if (unlikely(folio->mapping != mapping)) {
721 				folio_unlock(folio);
722 				continue;
723 			}
724 			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
725 			folio_wait_writeback(folio);
726 			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
727 			if (ret2 < 0)
728 				ret = ret2;
729 			folio_unlock(folio);
730 		}
731 
732 		if (xa_has_values)
733 			clear_shadow_entries(mapping, indices[0], indices[nr-1]);
734 
735 		folio_batch_remove_exceptionals(&fbatch);
736 		folio_batch_release(&fbatch);
737 		cond_resched();
738 	}
739 	/*
740 	 * For DAX we invalidate page tables after invalidating page cache.  We
741 	 * could invalidate page tables while invalidating each entry, but that
742 	 * would be expensive.  And doing the range unmapping up front doesn't
743 	 * work either, as we have no cheap way to tell whether a page cache
744 	 * entry got remapped later.
745 	 */
746 	if (dax_mapping(mapping)) {
747 		unmap_mapping_pages(mapping, start, end - start + 1, false);
748 	}
749 	return ret;
750 }
751 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
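
/*
 * Usage sketch (an approximation of a direct-I/O write path, not code
 * from this file): after writing @count bytes at @pos around the page
 * cache, stale cached folios over that range are shot down with
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_SHIFT,
 *			(pos + count - 1) >> PAGE_SHIFT);
 *
 * where a non-zero return means some folios could not be invalidated;
 * the data on disk is still correct, the cache is merely stale.
 */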
752 
753 /**
754  * invalidate_inode_pages2 - remove all pages from an address_space
755  * @mapping: the address_space
756  *
757  * Any pages which are found to be mapped into pagetables are unmapped prior to
758  * invalidation.
759  *
760  * Return: -EBUSY if any pages could not be invalidated.
761  */
762 int invalidate_inode_pages2(struct address_space *mapping)
763 {
764 	return invalidate_inode_pages2_range(mapping, 0, -1);
765 }
766 EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
767 
768 /**
769  * truncate_pagecache - unmap and remove pagecache that has been truncated
770  * @inode: inode
771  * @newsize: new file size
772  *
773  * inode's new i_size must already be written before truncate_pagecache
774  * is called.
775  *
776  * This function should typically be called before the filesystem
777  * releases resources associated with the freed range (eg. deallocates
778  * blocks). This way, pagecache will always stay logically coherent
779  * with on-disk format, and the filesystem would not have to deal with
780  * situations such as writepage being called for a page that has already
781  * had its underlying blocks deallocated.
782  */
783 void truncate_pagecache(struct inode *inode, loff_t newsize)
784 {
785 	struct address_space *mapping = inode->i_mapping;
786 	loff_t holebegin = round_up(newsize, PAGE_SIZE);
787 
788 	/*
789 	 * unmap_mapping_range is called twice, first simply for
790 	 * efficiency so that truncate_inode_pages does fewer
791 	 * single-page unmaps.  However after this first call, and
792 	 * before truncate_inode_pages finishes, it is possible for
793 	 * private pages to be COWed, which remain after
794 	 * truncate_inode_pages finishes, hence the second
795 	 * unmap_mapping_range call must be made for correctness.
796 	 */
797 	unmap_mapping_range(mapping, holebegin, 0, 1);
798 	truncate_inode_pages(mapping, newsize);
799 	unmap_mapping_range(mapping, holebegin, 0, 1);
800 }
801 EXPORT_SYMBOL(truncate_pagecache);
802 
803 /**
804  * truncate_setsize - update inode and pagecache for a new file size
805  * @inode: inode
806  * @newsize: new file size
807  *
808  * truncate_setsize updates i_size and performs pagecache truncation (if
809  * necessary) to @newsize. It will be typically be called from the filesystem's
810  * necessary) to @newsize. It will typically be called from the filesystem's
811  *
812  * Must be called with a lock serializing truncates and writes (generally
813  * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
814  * specific block truncation has been performed.
815  */
816 void truncate_setsize(struct inode *inode, loff_t newsize)
817 {
818 	loff_t oldsize = inode->i_size;
819 
820 	i_size_write(inode, newsize);
821 	if (newsize > oldsize)
822 		pagecache_isize_extended(inode, oldsize, newsize);
823 	truncate_pagecache(inode, newsize);
824 }
825 EXPORT_SYMBOL(truncate_setsize);
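
/*
 * Illustrative sketch of the calling convention described above (the
 * "myfs_" name is hypothetical, modelled on a simple filesystem's
 * ->setattr()): the VFS holds i_rwsem, setattr_prepare() validates the
 * change, and truncate_setsize() runs before blocks are freed:
 *
 *	static int myfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
 *				struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int error = setattr_prepare(idmap, dentry, attr);
 *
 *		if (error)
 *			return error;
 *		if (attr->ia_valid & ATTR_SIZE)
 *			truncate_setsize(inode, attr->ia_size);
 *		setattr_copy(idmap, inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 *
 * with the filesystem-specific block truncation following for a shrinking
 * change.
 */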
826 
827 /**
828  * pagecache_isize_extended - update pagecache after extension of i_size
829  * @inode:	inode for which i_size was extended
830  * @from:	original inode size
831  * @to:		new inode size
832  *
833  * Handle extension of inode size either caused by extending truncate or
834  * by write starting after current i_size.  We mark the page straddling
835  * current i_size RO so that page_mkwrite() is called on the first
836  * write access to the page.  The filesystem will update its per-block
837  * information before user writes to the page via mmap after the i_size
838  * has been changed.
839  *
840  * The function must be called after i_size is updated so that a page fault
841  * coming after we unlock the folio will already see the new i_size.
842  * The function must be called while we still hold i_rwsem - this not only
843  * makes sure i_size is stable but also that userspace cannot observe the new
844  * i_size value before we are prepared to store mmap writes at the new inode size.
845  */
846 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
847 {
848 	int bsize = i_blocksize(inode);
849 	loff_t rounded_from;
850 	struct folio *folio;
851 
852 	WARN_ON(to > inode->i_size);
853 
854 	if (from >= to || bsize >= PAGE_SIZE)
855 		return;
856 	/* Page straddling @from will not have any hole block created? */
857 	rounded_from = round_up(from, bsize);
858 	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
859 		return;
860 
861 	folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
862 	/* Folio not cached? Nothing to do */
863 	if (IS_ERR(folio))
864 		return;
865 	/*
866 	 * See folio_clear_dirty_for_io() for details why folio_mark_dirty()
867 	 * is needed.
868 	 */
869 	if (folio_mkclean(folio))
870 		folio_mark_dirty(folio);
871 
872 	/*
873 	 * The post-eof range of the folio must be zeroed before it is exposed
874 	 * to the file. Writeback normally does this, but since i_size has been
875 	 * increased we handle it here.
876 	 */
877 	if (folio_test_dirty(folio)) {
878 		unsigned int offset, end;
879 
880 		offset = from - folio_pos(folio);
881 		end = min_t(unsigned int, to - folio_pos(folio),
882 			    folio_size(folio));
883 		folio_zero_segment(folio, offset, end);
884 	}
885 
886 	folio_unlock(folio);
887 	folio_put(folio);
888 }
889 EXPORT_SYMBOL(pagecache_isize_extended);
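
/*
 * Worked example of the checks above (a sketch assuming PAGE_SIZE == 4096
 * and a 1024-byte block size): extending from i_size 2560 to 5000 gives
 * rounded_from = 3072, which is still below @to and not page aligned, so
 * the folio covering offset 2560 is looked up, write-protected via
 * folio_mkclean() (forcing a later page_mkwrite()), and, if dirty, has
 * bytes [2560, 4096) zeroed so stale data beyond the old EOF is not
 * exposed.
 */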
890 
891 /**
892  * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
893  * @inode: inode
894  * @lstart: offset of beginning of hole
895  * @lend: offset of last byte of hole
896  *
897  * This function should typically be called before the filesystem
898  * releases resources associated with the freed range (eg. deallocates
899  * blocks). This way, pagecache will always stay logically coherent
900  * with on-disk format, and the filesystem would not have to deal with
901  * situations such as writepage being called for a page that has already
902  * had its underlying blocks deallocated.
903  */
904 void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
905 {
906 	struct address_space *mapping = inode->i_mapping;
907 	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
908 	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
909 	/*
910 	 * This rounding is currently just for example: unmap_mapping_range
911 	 * expands its hole outwards, whereas we want it to contract the hole
912 	 * inwards.  However, existing callers of truncate_pagecache_range are
913 	 * doing their own page rounding first.  Note that unmap_mapping_range
914 	 * allows holelen 0 for all, and we allow lend -1 for end of file.
915 	 */
916 
917 	/*
918 	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
919 	 * once (before truncating pagecache), and without "even_cows" flag:
920 	 * hole-punching should not remove private COWed pages from the hole.
921 	 */
922 	if ((u64)unmap_end > (u64)unmap_start)
923 		unmap_mapping_range(mapping, unmap_start,
924 				    1 + unmap_end - unmap_start, 0);
925 	truncate_inode_pages_range(mapping, lstart, lend);
926 }
927 EXPORT_SYMBOL(truncate_pagecache_range);
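
/*
 * Usage sketch (an approximation of a FALLOC_FL_PUNCH_HOLE handler;
 * myfs_deallocate_blocks() is hypothetical): the pagecache over the hole
 * is dropped under the invalidate lock before the blocks go away,
 * mirroring the ordering described above:
 *
 *	filemap_invalidate_lock(inode->i_mapping);
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	myfs_deallocate_blocks(inode, offset, len);
 *	filemap_invalidate_unlock(inode->i_mapping);
 */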
928