// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
}

static void clear_shadow_entries(struct address_space *mapping,
		struct folio_batch *fbatch, pgoff_t *indices)
{
	int i;

	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		if (xa_is_value(folio))
			__clear_shadow_entry(mapping, indices[i], folio);
	}

	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);
}

/*
 * Unconditionally remove exceptional entries.  Usually called from truncate
 * path.  Note that the folio_batch may be altered by this function by removing
 * exceptional entries similar to what folio_batch_remove_exceptionals() does.
 */
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
				struct folio_batch *fbatch, pgoff_t *indices)
{
	int i, j;
	bool dax;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < folio_batch_count(fbatch); j++)
		if (xa_is_value(fbatch->folios[j]))
			break;

	if (j == folio_batch_count(fbatch))
		return;

	dax = dax_mapping(mapping);
	if (!dax) {
		spin_lock(&mapping->host->i_lock);
		xa_lock_irq(&mapping->i_pages);
	}

	for (i = j; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(folio)) {
			fbatch->folios[j++] = folio;
			continue;
		}

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, folio);
	}

	if (!dax) {
		xa_unlock_irq(&mapping->i_pages);
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	fbatch->nr = j;
}

/**
 * folio_invalidate - Invalidate part or all of a folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * folio_invalidate() is called when all or part of the folio has become
 * invalidated by a truncate operation.
 *
 * folio_invalidate() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
	const struct address_space_operations *aops = folio->mapping->a_ops;

	if (aops->invalidate_folio)
		aops->invalidate_folio(folio, offset, length);
}
EXPORT_SYMBOL_GPL(folio_invalidate);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void truncate_cleanup_folio(struct folio *folio)
{
	if (folio_mapped(folio))
		unmap_mapping_folio(folio);

	if (folio_needs_release(folio))
		folio_invalidate(folio, 0, folio_size(folio));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	folio_cancel_dirty(folio);
}

int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
{
	if (folio->mapping != mapping)
		return -EIO;

	truncate_cleanup_folio(folio);
	filemap_remove_folio(folio);
	return 0;
}

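/*
 * Illustrative sketch, not part of mm/truncate.c: a minimal ->invalidate_folio
 * handler, the hook that folio_invalidate() above dispatches to, for a
 * hypothetical filesystem ("myfs") that keeps a kmalloc()ed blob in folio
 * private data.  The myfs_* name is an assumption for illustration only;
 * block-based filesystems would normally use block_invalidate_folio() here.
 */
#if 0	/* example only, not compiled */
static void myfs_invalidate_folio(struct folio *folio, size_t offset,
		size_t length)
{
	/*
	 * Partial invalidation only needs to guarantee that no dirty data or
	 * I/O remains beyond the truncation point; private state is torn
	 * down only when the whole folio is being invalidated.
	 */
	if (offset == 0 && length == folio_size(folio))
		kfree(folio_detach_private(folio));
}
#endif
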
/*
 * Handle partial folios.  The folio may be entirely within the
 * range if a split has raced with us.  If not, we zero the part of the
 * folio that's within the [start, end] range, and then split the folio if
 * it's large.  split_folio() will discard pages which now lie beyond
 * i_size, and we rely on the caller to discard pages which lie within a
 * newly created hole.
 *
 * Returns false if splitting failed so the caller can avoid
 * discarding the entire folio which is stubbornly unsplit.
 */
bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
	loff_t pos = folio_pos(folio);
	unsigned int offset, length;

	if (pos < start)
		offset = start - pos;
	else
		offset = 0;
	length = folio_size(folio);
	if (pos + length <= (u64)end)
		length = length - offset;
	else
		length = end + 1 - pos - offset;

	folio_wait_writeback(folio);
	if (length == folio_size(folio)) {
		truncate_inode_folio(folio->mapping, folio);
		return true;
	}

	/*
	 * We may be zeroing pages we're about to discard, but it avoids
	 * doing a complex calculation here, and then doing the zeroing
	 * anyway if the page split fails.
	 */
	if (!mapping_inaccessible(folio->mapping))
		folio_zero_range(folio, offset, length);

	if (folio_needs_release(folio))
		folio_invalidate(folio, offset, length);
	if (!folio_test_large(folio))
		return true;
	if (split_folio(folio) == 0)
		return true;
	if (folio_test_dirty(folio))
		return false;
	truncate_inode_folio(folio->mapping, folio);
	return true;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_folio(mapping, folio);
}
EXPORT_SYMBOL(generic_error_remove_folio);

/**
 * mapping_evict_folio() - Remove an unused folio from the page-cache.
 * @mapping: The mapping this folio belongs to.
 * @folio: The folio to remove.
 *
 * Safely remove one folio from the page cache.
 * It only drops clean, unused folios.
 *
 * Context: Folio must be locked.
 * Return: The number of pages successfully removed.
 */
long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
{
	/* The page may have been truncated before it was locked */
	if (!mapping)
		return 0;
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return 0;
	/* The refcount will be elevated if any page in the folio is mapped */
	if (folio_ref_count(folio) >
			folio_nr_pages(folio) + folio_has_private(folio) + 1)
		return 0;
	if (!filemap_release_folio(folio, 0))
		return 0;

	return remove_mapping(mapping, folio);
}

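/*
 * Illustrative sketch, not part of mm/truncate.c: how a filesystem might wire
 * the hooks used above into its address_space_operations.  The myfs_* handlers
 * are hypothetical and not shown; generic_error_remove_folio() is the helper
 * defined above, and myfs_invalidate_folio() refers to the earlier sketch.
 */
#if 0	/* example only, not compiled */
static const struct address_space_operations myfs_aops = {
	.read_folio		= myfs_read_folio,
	.writepages		= myfs_writepages,
	.dirty_folio		= filemap_dirty_folio,
	.invalidate_folio	= myfs_invalidate_folio,
	.release_folio		= myfs_release_folio,
	.error_remove_folio	= generic_error_remove_folio,
};
#endif
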
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidate_folio() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	struct folio_batch fbatch;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;
	struct folio	*folio;
	bool		same_folio;

	if (mapping_empty(mapping))
		return;

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated.  Partial folios at either end of the range are handled
	 * separately below via truncate_inode_partial_folio().
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			truncate_cleanup_folio(fbatch.folios[i]);
		delete_from_page_cache_batch(mapping, &fbatch);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_unlock(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
	if (!IS_ERR(folio)) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio_next_index(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio) {
		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
						FGP_LOCK, 0);
		if (!IS_ERR(folio)) {
			if (!truncate_inode_partial_folio(folio, lstart, lend))
				end = folio->index;
			folio_unlock(folio);
			folio_put(folio);
		}
	}

	index = start;
	while (index < end) {
		cond_resched();
		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing page->index */

			if (xa_is_value(folio))
				continue;

			folio_lock(folio);
			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
			folio_wait_writeback(folio);
			truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
		folio_batch_release(&fbatch);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem and
 * mapping->invalidate_lock.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __filemap_remove_folio()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	if (!mapping_empty(mapping)) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);

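/*
 * Illustrative sketch, not part of mm/truncate.c: the usual shape of a
 * super_operations .evict_inode handler for a hypothetical filesystem
 * ("myfs"), showing where truncate_inode_pages_final() above fits into
 * inode teardown.  The myfs_* name and the elided cleanup are assumptions.
 */
#if 0	/* example only, not compiled */
static void myfs_evict_inode(struct inode *inode)
{
	/* Drop every remaining pagecache folio; sets AS_EXITING first. */
	truncate_inode_pages_final(&inode->i_data);

	/* ... filesystem-specific cleanup (free on-disk blocks, etc.) ... */

	clear_inode(inode);
}
#endif
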
/**
 * mapping_try_invalidate - Invalidate all the evictable folios of one inode
 * @mapping: the address_space which holds the folios to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_failed: How many folio invalidations failed
 *
 * This function is similar to invalidate_mapping_pages(), except that it
 * returns the number of folios which could not be evicted in @nr_failed.
 */
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;
	bool xa_has_values = false;

	folio_batch_init(&fbatch);
	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing folio->index */

			if (xa_is_value(folio)) {
				xa_has_values = true;
				count++;
				continue;
			}

			ret = mapping_evict_folio(mapping, folio);
			folio_unlock(folio);
			/*
			 * Invalidation is a hint that the folio is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret) {
				deactivate_file_folio(folio);
				/* Likely in the lru cache of a remote CPU */
				if (nr_failed)
					(*nr_failed)++;
			}
			count += ret;
		}

		if (xa_has_values)
			clear_shadow_entries(mapping, &fbatch, indices);

		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return count;
}

/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries.  It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: The number of indices that had their contents invalidated.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return mapping_try_invalidate(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);

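/*
 * Illustrative sketch, not part of mm/truncate.c: converting a byte range
 * into the inclusive page-index range that invalidate_mapping_pages() above
 * expects, in the spirit of the POSIX_FADV_DONTNEED handling.  The function
 * and variable names are assumptions for illustration only.
 */
#if 0	/* example only, not compiled */
static void example_drop_range(struct address_space *mapping, loff_t offset,
		loff_t len)
{
	/* First index fully inside the range: round the start offset up. */
	pgoff_t start = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* Last byte of the range maps to the (inclusive) end index. */
	pgoff_t end = (offset + len - 1) >> PAGE_SHIFT;

	if (end >= start)
		invalidate_mapping_pages(mapping, start, end);
}
#endif
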
/*
 * This is like mapping_evict_folio(), except it ignores the folio's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave folios behind because
 * shrink_folio_list() has a temp ref on them, or because they're transiently
 * sitting in the folio_add_lru() caches.
 */
static int invalidate_complete_folio2(struct address_space *mapping,
					struct folio *folio)
{
	if (folio->mapping != mapping)
		return 0;

	if (!filemap_release_folio(folio, GFP_KERNEL))
		return 0;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	if (folio_test_dirty(folio))
		goto failed;

	BUG_ON(folio_has_private(folio));
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
	return 1;
failed:
	xa_unlock_irq(&mapping->i_pages);
	spin_unlock(&mapping->host->i_lock);
	return 0;
}

static int folio_launder(struct address_space *mapping, struct folio *folio)
{
	if (!folio_test_dirty(folio))
		return 0;
	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
		return 0;
	return mapping->a_ops->launder_folio(folio);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	bool xa_has_values = false;

	if (mapping_empty(mapping))
		return 0;

	folio_batch_init(&fbatch);
	index = start;
	while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing folio->index */

			if (xa_is_value(folio)) {
				xa_has_values = true;
				if (dax_mapping(mapping) &&
				    !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
					ret = -EBUSY;
				continue;
			}

			if (!did_range_unmap && folio_mapped(folio)) {
				/*
				 * If folio is mapped, before taking its lock,
				 * zap the rest of the file in one hit.
				 */
				unmap_mapping_pages(mapping, indices[i],
						(1 + end - indices[i]), false);
				did_range_unmap = 1;
			}

			folio_lock(folio);
			if (unlikely(folio->mapping != mapping)) {
				folio_unlock(folio);
				continue;
			}
			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
			folio_wait_writeback(folio);

			if (folio_mapped(folio))
				unmap_mapping_folio(folio);
			BUG_ON(folio_mapped(folio));

			ret2 = folio_launder(mapping, folio);
			if (ret2 == 0) {
				if (!invalidate_complete_folio2(mapping, folio))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			folio_unlock(folio);
		}

		if (xa_has_values)
			clear_shadow_entries(mapping, &fbatch, indices);

		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive.  And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether a page cache entry got
	 * remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or
 * by write starting after current i_size.  We mark the page straddling
 * current i_size RO so that page_mkwrite() is called on the first
 * write access to the page.  The filesystem will update its per-block
 * information before user writes to the page via mmap after the i_size
 * has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the folio will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe the new
 * i_size value before we are prepared to store mmap writes at the new inode
 * size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct folio *folio;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize >= PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
	/* Folio not cached? Nothing to do */
	if (IS_ERR(folio))
		return;
	/*
	 * See folio_clear_dirty_for_io() for details why folio_mark_dirty()
	 * is needed.
	 */
	if (folio_mkclean(folio))
		folio_mark_dirty(folio);

	/*
	 * The post-eof range of the folio must be zeroed before it is exposed
	 * to the file.  Writeback normally does this, but since i_size has
	 * been increased we handle it here.
	 */
	if (folio_test_dirty(folio)) {
		unsigned int offset, end;

		offset = from - folio_pos(folio);
		end = min_t(unsigned int, to - folio_pos(folio),
			    folio_size(folio));
		folio_zero_segment(folio, offset, end);
	}

	folio_unlock(folio);
	folio_put(folio);
}
EXPORT_SYMBOL(pagecache_isize_extended);

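/*
 * Illustrative sketch, not part of mm/truncate.c: a minimal .setattr handler
 * for a hypothetical filesystem ("myfs"), showing the call site that
 * truncate_setsize() above is designed for.  It assumes the mnt_idmap-based
 * setattr_prepare()/setattr_copy() signatures of current kernels; a real
 * filesystem would also truncate its on-disk blocks after the pagecache.
 */
#if 0	/* example only, not compiled */
static int myfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	if (attr->ia_valid & ATTR_SIZE)
		truncate_setsize(inode, attr->ia_size);

	setattr_copy(idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
#endif
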
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
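
/*
 * Illustrative sketch, not part of mm/truncate.c: the typical shape of a
 * hole-punching helper in a hypothetical filesystem ("myfs"), showing how
 * truncate_pagecache_range() above is used with an inclusive end offset
 * before the on-disk blocks are released.  The myfs_* names are assumptions,
 * and a real implementation also takes the relevant inode/invalidate locks.
 */
#if 0	/* example only, not compiled */
static int myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	/* Drop the pagecache over the hole first, keeping cache and disk coherent. */
	truncate_pagecache_range(inode, offset, offset + len - 1);

	/* ... then deallocate the underlying blocks (filesystem specific). */
	return myfs_free_blocks(inode, offset, len);
}
#endif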