// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page */
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);
}

/*
 * Unconditionally remove exceptional entries.  Usually called from truncate
 * path.  Note that the folio_batch may be altered by this function by removing
 * exceptional entries similar to what folio_batch_remove_exceptionals() does.
 */
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
				struct folio_batch *fbatch, pgoff_t *indices)
{
	int i, j;
	bool dax;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < folio_batch_count(fbatch); j++)
		if (xa_is_value(fbatch->folios[j]))
			break;

	if (j == folio_batch_count(fbatch))
		return;

	dax = dax_mapping(mapping);
	if (!dax) {
		spin_lock(&mapping->host->i_lock);
		xa_lock_irq(&mapping->i_pages);
	}

	for (i = j; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(folio)) {
			fbatch->folios[j++] = folio;
			continue;
		}

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, folio);
	}

	if (!dax) {
		xa_unlock_irq(&mapping->i_pages);
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	fbatch->nr = j;
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
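/*
 * Background sketch (illustrative, not part of this file): "exceptional"
 * entries are XArray value entries rather than folio pointers.  Reclaim
 * stores them with something like xa_mk_value(eviction_cookie) (the cookie
 * name is hypothetical) and the helpers above recognise them with
 * xa_is_value(), e.g.:
 *
 *	void *entry = xas_load(&xas);
 *	if (xa_is_value(entry))
 *		;	// shadow/DAX entry: no folio to lock or unmap
 *	else
 *		;	// a real struct folio
 */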
/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * folio_invalidate - Invalidate part or all of a folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * folio_invalidate() is called when all or part of the folio has become
 * invalidated by a truncate operation.
 *
 * folio_invalidate() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
	const struct address_space_operations *aops = folio->mapping->a_ops;

	if (aops->invalidate_folio)
		aops->invalidate_folio(folio, offset, length);
}
EXPORT_SYMBOL_GPL(folio_invalidate);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void truncate_cleanup_folio(struct folio *folio)
{
	if (folio_mapped(folio))
		unmap_mapping_folio(folio);

	if (folio_has_private(folio))
		folio_invalidate(folio, 0, folio_size(folio));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	folio_cancel_dirty(folio);
	folio_clear_mappedtodisk(folio);
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	return remove_mapping(mapping, page);
}

int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
{
	if (folio->mapping != mapping)
		return -EIO;

	truncate_cleanup_folio(folio);
	filemap_remove_folio(folio);
	return 0;
}
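/*
 * Illustrative sketch (not part of this file): folio_invalidate() above only
 * dispatches to the owning mapping's ->invalidate_folio.  A buffer_head-based
 * filesystem would typically point that hook at block_invalidate_folio(), so
 * private buffers covering the truncated range get cleaned up, e.g. (the
 * myfs_aops name is hypothetical):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.invalidate_folio	= block_invalidate_folio,
 *		...
 *	};
 */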
/*
 * Handle partial folios.  The folio may be entirely within the
 * range if a split has raced with us.  If not, we zero the part of the
 * folio that's within the [start, end] range, and then split the folio if
 * it's large.  split_huge_page() will discard pages which now lie beyond
 * i_size, and we rely on the caller to discard pages which lie within a
 * newly created hole.
 *
 * Returns false if splitting failed so the caller can avoid
 * discarding the entire folio which is stubbornly unsplit.
 */
bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
	loff_t pos = folio_pos(folio);
	unsigned int offset, length;

	if (pos < start)
		offset = start - pos;
	else
		offset = 0;
	length = folio_size(folio);
	if (pos + length <= (u64)end)
		length = length - offset;
	else
		length = end + 1 - pos - offset;

	folio_wait_writeback(folio);
	if (length == folio_size(folio)) {
		truncate_inode_folio(folio->mapping, folio);
		return true;
	}

	/*
	 * We may be zeroing pages we're about to discard, but it avoids
	 * doing a complex calculation here, and then doing the zeroing
	 * anyway if the page split fails.
	 */
	folio_zero_range(folio, offset, length);

	if (folio_has_private(folio))
		folio_invalidate(folio, offset, length);
	if (!folio_test_large(folio))
		return true;
	if (split_huge_page(&folio->page) == 0)
		return true;
	if (folio_test_dirty(folio))
		return false;
	truncate_inode_folio(folio->mapping, folio);
	return true;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_folio(mapping, page_folio(page));
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.  The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
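/*
 * Worked example (illustrative only) of the offset/length arithmetic in
 * truncate_inode_partial_folio() above: take a 16KiB folio at pos 0 and a
 * hole punch of bytes [5000, 9999], i.e. start = 5000, end = 9999:
 *
 *	offset = start - pos = 5000
 *	pos + folio_size(folio) = 16384 > end, so
 *	length = end + 1 - pos - offset = 10000 - 0 - 5000 = 5000
 *
 * so bytes [5000, 9999] of the folio are zeroed and the folio is split if it
 * is large.  For a truncate to EOF (end == -1) the first branch is taken
 * instead and everything from @start to the end of the folio is zeroed.
 */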
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidate_folio() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	struct folio_batch fbatch;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;
	struct folio	*folio;
	bool		same_folio;

	if (mapping_empty(mapping))
		return;

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&fbatch, indices)) {
		index = indices[folio_batch_count(&fbatch) - 1] + 1;
		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			truncate_cleanup_folio(fbatch.folios[i]);
		delete_from_page_cache_batch(mapping, &fbatch);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_unlock(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
	if (folio) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio->index + folio_nr_pages(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio)
		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
						FGP_LOCK, 0);
	if (folio) {
		if (!truncate_inode_partial_folio(folio, lstart, lend))
			end = folio->index;
		folio_unlock(folio);
		folio_put(folio);
	}

	index = start;
	while (index < end) {
		cond_resched();
		if (!find_get_entries(mapping, index, end - 1, &fbatch,
				indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(folio))
				continue;

			folio_lock(folio);
			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
			folio_wait_writeback(folio);
			truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
			index = folio_index(folio) + folio_nr_pages(folio) - 1;
		}
		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
		folio_batch_release(&fbatch);
		index++;
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
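/*
 * Illustrative usage sketch (not part of this file): a filesystem punching a
 * hole typically removes the affected page cache with the range variant
 * before freeing the underlying blocks, along the lines of the hypothetical
 * helper below (real callers usually go through truncate_pagecache_range()):
 *
 *	static void myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 *	{
 *		truncate_inode_pages_range(inode->i_mapping, offset,
 *					   offset + len - 1);
 *		// ... then deallocate the on-disk blocks for [offset, offset + len) ...
 *	}
 */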
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem and
 * mapping->invalidate_lock.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	if (!mapping_empty(mapping)) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);

static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	folio_batch_init(&fbatch);
	while (find_lock_entries(mapping, index, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct page *page = &fbatch.folios[i]->page;

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page)) {
				count += invalidate_exceptional_entry(mapping,
								      index,
								      page);
				continue;
			}
			index += thp_nr_pages(page) - 1;

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret) {
				deactivate_file_page(page);
				/* It is likely on the pagevec of a remote CPU */
				if (nr_pagevec)
					(*nr_pagevec)++;
			}
			count += ret;
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
		index++;
	}
	return count;
}
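/*
 * Illustrative usage sketch (not part of this file): a filesystem's
 * ->evict_inode implementation typically performs the final truncate like
 * so (myfs_evict_inode is hypothetical):
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		// ... release filesystem-private resources ...
 *		clear_inode(inode);
 *	}
 */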
/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries. It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: the number of the cache entries that were invalidated
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);

/**
 * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_pagevec: invalidate failed page number for caller
 *
 * This helper is similar to invalidate_mapping_pages(), except that it accounts
 * for pages that are likely on a pagevec and counts them in @nr_pagevec, which
 * will be used by the caller.
 */
void invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	__invalidate_mapping_pages(mapping, start, end, nr_pagevec);
}

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int invalidate_complete_folio2(struct address_space *mapping,
					struct folio *folio)
{
	if (folio->mapping != mapping)
		return 0;

	if (folio_has_private(folio) &&
	    !filemap_release_folio(folio, GFP_KERNEL))
		return 0;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	if (folio_test_dirty(folio))
		goto failed;

	BUG_ON(folio_has_private(folio));
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
	return 1;
failed:
	xa_unlock_irq(&mapping->i_pages);
	spin_unlock(&mapping->host->i_lock);
	return 0;
}

static int folio_launder(struct address_space *mapping, struct folio *folio)
{
	if (!folio_test_dirty(folio))
		return 0;
	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
		return 0;
	return mapping->a_ops->launder_folio(folio);
}
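/*
 * Illustrative usage sketch (not part of this file): a POSIX_FADV_DONTNEED
 * style caller first kicks off writeback and then drops whatever is clean,
 * roughly:
 *
 *	filemap_fdatawrite_range(mapping, offset, endbyte);
 *	invalidate_mapping_pages(mapping, offset >> PAGE_SHIFT,
 *				 endbyte >> PAGE_SHIFT);
 *
 * Pages that are dirty, mapped or under writeback are simply skipped.
 */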
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping_empty(mapping))
		return 0;

	folio_batch_init(&fbatch);
	index = start;
	while (find_get_entries(mapping, index, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing folio->index */
			index = indices[i];

			if (xa_is_value(folio)) {
				if (!invalidate_exceptional_entry2(mapping,
						index, folio))
					ret = -EBUSY;
				continue;
			}

			if (!did_range_unmap && folio_mapped(folio)) {
				/*
				 * If folio is mapped, before taking its lock,
				 * zap the rest of the file in one hit.
				 */
				unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
				did_range_unmap = 1;
			}

			folio_lock(folio);
			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
			if (folio->mapping != mapping) {
				folio_unlock(folio);
				continue;
			}
			folio_wait_writeback(folio);

			if (folio_mapped(folio))
				unmap_mapping_folio(folio);
			BUG_ON(folio_mapped(folio));

			ret2 = folio_launder(mapping, folio);
			if (ret2 == 0) {
				if (!invalidate_complete_folio2(mapping, folio))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive.  And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
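/*
 * Illustrative usage sketch (not part of this file): a classic caller is the
 * direct I/O write path, which must not let stale page cache survive a write
 * that bypasses it.  Roughly:
 *
 *	filemap_write_and_wait_range(mapping, pos, end);
 *	invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
 *				      end >> PAGE_SHIFT);
 *	// ... issue the direct write ...
 *	invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
 *				      end >> PAGE_SHIFT);
 *
 * A -EBUSY return means some page could not be dropped (e.g. it was dirtied
 * again in the meantime), which callers may treat as a reason to fall back
 * to buffered I/O or to warn.
 */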
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
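/*
 * Illustrative usage sketch (not part of this file): the usual shape of a
 * filesystem's ->setattr handling of ATTR_SIZE (the myfs_* helper is
 * hypothetical):
 *
 *	if (attr->ia_valid & ATTR_SIZE) {
 *		truncate_setsize(inode, attr->ia_size);
 *		myfs_truncate_blocks(inode, attr->ia_size);
 *	}
 *
 * i.e. update i_size and the page cache first, then free the on-disk blocks
 * beyond the new size.
 */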
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size.  We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
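/*
 * Illustrative usage sketch (not part of this file): a filesystem's
 * FALLOC_FL_PUNCH_HOLE implementation typically drops the affected page
 * cache with this helper before freeing blocks (the myfs_* helper is
 * hypothetical):
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	myfs_free_blocks_in_range(inode, offset, len);
 */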