/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
 * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
 * the page at all, as we may be racing with memory reclaim and it can free both
 * the bufferhead chain and the page as it will see the page as clean and
 * unused.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
	struct buffer_head	*head, *bh, *next;
	unsigned int		off = 0;
	unsigned int		bsize;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(end < PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

	bh = head = page_buffers(bvec->bv_page);

	bsize = bh->b_size;
	do {
		next = bh->b_this_page;
		if (off < bvec->bv_offset)
			goto next_bh;
		if (off > end)
			break;
		bh->b_end_io(bh, !error);
next_bh:
		off += bsize;
	} while ((bh = next) != head);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*last = ioend->io_bio;
	struct bio		*bio, *next;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

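/*
 * For reference, the append transaction allocated above is not committed from
 * the submitting context; it is handed to the ioend and finished from the
 * completion workqueue.  The life cycle looks roughly like:
 *
 *	xfs_submit_ioend()
 *	    xfs_setfilesize_trans_alloc()	<- reserve log space, hand off
 *	...
 *	xfs_end_io()				<- workqueue context
 *	    xfs_setfilesize_ioend()
 *	        __xfs_setfilesize()		<- commit or cancel
 */
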
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error = ioend->io_bio->bi_error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		error = xfs_iomap_write_unwritten(ip, offset, size);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

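/*
 * Completion handler for the last bio in an ioend.  Work that needs a
 * transaction context (unwritten extent conversion, CoW remapping, or an
 * on-disk size update) is deferred to a workqueue; plain overwrites are
 * finished directly here.
 */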
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, bio->bi_error);
}

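/*
 * Look up (and, for delalloc, allocate) the extent backing @offset for
 * writeback.  The caller caches the result in the writepage context and keeps
 * reusing it for as long as xfs_imap_valid() says it still covers the offset
 * being written, so the lookup is done once per extent rather than once per
 * block.
 */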
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly.  That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it.  This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_error = status;
		bio_endio(ioend->io_bio);
		return status;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
}

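/*
 * Allocate and initialise an ioend for writeback starting at @offset.  The
 * ioend is embedded in the same allocation as its first bio: the bio comes
 * from xfs_ioend_bioset and container_of() on io_inline_bio recovers the
 * surrounding xfs_ioend, so a single mempool-backed allocation covers both.
 */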
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio		*new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Completed ioends are added to @iolist so that the caller can submit
 * them once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

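/*
 * Translate the file offset of this buffer into a disk block number using the
 * current extent mapping and mark the buffer mapped.
 */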
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

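/*
 * Check whether writeback at @offset needs to go through the CoW fork.  If a
 * CoW mapping covers the offset, switch the writepage context to XFS_IO_COW,
 * allocating real blocks first if the CoW reservation is still delalloc.
 */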
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping, keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	__uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here.  That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now.  The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	__uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    | Straddles  |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list().  Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty
	 * but otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, skip the former and warn if we've left any lingering
	 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
	 * or unwritten buffers and warn if the page is not dirty. Otherwise
	 * try to release the buffers.
	 */
	xfs_count_page_state(page, &delalloc, &unwritten);

	if (delalloc) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}
	if (unwritten) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling us, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

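/*
 * Read-only block mapping callback used by the buffered read and bmap paths
 * via mpage_readpage(s) and generic_block_bmap().  Writeback and direct I/O
 * do not come through here, hence the BUG_ON(create).
 */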
static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
			ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
					   : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK &&
	    !ISUNWRITTEN(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 */
	if (xfs_is_reflink_inode(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die.  Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};