// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"

struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

STATIC int
xfs_setfilesize_trans_alloc(
	struct iomap_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_private = tp;

	/*
	 * We may pass freeze protection with a transaction. So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}
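
/*
 * A rough sketch of the flow implemented by the helpers above and below: the
 * on-disk size update runs in two steps across two threads.  At submission
 * time xfs_setfilesize_trans_alloc() reserves a tr_fsyncts transaction,
 * stashes it in ioend->io_private and hands the freeze protection and NOFS
 * annotations over to the completion side.  At I/O completion time
 * xfs_setfilesize_ioend() picks those annotations back up in the worker
 * thread and either cancels the transaction on error or commits the size
 * update via __xfs_setfilesize() below.
 */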

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct iomap_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_private;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED)
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);

done:
	if (ioend->io_private)
		error = xfs_setfilesize_ioend(ioend, error);
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}
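
/*
 * A rough sketch of how completions reach xfs_end_ioend() (see the helpers
 * below):
 *
 *   bio completion
 *     xfs_end_bio()
 *       add the ioend to ip->i_ioend_list and, if the list was empty,
 *       queue ip->i_ioend_work on mp->m_unwritten_workqueue
 *   worker
 *     xfs_end_io()
 *       splice the per-inode list, sort it, merge adjacent ioends and
 *       call xfs_end_ioend() on each remaining ioend
 */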

/*
 * If the to be merged ioend has a preallocated transaction for file
 * size updates we need to ensure the ioend it is merged into also
 * has one.  If it already has one we can simply cancel the transaction
 * as it is guaranteed to be clean.
 */
static void
xfs_ioend_merge_private(
	struct iomap_ioend	*ioend,
	struct iomap_ioend	*next)
{
	if (!ioend->io_private) {
		ioend->io_private = next->io_private;
		next->io_private = NULL;
	} else {
		xfs_setfilesize_ioend(next, -ECANCELED);
	}
}

/* Finish all pending io completions. */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
		xfs_end_ioend(ioend);
	}
}

static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
{
	return ioend->io_private ||
		ioend->io_type == IOMAP_UNWRITTEN ||
		(ioend->io_flags & IOMAP_F_SHARED);
}

STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

	ASSERT(xfs_ioend_needs_workqueue(ioend));

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
					 &ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	loff_t				offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data fork
	 * because concurrent changes could have invalidated the extent. Check
	 * the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}
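
/*
 * Illustrative (non-exhaustive) timeline for the sequence number check in
 * xfs_imap_valid() above, assuming a racing buffered write elsewhere in the
 * cached mapping:
 *
 *   writeback                            other thread
 *   ---------                            ------------
 *   xfs_map_blocks() samples data_seq/
 *   cow_seq under ILOCK_SHARED and
 *   caches a mapping in wpc->iomap
 *                                        write creates a COW extent and
 *                                        bumps ip->i_cowfp->if_seq
 *   xfs_imap_valid() for the next block
 *   sees the cow_seq mismatch and
 *   forces a fresh lookup
 */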

/*
 * Pass in a delalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	int			error;
	unsigned		*seq;

	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap.  Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}
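
/*
 * Example (illustrative only): if offset sits in the middle of a large
 * delalloc extent and free space is badly fragmented,
 * xfs_bmapi_convert_delalloc() may only convert the first part of that
 * extent on each call, so the loop above keeps converting until the
 * resulting real mapping finally covers offset.
 */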

static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	int			whichfork = XFS_DATA_FORK;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we are indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page.  Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use it
	 * directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one.  This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the returned real extent might be larger than the
	 * original delalloc one.  Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}
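
/*
 * Submission-side preparation (a sketch of xfs_prepare_ioend() below): just
 * before the bio is submitted, CoW extents are converted to regular extents,
 * a size-update transaction is preallocated for writes that may extend the
 * on-disk inode size, and any ioend that needs transactional or extent
 * conversion work at completion time gets xfs_end_bio() as its bi_end_io
 * handler so that completion runs from the per-inode workqueue rather than
 * in bio completion context.
 */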

static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ((ioend->io_flags & IOMAP_F_SHARED) ||
	     ioend->io_type != IOMAP_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_private)
		status = xfs_setfilesize_trans_alloc(ioend);

	memalloc_nofs_restore(nofs_flag);

	if (xfs_ioend_needs_workqueue(ioend))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction.  Indeed, if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see an ENOSPC in writeback).
 */
static void
xfs_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	iomap_invalidatepage(page, 0, PAGE_SIZE);
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_page		= xfs_discard_page,
};

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return iomap_readpage(page, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}

static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};