// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				  struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				   folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
					wbc);
}

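/*
 * For example (assuming 4KiB folios): with i_size == 10000, the folio at
 * index 2 covers file bytes 8192..12287 and straddles i_size, so the bytes
 * at folio offsets 1808..4095 (file offsets 10000..12287) are zeroed above
 * before the folio is written back.
 */
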
/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * Implements the core of write back. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
				    struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			create_empty_buffers(folio,
					     inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writeback - Write jdata folios to the log
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct folio *folio = NULL;
	int error;

	BUG_ON(current->journal_info);
	if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
		return 0;

	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		if (folio_test_checked(folio)) {
			folio_redirty_for_writepage(wbc, folio);
			folio_unlock(folio);
			continue;
		}
		error = __gfs2_jdata_write_folio(folio, wbc);
	}

	return error;
}

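/*
 * Note on the loop above: writeback_iter() hands back each dirty folio
 * locked and with its dirty bit already cleared for I/O, taking the folio
 * from the previous iteration (or NULL to start) and reporting the final
 * status through @error.  Writing a checked folio would require a
 * transaction, which this path does not start, so checked folios are
 * redirtied and left to the regular jdata writeback path.
 */
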
/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	long initial_nr_to_write = wbc->nr_to_write;
	struct iomap_writepage_ctx wpc = {
		.inode		= mapping->host,
		.wbc		= wbc,
		.ops		= &gfs2_writeback_ops,
	};
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(&wpc);
	if (ret == 0 && wbc->nr_to_write == initial_nr_to_write)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Updated to the last folio index processed, so the caller
 *              can resume from there
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct folio_batch *fbatch,
				  pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	size_t size = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		size += folio_size(fbatch->folios[i]);
	nrblocks = size >> inode->i_blkbits;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			/*
			 * done_index is set past this page, so media errors
			 * will not choke background writeout for the entire
			 * file. This has consequences for range_cyclic
			 * semantics (ie. it may not be suitable for data
			 * integrity writeout).
			 */
			*done_index = folio_next_index(folio);
			ret = 1;
			break;
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

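/*
 * Reservation example for gfs2_write_jdata_batch() above (hypothetical
 * numbers): a batch of 15 folios of 4KiB each on a filesystem with 4KiB
 * blocks gives size = 61440 and nrblocks = 15, so the transaction is
 * opened with room for 15 journaled blocks and 15 revokes before any of
 * the folio locks are taken.
 */
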
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	tag = wbc_to_tag(wbc);

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
						   tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
					     &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

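/*
 * A "stuffed" inode holds its file data inline in the inode's own disk
 * block, directly after the struct gfs2_dinode header, rather than in
 * separate data blocks; such reads are served straight out of the dinode
 * buffer below.
 */
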
/**
 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * Returns: errno
 */
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
	struct buffer_head *dibh = NULL;
	size_t dsize = i_size_read(&ip->i_inode);
	void *from = NULL;
	int error = 0;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero folio in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(folio->index)) {
		dsize = 0;
	} else {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out;
		from = dibh->b_data + sizeof(struct gfs2_dinode);
	}

	folio_fill_tail(folio, 0, from, dsize);
	brelse(dibh);
out:
	folio_end_read(folio, error == 0);

	return error;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 *
 * Returns: errno
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error = 0;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		iomap_bio_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_read_folio(ip, folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (gfs2_withdrawn(sdp))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or a negative errno
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
			   size_t size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	size_t copied = 0;

	do {
		size_t offset, chunk;
		struct folio *folio;

		folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(folio)) {
			if (PTR_ERR(folio) == -EINTR)
				continue;
			return PTR_ERR(folio);
		}
		offset = *pos + copied - folio_pos(folio);
		chunk = min(size - copied, folio_size(folio) - offset);
		memcpy_from_folio(buf + copied, folio, offset, chunk);
		index = folio_next_index(folio);
		folio_put(folio);
		copied += chunk;
	} while (copied < size);
	(*pos) += size;
	return size;
}

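/*
 * Typical use of gfs2_internal_read() (for example, by the rindex code,
 * which reads one struct gfs2_rindex record per call):
 *
 *	loff_t pos = 0;
 *	ssize_t ret = gfs2_internal_read(ip, buf, &pos,
 *					 sizeof(struct gfs2_rindex));
 *
 * On success the full @size is copied, *pos is advanced by @size, and
 * @size is returned; otherwise a negative errno is returned.
 */
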
/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_bio_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

static bool gfs2_jdata_dirty_folio(struct address_space *mapping,
				   struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

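/*
 * current->journal_info is only non-NULL while a transaction is open, so
 * the checked flag set above marks folios that were dirtied inside a
 * transaction.  The jdata writeback paths rely on this:
 * __gfs2_jdata_write_folio() adds the buffers of a checked folio to the
 * running transaction, while gfs2_jdata_writeback() redirties such folios
 * instead of writing them.
 */
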
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	spin_lock(&sdp->sd_log_lock);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	spin_unlock(&sdp->sd_log_lock);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	spin_lock(&sdp->sd_log_lock);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	spin_unlock(&sdp->sd_log_lock);

	return try_to_free_buffers(folio);

cannot_release:
	spin_unlock(&sdp->sd_log_lock);
	return false;
}

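/*
 * Two sets of address space operations follow: regular files go through
 * iomap and need no buffer heads, while jdata files keep using buffer
 * heads so that their data can be added to the journal, hence the
 * buffer-head based dirty/invalidate/release hooks below.
 */
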
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = gfs2_jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = buffer_migrate_folio,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}