// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

static void gfs2_aspace_write_folio(struct folio *folio,
                struct writeback_control *wbc)
{
        struct buffer_head *bh, *head;
        int nr_underway = 0;
        blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

        BUG_ON(!folio_test_locked(folio));

        head = folio_buffers(folio);
        bh = head;

        do {
                if (!buffer_mapped(bh))
                        continue;
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page.  Note that this can
                 * potentially cause a busy-wait loop from flusher thread and
                 * kswapd activity, but those code paths have their own
                 * higher-level throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE) {
                        lock_buffer(bh);
                } else if (!trylock_buffer(bh)) {
                        folio_redirty_for_writepage(wbc, folio);
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
                        mark_buffer_async_write(bh);
                } else {
                        unlock_buffer(bh);
                }
        } while ((bh = bh->b_this_page) != head);

        /*
         * The folio and its buffers are protected from truncation by
         * the writeback flag, so we can drop the bh refcounts early.
         */
        BUG_ON(folio_test_writeback(folio));
        folio_start_writeback(folio);

        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(REQ_OP_WRITE | write_flags, bh);
                        nr_underway++;
                }
                bh = next;
        } while (bh != head);
        folio_unlock(folio);

        if (nr_underway == 0)
                folio_end_writeback(folio);
}
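/**
 * gfs2_aspace_writepages - write back a metadata address space
 * @mapping: The address space to write back
 * @wbc: The writeback control
 *
 * Walk the dirty folios of @mapping with writeback_iter() and submit each
 * one through gfs2_aspace_write_folio().
 *
 * Returns: errno
 */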
static int gfs2_aspace_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct folio *folio = NULL;
        int error;

        while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                gfs2_aspace_write_folio(folio, wbc);

        return error;
}

const struct address_space_operations gfs2_meta_aops = {
        .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .writepages = gfs2_aspace_writepages,
        .release_folio = gfs2_release_folio,
        .migrate_folio = buffer_migrate_folio_norefs,
};

const struct address_space_operations gfs2_rgrp_aops = {
        .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .writepages = gfs2_aspace_writepages,
        .release_folio = gfs2_release_folio,
        .migrate_folio = buffer_migrate_folio_norefs,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
        struct address_space *mapping = gfs2_glock2aspace(gl);
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct folio *folio;
        struct buffer_head *bh;
        unsigned int shift;
        unsigned long index;
        unsigned int bufnum;

        if (mapping == NULL)
                mapping = gfs2_aspace(sdp);

        shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        index = blkno >> shift;             /* convert block to page */
        bufnum = blkno - (index << shift);  /* block buf index within page */

        if (create) {
                folio = __filemap_get_folio(mapping, index,
                                FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                mapping_gfp_mask(mapping) | __GFP_NOFAIL);
                bh = folio_buffers(folio);
                if (!bh)
                        bh = create_empty_buffers(folio,
                                        sdp->sd_sb.sb_bsize, 0);
        } else {
                folio = __filemap_get_folio(mapping, index,
                                FGP_LOCK | FGP_ACCESSED, 0);
                if (IS_ERR(folio))
                        return NULL;
                bh = folio_buffers(folio);
        }

        if (!bh)
                goto out_unlock;

        bh = get_nth_bh(bh, bufnum);
        if (!buffer_mapped(bh))
                map_bh(bh, sdp->sd_vfs, blkno);

out_unlock:
        folio_unlock(folio);
        folio_put(folio);

        return bh;
}

static void meta_prep_new(struct buffer_head *bh)
{
        struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
        struct buffer_head *bh;
        bh = gfs2_getbuf(gl, blkno, CREATE);
        meta_prep_new(bh);
        return bh;
}
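/**
 * gfs2_meta_read_endio - bio completion for metadata reads
 * @bio: The completed bio
 *
 * For each folio in the bio, find the first buffer head covered by this
 * bio fragment and call the b_end_io handler of each covered buffer with
 * the I/O status.  The handlers were set up by gfs2_meta_read() before
 * submission.
 */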
static void gfs2_meta_read_endio(struct bio *bio)
{
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio) {
                struct folio *folio = fi.folio;
                struct buffer_head *bh = folio_buffers(folio);
                size_t len = fi.length;

                while (bh_offset(bh) < fi.offset)
                        bh = bh->b_this_page;
                do {
                        struct buffer_head *next = bh->b_this_page;
                        len -= bh->b_size;
                        bh->b_end_io(bh, !bio->bi_status);
                        bh = next;
                } while (bh && len);
        }
        bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
{
        while (num > 0) {
                struct buffer_head *bh = *bhs;
                struct bio *bio;

                bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
                bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> SECTOR_SHIFT);
                while (num > 0) {
                        bh = *bhs;
                        if (!bio_add_folio(bio, bh->b_folio, bh->b_size, bh_offset(bh))) {
                                BUG_ON(bio->bi_iter.bi_size == 0);
                                break;
                        }
                        bhs++;
                        num--;
                }
                bio->bi_end_io = gfs2_meta_read_endio;
                submit_bio(bio);
        }
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: DIO_WAIT if the caller wants to wait for the read to complete
 * @rahead: Do read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                   int rahead, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct buffer_head *bh, *bhs[2];
        int num = 0;

        if (gfs2_withdrawing_or_withdrawn(sdp) &&
            !gfs2_withdraw_in_prog(sdp)) {
                *bhp = NULL;
                return -EIO;
        }

        *bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                flags &= ~DIO_WAIT;
        } else {
                bh->b_end_io = end_buffer_read_sync;
                get_bh(bh);
                bhs[num++] = bh;
        }

        if (rahead) {
                bh = gfs2_getbuf(gl, blkno + 1, CREATE);

                lock_buffer(bh);
                if (buffer_uptodate(bh)) {
                        unlock_buffer(bh);
                        brelse(bh);
                } else {
                        bh->b_end_io = end_buffer_read_sync;
                        bhs[num++] = bh;
                }
        }

        gfs2_submit_bhs(REQ_OP_READ | REQ_META | REQ_PRIO, bhs, num);
        if (!(flags & DIO_WAIT))
                return 0;

        bh = *bhp;
        wait_on_buffer(bh);
        if (unlikely(!buffer_uptodate(bh))) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
                        gfs2_io_error_bh_wd(sdp, bh);
                brelse(bh);
                *bhp = NULL;
                return -EIO;
        }

        return 0;
}

/**
 * gfs2_meta_wait - Wait for a block read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        if (gfs2_withdrawing_or_withdrawn(sdp) &&
            !gfs2_withdraw_in_prog(sdp))
                return -EIO;

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
                        gfs2_io_error_bh_wd(sdp, bh);
                return -EIO;
        }
        if (gfs2_withdrawing_or_withdrawn(sdp) &&
            !gfs2_withdraw_in_prog(sdp))
                return -EIO;

        return 0;
}
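/**
 * gfs2_remove_from_journal - remove a buffer from the journal
 * @bh: The buffer to remove
 * @meta: REMOVE_META if this is a metadata buffer, REMOVE_JDATA otherwise
 *
 * Unpin the buffer if it is pinned, account for the removal in the current
 * transaction, and queue a revoke or remove the buffer from the AIL lists
 * as appropriate.  The buffer ends up neither dirty nor uptodate.
 */
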
void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
        struct address_space *mapping = bh->b_folio->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct gfs2_bufdata *bd = bh->b_private;
        struct gfs2_trans *tr = current->journal_info;
        int was_pinned = 0;

        if (test_clear_buffer_pinned(bh)) {
                trace_gfs2_pin(bd, 0);
                atomic_dec(&sdp->sd_log_pinned);
                list_del_init(&bd->bd_list);
                if (meta == REMOVE_META)
                        tr->tr_num_buf_rm++;
                else
                        tr->tr_num_databuf_rm++;
                set_bit(TR_TOUCHED, &tr->tr_flags);
                was_pinned = 1;
                brelse(bh);
        }
        if (bd) {
                if (bd->bd_tr) {
                        gfs2_trans_add_revoke(sdp, bd);
                } else if (was_pinned) {
                        bh->b_private = NULL;
                        kmem_cache_free(gfs2_bufdata_cachep, bd);
                } else if (!list_empty(&bd->bd_ail_st_list) &&
                           !list_empty(&bd->bd_ail_gl_list)) {
                        gfs2_remove_from_ail(bd);
                }
        }
        clear_buffer_dirty(bh);
        clear_buffer_uptodate(bh);
}

/**
 * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
 * @sdp: superblock
 * @bstart: starting block address of buffers to remove
 * @blen: length of buffers to be removed
 *
 * This function is called from gfs2_journal_wipe, whose job is to remove
 * buffers corresponding to deleted blocks from the journal.  If we find any
 * bufdata elements on the system ail1 list, they haven't been written to
 * the journal yet, so we remove them.
 */
static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
{
        struct gfs2_trans *tr, *s;
        struct gfs2_bufdata *bd, *bs;
        struct buffer_head *bh;
        u64 end = bstart + blen;

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
                                         bd_ail_st_list) {
                        bh = bd->bd_bh;
                        if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
                                continue;

                        gfs2_remove_from_journal(bh, REMOVE_JDATA);
                }
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
}
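/**
 * gfs2_getjdatabuf - look up a journaled data buffer
 * @ip: The GFS2 inode
 * @blkno: The block number (filesystem scope)
 *
 * Like gfs2_getbuf() in the NO_CREATE case, but the buffer is looked up in
 * the inode's page cache instead of the glock's metadata address space.
 *
 * Returns: the buffer, or NULL if it is not cached
 */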
static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct folio *folio;
        struct buffer_head *bh;
        unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        unsigned long index = blkno >> shift;  /* convert block to page */
        unsigned int bufnum = blkno - (index << shift);

        folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED, 0);
        if (IS_ERR(folio))
                return NULL;
        bh = folio_buffers(folio);
        if (bh)
                bh = get_nth_bh(bh, bufnum);
        folio_unlock(folio);
        folio_put(folio);
        return bh;
}

/**
 * gfs2_journal_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *bh;
        int ty;

        /* This can only happen during incomplete inode creation. */
        if (!ip->i_gl)
                return;

        gfs2_ail1_wipe(sdp, bstart, blen);
        while (blen) {
                ty = REMOVE_META;
                bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
                if (!bh && gfs2_is_jdata(ip)) {
                        bh = gfs2_getjdatabuf(ip, bstart);
                        ty = REMOVE_JDATA;
                }
                if (bh) {
                        lock_buffer(bh);
                        gfs2_log_lock(sdp);
                        spin_lock(&sdp->sd_ail_lock);
                        gfs2_remove_from_journal(bh, ty);
                        spin_unlock(&sdp->sd_ail_lock);
                        gfs2_log_unlock(sdp);
                        unlock_buffer(bh);
                        brelse(bh);
                }

                bstart++;
                blen--;
        }
}

/**
 * gfs2_meta_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @mtype: The block type (GFS2_METATYPE_*)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
                     struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_glock *gl = ip->i_gl;
        struct buffer_head *bh;
        int ret = 0;
        int rahead = 0;

        if (num == ip->i_no_addr)
                rahead = ip->i_rahead;

        ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
        if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
                brelse(bh);
                ret = -EIO;
        } else {
                *bhp = bh;
        }
        return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct buffer_head *first_bh, *bh;
        u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
                     sdp->sd_sb.sb_bsize_shift;

        BUG_ON(!extlen);

        if (max_ra < 1)
                max_ra = 1;
        if (extlen > max_ra)
                extlen = max_ra;

        first_bh = gfs2_getbuf(gl, dblock, CREATE);

        if (buffer_uptodate(first_bh))
                goto out;
        bh_read_nowait(first_bh, REQ_META | REQ_PRIO);

        dblock++;
        extlen--;

        while (extlen) {
                bh = gfs2_getbuf(gl, dblock, CREATE);

                bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
                brelse(bh);
                dblock++;
                extlen--;
                if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
                        goto out;
        }

        wait_on_buffer(first_bh);
out:
        return first_bh;
}
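/*
 * Usage sketch (illustrative only; not called from this file): a caller
 * that already holds the inode glock might read one of the inode's
 * metadata blocks as follows.  The variable names and the metatype are
 * hypothetical.
 *
 *	struct buffer_head *bh;
 *	int error;
 *
 *	error = gfs2_meta_buffer(ip, GFS2_METATYPE_IN, blkno, &bh);
 *	if (error)
 *		return error;
 *	... examine bh->b_data ...
 *	brelse(bh);
 */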