// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

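/*
 * gfs2_aspace_writepage - write a dirty metadata page and its buffers.
 *
 * Walks the page's buffer list: dirty, mapped buffers are locked (for
 * WB_SYNC_NONE writeback, a buffer that cannot be locked without blocking
 * causes the page to be redirtied instead) and marked for async write,
 * then submitted.  Writeback state protects the buffers until completion.
 */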
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from writeback threads
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE | write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.writepage = gfs2_aspace_writepage,
	.release_folio = gfs2_release_folio,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.writepage = gfs2_aspace_writepage,
	.release_folio = gfs2_release_folio,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct folio *folio;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		folio = __filemap_get_folio(mapping, index,
				FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				mapping_gfp_mask(mapping) | __GFP_NOFAIL);
		bh = folio_buffers(folio);
		if (!bh)
			bh = create_empty_buffers(folio,
				sdp->sd_sb.sb_bsize, 0);
	} else {
		folio = __filemap_get_folio(mapping, index,
				FGP_LOCK | FGP_ACCESSED, 0);
		if (IS_ERR(folio))
			return NULL;
		bh = folio_buffers(folio);
	}

	if (!bh)
		goto out_unlock;

	bh = get_nth_bh(bh, bufnum);
	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

out_unlock:
	folio_unlock(folio);
	folio_put(folio);

	return bh;
}

static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

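/*
 * bio completion handler for metadata reads: for each segment, find the
 * buffers it covers and pass the completion status on to their individual
 * b_end_io handlers (end_buffer_read_sync for the reads submitted below).
 */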
static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_status);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		submit_bio(bio);
	}
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @rahead: Do read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp)) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ | REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_meta_wait - Reread a block from disk
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		return -EIO;
	}
	if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
		return -EIO;

	return 0;
}

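/*
 * gfs2_remove_from_journal - take a buffer out of journal accounting.
 *
 * Unpins the buffer if it is pinned for log writing.  If its bufdata is
 * still attached to a transaction, a revoke is queued instead of freeing
 * it; otherwise the bufdata is either freed (when the buffer was pinned)
 * or removed from the AIL lists.
 */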
void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		set_bit(TR_TOUCHED, &tr->tr_flags);
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		} else if (!list_empty(&bd->bd_ail_st_list) &&
			   !list_empty(&bd->bd_ail_gl_list)) {
			gfs2_remove_from_ail(bd);
		}
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
 * @sdp: superblock
 * @bstart: starting block address of buffers to remove
 * @blen: length of buffers to be removed
 *
 * This function is called from gfs2_journal_wipe, whose job is to remove
 * buffers, corresponding to deleted blocks, from the journal. If we find any
 * bufdata elements on the system ail1 list, they haven't been written to
 * the journal yet. So we remove them.
 */
static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
{
	struct gfs2_trans *tr, *s;
	struct gfs2_bufdata *bd, *bs;
	struct buffer_head *bh;
	u64 end = bstart + blen;

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
					 bd_ail_st_list) {
			bh = bd->bd_bh;
			if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
				continue;

			gfs2_remove_from_journal(bh, REMOVE_JDATA);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

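/*
 * gfs2_getjdatabuf - look up a journaled data buffer in the inode's own
 * address space.  Unlike gfs2_getbuf(), which searches the glock's
 * metadata mapping, this never creates the buffer and returns NULL if
 * the block is not already cached.
 */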
static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct folio *folio;
	struct buffer_head *bh;
	unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	unsigned long index = blkno >> shift;  /* convert block to page */
	unsigned int bufnum = blkno - (index << shift);

	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		return NULL;
	bh = folio_buffers(folio);
	if (bh)
		bh = get_nth_bh(bh, bufnum);
	folio_unlock(folio);
	folio_put(folio);
	return bh;
}

/**
 * gfs2_journal_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;
	int ty;

	if (!ip->i_gl) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	gfs2_ail1_wipe(sdp, bstart, blen);
	while (blen) {
		ty = REMOVE_META;
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (!bh && gfs2_is_jdata(ip)) {
			bh = gfs2_getjdatabuf(ip, bstart);
			ty = REMOVE_JDATA;
		}
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, ty);
			spin_unlock(&sdp->sd_ail_lock);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @mtype: The block type (GFS2_METATYPE_*)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
		     struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	} else {
		*bhp = bh;
	}
	return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	bh_read_nowait(first_bh, REQ_META | REQ_PRIO);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}