// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to its in-place disk block, move it to the ail2 list.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}
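/*
 * Example (illustrative sketch, not a real call site): the pin/unpin
 * lifecycle as seen from a log flush.  gfs2_pin() takes an extra buffer
 * reference and keeps the block out of in-place writeback; once the
 * journal copy has been written, gfs2_unpin() re-dirties the buffer and
 * queues it on the new transaction's ail1 list:
 *
 *	gfs2_pin(sdp, bd->bd_bh);		// under the log lock
 *	...copy the block into the journal...
 *	gfs2_unpin(sdp, bd->bd_bh, tr);		// writeback may now proceed
 */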
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}
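/*
 * Example (sketch; the exact flag combination is an assumption for
 * illustration, not taken from this file): a log flush would submit the
 * cached bio with write-intent flags, e.g.
 *
 *	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | REQ_SYNC);
 */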
/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: Address of the cached bio pointer
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
		    unsigned size, unsigned offset, u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
}
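/*
 * Note on units: the journal code works in filesystem blocks while the
 * block layer works in 512-byte sectors, hence the sd_fsb2bb_shift
 * conversions above.  Worked example: with a 4k filesystem block size,
 * sd_fsb2bb_shift is 3, so filesystem block 10 starts at sector
 * 10 << 3 == 80.
 */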
/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */

static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: true if found, false otherwise
 */

static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}
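/*
 * Worked example (illustrative): log header sequence numbers increase
 * monotonically around the circular journal, so a journal holding the
 * sequences 7 8 9 4 5 6 has its head at the block containing 9.  The
 * search above keeps updating *head while the sequence keeps growing
 * and stops at the first block whose sequence is lower.
 */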
/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if requested (*done == false). Release refs on the
 * page so the page cache can reclaim it (put_page() twice). We grabbed a
 * reference on this page twice, first when we did a find_or_create_page()
 * to obtain the page to add it to the bio and second when we do a
 * find_get_page() here to get the page to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a page we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the page.
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}

static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, don't truncate the journal's page cache when done
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number. (i.e. the log head)
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;	/* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}
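/*
 * Example (sketch, not a real call site): journal recovery would locate
 * the head and then replay from the tail recorded in it, roughly
 *
 *	struct gfs2_log_header_host head;
 *	int error = gfs2_find_jhead(jd, &head, false);
 *	if (!error && !(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
 *		...replay the journal from head.lh_tail...
 */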
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}
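/*
 * Layout note (worked example): gfs2_before_commit() below emits one log
 * descriptor block followed by the journaled blocks themselves.  For
 * metadata it passes ld_length = num + 1 (the descriptor plus num blocks)
 * and ld_data1 = num; for jdata each entry also carries an escape flag,
 * so two __be64 slots per block, which is why databuf_limit() is roughly
 * half of buf_limit().
 */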
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ?
						     1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}
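/*
 * Recovery note: journal replay makes two passes over the log.  Pass 0
 * collects revoke tags (see revoke_lo_scan_elements() below) and pass 1
 * replays metadata and data blocks, skipping any block that
 * gfs2_revoke_check() reports as revoked.
 */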
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
				struct gfs2_rgrpd *rgd;

				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
				if (rgd && rgd->rd_addr == blkno &&
				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
					fs_info(sdp, "Replaying 0x%llx but we "
						"already have a bh!\n",
						(unsigned long long)blkno);
					fs_info(sdp, "busy:%d, pinned:%d\n",
						buffer_busy(rgd->rd_bits->bi_bh) ?
						1 : 0,
						buffer_pinned(rgd->rd_bits->bi_bh));
					gfs2_dump_glock(NULL, rgd->rd_gl, true);
				}
			}
			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(sdp);
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}
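/*
 * Revoke layout, illustrated: revoke_lo_before_commit() above packs
 * __be64 block numbers after a GFS2_LOG_DESC_REVOKE descriptor in the
 * first block, and after a bare GFS2_METATYPE_LB header in any
 * continuation blocks.  That is exactly the layout
 * revoke_lo_scan_elements() below walks during pass 0 of replay.
 */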
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}
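/*
 * Escaping, illustrated: a journaled data block whose first four bytes
 * happen to equal GFS2_MAGIC (0x01161970) would be mistaken for metadata
 * during replay, so gfs2_before_commit() journals a copy with those bytes
 * zeroed and sets the escape flag in the descriptor; the unescape step
 * above simply writes the magic number back.
 */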
/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};
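/*
 * Usage note (sketch): gfs2_log_ops[] is the NULL-terminated table that
 * the lops_*() helpers in lops.h walk at commit and recovery time; e.g.
 * a log flush calls each non-NULL ->lo_before_commit in array order:
 * databuf, buf, then revoke.
 */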