// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "segbuf.h"

/*
 * Segment check result
 */
enum {
        NILFS_SEG_VALID,
        NILFS_SEG_NO_SUPER_ROOT,
        NILFS_SEG_FAIL_IO,
        NILFS_SEG_FAIL_MAGIC,
        NILFS_SEG_FAIL_SEQ,
        NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
        NILFS_SEG_FAIL_CHECKSUM_FULL,
        NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
        ino_t ino;              /*
                                 * Inode number of the file that this block
                                 * belongs to
                                 */
        sector_t blocknr;       /* block number */
        __u64 vblocknr;         /* virtual block number */
        unsigned long blkoff;   /* File offset of the data block (in blocks) */
        struct list_head list;
};


static int nilfs_warn_segment_error(struct super_block *sb, int err)
{
        const char *msg = NULL;

        switch (err) {
        case NILFS_SEG_FAIL_IO:
                nilfs_err(sb, "I/O error reading segment");
                return -EIO;
        case NILFS_SEG_FAIL_MAGIC:
                msg = "Magic number mismatch";
                break;
        case NILFS_SEG_FAIL_SEQ:
                msg = "Sequence number mismatch";
                break;
        case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
                msg = "Checksum error in super root";
                break;
        case NILFS_SEG_FAIL_CHECKSUM_FULL:
                msg = "Checksum error in segment payload";
                break;
        case NILFS_SEG_FAIL_CONSISTENCY:
                msg = "Inconsistency found";
                break;
        case NILFS_SEG_NO_SUPER_ROOT:
                msg = "No super root in the last segment";
                break;
        default:
                nilfs_err(sb, "unrecognized segment error %d", err);
                return -EINVAL;
        }
        nilfs_warn(sb, "invalid segment: %s", msg);
        return -EINVAL;
}

/**
 * nilfs_compute_checksum - compute checksum of consecutive blocks
 * @nilfs: nilfs object
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 *
 * Return: 0 on success, or %-EIO if an I/O error occurs.
 */
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
                                  struct buffer_head *bhs, u32 *sum,
                                  unsigned long offset, u64 check_bytes,
                                  sector_t start, unsigned long nblock)
{
        unsigned int blocksize = nilfs->ns_blocksize;
        unsigned long size;
        u32 crc;

        BUG_ON(offset >= blocksize);
        check_bytes -= offset;
        size = min_t(u64, check_bytes, blocksize - offset);
        crc = crc32_le(nilfs->ns_crc_seed,
                       (unsigned char *)bhs->b_data + offset, size);
        if (--nblock > 0) {
                do {
                        struct buffer_head *bh;

                        bh = __bread(nilfs->ns_bdev, ++start, blocksize);
                        if (!bh)
                                return -EIO;
                        check_bytes -= size;
                        size = min_t(u64, check_bytes, blocksize);
                        crc = crc32_le(crc, bh->b_data, size);
                        brelse(bh);
                } while (--nblock > 0);
        }
        *sum = crc;
        return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @nilfs: nilfs object
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EINVAL - Super root block corrupted.
 * * %-EIO    - I/O error.
 */
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
                                struct buffer_head **pbh, int check)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *sr;
        u32 crc;
        int ret;

        *pbh = NULL;
        bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize);
        if (unlikely(!bh_sr)) {
                ret = NILFS_SEG_FAIL_IO;
                goto failed;
        }

        sr = (struct nilfs_super_root *)bh_sr->b_data;
        if (check) {
                unsigned int bytes = le16_to_cpu(sr->sr_bytes);

                if (bytes == 0 || bytes > nilfs->ns_blocksize) {
                        ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
                        goto failed_bh;
                }
                if (nilfs_compute_checksum(
                            nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes,
                            sr_block, 1)) {
                        ret = NILFS_SEG_FAIL_IO;
                        goto failed_bh;
                }
                if (crc != le32_to_cpu(sr->sr_sum)) {
                        ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
                        goto failed_bh;
                }
        }
        *pbh = bh_sr;
        return 0;

failed_bh:
        brelse(bh_sr);

failed:
        return nilfs_warn_segment_error(nilfs->ns_sb, ret);
}

/**
 * nilfs_read_log_header - read summary header of the specified log
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: pointer to return segment summary structure
 *
 * Return: Buffer head pointer, or NULL if an I/O error occurs.
 */
static struct buffer_head *
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
                      struct nilfs_segment_summary **sum)
{
        struct buffer_head *bh_sum;

        bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
        if (bh_sum)
                *sum = (struct nilfs_segment_summary *)bh_sum->b_data;
        return bh_sum;
}

/**
 * nilfs_validate_log - verify consistency of log
 * @nilfs: nilfs object
 * @seg_seq: sequence number of segment
 * @bh_sum: buffer head of summary block
 * @sum: segment summary struct
 *
 * Return: 0 on success, or one of the following internal codes on failure:
 * * %NILFS_SEG_FAIL_MAGIC       - Magic number mismatch.
 * * %NILFS_SEG_FAIL_SEQ         - Sequence number mismatch.
 * * %NILFS_SEG_FAIL_CONSISTENCY - Block count out of range.
 * * %NILFS_SEG_FAIL_IO          - I/O error.
 * * %NILFS_SEG_FAIL_CHECKSUM_FULL - Full log checksum verification failed.
 */
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
                              struct buffer_head *bh_sum,
                              struct nilfs_segment_summary *sum)
{
        unsigned long nblock;
        u32 crc;
        int ret;

        ret = NILFS_SEG_FAIL_MAGIC;
        if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC)
                goto out;

        ret = NILFS_SEG_FAIL_SEQ;
        if (le64_to_cpu(sum->ss_seq) != seg_seq)
                goto out;

        nblock = le32_to_cpu(sum->ss_nblocks);
        ret = NILFS_SEG_FAIL_CONSISTENCY;
        if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment))
                /* This limits the number of blocks read in the CRC check */
                goto out;

        ret = NILFS_SEG_FAIL_IO;
        if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum),
                                   ((u64)nblock << nilfs->ns_blocksize_bits),
                                   bh_sum->b_blocknr, nblock))
                goto out;

        ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
        if (crc != le32_to_cpu(sum->ss_datasum))
                goto out;
        ret = 0;
out:
        return ret;
}

/**
 * nilfs_read_summary_info - read an item on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be read
 *
 * Return: Kernel space address of current segment summary entry, or
 * NULL if an I/O error occurs.
 */
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
                                     struct buffer_head **pbh,
                                     unsigned int *offset, unsigned int bytes)
{
        void *ptr;
        sector_t blocknr;

        BUG_ON((*pbh)->b_size < *offset);
        if (bytes > (*pbh)->b_size - *offset) {
                blocknr = (*pbh)->b_blocknr;
                brelse(*pbh);
                *pbh = __bread(nilfs->ns_bdev, blocknr + 1,
                               nilfs->ns_blocksize);
                if (unlikely(!*pbh))
                        return NULL;
                *offset = 0;
        }
        ptr = (*pbh)->b_data + *offset;
        *offset += bytes;
        return ptr;
}

/**
 * nilfs_skip_summary_info - skip items on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be skipped
 * @count: number of items to be skipped
 */
static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
                                    struct buffer_head **pbh,
                                    unsigned int *offset, unsigned int bytes,
                                    unsigned long count)
{
        unsigned int rest_item_in_current_block
                = ((*pbh)->b_size - *offset) / bytes;

        if (count <= rest_item_in_current_block) {
                *offset += bytes * count;
        } else {
                sector_t blocknr = (*pbh)->b_blocknr;
                unsigned int nitem_per_block = (*pbh)->b_size / bytes;
                unsigned int bcnt;

                count -= rest_item_in_current_block;
                bcnt = DIV_ROUND_UP(count, nitem_per_block);
                *offset = bytes * (count - (bcnt - 1) * nitem_per_block);

                brelse(*pbh);
                *pbh = __bread(nilfs->ns_bdev, blocknr + bcnt,
                               nilfs->ns_blocksize);
        }
}

/**
 * nilfs_scan_dsync_log - get block information of a log written for data sync
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: log summary information
 * @head: list head to add nilfs_recovery_block struct
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EIO    - I/O error.
 * * %-ENOMEM - Insufficient memory available.
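 *
 * Each file entry in the log summary is a struct nilfs_finfo followed by
 * per-block entries; for data blocks these are struct nilfs_binfo_v, from
 * which the nilfs_recovery_block items queued on @head are built.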
 */
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
                                struct nilfs_segment_summary *sum,
                                struct list_head *head)
{
        struct buffer_head *bh;
        unsigned int offset;
        u32 nfinfo, sumbytes;
        sector_t blocknr;
        ino_t ino;
        int err = -EIO;

        nfinfo = le32_to_cpu(sum->ss_nfinfo);
        if (!nfinfo)
                return 0;

        sumbytes = le32_to_cpu(sum->ss_sumbytes);
        blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize);
        bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
        if (unlikely(!bh))
                goto out;

        offset = le16_to_cpu(sum->ss_bytes);
        for (;;) {
                unsigned long nblocks, ndatablk, nnodeblk;
                struct nilfs_finfo *finfo;

                finfo = nilfs_read_summary_info(nilfs, &bh, &offset,
                                                sizeof(*finfo));
                if (unlikely(!finfo))
                        goto out;

                ino = le64_to_cpu(finfo->fi_ino);
                nblocks = le32_to_cpu(finfo->fi_nblocks);
                ndatablk = le32_to_cpu(finfo->fi_ndatablk);
                nnodeblk = nblocks - ndatablk;

                while (ndatablk-- > 0) {
                        struct nilfs_recovery_block *rb;
                        struct nilfs_binfo_v *binfo;

                        binfo = nilfs_read_summary_info(nilfs, &bh, &offset,
                                                        sizeof(*binfo));
                        if (unlikely(!binfo))
                                goto out;

                        rb = kmalloc(sizeof(*rb), GFP_NOFS);
                        if (unlikely(!rb)) {
                                err = -ENOMEM;
                                goto out;
                        }
                        rb->ino = ino;
                        rb->blocknr = blocknr++;
                        rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
                        rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
                        /* INIT_LIST_HEAD(&rb->list); */
                        list_add_tail(&rb->list, head);
                }
                if (--nfinfo == 0)
                        break;
                blocknr += nnodeblk; /* always 0 for data sync logs */
                nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64),
                                        nnodeblk);
                if (unlikely(!bh))
                        goto out;
        }
        err = 0;
out:
        brelse(bh); /* brelse(NULL) is just ignored */
        return err;
}

static void dispose_recovery_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct nilfs_recovery_block *rb;

                rb = list_first_entry(head, struct nilfs_recovery_block, list);
                list_del(&rb->list);
                kfree(rb);
        }
}

struct nilfs_segment_entry {
        struct list_head list;
        __u64 segnum;
};

static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
{
        struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);

        if (unlikely(!ent))
                return -ENOMEM;

        ent->segnum = segnum;
        INIT_LIST_HEAD(&ent->list);
        list_add_tail(&ent->list, head);
        return 0;
}

void nilfs_dispose_segment_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct nilfs_segment_entry *ent;

                ent = list_first_entry(head, struct nilfs_segment_entry, list);
                list_del(&ent->list);
                kfree(ent);
        }
}

static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
                                              struct super_block *sb,
                                              struct nilfs_recovery_info *ri)
{
        struct list_head *head = &ri->ri_used_segments;
        struct nilfs_segment_entry *ent, *n;
        struct inode *sufile = nilfs->ns_sufile;
        __u64 segnum[4];
        int err;
        int i;

        segnum[0] = nilfs->ns_segnum;
        segnum[1] = nilfs->ns_nextnum;
        segnum[2] = ri->ri_segnum;
        segnum[3] = ri->ri_nextnum;

        /*
         * Releasing the next segment of the latest super root.
         * The next segment is invalidated by this recovery.
         */
        err = nilfs_sufile_free(sufile, segnum[1]);
        if (unlikely(err)) {
                if (err == -ENOENT) {
                        nilfs_err(sb,
                                  "checkpoint log inconsistency at block %llu (segment %llu): next segment %llu is unallocated",
                                  (unsigned long long)nilfs->ns_last_pseg,
                                  (unsigned long long)nilfs->ns_segnum,
                                  (unsigned long long)segnum[1]);
                        err = -EINVAL;
                }
                goto failed;
        }

        for (i = 1; i < 4; i++) {
                err = nilfs_segment_list_add(head, segnum[i]);
                if (unlikely(err))
                        goto failed;
        }

        /*
         * Collecting segments written after the latest super root.
         * These are marked dirty to avoid being reallocated in the next write.
         */
        list_for_each_entry_safe(ent, n, head, list) {
                if (ent->segnum != segnum[0]) {
                        err = nilfs_sufile_scrap(sufile, ent->segnum);
                        if (unlikely(err))
                                goto failed;
                }
                list_del(&ent->list);
                kfree(ent);
        }

        /* Allocate new segments for recovery */
        err = nilfs_sufile_alloc(sufile, &segnum[0]);
        if (unlikely(err))
                goto failed;

        nilfs->ns_pseg_offset = 0;
        nilfs->ns_seg_seq = ri->ri_seq + 2;
        nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

failed:
        /* No need to recover sufile because it will be destroyed on error */
        return err;
}

static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
                                     struct nilfs_recovery_block *rb,
                                     loff_t pos, struct folio *folio)
{
        struct buffer_head *bh_org;
        size_t from = offset_in_folio(folio, pos);

        bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
        if (unlikely(!bh_org))
                return -EIO;

        memcpy_to_folio(folio, from, bh_org->b_data, bh_org->b_size);
        brelse(bh_org);
        return 0;
}

static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
                                      struct super_block *sb,
                                      struct nilfs_root *root,
                                      struct list_head *head,
                                      unsigned long *nr_salvaged_blocks)
{
        struct inode *inode;
        struct nilfs_recovery_block *rb, *n;
        unsigned int blocksize = nilfs->ns_blocksize;
        struct folio *folio;
        loff_t pos;
        int err = 0, err2 = 0;

        list_for_each_entry_safe(rb, n, head, list) {
                inode = nilfs_iget(sb, root, rb->ino);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        inode = NULL;
                        goto failed_inode;
                }

                pos = rb->blkoff << inode->i_blkbits;
                err = block_write_begin(inode->i_mapping, pos, blocksize,
                                        &folio, nilfs_get_block);
                if (unlikely(err)) {
                        loff_t isize = inode->i_size;

                        if (pos + blocksize > isize)
                                nilfs_write_failed(inode->i_mapping,
                                                   pos + blocksize);
                        goto failed_inode;
                }

                err = nilfs_recovery_copy_block(nilfs, rb, pos, folio);
                if (unlikely(err))
                        goto failed_folio;

                err = nilfs_set_file_dirty(inode, 1);
                if (unlikely(err))
                        goto failed_folio;

                block_write_end(pos, blocksize, blocksize, folio);

                folio_unlock(folio);
                folio_put(folio);

                (*nr_salvaged_blocks)++;
                goto next;

failed_folio:
                folio_unlock(folio);
                folio_put(folio);

failed_inode:
                nilfs_warn(sb,
                           "error %d recovering data block (ino=%lu, block-offset=%llu)",
                           err, (unsigned long)rb->ino,
                           (unsigned long long)rb->blkoff);
                if (!err2)
                        err2 = err;
next:
                iput(inode); /* iput(NULL) is just ignored */
                list_del_init(&rb->list);
                kfree(rb);
        }
        return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @root: NILFS root instance
 * @ri: pointer to a nilfs_recovery_info
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EINVAL - Log format error.
 * * %-EIO    - I/O error.
 * * %-ENOMEM - Insufficient memory available.
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
                                 struct super_block *sb,
                                 struct nilfs_root *root,
                                 struct nilfs_recovery_info *ri)
{
        struct buffer_head *bh_sum = NULL;
        struct nilfs_segment_summary *sum = NULL;
        sector_t pseg_start;
        sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
        unsigned long nsalvaged_blocks = 0;
        unsigned int flags;
        u64 seg_seq;
        __u64 segnum, nextnum = 0;
        int empty_seg = 0;
        int err = 0, ret;
        LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
        enum {
                RF_INIT_ST,
                RF_DSYNC_ST,   /* scanning data-sync segments */
        };
        int state = RF_INIT_ST;

        pseg_start = ri->ri_lsegs_start;
        seg_seq = ri->ri_lsegs_start_seq;
        segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
        nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

        while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
                brelse(bh_sum);
                bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
                if (!bh_sum) {
                        err = -EIO;
                        goto failed;
                }

                ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
                if (ret) {
                        if (ret == NILFS_SEG_FAIL_IO) {
                                err = -EIO;
                                goto failed;
                        }
                        goto strayed;
                }

                flags = le16_to_cpu(sum->ss_flags);
                if (flags & NILFS_SS_SR)
                        goto confused;

                /* Found a valid partial segment; do recovery actions */
                nextnum = nilfs_get_segnum_of_block(nilfs,
                                                    le64_to_cpu(sum->ss_next));
                empty_seg = 0;
                nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
                if (!(flags & NILFS_SS_GC))
                        nilfs->ns_nongc_ctime = nilfs->ns_ctime;

                switch (state) {
                case RF_INIT_ST:
                        if (!(flags & NILFS_SS_LOGBGN) ||
                            !(flags & NILFS_SS_SYNDT))
                                goto try_next_pseg;
                        state = RF_DSYNC_ST;
                        fallthrough;
                case RF_DSYNC_ST:
                        if (!(flags & NILFS_SS_SYNDT))
                                goto confused;

                        err = nilfs_scan_dsync_log(nilfs, pseg_start, sum,
                                                   &dsync_blocks);
                        if (unlikely(err))
                                goto failed;
                        if (flags & NILFS_SS_LOGEND) {
                                err = nilfs_recover_dsync_blocks(
                                        nilfs, sb, root, &dsync_blocks,
                                        &nsalvaged_blocks);
                                if (unlikely(err))
                                        goto failed;
                                state = RF_INIT_ST;
                        }
                        break; /* Fall through to try_next_pseg */
                }

try_next_pseg:
                if (pseg_start == ri->ri_lsegs_end)
                        break;
                pseg_start += le32_to_cpu(sum->ss_nblocks);
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

strayed:
                if (pseg_start == ri->ri_lsegs_end)
                        break;

feed_segment:
                /* Looking to the next full segment */
                if (empty_seg++)
                        break;
                seg_seq++;
                segnum = nextnum;
                nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
                pseg_start = seg_start;
        }

        if (nsalvaged_blocks) {
                nilfs_info(sb, "salvaged %lu blocks", nsalvaged_blocks);
                ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
        }
out:
        brelse(bh_sum);
        dispose_recovery_list(&dsync_blocks);
        return err;

confused:
        err = -EINVAL;
failed:
        nilfs_err(sb,
                  "error %d roll-forwarding partial segment at blocknr = %llu",
                  err, (unsigned long long)pseg_start);
        goto out;
}

static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
                                      struct nilfs_recovery_info *ri)
{
        struct buffer_head *bh;
        int err;

        if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
            nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
                return;

        bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
        if (WARN_ON(!bh))
                return; /* should never happen */

        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        set_buffer_dirty(bh);
        unlock_buffer(bh);

        err = sync_dirty_buffer(bh);
        if (unlikely(err))
                nilfs_warn(nilfs->ns_sb,
                           "buffer sync write failed during post-cleaning of recovery.");
        brelse(bh);
}

/**
 * nilfs_abort_roll_forward - clean up after a failed roll-forward recovery
 * @nilfs: nilfs object
 */
static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
{
        struct nilfs_inode_info *ii, *n;
        LIST_HEAD(head);

        /* Abandon inodes that have read recovery data */
        spin_lock(&nilfs->ns_inode_lock);
        list_splice_init(&nilfs->ns_dirty_files, &head);
        spin_unlock(&nilfs->ns_inode_lock);
        if (list_empty(&head))
                return;

        set_nilfs_purging(nilfs);
        list_for_each_entry_safe(ii, n, &head, i_dirty) {
                spin_lock(&nilfs->ns_inode_lock);
                list_del_init(&ii->i_dirty);
                spin_unlock(&nilfs->ns_inode_lock);

                iput(&ii->vfs_inode);
        }
        clear_nilfs_purging(nilfs);
}

/**
 * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EINVAL - Inconsistent filesystem state.
 * * %-EIO    - I/O error.
 * * %-ENOMEM - Insufficient memory available.
 * * %-ENOSPC - No space left on device (only in a panic state).
 * * %-ERESTARTSYS - Interrupted.
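 *
 * On success, blocks salvaged by roll forward are written out as a new log
 * through the log writer, and the segment usage metadata is updated so that
 * a freshly allocated segment is used for subsequent writes; if recovery
 * fails before the salvaged blocks are written out, the partially recovered
 * in-memory state is discarded with nilfs_abort_roll_forward().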
 */
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
                              struct super_block *sb,
                              struct nilfs_recovery_info *ri)
{
        struct nilfs_root *root;
        int err;

        if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
                return 0;

        err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
        if (unlikely(err)) {
                nilfs_err(sb, "error %d loading the latest checkpoint", err);
                return err;
        }

        err = nilfs_do_roll_forward(nilfs, sb, root, ri);
        if (unlikely(err))
                goto failed;

        if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
                err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
                if (unlikely(err)) {
                        nilfs_err(sb, "error %d preparing segment for recovery",
                                  err);
                        goto failed;
                }

                err = nilfs_attach_log_writer(sb, root);
                if (unlikely(err))
                        goto failed;

                set_nilfs_discontinued(nilfs);
                err = nilfs_construct_segment(sb);
                nilfs_detach_log_writer(sb);

                if (unlikely(err)) {
                        nilfs_err(sb, "error %d writing segment for recovery",
                                  err);
                        goto put_root;
                }

                nilfs_finish_roll_forward(nilfs, ri);
        }

put_root:
        nilfs_put_root(root);
        return err;

failed:
        nilfs_abort_roll_forward(nilfs);
        goto put_root;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: nilfs object
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super root in the logs,
 * starting from the partial segment pointed to by the superblock.  It sets
 * up struct the_nilfs through this search and fills @ri with the
 * information required for recovery.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EINVAL - No valid segment found.
 * * %-EIO    - I/O error.
 * * %-ENOMEM - Insufficient memory available.
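 *
 * The scan starts at the log pointed to by the superblock (ns_last_pseg)
 * and follows each valid log's ss_next pointer, remembering candidate
 * roll-forward logs along the way; it stops once a second consecutive full
 * segment yields no valid log.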
 */
int nilfs_search_super_root(struct the_nilfs *nilfs,
                            struct nilfs_recovery_info *ri)
{
        struct buffer_head *bh_sum = NULL;
        struct nilfs_segment_summary *sum = NULL;
        sector_t pseg_start, pseg_end, sr_pseg_start = 0;
        sector_t seg_start, seg_end;  /* range of full segment (block number) */
        sector_t b, end;
        unsigned long nblocks;
        unsigned int flags;
        u64 seg_seq;
        __u64 segnum, nextnum = 0;
        __u64 cno;
        LIST_HEAD(segments);
        int empty_seg = 0, scan_newer = 0;
        int ret;

        pseg_start = nilfs->ns_last_pseg;
        seg_seq = nilfs->ns_last_seq;
        cno = nilfs->ns_last_cno;
        segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

        /* Calculate range of segment */
        nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

        /* Read ahead segment */
        b = seg_start;
        while (b <= seg_end)
                __breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize);

        for (;;) {
                brelse(bh_sum);
                ret = NILFS_SEG_FAIL_IO;
                bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
                if (!bh_sum)
                        goto failed;

                ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
                if (ret) {
                        if (ret == NILFS_SEG_FAIL_IO)
                                goto failed;
                        goto strayed;
                }

                nblocks = le32_to_cpu(sum->ss_nblocks);
                pseg_end = pseg_start + nblocks - 1;
                if (unlikely(pseg_end > seg_end)) {
                        ret = NILFS_SEG_FAIL_CONSISTENCY;
                        goto strayed;
                }

                /* A valid partial segment */
                ri->ri_pseg_start = pseg_start;
                ri->ri_seq = seg_seq;
                ri->ri_segnum = segnum;
                nextnum = nilfs_get_segnum_of_block(nilfs,
                                                    le64_to_cpu(sum->ss_next));
                ri->ri_nextnum = nextnum;
                empty_seg = 0;

                flags = le16_to_cpu(sum->ss_flags);
                if (!(flags & NILFS_SS_SR) && !scan_newer) {
                        /*
                         * This will never happen because a superblock
                         * (last_segment) always points to a pseg with
                         * a super root.
                         */
                        ret = NILFS_SEG_FAIL_CONSISTENCY;
                        goto failed;
                }

                if (pseg_start == seg_start) {
                        nilfs_get_segment_range(nilfs, nextnum, &b, &end);
                        while (b <= end)
                                __breadahead(nilfs->ns_bdev, b++,
                                             nilfs->ns_blocksize);
                }
                if (!(flags & NILFS_SS_SR)) {
                        if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) {
                                ri->ri_lsegs_start = pseg_start;
                                ri->ri_lsegs_start_seq = seg_seq;
                        }
                        if (flags & NILFS_SS_LOGEND)
                                ri->ri_lsegs_end = pseg_start;
                        goto try_next_pseg;
                }

                /* A valid super root was found. */
                ri->ri_cno = cno++;
                ri->ri_super_root = pseg_end;
                ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

                nilfs_dispose_segment_list(&segments);
                sr_pseg_start = pseg_start;
                nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;
                nilfs->ns_seg_seq = seg_seq;
                nilfs->ns_segnum = segnum;
                nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
                nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
                nilfs->ns_nextnum = nextnum;

                if (scan_newer)
                        ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
                else {
                        if (nilfs->ns_mount_state & NILFS_VALID_FS)
                                goto super_root_found;
                        scan_newer = 1;
                }

try_next_pseg:
                /* Standing on a course, or met an inconsistent state */
                pseg_start += nblocks;
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

strayed:
                /* Off the trail */
                if (!scan_newer)
                        /*
                         * This can happen if a checkpoint was written without
                         * barriers, or as a result of an I/O failure.
                         */
                        goto failed;

feed_segment:
                /* Looking to the next full segment */
                if (empty_seg++)
                        goto super_root_found; /* found a valid super root */

                ret = nilfs_segment_list_add(&segments, segnum);
                if (unlikely(ret))
                        goto failed;

                seg_seq++;
                segnum = nextnum;
                nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
                pseg_start = seg_start;
        }

super_root_found:
        /* Updating pointers relating to the latest checkpoint */
        brelse(bh_sum);
        list_splice_tail(&segments, &ri->ri_used_segments);
        nilfs->ns_last_pseg = sr_pseg_start;
        nilfs->ns_last_seq = nilfs->ns_seg_seq;
        nilfs->ns_last_cno = ri->ri_cno;
        return 0;

failed:
        brelse(bh_sum);
        nilfs_dispose_segment_list(&segments);
        return ret < 0 ? ret : nilfs_warn_segment_error(nilfs->ns_sb, ret);
}
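
/*
 * Rough usage sketch (illustrative only, not part of the recovery API): a
 * mount-time caller is expected to drive the two exported entry points in
 * this order, with a zeroed nilfs_recovery_info carrying state from the
 * super root search into the orphan-log salvage:
 *
 *      memset(&ri, 0, sizeof(ri));
 *      INIT_LIST_HEAD(&ri.ri_used_segments);
 *      err = nilfs_search_super_root(nilfs, &ri);
 *      if (!err)
 *              err = nilfs_salvage_orphan_logs(nilfs, sb, &ri);
 *
 * nilfs_salvage_orphan_logs() is a no-op when the search recorded no orphan
 * logs (ri_lsegs_start/ri_lsegs_end are zero).  The actual mount path in
 * the_nilfs.c performs additional validity checks and disposes of the
 * recovery info afterwards.
 */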