/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_rmap_item.h"

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */
static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}
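
/*
 * Illustrative sizing example (hypothetical numbers, not tied to any
 * particular device): on a log device with 4k sectors, l_sectBBsize is 8
 * basic blocks.  A request for nbblks = 10 is first padded to 18 to cover
 * a non-sector-aligned starting offset, then rounded up to 24 basic
 * blocks, i.e. three whole log sectors.  A single-block request is not
 * padded and simply rounds up to one sector (8 basic blocks).
 */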
STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC char *
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_flags |= XBF_READ;
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	char		**offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	char		*offset)
{
	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}
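
/*
 * Worked example for the aligned read path above (hypothetical numbers):
 * with l_sectBBsize = 8, xlog_bread(log, 13, 2, bp, &offset) rounds the
 * start down to block 8 and the length up to 8 blocks, so the device read
 * covers blocks 8-15.  xlog_align() then returns b_addr + BBTOB(13 & 7),
 * i.e. the position of block 13 within the sector-aligned buffer.
 */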
/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
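
/*
 * Worked example of the search above (hypothetical cycle numbers): suppose
 * the per-block cycles are | 9 | 9 | 9 | 8 | 8 | with first_blk = 0,
 * *last_blk = 4 and cycle = 8.  mid_blk = 2 reads cycle 9, so first_blk
 * becomes 2; mid_blk = 3 reads cycle 8, so end_blk becomes 3; the loop
 * then terminates and *last_blk = 3, the first block stamped with cycle 8.
 */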
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset of the first block that
 * does carry that cycle number, or with -1 (an invalid block number)
 * if there is no such block in the range.  The scan needs to occur from
 * front to back and the pointer into the region must be updated since a
 * later routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}
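
/*
 * Scan shape, for illustration (hypothetical numbers): with nbblks = 24,
 * suppose the fallback loop settles on an 8-block buffer.  The scan then
 * issues reads of 8 blocks at start_blk, start_blk + 8 and start_blk + 16,
 * checking the cycle number stamped at the front of each basic block until
 * one matches stop_on_cycle_no or the range is exhausted.
 */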
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
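
/*
 * Worked example of the final check (hypothetical sizes): a record whose
 * header sits at block i, with one header block (xhdrs = 1) and h_len
 * covering five basic blocks, spans six blocks when complete.  If
 * *last_blk - i + extra_bblks == 6 the record was fully written and
 * *last_blk is left alone; any other value means the head points into a
 * partial record, so *last_blk is pulled back to the header block i.
 */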
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR writes contain complete transactions.  We only know that a cycle number
 * of current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}
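
/*
 * Wrap-around illustration for the two-part scan above (hypothetical, tiny
 * numbers): in an 8-block log with num_scan_bblks = 4 and a candidate
 * head_blk of 2, the first pass scans blocks 6-7 at the physical end for
 * cycle x - 1 and the second pass scans blocks 0-1 for cycle x.  Together
 * they cover the same 4-block window a non-wrapping scan would have.
 */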
/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided
 * number of records or hit the provided tail block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the
	 * first block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
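
/*
 * Wrap illustration (hypothetical numbers): searching backwards from
 * head_blk = 5 towards tail_blk = 100 in a 128-block log, the first loop
 * walks blocks 4 down to 0; if fewer than count headers were seen, the
 * second loop resumes at block 127 and walks down to the tail at block
 * 100, setting *wrapped for any header found in that half.
 */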
/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is
 * the number of records encountered or a negative error code. The log block
 * and buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Check the log tail for torn writes. This is required when torn writes are
 * detected at the head and the head had to be walked back to a previous
 * record. The tail of the previous record must now be verified to ensure the
 * torn writes didn't corrupt the previous tail.
 *
 * Return an error if CRC verification fails as recovery cannot proceed.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	struct xlog_rec_header	*thead;
	struct xfs_buf		*bp;
	xfs_daddr_t		first_bad;
	int			count;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_head;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	/*
	 * Seek XLOG_MAX_ICLOGS + 1 records past the current tail record to get
	 * a temporary head block that points after the last possible
	 * concurrently written record of the tail.
	 */
	count = xlog_seek_logrec_hdr(log, head_blk, tail_blk,
				     XLOG_MAX_ICLOGS + 1, bp, &tmp_head, &thead,
				     &wrapped);
	if (count < 0) {
		error = count;
		goto out;
	}

	/*
	 * If the call above didn't find XLOG_MAX_ICLOGS + 1 records, we ran
	 * into the actual log head. tmp_head points to the start of the record
	 * so update it to the actual head block.
	 */
	if (count < XLOG_MAX_ICLOGS + 1)
		tmp_head = head_blk;

	/*
	 * We now have a tail and temporary head block that covers at least
	 * XLOG_MAX_ICLOGS records from the tail. We need to verify that these
	 * records were completely written. Run a CRC verification pass from
	 * tail to head and return the result.
	 */
	error = xlog_do_recovery_pass(log, tmp_head, tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);

out:
	xlog_put_bp(bp);
	return error;
}
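
/*
 * Sizing note (illustrative): XLOG_MAX_ICLOGS bounds how many in-core log
 * buffers, and therefore how many log records, can be in flight at once.
 * Seeking XLOG_MAX_ICLOGS + 1 headers forward from the tail record thus
 * gives the CRC pass a window that spans every record which could have
 * been written concurrently with the tail.
 */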
/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in
 * the log in the event of a crash. Our only means to detect this scenario is
 * via CRC verification. While we can't always be certain that CRC
 * verification failure is due to a torn write vs. an unrelated corruption,
 * we do know that only a certain number (XLOG_MAX_ICLOGS) of log records can
 * be written out at one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS
 * records at the head of the log and treat failures in this range as torn
 * writes as a matter of policy. In the event of CRC failure, the head is
 * walked back to the last good record in the log and the tail is updated
 * from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	struct xfs_buf		*bp,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	struct xfs_buf		*tmp_bp;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer
	 * so we don't trash the rhead/bp pointers from the caller.
	 */
	tmp_bp = xlog_get_bp(log, 1);
	if (!tmp_bp)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
				      &tmp_rhead, &tmp_wrapped);
	xlog_put_bp(tmp_bp);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if (error == -EFSBADCRC) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
					      rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}

		/*
		 * Now verify the tail based on the updated head. This is
		 * required because the torn writes trimmed from the head could
		 * have been written over the tail of a previous record. Return
		 * any errors since recovery cannot proceed if the tail is
		 * corrupt.
		 *
		 * XXX: This leaves a gap in truly robust protection from torn
		 * writes in the log. If the head is behind the tail, the tail
		 * pushes forward to create some space, and then a crash occurs
		 * causing the writes into the previous record's tail region to
		 * tear, then log recovery isn't able to recover.
		 *
		 * How likely is this to occur? If possible, can we do
		 * something more intelligent here? Is it safe to push the tail
		 * forward if we can determine that the tail is within the
		 * range of the torn write (e.g., the kernel can only overwrite
		 * the tail if it has actually been pushed forward)?
		 * Alternatively, could we somehow prevent this condition at
		 * runtime?
		 */
		error = xlog_verify_tail(log, *head_blk, *tail_blk);
	}

	return error;
}
/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	struct xfs_buf		*bp,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since the record could sit in the last block of the
	 * physical log, we convert to a log block before comparing to the
	 * head_blk.
	 *
	 * Save the current tail lsn to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one,
	 * so we pass the lsn of the unmount record rather than the block
	 * after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = rhead_blk + hblks;
		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}
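
/*
 * Worked example (hypothetical geometry): for a v2 log with h_size equal to
 * XLOG_HEADER_CYCLE_SIZE, hblks is 1.  An unmount record header at
 * rhead_blk = 10 with h_len of 512 bytes gives after_umount_blk = 12, so
 * the log is considered clean only if head_blk is also 12, the record
 * carries a single log operation, and that op has XLOG_UNMOUNT_TRANS set.
 */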
static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	xfs_buf_t		*bp;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something
	 * is seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EIO;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, bp, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, bp,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, bp,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is read-only, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = -EINVAL;
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the
	 * maximum is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}
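
/*
 * Illustration of the partially zeroed case (hypothetical cycles): a log
 * written part way through its first pass looks like
 *        | 1 | 1 | 1 | 0 | 0 | ... | 0 |
 * Here first_cycle = 1 and last_cycle = 0, so the binary search hunts for
 * the first cycle-0 block, and the verification passes then pull last_blk
 * back over any stray cycle-0 blocks or a partial final record.
 */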
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
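
/*
 * The result is a minimal, zero-length record header: one basic block
 * whose only meaningful contents are the record magic, the cycle number
 * the clearing pass wants future readers to see, and lsns locating the
 * block and the preserved tail.  Everything else in the block stays
 * zeroed by the memset above.
 */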
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
							bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_put_bp:
	xlog_put_bp(bp);
	return error;
}
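
/*
 * Partial-sector handling, for illustration (hypothetical numbers): with
 * sectbb = 8 and start_block = 10, balign is 8, so the sector holding
 * blocks 8-15 is pre-read and j = 2, and the pre-read contents of blocks
 * 8-9 survive the subsequent whole-sector write; likewise the ealign
 * pre-read at the other end preserves any blocks past end_block that share
 * the final sector, since the actual writes are always whole log sectors.
 */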
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
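
/*
 * Worked example of the wrapping case (hypothetical numbers): in a
 * 1000-block log with head_block = 900 and max_distance = 256, the range
 * to stomp runs past the physical end, so the first call clears the 100
 * blocks 900-999 with cycle n - 1 and the second clears the remaining 156
 * blocks starting at block 0 with cycle n.
 */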
Inode unlink buffers must be replayed after inode items are replayed. 1851 * This ensures that inodes are completely flushed to the inode buffer 1852 * in a "free" state before we remove the unlinked inode list pointer. 1853 * 1854 * Hence the ordering needs to be inode allocation buffers first, inode items 1855 * second, inode unlink buffers third and cancelled buffers last. 1856 * 1857 * But there's a problem with that - we can't tell an inode allocation buffer 1858 * apart from a regular buffer, so we can't separate them. We can, however, 1859 * tell an inode unlink buffer from the others, and so we can separate them out 1860 * from all the other buffers and move them to last. 1861 * 1862 * Hence, 4 lists, in order from head to tail: 1863 * - buffer_list for all buffers except cancelled/inode unlink buffers 1864 * - item_list for all non-buffer items 1865 * - inode_buffer_list for inode unlink buffers 1866 * - cancel_list for the cancelled buffers 1867 * 1868 * Note that we add objects to the tail of the lists so that first-to-last 1869 * ordering is preserved within the lists. Adding objects to the head of the 1870 * list means when we traverse from the head we walk them in last-to-first 1871 * order. For cancelled buffers and inode unlink buffers this doesn't matter, 1872 * but for all other items there may be specific ordering that we need to 1873 * preserve. 1874 */ 1875 STATIC int 1876 xlog_recover_reorder_trans( 1877 struct xlog *log, 1878 struct xlog_recover *trans, 1879 int pass) 1880 { 1881 xlog_recover_item_t *item, *n; 1882 int error = 0; 1883 LIST_HEAD(sort_list); 1884 LIST_HEAD(cancel_list); 1885 LIST_HEAD(buffer_list); 1886 LIST_HEAD(inode_buffer_list); 1887 LIST_HEAD(inode_list); 1888 1889 list_splice_init(&trans->r_itemq, &sort_list); 1890 list_for_each_entry_safe(item, n, &sort_list, ri_list) { 1891 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 1892 1893 switch (ITEM_TYPE(item)) { 1894 case XFS_LI_ICREATE: 1895 list_move_tail(&item->ri_list, &buffer_list); 1896 break; 1897 case XFS_LI_BUF: 1898 if (buf_f->blf_flags & XFS_BLF_CANCEL) { 1899 trace_xfs_log_recover_item_reorder_head(log, 1900 trans, item, pass); 1901 list_move(&item->ri_list, &cancel_list); 1902 break; 1903 } 1904 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { 1905 list_move(&item->ri_list, &inode_buffer_list); 1906 break; 1907 } 1908 list_move_tail(&item->ri_list, &buffer_list); 1909 break; 1910 case XFS_LI_INODE: 1911 case XFS_LI_DQUOT: 1912 case XFS_LI_QUOTAOFF: 1913 case XFS_LI_EFD: 1914 case XFS_LI_EFI: 1915 case XFS_LI_RUI: 1916 case XFS_LI_RUD: 1917 trace_xfs_log_recover_item_reorder_tail(log, 1918 trans, item, pass); 1919 list_move_tail(&item->ri_list, &inode_list); 1920 break; 1921 default: 1922 xfs_warn(log->l_mp, 1923 "%s: unrecognized type of log operation", 1924 __func__); 1925 ASSERT(0); 1926 /* 1927 * return the remaining items back to the transaction 1928 * item list so they can be freed in caller. 
1929 */ 1930 if (!list_empty(&sort_list)) 1931 list_splice_init(&sort_list, &trans->r_itemq); 1932 error = -EIO; 1933 goto out; 1934 } 1935 } 1936 out: 1937 ASSERT(list_empty(&sort_list)); 1938 if (!list_empty(&buffer_list)) 1939 list_splice(&buffer_list, &trans->r_itemq); 1940 if (!list_empty(&inode_list)) 1941 list_splice_tail(&inode_list, &trans->r_itemq); 1942 if (!list_empty(&inode_buffer_list)) 1943 list_splice_tail(&inode_buffer_list, &trans->r_itemq); 1944 if (!list_empty(&cancel_list)) 1945 list_splice_tail(&cancel_list, &trans->r_itemq); 1946 return error; 1947 } 1948 1949 /* 1950 * Build up the table of buf cancel records so that we don't replay 1951 * cancelled data in the second pass. For buffer records that are 1952 * not cancel records, there is nothing to do here so we just return. 1953 * 1954 * If we get a cancel record which is already in the table, this indicates 1955 * that the buffer was cancelled multiple times. In order to ensure 1956 * that during pass 2 we keep the record in the table until we reach its 1957 * last occurrence in the log, we keep a reference count in the cancel 1958 * record in the table to tell us how many times we expect to see this 1959 * record during the second pass. 1960 */ 1961 STATIC int 1962 xlog_recover_buffer_pass1( 1963 struct xlog *log, 1964 struct xlog_recover_item *item) 1965 { 1966 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 1967 struct list_head *bucket; 1968 struct xfs_buf_cancel *bcp; 1969 1970 /* 1971 * If this isn't a cancel buffer item, then just return. 1972 */ 1973 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) { 1974 trace_xfs_log_recover_buf_not_cancel(log, buf_f); 1975 return 0; 1976 } 1977 1978 /* 1979 * Insert an xfs_buf_cancel record into the hash table of them. 1980 * If there is already an identical record, bump its reference count. 1981 */ 1982 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno); 1983 list_for_each_entry(bcp, bucket, bc_list) { 1984 if (bcp->bc_blkno == buf_f->blf_blkno && 1985 bcp->bc_len == buf_f->blf_len) { 1986 bcp->bc_refcount++; 1987 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f); 1988 return 0; 1989 } 1990 } 1991 1992 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP); 1993 bcp->bc_blkno = buf_f->blf_blkno; 1994 bcp->bc_len = buf_f->blf_len; 1995 bcp->bc_refcount = 1; 1996 list_add_tail(&bcp->bc_list, bucket); 1997 1998 trace_xfs_log_recover_buf_cancel_add(log, buf_f); 1999 return 0; 2000 } 2001 2002 /* 2003 * Check to see whether the buffer being recovered has a corresponding 2004 * entry in the buffer cancel record table. If it is, return the cancel 2005 * buffer structure to the caller. 2006 */ 2007 STATIC struct xfs_buf_cancel * 2008 xlog_peek_buffer_cancelled( 2009 struct xlog *log, 2010 xfs_daddr_t blkno, 2011 uint len, 2012 ushort flags) 2013 { 2014 struct list_head *bucket; 2015 struct xfs_buf_cancel *bcp; 2016 2017 if (!log->l_buf_cancel_table) { 2018 /* empty table means no cancelled buffers in the log */ 2019 ASSERT(!(flags & XFS_BLF_CANCEL)); 2020 return NULL; 2021 } 2022 2023 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno); 2024 list_for_each_entry(bcp, bucket, bc_list) { 2025 if (bcp->bc_blkno == blkno && bcp->bc_len == len) 2026 return bcp; 2027 } 2028 2029 /* 2030 * We didn't find a corresponding entry in the table, so return 0 so 2031 * that the buffer is NOT cancelled. 
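 *
 * (Illustration of the refcounting described above: a buffer cancelled
 * three times in the log gets bc_refcount = 3 during pass 1;
 * xlog_check_buffer_cancelled() below then drops one reference per
 * cancel item seen in pass 2 and frees the record at zero, so any
 * later reuse of those blocks is replayed normally.)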
2032 */ 2033 ASSERT(!(flags & XFS_BLF_CANCEL)); 2034 return NULL; 2035 } 2036 2037 /* 2038 * If the buffer is being cancelled then return 1 so that it will be cancelled, 2039 * otherwise return 0. If the buffer is actually a buffer cancel item 2040 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the 2041 * table and remove it from the table if this is the last reference. 2042 * 2043 * We remove the cancel record from the table when we encounter its last 2044 * occurrence in the log so that if the same buffer is re-used again after its 2045 * last cancellation we actually replay the changes made at that point. 2046 */ 2047 STATIC int 2048 xlog_check_buffer_cancelled( 2049 struct xlog *log, 2050 xfs_daddr_t blkno, 2051 uint len, 2052 ushort flags) 2053 { 2054 struct xfs_buf_cancel *bcp; 2055 2056 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags); 2057 if (!bcp) 2058 return 0; 2059 2060 /* 2061 * We've got a match, so return 1 so that the recovery of this buffer 2062 * is cancelled. If this buffer is actually a buffer cancel log 2063 * item, then decrement the refcount on the one in the table and 2064 * remove it if this is the last reference. 2065 */ 2066 if (flags & XFS_BLF_CANCEL) { 2067 if (--bcp->bc_refcount == 0) { 2068 list_del(&bcp->bc_list); 2069 kmem_free(bcp); 2070 } 2071 } 2072 return 1; 2073 } 2074 2075 /* 2076 * Perform recovery for a buffer full of inodes. In these buffers, the only 2077 * data which should be recovered is that which corresponds to the 2078 * di_next_unlinked pointers in the on disk inode structures. The rest of the 2079 * data for the inodes is always logged through the inodes themselves rather 2080 * than the inode buffer and is recovered in xlog_recover_inode_pass2(). 2081 * 2082 * The only time when buffers full of inodes are fully recovered is when the 2083 * buffer is full of newly allocated inodes. In this case the buffer will 2084 * not be marked as an inode buffer and so will be sent to 2085 * xlog_recover_do_reg_buffer() below during recovery. 2086 */ 2087 STATIC int 2088 xlog_recover_do_inode_buffer( 2089 struct xfs_mount *mp, 2090 xlog_recover_item_t *item, 2091 struct xfs_buf *bp, 2092 xfs_buf_log_format_t *buf_f) 2093 { 2094 int i; 2095 int item_index = 0; 2096 int bit = 0; 2097 int nbits = 0; 2098 int reg_buf_offset = 0; 2099 int reg_buf_bytes = 0; 2100 int next_unlinked_offset; 2101 int inodes_per_buf; 2102 xfs_agino_t *logged_nextp; 2103 xfs_agino_t *buffer_nextp; 2104 2105 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); 2106 2107 /* 2108 * Post recovery validation only works properly on CRC enabled 2109 * filesystems. 2110 */ 2111 if (xfs_sb_version_hascrc(&mp->m_sb)) 2112 bp->b_ops = &xfs_inode_buf_ops; 2113 2114 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog; 2115 for (i = 0; i < inodes_per_buf; i++) { 2116 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + 2117 offsetof(xfs_dinode_t, di_next_unlinked); 2118 2119 while (next_unlinked_offset >= 2120 (reg_buf_offset + reg_buf_bytes)) { 2121 /* 2122 * The next di_next_unlinked field is beyond 2123 * the current logged region. Find the next 2124 * logged region that contains or is beyond 2125 * the current di_next_unlinked field. 2126 */ 2127 bit += nbits; 2128 bit = xfs_next_bit(buf_f->blf_data_map, 2129 buf_f->blf_map_size, bit); 2130 2131 /* 2132 * If there are no more logged regions in the 2133 * buffer, then we're done. 
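 *
 * (Example of the region arithmetic below, with hypothetical values:
 * logged regions are tracked in XFS_BLF_CHUNK (128 byte) units, so
 * bit = 4 and nbits = 2 describe a region starting at
 * reg_buf_offset = 4 << XFS_BLF_SHIFT = 512 bytes and covering
 * reg_buf_bytes = 256 bytes of the buffer.)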
2134 */ 2135 if (bit == -1) 2136 return 0; 2137 2138 nbits = xfs_contig_bits(buf_f->blf_data_map, 2139 buf_f->blf_map_size, bit); 2140 ASSERT(nbits > 0); 2141 reg_buf_offset = bit << XFS_BLF_SHIFT; 2142 reg_buf_bytes = nbits << XFS_BLF_SHIFT; 2143 item_index++; 2144 } 2145 2146 /* 2147 * If the current logged region starts after the current 2148 * di_next_unlinked field, then move on to the next 2149 * di_next_unlinked field. 2150 */ 2151 if (next_unlinked_offset < reg_buf_offset) 2152 continue; 2153 2154 ASSERT(item->ri_buf[item_index].i_addr != NULL); 2155 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); 2156 ASSERT((reg_buf_offset + reg_buf_bytes) <= 2157 BBTOB(bp->b_io_length)); 2158 2159 /* 2160 * The current logged region contains a copy of the 2161 * current di_next_unlinked field. Extract its value 2162 * and copy it to the buffer copy. 2163 */ 2164 logged_nextp = item->ri_buf[item_index].i_addr + 2165 next_unlinked_offset - reg_buf_offset; 2166 if (unlikely(*logged_nextp == 0)) { 2167 xfs_alert(mp, 2168 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). " 2169 "Trying to replay bad (0) inode di_next_unlinked field.", 2170 item, bp); 2171 XFS_ERROR_REPORT("xlog_recover_do_inode_buf", 2172 XFS_ERRLEVEL_LOW, mp); 2173 return -EFSCORRUPTED; 2174 } 2175 2176 buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset); 2177 *buffer_nextp = *logged_nextp; 2178 2179 /* 2180 * If necessary, recalculate the CRC in the on-disk inode. We 2181 * have to leave the inode in a consistent state for whoever 2182 * reads it next.... 2183 */ 2184 xfs_dinode_calc_crc(mp, 2185 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); 2186 2187 } 2188 2189 return 0; 2190 } 2191 2192 /* 2193 * V5 filesystems know the age of the buffer on disk being recovered. We can 2194 * have newer objects on disk than we are replaying, and so for these cases we 2195 * don't want to replay the current change as that will make the buffer contents 2196 * temporarily invalid on disk. 2197 * 2198 * The magic number might not match the buffer type we are going to recover 2199 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence 2200 * extract the LSN of the existing object in the buffer based on its current 2201 * magic number. If we don't recognise the magic number in the buffer, then 2202 * return an LSN of -1 so that the caller knows it was an unrecognised block and 2203 * so can recover the buffer. 2204 * 2205 * Note: we cannot rely solely on magic number matches to determine that the 2206 * buffer has a valid LSN - we also need to verify that it belongs to this 2207 * filesystem, so we need to extract the object's LSN and compare it to that 2208 * which we read from the superblock. If the UUIDs don't match, then we've got a 2209 * stale metadata block from an old filesystem instance that we need to recover 2210 * over the top of. 
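 *
 * (Sketch of the resulting decision: a recognised magic number with a
 * matching UUID returns the object's LSN for comparison against the
 * transaction being replayed; a recognised magic number with a
 * mismatched UUID, or an unrecognised block, returns -1 and the
 * buffer is recovered unconditionally.)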
2211 */ 2212 static xfs_lsn_t 2213 xlog_recover_get_buf_lsn( 2214 struct xfs_mount *mp, 2215 struct xfs_buf *bp) 2216 { 2217 __uint32_t magic32; 2218 __uint16_t magic16; 2219 __uint16_t magicda; 2220 void *blk = bp->b_addr; 2221 uuid_t *uuid; 2222 xfs_lsn_t lsn = -1; 2223 2224 /* v4 filesystems always recover immediately */ 2225 if (!xfs_sb_version_hascrc(&mp->m_sb)) 2226 goto recover_immediately; 2227 2228 magic32 = be32_to_cpu(*(__be32 *)blk); 2229 switch (magic32) { 2230 case XFS_ABTB_CRC_MAGIC: 2231 case XFS_ABTC_CRC_MAGIC: 2232 case XFS_ABTB_MAGIC: 2233 case XFS_ABTC_MAGIC: 2234 case XFS_RMAP_CRC_MAGIC: 2235 case XFS_IBT_CRC_MAGIC: 2236 case XFS_IBT_MAGIC: { 2237 struct xfs_btree_block *btb = blk; 2238 2239 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn); 2240 uuid = &btb->bb_u.s.bb_uuid; 2241 break; 2242 } 2243 case XFS_BMAP_CRC_MAGIC: 2244 case XFS_BMAP_MAGIC: { 2245 struct xfs_btree_block *btb = blk; 2246 2247 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn); 2248 uuid = &btb->bb_u.l.bb_uuid; 2249 break; 2250 } 2251 case XFS_AGF_MAGIC: 2252 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); 2253 uuid = &((struct xfs_agf *)blk)->agf_uuid; 2254 break; 2255 case XFS_AGFL_MAGIC: 2256 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); 2257 uuid = &((struct xfs_agfl *)blk)->agfl_uuid; 2258 break; 2259 case XFS_AGI_MAGIC: 2260 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); 2261 uuid = &((struct xfs_agi *)blk)->agi_uuid; 2262 break; 2263 case XFS_SYMLINK_MAGIC: 2264 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); 2265 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid; 2266 break; 2267 case XFS_DIR3_BLOCK_MAGIC: 2268 case XFS_DIR3_DATA_MAGIC: 2269 case XFS_DIR3_FREE_MAGIC: 2270 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); 2271 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; 2272 break; 2273 case XFS_ATTR3_RMT_MAGIC: 2274 /* 2275 * Remote attr blocks are written synchronously, rather than 2276 * being logged. That means they do not contain a valid LSN 2277 * (i.e. transactionally ordered) in them, and hence any time we 2278 * see a buffer to replay over the top of a remote attribute 2279 * block we should simply do so. 2280 */ 2281 goto recover_immediately; 2282 case XFS_SB_MAGIC: 2283 /* 2284 * superblock uuids are magic. We may or may not have a 2285 * sb_meta_uuid on disk, but it will be set in the in-core 2286 * superblock. We set the uuid pointer for verification 2287 * according to the superblock feature mask to ensure we check 2288 * the relevant UUID in the superblock. 
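 *
 * (For example, a filesystem whose user-visible UUID was changed with
 * xfs_admin -U keeps its original sb_meta_uuid, so with the metauuid
 * feature set the comparison below still recognises its superblock
 * rather than treating it as stale.)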
2289 */ 2290 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 2291 if (xfs_sb_version_hasmetauuid(&mp->m_sb)) 2292 uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid; 2293 else 2294 uuid = &((struct xfs_dsb *)blk)->sb_uuid; 2295 break; 2296 default: 2297 break; 2298 } 2299 2300 if (lsn != (xfs_lsn_t)-1) { 2301 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid)) 2302 goto recover_immediately; 2303 return lsn; 2304 } 2305 2306 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); 2307 switch (magicda) { 2308 case XFS_DIR3_LEAF1_MAGIC: 2309 case XFS_DIR3_LEAFN_MAGIC: 2310 case XFS_DA3_NODE_MAGIC: 2311 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); 2312 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; 2313 break; 2314 default: 2315 break; 2316 } 2317 2318 if (lsn != (xfs_lsn_t)-1) { 2319 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) 2320 goto recover_immediately; 2321 return lsn; 2322 } 2323 2324 /* 2325 * We do individual object checks on dquot and inode buffers as they 2326 * have their own individual LSN records. Also, we could have a stale 2327 * buffer here, so we have to at least recognise these buffer types. 2328 * 2329 * A noted complexity here is inode unlinked list processing - it logs 2330 * the inode directly in the buffer, but we don't know which inodes have 2331 * been modified, and there is no global buffer LSN. Hence we need to 2332 * recover all inode buffer types immediately. This problem will be 2333 * fixed by logical logging of the unlinked list modifications. 2334 */ 2335 magic16 = be16_to_cpu(*(__be16 *)blk); 2336 switch (magic16) { 2337 case XFS_DQUOT_MAGIC: 2338 case XFS_DINODE_MAGIC: 2339 goto recover_immediately; 2340 default: 2341 break; 2342 } 2343 2344 /* unknown buffer contents, recover immediately */ 2345 2346 recover_immediately: 2347 return (xfs_lsn_t)-1; 2348 2349 } 2350 2351 /* 2352 * Validate the recovered buffer is of the correct type and attach the 2353 * appropriate buffer operations to it for writeback. Magic numbers are in a 2354 * few places: 2355 * the first 16 bits of the buffer (inode buffer, dquot buffer), 2356 * the first 32 bits of the buffer (most blocks), 2357 * inside a struct xfs_da_blkinfo at the start of the buffer. 2358 */ 2359 static void 2360 xlog_recover_validate_buf_type( 2361 struct xfs_mount *mp, 2362 struct xfs_buf *bp, 2363 xfs_buf_log_format_t *buf_f) 2364 { 2365 struct xfs_da_blkinfo *info = bp->b_addr; 2366 __uint32_t magic32; 2367 __uint16_t magic16; 2368 __uint16_t magicda; 2369 2370 /* 2371 * We can only do post recovery validation on items on CRC enabled 2372 * filesystems as we need to know when the buffer was written to be able 2373 * to determine if we should have replayed the item. If we replay old 2374 * metadata over a newer buffer, then it will enter a temporarily 2375 * inconsistent state resulting in verification failures. 
Hence for now 2376 * just avoid the verification stage for non-crc filesystems 2377 */ 2378 if (!xfs_sb_version_hascrc(&mp->m_sb)) 2379 return; 2380 2381 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); 2382 magic16 = be16_to_cpu(*(__be16*)bp->b_addr); 2383 magicda = be16_to_cpu(info->magic); 2384 switch (xfs_blft_from_flags(buf_f)) { 2385 case XFS_BLFT_BTREE_BUF: 2386 switch (magic32) { 2387 case XFS_ABTB_CRC_MAGIC: 2388 case XFS_ABTC_CRC_MAGIC: 2389 case XFS_ABTB_MAGIC: 2390 case XFS_ABTC_MAGIC: 2391 bp->b_ops = &xfs_allocbt_buf_ops; 2392 break; 2393 case XFS_IBT_CRC_MAGIC: 2394 case XFS_FIBT_CRC_MAGIC: 2395 case XFS_IBT_MAGIC: 2396 case XFS_FIBT_MAGIC: 2397 bp->b_ops = &xfs_inobt_buf_ops; 2398 break; 2399 case XFS_BMAP_CRC_MAGIC: 2400 case XFS_BMAP_MAGIC: 2401 bp->b_ops = &xfs_bmbt_buf_ops; 2402 break; 2403 case XFS_RMAP_CRC_MAGIC: 2404 bp->b_ops = &xfs_rmapbt_buf_ops; 2405 break; 2406 default: 2407 xfs_warn(mp, "Bad btree block magic!"); 2408 ASSERT(0); 2409 break; 2410 } 2411 break; 2412 case XFS_BLFT_AGF_BUF: 2413 if (magic32 != XFS_AGF_MAGIC) { 2414 xfs_warn(mp, "Bad AGF block magic!"); 2415 ASSERT(0); 2416 break; 2417 } 2418 bp->b_ops = &xfs_agf_buf_ops; 2419 break; 2420 case XFS_BLFT_AGFL_BUF: 2421 if (magic32 != XFS_AGFL_MAGIC) { 2422 xfs_warn(mp, "Bad AGFL block magic!"); 2423 ASSERT(0); 2424 break; 2425 } 2426 bp->b_ops = &xfs_agfl_buf_ops; 2427 break; 2428 case XFS_BLFT_AGI_BUF: 2429 if (magic32 != XFS_AGI_MAGIC) { 2430 xfs_warn(mp, "Bad AGI block magic!"); 2431 ASSERT(0); 2432 break; 2433 } 2434 bp->b_ops = &xfs_agi_buf_ops; 2435 break; 2436 case XFS_BLFT_UDQUOT_BUF: 2437 case XFS_BLFT_PDQUOT_BUF: 2438 case XFS_BLFT_GDQUOT_BUF: 2439 #ifdef CONFIG_XFS_QUOTA 2440 if (magic16 != XFS_DQUOT_MAGIC) { 2441 xfs_warn(mp, "Bad DQUOT block magic!"); 2442 ASSERT(0); 2443 break; 2444 } 2445 bp->b_ops = &xfs_dquot_buf_ops; 2446 #else 2447 xfs_alert(mp, 2448 "Trying to recover dquots without QUOTA support built in!"); 2449 ASSERT(0); 2450 #endif 2451 break; 2452 case XFS_BLFT_DINO_BUF: 2453 if (magic16 != XFS_DINODE_MAGIC) { 2454 xfs_warn(mp, "Bad INODE block magic!"); 2455 ASSERT(0); 2456 break; 2457 } 2458 bp->b_ops = &xfs_inode_buf_ops; 2459 break; 2460 case XFS_BLFT_SYMLINK_BUF: 2461 if (magic32 != XFS_SYMLINK_MAGIC) { 2462 xfs_warn(mp, "Bad symlink block magic!"); 2463 ASSERT(0); 2464 break; 2465 } 2466 bp->b_ops = &xfs_symlink_buf_ops; 2467 break; 2468 case XFS_BLFT_DIR_BLOCK_BUF: 2469 if (magic32 != XFS_DIR2_BLOCK_MAGIC && 2470 magic32 != XFS_DIR3_BLOCK_MAGIC) { 2471 xfs_warn(mp, "Bad dir block magic!"); 2472 ASSERT(0); 2473 break; 2474 } 2475 bp->b_ops = &xfs_dir3_block_buf_ops; 2476 break; 2477 case XFS_BLFT_DIR_DATA_BUF: 2478 if (magic32 != XFS_DIR2_DATA_MAGIC && 2479 magic32 != XFS_DIR3_DATA_MAGIC) { 2480 xfs_warn(mp, "Bad dir data magic!"); 2481 ASSERT(0); 2482 break; 2483 } 2484 bp->b_ops = &xfs_dir3_data_buf_ops; 2485 break; 2486 case XFS_BLFT_DIR_FREE_BUF: 2487 if (magic32 != XFS_DIR2_FREE_MAGIC && 2488 magic32 != XFS_DIR3_FREE_MAGIC) { 2489 xfs_warn(mp, "Bad dir3 free magic!"); 2490 ASSERT(0); 2491 break; 2492 } 2493 bp->b_ops = &xfs_dir3_free_buf_ops; 2494 break; 2495 case XFS_BLFT_DIR_LEAF1_BUF: 2496 if (magicda != XFS_DIR2_LEAF1_MAGIC && 2497 magicda != XFS_DIR3_LEAF1_MAGIC) { 2498 xfs_warn(mp, "Bad dir leaf1 magic!"); 2499 ASSERT(0); 2500 break; 2501 } 2502 bp->b_ops = &xfs_dir3_leaf1_buf_ops; 2503 break; 2504 case XFS_BLFT_DIR_LEAFN_BUF: 2505 if (magicda != XFS_DIR2_LEAFN_MAGIC && 2506 magicda != XFS_DIR3_LEAFN_MAGIC) { 2507 xfs_warn(mp, "Bad dir leafn magic!"); 2508 
ASSERT(0); 2509 break; 2510 } 2511 bp->b_ops = &xfs_dir3_leafn_buf_ops; 2512 break; 2513 case XFS_BLFT_DA_NODE_BUF: 2514 if (magicda != XFS_DA_NODE_MAGIC && 2515 magicda != XFS_DA3_NODE_MAGIC) { 2516 xfs_warn(mp, "Bad da node magic!"); 2517 ASSERT(0); 2518 break; 2519 } 2520 bp->b_ops = &xfs_da3_node_buf_ops; 2521 break; 2522 case XFS_BLFT_ATTR_LEAF_BUF: 2523 if (magicda != XFS_ATTR_LEAF_MAGIC && 2524 magicda != XFS_ATTR3_LEAF_MAGIC) { 2525 xfs_warn(mp, "Bad attr leaf magic!"); 2526 ASSERT(0); 2527 break; 2528 } 2529 bp->b_ops = &xfs_attr3_leaf_buf_ops; 2530 break; 2531 case XFS_BLFT_ATTR_RMT_BUF: 2532 if (magic32 != XFS_ATTR3_RMT_MAGIC) { 2533 xfs_warn(mp, "Bad attr remote magic!"); 2534 ASSERT(0); 2535 break; 2536 } 2537 bp->b_ops = &xfs_attr3_rmt_buf_ops; 2538 break; 2539 case XFS_BLFT_SB_BUF: 2540 if (magic32 != XFS_SB_MAGIC) { 2541 xfs_warn(mp, "Bad SB block magic!"); 2542 ASSERT(0); 2543 break; 2544 } 2545 bp->b_ops = &xfs_sb_buf_ops; 2546 break; 2547 #ifdef CONFIG_XFS_RT 2548 case XFS_BLFT_RTBITMAP_BUF: 2549 case XFS_BLFT_RTSUMMARY_BUF: 2550 /* no magic numbers for verification of RT buffers */ 2551 bp->b_ops = &xfs_rtbuf_ops; 2552 break; 2553 #endif /* CONFIG_XFS_RT */ 2554 default: 2555 xfs_warn(mp, "Unknown buffer type %d!", 2556 xfs_blft_from_flags(buf_f)); 2557 break; 2558 } 2559 } 2560 2561 /* 2562 * Perform a 'normal' buffer recovery. Each logged region of the 2563 * buffer should be copied over the corresponding region in the 2564 * given buffer. The bitmap in the buf log format structure indicates 2565 * where to place the logged data. 2566 */ 2567 STATIC void 2568 xlog_recover_do_reg_buffer( 2569 struct xfs_mount *mp, 2570 xlog_recover_item_t *item, 2571 struct xfs_buf *bp, 2572 xfs_buf_log_format_t *buf_f) 2573 { 2574 int i; 2575 int bit; 2576 int nbits; 2577 int error; 2578 2579 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); 2580 2581 bit = 0; 2582 i = 1; /* 0 is the buf format structure */ 2583 while (1) { 2584 bit = xfs_next_bit(buf_f->blf_data_map, 2585 buf_f->blf_map_size, bit); 2586 if (bit == -1) 2587 break; 2588 nbits = xfs_contig_bits(buf_f->blf_data_map, 2589 buf_f->blf_map_size, bit); 2590 ASSERT(nbits > 0); 2591 ASSERT(item->ri_buf[i].i_addr != NULL); 2592 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); 2593 ASSERT(BBTOB(bp->b_io_length) >= 2594 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT)); 2595 2596 /* 2597 * The dirty regions logged in the buffer, even though 2598 * contiguous, may span multiple chunks. This is because the 2599 * dirty region may span a physical page boundary in a buffer 2600 * and hence be split into two separate vectors for writing into 2601 * the log. Hence we need to trim nbits back to the length of 2602 * the current region being copied out of the log. 2603 */ 2604 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT)) 2605 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT; 2606 2607 /* 2608 * Do a sanity check if this is a dquot buffer. Just checking 2609 * the first dquot in the buffer should do. XXXThis is 2610 * probably a good thing to do for other buf types also. 
2611 */ 2612 error = 0; 2613 if (buf_f->blf_flags & 2614 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 2615 if (item->ri_buf[i].i_addr == NULL) { 2616 xfs_alert(mp, 2617 "XFS: NULL dquot in %s.", __func__); 2618 goto next; 2619 } 2620 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) { 2621 xfs_alert(mp, 2622 "XFS: dquot too small (%d) in %s.", 2623 item->ri_buf[i].i_len, __func__); 2624 goto next; 2625 } 2626 error = xfs_dqcheck(mp, item->ri_buf[i].i_addr, 2627 -1, 0, XFS_QMOPT_DOWARN, 2628 "dquot_buf_recover"); 2629 if (error) 2630 goto next; 2631 } 2632 2633 memcpy(xfs_buf_offset(bp, 2634 (uint)bit << XFS_BLF_SHIFT), /* dest */ 2635 item->ri_buf[i].i_addr, /* source */ 2636 nbits<<XFS_BLF_SHIFT); /* length */ 2637 next: 2638 i++; 2639 bit += nbits; 2640 } 2641 2642 /* Shouldn't be any more regions */ 2643 ASSERT(i == item->ri_total); 2644 2645 xlog_recover_validate_buf_type(mp, bp, buf_f); 2646 } 2647 2648 /* 2649 * Perform a dquot buffer recovery. 2650 * Simple algorithm: if we have found a QUOTAOFF log item of the same type 2651 * (i.e. USR or GRP), then just toss this buffer away; don't recover it. 2652 * Else, treat it as a regular buffer and do recovery. 2653 * 2654 * Return false if the buffer was tossed and true if we recovered the buffer, 2655 * so that the caller knows whether the buffer needs writing. 2656 */ 2657 STATIC bool 2658 xlog_recover_do_dquot_buffer( 2659 struct xfs_mount *mp, 2660 struct xlog *log, 2661 struct xlog_recover_item *item, 2662 struct xfs_buf *bp, 2663 struct xfs_buf_log_format *buf_f) 2664 { 2665 uint type; 2666 2667 trace_xfs_log_recover_buf_dquot_buf(log, buf_f); 2668 2669 /* 2670 * Filesystems are required to send in quota flags at mount time. 2671 */ 2672 if (!mp->m_qflags) 2673 return false; 2674 2675 type = 0; 2676 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) 2677 type |= XFS_DQ_USER; 2678 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF) 2679 type |= XFS_DQ_PROJ; 2680 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF) 2681 type |= XFS_DQ_GROUP; 2682 /* 2683 * This type of quota was turned off, so ignore this buffer 2684 */ 2685 if (log->l_quotaoffs_flag & type) 2686 return false; 2687 2688 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2689 return true; 2690 } 2691 2692 /* 2693 * This routine replays a modification made to a buffer at runtime. 2694 * There are actually two types of buffer, regular and inode, which 2695 * are handled differently. Inode buffers differ 2696 * in that we only recover a specific set of data from them, namely 2697 * the inode di_next_unlinked fields. This is because all other inode 2698 * data is actually logged via inode records and any data we replay 2699 * here which overlaps that may be stale. 2700 * 2701 * When meta-data buffers are freed at run time we log a buffer item 2702 * with the XFS_BLF_CANCEL bit set to indicate that previous copies 2703 * of the buffer in the log should not be replayed at recovery time. 2704 * This is so that if the blocks covered by the buffer are reused for 2705 * file data before we crash we don't end up replaying old, freed 2706 * meta-data into a user's file. 2707 * 2708 * To handle the cancellation of buffer log items, we make two passes 2709 * over the log during recovery. During the first we build a table of 2710 * those buffers which have been cancelled, and during the second we 2711 * only replay those buffers which do not have corresponding cancel 2712 * records in the table. 
See xlog_recover_buffer_pass[1,2] above 2713 * for more details on the implementation of the table of cancel records. 2714 */ 2715 STATIC int 2716 xlog_recover_buffer_pass2( 2717 struct xlog *log, 2718 struct list_head *buffer_list, 2719 struct xlog_recover_item *item, 2720 xfs_lsn_t current_lsn) 2721 { 2722 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 2723 xfs_mount_t *mp = log->l_mp; 2724 xfs_buf_t *bp; 2725 int error; 2726 uint buf_flags; 2727 xfs_lsn_t lsn; 2728 2729 /* 2730 * In this pass we only want to recover all the buffers which have 2731 * not been cancelled and are not cancellation buffers themselves. 2732 */ 2733 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno, 2734 buf_f->blf_len, buf_f->blf_flags)) { 2735 trace_xfs_log_recover_buf_cancel(log, buf_f); 2736 return 0; 2737 } 2738 2739 trace_xfs_log_recover_buf_recover(log, buf_f); 2740 2741 buf_flags = 0; 2742 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) 2743 buf_flags |= XBF_UNMAPPED; 2744 2745 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, 2746 buf_flags, NULL); 2747 if (!bp) 2748 return -ENOMEM; 2749 error = bp->b_error; 2750 if (error) { 2751 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)"); 2752 goto out_release; 2753 } 2754 2755 /* 2756 * Recover the buffer only if we get an LSN from it and it's less than 2757 * the lsn of the transaction we are replaying. 2758 * 2759 * Note that we have to be extremely careful of readahead here. 2760 * Readahead does not attach verifiers to the buffers, so if we don't 2761 * actually do any replay after readahead because the LSN found in the 2762 * buffer is more recent than the current transaction, then we need to 2763 * attach the verifier directly. Failure to do so means future recovery 2764 * actions (e.g. EFI and unlinked list recovery) can operate on the 2765 * buffers without the verifier attached. This can lead to blocks on 2766 * disk having the correct content but a stale 2767 * CRC. 2768 * 2769 * It is safe to assume these clean buffers are currently up to date. 2770 * If the buffer is dirtied by a later transaction being replayed, then 2771 * the verifier will be reset to match whatever recover turns that 2772 * buffer into. 2773 */ 2774 lsn = xlog_recover_get_buf_lsn(mp, bp); 2775 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { 2776 xlog_recover_validate_buf_type(mp, bp, buf_f); 2777 goto out_release; 2778 } 2779 2780 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { 2781 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); 2782 if (error) 2783 goto out_release; 2784 } else if (buf_f->blf_flags & 2785 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 2786 bool dirty; 2787 2788 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); 2789 if (!dirty) 2790 goto out_release; 2791 } else { 2792 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2793 } 2794 2795 /* 2796 * Perform delayed write on the buffer. Asynchronous writes will be 2797 * slower when taking into account all the buffers to be flushed. 2798 * 2799 * Also make sure that only inode buffers with good sizes stay in 2800 * the buffer cache. The kernel moves inodes in buffers of 1 block 2801 * or mp->m_inode_cluster_size bytes, whichever is bigger. The inode 2802 * buffers in the log can be a different size if the log was generated 2803 * by an older kernel using unclustered inode buffers or a newer kernel 2804 * running with a different inode cluster size. 
Regardless, if 2805 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size) 2806 * for *our* value of mp->m_inode_cluster_size, then we need to keep 2807 * the buffer out of the buffer cache so that the buffer won't 2808 * overlap with future reads of those inodes. 2809 */ 2810 if (XFS_DINODE_MAGIC == 2811 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && 2812 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize, 2813 (__uint32_t)log->l_mp->m_inode_cluster_size))) { 2814 xfs_buf_stale(bp); 2815 error = xfs_bwrite(bp); 2816 } else { 2817 ASSERT(bp->b_target->bt_mount == mp); 2818 bp->b_iodone = xlog_recover_iodone; 2819 xfs_buf_delwri_queue(bp, buffer_list); 2820 } 2821 2822 out_release: 2823 xfs_buf_relse(bp); 2824 return error; 2825 } 2826 2827 /* 2828 * Inode fork owner changes 2829 * 2830 * If we have been told that we have to reparent the inode fork, it's because an 2831 * extent swap operation on a CRC enabled filesystem has been done and we are 2832 * replaying it. We need to walk the BMBT of the appropriate fork and change the 2833 * owners of it. 2834 * 2835 * The complexity here is that we don't have an inode context to work with, so 2836 * after we've replayed the inode we need to instantiate one. This is where the 2837 * fun begins. 2838 * 2839 * We are in the middle of log recovery, so we can't run transactions. That 2840 * means we cannot use cache coherent inode instantiation via xfs_iget(), as 2841 * that will result in the corresponding iput() running the inode through 2842 * xfs_inactive(). If we've just replayed an inode core that changes the link 2843 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run 2844 * transactions (bad!). 2845 * 2846 * So, to avoid this, we instantiate an inode directly from the inode core we've 2847 * just recovered. We have the buffer still locked, and all we really need to 2848 * instantiate is the inode core and the forks being modified. We can do this 2849 * manually, then run the inode btree owner change, and then tear down the 2850 * xfs_inode without having to run any transactions at all. 2851 * 2852 * Also, because we don't have a transaction context available here, we pass 2853 * the buffer_list to the operation so it can gather all the buffers it 2854 * modifies for writeback. 
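 *
 * (Illustrative flow for a data fork owner change: replay the inode
 * core as normal, then xfs_recover_inode_owner_change() below builds
 * a temporary xfs_inode from the recovered on-disk core and walks the
 * data fork BMBT via xfs_bmbt_change_owner(), queueing each modified
 * btree block buffer on buffer_list for a single writeback pass.)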
2855 */ 2856 2857 STATIC int 2858 xfs_recover_inode_owner_change( 2859 struct xfs_mount *mp, 2860 struct xfs_dinode *dip, 2861 struct xfs_inode_log_format *in_f, 2862 struct list_head *buffer_list) 2863 { 2864 struct xfs_inode *ip; 2865 int error; 2866 2867 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)); 2868 2869 ip = xfs_inode_alloc(mp, in_f->ilf_ino); 2870 if (!ip) 2871 return -ENOMEM; 2872 2873 /* instantiate the inode */ 2874 xfs_inode_from_disk(ip, dip); 2875 ASSERT(ip->i_d.di_version >= 3); 2876 2877 error = xfs_iformat_fork(ip, dip); 2878 if (error) 2879 goto out_free_ip; 2880 2881 2882 if (in_f->ilf_fields & XFS_ILOG_DOWNER) { 2883 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT); 2884 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK, 2885 ip->i_ino, buffer_list); 2886 if (error) 2887 goto out_free_ip; 2888 } 2889 2890 if (in_f->ilf_fields & XFS_ILOG_AOWNER) { 2891 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT); 2892 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK, 2893 ip->i_ino, buffer_list); 2894 if (error) 2895 goto out_free_ip; 2896 } 2897 2898 out_free_ip: 2899 xfs_inode_free(ip); 2900 return error; 2901 } 2902 2903 STATIC int 2904 xlog_recover_inode_pass2( 2905 struct xlog *log, 2906 struct list_head *buffer_list, 2907 struct xlog_recover_item *item, 2908 xfs_lsn_t current_lsn) 2909 { 2910 xfs_inode_log_format_t *in_f; 2911 xfs_mount_t *mp = log->l_mp; 2912 xfs_buf_t *bp; 2913 xfs_dinode_t *dip; 2914 int len; 2915 char *src; 2916 char *dest; 2917 int error; 2918 int attr_index; 2919 uint fields; 2920 struct xfs_log_dinode *ldip; 2921 uint isize; 2922 int need_free = 0; 2923 2924 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { 2925 in_f = item->ri_buf[0].i_addr; 2926 } else { 2927 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP); 2928 need_free = 1; 2929 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); 2930 if (error) 2931 goto error; 2932 } 2933 2934 /* 2935 * Inode buffers can be freed, look out for it, 2936 * and do not replay the inode. 2937 */ 2938 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno, 2939 in_f->ilf_len, 0)) { 2940 error = 0; 2941 trace_xfs_log_recover_inode_cancel(log, in_f); 2942 goto error; 2943 } 2944 trace_xfs_log_recover_inode_recover(log, in_f); 2945 2946 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0, 2947 &xfs_inode_buf_ops); 2948 if (!bp) { 2949 error = -ENOMEM; 2950 goto error; 2951 } 2952 error = bp->b_error; 2953 if (error) { 2954 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)"); 2955 goto out_release; 2956 } 2957 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); 2958 dip = xfs_buf_offset(bp, in_f->ilf_boffset); 2959 2960 /* 2961 * Make sure the place we're flushing out to really looks 2962 * like an inode! 
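 * (XFS_DINODE_MAGIC is 0x494e, ASCII "IN"; anything else here means
 * the log record is pointing replay at a block that is not an inode.)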
2963 */ 2964 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) { 2965 xfs_alert(mp, 2966 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", 2967 __func__, dip, bp, in_f->ilf_ino); 2968 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", 2969 XFS_ERRLEVEL_LOW, mp); 2970 error = -EFSCORRUPTED; 2971 goto out_release; 2972 } 2973 ldip = item->ri_buf[1].i_addr; 2974 if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) { 2975 xfs_alert(mp, 2976 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld", 2977 __func__, item, in_f->ilf_ino); 2978 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", 2979 XFS_ERRLEVEL_LOW, mp); 2980 error = -EFSCORRUPTED; 2981 goto out_release; 2982 } 2983 2984 /* 2985 * If the inode has an LSN in it, recover the inode only if it's less 2986 * than the lsn of the transaction we are replaying. Note: we still 2987 * need to replay an owner change even though the inode is more recent 2988 * than the transaction as there is no guarantee that all the btree 2989 * blocks are more recent than this transaction, too. 2990 */ 2991 if (dip->di_version >= 3) { 2992 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); 2993 2994 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { 2995 trace_xfs_log_recover_inode_skip(log, in_f); 2996 error = 0; 2997 goto out_owner_change; 2998 } 2999 } 3000 3001 /* 3002 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes 3003 * are transactional and if ordering is necessary we can determine that 3004 * more accurately by the LSN field in the V3 inode core. Don't trust 3005 * the inode versions since we might be changing them here - use the 3006 * superblock flag to determine whether we need to look at di_flushiter 3007 * to skip replay when the on disk inode is newer than the log one. 3008 */ 3009 if (!xfs_sb_version_hascrc(&mp->m_sb) && 3010 ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) { 3011 /* 3012 * Deal with the wrap case, DI_MAX_FLUSH is less 3013 * than smaller numbers 3014 */ 3015 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && 3016 ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) { 3017 /* do nothing */ 3018 } else { 3019 trace_xfs_log_recover_inode_skip(log, in_f); 3020 error = 0; 3021 goto out_release; 3022 } 3023 } 3024 3025 /* Take the opportunity to reset the flush iteration count */ 3026 ldip->di_flushiter = 0; 3027 3028 if (unlikely(S_ISREG(ldip->di_mode))) { 3029 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) && 3030 (ldip->di_format != XFS_DINODE_FMT_BTREE)) { 3031 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", 3032 XFS_ERRLEVEL_LOW, mp, ldip); 3033 xfs_alert(mp, 3034 "%s: Bad regular inode log record, rec ptr 0x%p, " 3035 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 3036 __func__, item, dip, bp, in_f->ilf_ino); 3037 error = -EFSCORRUPTED; 3038 goto out_release; 3039 } 3040 } else if (unlikely(S_ISDIR(ldip->di_mode))) { 3041 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) && 3042 (ldip->di_format != XFS_DINODE_FMT_BTREE) && 3043 (ldip->di_format != XFS_DINODE_FMT_LOCAL)) { 3044 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", 3045 XFS_ERRLEVEL_LOW, mp, ldip); 3046 xfs_alert(mp, 3047 "%s: Bad dir inode log record, rec ptr 0x%p, " 3048 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 3049 __func__, item, dip, bp, in_f->ilf_ino); 3050 error = -EFSCORRUPTED; 3051 goto out_release; 3052 } 3053 } 3054 if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){ 3055 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", 3056 XFS_ERRLEVEL_LOW, mp, ldip); 3057 xfs_alert(mp, 3058 "%s: Bad inode log 
record, rec ptr 0x%p, dino ptr 0x%p, " 3059 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", 3060 __func__, item, dip, bp, in_f->ilf_ino, 3061 ldip->di_nextents + ldip->di_anextents, 3062 ldip->di_nblocks); 3063 error = -EFSCORRUPTED; 3064 goto out_release; 3065 } 3066 if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) { 3067 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", 3068 XFS_ERRLEVEL_LOW, mp, ldip); 3069 xfs_alert(mp, 3070 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " 3071 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, 3072 item, dip, bp, in_f->ilf_ino, ldip->di_forkoff); 3073 error = -EFSCORRUPTED; 3074 goto out_release; 3075 } 3076 isize = xfs_log_dinode_size(ldip->di_version); 3077 if (unlikely(item->ri_buf[1].i_len > isize)) { 3078 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", 3079 XFS_ERRLEVEL_LOW, mp, ldip); 3080 xfs_alert(mp, 3081 "%s: Bad inode log record length %d, rec ptr 0x%p", 3082 __func__, item->ri_buf[1].i_len, item); 3083 error = -EFSCORRUPTED; 3084 goto out_release; 3085 } 3086 3087 /* recover the log dinode inode into the on disk inode */ 3088 xfs_log_dinode_to_disk(ldip, dip); 3089 3090 /* the rest is in on-disk format */ 3091 if (item->ri_buf[1].i_len > isize) { 3092 memcpy((char *)dip + isize, 3093 item->ri_buf[1].i_addr + isize, 3094 item->ri_buf[1].i_len - isize); 3095 } 3096 3097 fields = in_f->ilf_fields; 3098 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) { 3099 case XFS_ILOG_DEV: 3100 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev); 3101 break; 3102 case XFS_ILOG_UUID: 3103 memcpy(XFS_DFORK_DPTR(dip), 3104 &in_f->ilf_u.ilfu_uuid, 3105 sizeof(uuid_t)); 3106 break; 3107 } 3108 3109 if (in_f->ilf_size == 2) 3110 goto out_owner_change; 3111 len = item->ri_buf[2].i_len; 3112 src = item->ri_buf[2].i_addr; 3113 ASSERT(in_f->ilf_size <= 4); 3114 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); 3115 ASSERT(!(fields & XFS_ILOG_DFORK) || 3116 (len == in_f->ilf_dsize)); 3117 3118 switch (fields & XFS_ILOG_DFORK) { 3119 case XFS_ILOG_DDATA: 3120 case XFS_ILOG_DEXT: 3121 memcpy(XFS_DFORK_DPTR(dip), src, len); 3122 break; 3123 3124 case XFS_ILOG_DBROOT: 3125 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, 3126 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip), 3127 XFS_DFORK_DSIZE(dip, mp)); 3128 break; 3129 3130 default: 3131 /* 3132 * There are no data fork flags set. 3133 */ 3134 ASSERT((fields & XFS_ILOG_DFORK) == 0); 3135 break; 3136 } 3137 3138 /* 3139 * If we logged any attribute data, recover it. There may or 3140 * may not have been any other non-core data logged in this 3141 * transaction. 
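 *
 * (Region layout example: ri_buf[0] holds the log format item and
 * ri_buf[1] the inode core, so ilf_size == 2 means no fork data was
 * logged; with both forks logged ilf_size == 4 and the attr fork
 * lives in ri_buf[3], otherwise in ri_buf[2], matching the
 * attr_index selection below.)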
3142 */ 3143 if (in_f->ilf_fields & XFS_ILOG_AFORK) { 3144 if (in_f->ilf_fields & XFS_ILOG_DFORK) { 3145 attr_index = 3; 3146 } else { 3147 attr_index = 2; 3148 } 3149 len = item->ri_buf[attr_index].i_len; 3150 src = item->ri_buf[attr_index].i_addr; 3151 ASSERT(len == in_f->ilf_asize); 3152 3153 switch (in_f->ilf_fields & XFS_ILOG_AFORK) { 3154 case XFS_ILOG_ADATA: 3155 case XFS_ILOG_AEXT: 3156 dest = XFS_DFORK_APTR(dip); 3157 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp)); 3158 memcpy(dest, src, len); 3159 break; 3160 3161 case XFS_ILOG_ABROOT: 3162 dest = XFS_DFORK_APTR(dip); 3163 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, 3164 len, (xfs_bmdr_block_t*)dest, 3165 XFS_DFORK_ASIZE(dip, mp)); 3166 break; 3167 3168 default: 3169 xfs_warn(log->l_mp, "%s: Invalid flag", __func__); 3170 ASSERT(0); 3171 error = -EIO; 3172 goto out_release; 3173 } 3174 } 3175 3176 out_owner_change: 3177 if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) 3178 error = xfs_recover_inode_owner_change(mp, dip, in_f, 3179 buffer_list); 3180 /* re-generate the checksum. */ 3181 xfs_dinode_calc_crc(log->l_mp, dip); 3182 3183 ASSERT(bp->b_target->bt_mount == mp); 3184 bp->b_iodone = xlog_recover_iodone; 3185 xfs_buf_delwri_queue(bp, buffer_list); 3186 3187 out_release: 3188 xfs_buf_relse(bp); 3189 error: 3190 if (need_free) 3191 kmem_free(in_f); 3192 return error; 3193 } 3194 3195 /* 3196 * Recover QUOTAOFF records. We simply make a note of it in the xlog 3197 * structure, so that we know not to do any dquot item or dquot buffer recovery 3198 * of that type. 3199 */ 3200 STATIC int 3201 xlog_recover_quotaoff_pass1( 3202 struct xlog *log, 3203 struct xlog_recover_item *item) 3204 { 3205 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr; 3206 ASSERT(qoff_f); 3207 3208 /* 3209 * The logitem format's flag tells us if this was user quotaoff, 3210 * group/project quotaoff or both. 3211 */ 3212 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) 3213 log->l_quotaoffs_flag |= XFS_DQ_USER; 3214 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT) 3215 log->l_quotaoffs_flag |= XFS_DQ_PROJ; 3216 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) 3217 log->l_quotaoffs_flag |= XFS_DQ_GROUP; 3218 3219 return 0; 3220 } 3221 3222 /* 3223 * Recover a dquot record 3224 */ 3225 STATIC int 3226 xlog_recover_dquot_pass2( 3227 struct xlog *log, 3228 struct list_head *buffer_list, 3229 struct xlog_recover_item *item, 3230 xfs_lsn_t current_lsn) 3231 { 3232 xfs_mount_t *mp = log->l_mp; 3233 xfs_buf_t *bp; 3234 struct xfs_disk_dquot *ddq, *recddq; 3235 int error; 3236 xfs_dq_logformat_t *dq_f; 3237 uint type; 3238 3239 3240 /* 3241 * Filesystems are required to send in quota flags at mount time. 3242 */ 3243 if (mp->m_qflags == 0) 3244 return 0; 3245 3246 recddq = item->ri_buf[1].i_addr; 3247 if (recddq == NULL) { 3248 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); 3249 return -EIO; 3250 } 3251 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { 3252 xfs_alert(log->l_mp, "dquot too small (%d) in %s.", 3253 item->ri_buf[1].i_len, __func__); 3254 return -EIO; 3255 } 3256 3257 /* 3258 * This type of quota was turned off, so ignore this record. 3259 */ 3260 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); 3261 ASSERT(type); 3262 if (log->l_quotaoffs_flag & type) 3263 return 0; 3264 3265 /* 3266 * At this point we know that quota was _not_ turned off. 3267 * Since the mount flags are not indicating to us otherwise, this 3268 * must mean that quota is on, and the dquot needs to be replayed. 
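 *
 * (e.g. if pass 1 saw a user quotaoff item, l_quotaoffs_flag has
 * XFS_DQ_USER set and the check above already skipped any user dquot
 * records, so only quota types that were live at crash time get here.)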
3269 * Remember that we may not have fully recovered the superblock yet, 3270 * so we can't do the usual trick of looking at the SB quota bits. 3271 * 3272 * The other possibility, of course, is that the quota subsystem was 3273 * removed since the last mount - ENOSYS. 3274 */ 3275 dq_f = item->ri_buf[0].i_addr; 3276 ASSERT(dq_f); 3277 error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 3278 "xlog_recover_dquot_pass2 (log copy)"); 3279 if (error) 3280 return -EIO; 3281 ASSERT(dq_f->qlf_len == 1); 3282 3283 /* 3284 * At this point we are assuming that the dquots have been allocated 3285 * and hence the buffer has valid dquots stamped in it. It should, 3286 * therefore, pass verifier validation. If the dquot is bad, then 3287 * we'll return an error here, so we don't need to specifically check 3288 * the dquot in the buffer after the verifier has run. 3289 */ 3290 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno, 3291 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, 3292 &xfs_dquot_buf_ops); 3293 if (error) 3294 return error; 3295 3296 ASSERT(bp); 3297 ddq = xfs_buf_offset(bp, dq_f->qlf_boffset); 3298 3299 /* 3300 * If the dquot has an LSN in it, recover the dquot only if it's less 3301 * than the lsn of the transaction we are replaying. 3302 */ 3303 if (xfs_sb_version_hascrc(&mp->m_sb)) { 3304 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq; 3305 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn); 3306 3307 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { 3308 goto out_release; 3309 } 3310 } 3311 3312 memcpy(ddq, recddq, item->ri_buf[1].i_len); 3313 if (xfs_sb_version_hascrc(&mp->m_sb)) { 3314 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk), 3315 XFS_DQUOT_CRC_OFF); 3316 } 3317 3318 ASSERT(dq_f->qlf_size == 2); 3319 ASSERT(bp->b_target->bt_mount == mp); 3320 bp->b_iodone = xlog_recover_iodone; 3321 xfs_buf_delwri_queue(bp, buffer_list); 3322 3323 out_release: 3324 xfs_buf_relse(bp); 3325 return 0; 3326 } 3327 3328 /* 3329 * This routine is called to create an in-core extent free intent 3330 * item from the efi format structure which was logged on disk. 3331 * It allocates an in-core efi, copies the extents from the format 3332 * structure into it, and adds the efi to the AIL with the given 3333 * LSN. 3334 */ 3335 STATIC int 3336 xlog_recover_efi_pass2( 3337 struct xlog *log, 3338 struct xlog_recover_item *item, 3339 xfs_lsn_t lsn) 3340 { 3341 int error; 3342 struct xfs_mount *mp = log->l_mp; 3343 struct xfs_efi_log_item *efip; 3344 struct xfs_efi_log_format *efi_formatp; 3345 3346 efi_formatp = item->ri_buf[0].i_addr; 3347 3348 efip = xfs_efi_init(mp, efi_formatp->efi_nextents); 3349 error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format); 3350 if (error) { 3351 xfs_efi_item_free(efip); 3352 return error; 3353 } 3354 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents); 3355 3356 spin_lock(&log->l_ailp->xa_lock); 3357 /* 3358 * The EFI has two references. One for the EFD and one for the EFI to 3359 * ensure it makes it into the AIL. Insert the EFI into the AIL directly 3360 * and drop the EFI reference. Note that xfs_trans_ail_update() drops the 3361 * AIL lock. 3362 */ 3363 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn); 3364 xfs_efi_release(efip); 3365 return 0; 3366 } 3367 3368 3369 /* 3370 * This routine is called when an EFD format structure is found in a committed 3371 * transaction in the log. Its purpose is to cancel the corresponding EFI if it 3372 * was still in the log. 
To do this it searches the AIL for the EFI with an id 3373 * equal to that in the EFD format structure. If we find it we drop the EFD 3374 * reference, which removes the EFI from the AIL and frees it. 3375 */ 3376 STATIC int 3377 xlog_recover_efd_pass2( 3378 struct xlog *log, 3379 struct xlog_recover_item *item) 3380 { 3381 xfs_efd_log_format_t *efd_formatp; 3382 xfs_efi_log_item_t *efip = NULL; 3383 xfs_log_item_t *lip; 3384 __uint64_t efi_id; 3385 struct xfs_ail_cursor cur; 3386 struct xfs_ail *ailp = log->l_ailp; 3387 3388 efd_formatp = item->ri_buf[0].i_addr; 3389 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + 3390 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || 3391 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + 3392 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t))))); 3393 efi_id = efd_formatp->efd_efi_id; 3394 3395 /* 3396 * Search for the EFI with the id in the EFD format structure in the 3397 * AIL. 3398 */ 3399 spin_lock(&ailp->xa_lock); 3400 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 3401 while (lip != NULL) { 3402 if (lip->li_type == XFS_LI_EFI) { 3403 efip = (xfs_efi_log_item_t *)lip; 3404 if (efip->efi_format.efi_id == efi_id) { 3405 /* 3406 * Drop the EFD reference to the EFI. This 3407 * removes the EFI from the AIL and frees it. 3408 */ 3409 spin_unlock(&ailp->xa_lock); 3410 xfs_efi_release(efip); 3411 spin_lock(&ailp->xa_lock); 3412 break; 3413 } 3414 } 3415 lip = xfs_trans_ail_cursor_next(ailp, &cur); 3416 } 3417 3418 xfs_trans_ail_cursor_done(&cur); 3419 spin_unlock(&ailp->xa_lock); 3420 3421 return 0; 3422 } 3423 3424 /* 3425 * This routine is called to create an in-core extent rmap update 3426 * item from the rui format structure which was logged on disk. 3427 * It allocates an in-core rui, copies the extents from the format 3428 * structure into it, and adds the rui to the AIL with the given 3429 * LSN. 3430 */ 3431 STATIC int 3432 xlog_recover_rui_pass2( 3433 struct xlog *log, 3434 struct xlog_recover_item *item, 3435 xfs_lsn_t lsn) 3436 { 3437 int error; 3438 struct xfs_mount *mp = log->l_mp; 3439 struct xfs_rui_log_item *ruip; 3440 struct xfs_rui_log_format *rui_formatp; 3441 3442 rui_formatp = item->ri_buf[0].i_addr; 3443 3444 ruip = xfs_rui_init(mp, rui_formatp->rui_nextents); 3445 error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format); 3446 if (error) { 3447 xfs_rui_item_free(ruip); 3448 return error; 3449 } 3450 atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents); 3451 3452 spin_lock(&log->l_ailp->xa_lock); 3453 /* 3454 * The RUI has two references. One for the RUD and one for RUI to ensure 3455 * it makes it into the AIL. Insert the RUI into the AIL directly and 3456 * drop the RUI reference. Note that xfs_trans_ail_update() drops the 3457 * AIL lock. 3458 */ 3459 xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn); 3460 xfs_rui_release(ruip); 3461 return 0; 3462 } 3463 3464 3465 /* 3466 * This routine is called when an RUD format structure is found in a committed 3467 * transaction in the log. Its purpose is to cancel the corresponding RUI if it 3468 * was still in the log. To do this it searches the AIL for the RUI with an id 3469 * equal to that in the RUD format structure. If we find it we drop the RUD 3470 * reference, which removes the RUI from the AIL and frees it. 
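 *
 * (Intent/done pairing example: an RUI inserted into the AIL by
 * xlog_recover_rui_pass2() with a hypothetical rui_id of 42 stays
 * there until an RUD carrying the same id is found here; if no RUD
 * ever appears, the intent survives recovery and the pending rmap
 * update is redone afterwards.)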
3471 */ 3472 STATIC int 3473 xlog_recover_rud_pass2( 3474 struct xlog *log, 3475 struct xlog_recover_item *item) 3476 { 3477 struct xfs_rud_log_format *rud_formatp; 3478 struct xfs_rui_log_item *ruip = NULL; 3479 struct xfs_log_item *lip; 3480 __uint64_t rui_id; 3481 struct xfs_ail_cursor cur; 3482 struct xfs_ail *ailp = log->l_ailp; 3483 3484 rud_formatp = item->ri_buf[0].i_addr; 3485 ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format)); 3486 rui_id = rud_formatp->rud_rui_id; 3487 3488 /* 3489 * Search for the RUI with the id in the RUD format structure in the 3490 * AIL. 3491 */ 3492 spin_lock(&ailp->xa_lock); 3493 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 3494 while (lip != NULL) { 3495 if (lip->li_type == XFS_LI_RUI) { 3496 ruip = (struct xfs_rui_log_item *)lip; 3497 if (ruip->rui_format.rui_id == rui_id) { 3498 /* 3499 * Drop the RUD reference to the RUI. This 3500 * removes the RUI from the AIL and frees it. 3501 */ 3502 spin_unlock(&ailp->xa_lock); 3503 xfs_rui_release(ruip); 3504 spin_lock(&ailp->xa_lock); 3505 break; 3506 } 3507 } 3508 lip = xfs_trans_ail_cursor_next(ailp, &cur); 3509 } 3510 3511 xfs_trans_ail_cursor_done(&cur); 3512 spin_unlock(&ailp->xa_lock); 3513 3514 return 0; 3515 } 3516 3517 /* 3518 * This routine is called when an inode create format structure is found in a 3519 * committed transaction in the log. Its purpose is to initialise the inodes 3520 * being allocated on disk. This requires us to get inode cluster buffers that 3521 * match the range to be initialised, stamped with inode templates and written 3522 * by delayed write so that subsequent modifications will hit the cached buffer 3523 * and only need writing out at the end of recovery. 3524 */ 3525 STATIC int 3526 xlog_recover_do_icreate_pass2( 3527 struct xlog *log, 3528 struct list_head *buffer_list, 3529 xlog_recover_item_t *item) 3530 { 3531 struct xfs_mount *mp = log->l_mp; 3532 struct xfs_icreate_log *icl; 3533 xfs_agnumber_t agno; 3534 xfs_agblock_t agbno; 3535 unsigned int count; 3536 unsigned int isize; 3537 xfs_agblock_t length; 3538 int blks_per_cluster; 3539 int bb_per_cluster; 3540 int cancel_count; 3541 int nbufs; 3542 int i; 3543 3544 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; 3545 if (icl->icl_type != XFS_LI_ICREATE) { 3546 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); 3547 return -EINVAL; 3548 } 3549 3550 if (icl->icl_size != 1) { 3551 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); 3552 return -EINVAL; 3553 } 3554 3555 agno = be32_to_cpu(icl->icl_ag); 3556 if (agno >= mp->m_sb.sb_agcount) { 3557 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); 3558 return -EINVAL; 3559 } 3560 agbno = be32_to_cpu(icl->icl_agbno); 3561 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { 3562 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); 3563 return -EINVAL; 3564 } 3565 isize = be32_to_cpu(icl->icl_isize); 3566 if (isize != mp->m_sb.sb_inodesize) { 3567 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); 3568 return -EINVAL; 3569 } 3570 count = be32_to_cpu(icl->icl_count); 3571 if (!count) { 3572 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); 3573 return -EINVAL; 3574 } 3575 length = be32_to_cpu(icl->icl_length); 3576 if (!length || length >= mp->m_sb.sb_agblocks) { 3577 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); 3578 return -EINVAL; 3579 } 3580 3581 /* 3582 * The inode chunk is either full or sparse and we only support 3583 * 
m_ialloc_min_blks sized sparse allocations at this time. 3584 */ 3585 if (length != mp->m_ialloc_blks && 3586 length != mp->m_ialloc_min_blks) { 3587 xfs_warn(log->l_mp, 3588 "%s: unsupported chunk length", __FUNCTION__); 3589 return -EINVAL; 3590 } 3591 3592 /* verify inode count is consistent with extent length */ 3593 if ((count >> mp->m_sb.sb_inopblog) != length) { 3594 xfs_warn(log->l_mp, 3595 "%s: inconsistent inode count and chunk length", 3596 __FUNCTION__); 3597 return -EINVAL; 3598 } 3599 3600 /* 3601 * The icreate transaction can cover multiple cluster buffers and these 3602 * buffers could have been freed and reused. Check the individual 3603 * buffers for cancellation so we don't overwrite anything written after 3604 * a cancellation. 3605 */ 3606 blks_per_cluster = xfs_icluster_size_fsb(mp); 3607 bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster); 3608 nbufs = length / blks_per_cluster; 3609 for (i = 0, cancel_count = 0; i < nbufs; i++) { 3610 xfs_daddr_t daddr; 3611 3612 daddr = XFS_AGB_TO_DADDR(mp, agno, 3613 agbno + i * blks_per_cluster); 3614 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0)) 3615 cancel_count++; 3616 } 3617 3618 /* 3619 * We currently only use icreate for a single allocation at a time. This 3620 * means we should expect either all or none of the buffers to be 3621 * cancelled. Be conservative and skip replay if at least one buffer is 3622 * cancelled, but warn the user that something is awry if the buffers 3623 * are not consistent. 3624 * 3625 * XXX: This must be refined to only skip cancelled clusters once we use 3626 * icreate for multiple chunk allocations. 3627 */ 3628 ASSERT(!cancel_count || cancel_count == nbufs); 3629 if (cancel_count) { 3630 if (cancel_count != nbufs) 3631 xfs_warn(mp, 3632 "WARNING: partial inode chunk cancellation, skipped icreate."); 3633 trace_xfs_log_recover_icreate_cancel(log, icl); 3634 return 0; 3635 } 3636 3637 trace_xfs_log_recover_icreate_recover(log, icl); 3638 return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno, 3639 length, be32_to_cpu(icl->icl_gen)); 3640 } 3641 3642 STATIC void 3643 xlog_recover_buffer_ra_pass2( 3644 struct xlog *log, 3645 struct xlog_recover_item *item) 3646 { 3647 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; 3648 struct xfs_mount *mp = log->l_mp; 3649 3650 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno, 3651 buf_f->blf_len, buf_f->blf_flags)) { 3652 return; 3653 } 3654 3655 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno, 3656 buf_f->blf_len, NULL); 3657 } 3658 3659 STATIC void 3660 xlog_recover_inode_ra_pass2( 3661 struct xlog *log, 3662 struct xlog_recover_item *item) 3663 { 3664 struct xfs_inode_log_format ilf_buf; 3665 struct xfs_inode_log_format *ilfp; 3666 struct xfs_mount *mp = log->l_mp; 3667 int error; 3668 3669 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { 3670 ilfp = item->ri_buf[0].i_addr; 3671 } else { 3672 ilfp = &ilf_buf; 3673 memset(ilfp, 0, sizeof(*ilfp)); 3674 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp); 3675 if (error) 3676 return; 3677 } 3678 3679 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0)) 3680 return; 3681 3682 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno, 3683 ilfp->ilf_len, &xfs_inode_buf_ra_ops); 3684 } 3685 3686 STATIC void 3687 xlog_recover_dquot_ra_pass2( 3688 struct xlog *log, 3689 struct xlog_recover_item *item) 3690 { 3691 struct xfs_mount *mp = log->l_mp; 3692 struct xfs_disk_dquot *recddq; 3693 struct xfs_dq_logformat *dq_f; 3694 
	uint			type;
	int			len;

	if (mp->m_qflags == 0)
		return;

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL)
		return;
	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
		return;

	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return;

	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	ASSERT(dq_f->qlf_len == 1);

	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
		return;

	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
			  &xfs_dquot_buf_ra_ops);
}

STATIC void
xlog_recover_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		xlog_recover_buffer_ra_pass2(log, item);
		break;
	case XFS_LI_INODE:
		xlog_recover_inode_ra_pass2(log, item);
		break;
	case XFS_LI_DQUOT:
		xlog_recover_dquot_ra_pass2(log, item);
		break;
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_QUOTAOFF:
	case XFS_LI_RUI:
	case XFS_LI_RUD:
	default:
		break;
	}
}

STATIC int
xlog_recover_commit_pass1(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass1(log, item);
	case XFS_LI_QUOTAOFF:
		return xlog_recover_quotaoff_pass1(log, item);
	case XFS_LI_INODE:
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_DQUOT:
	case XFS_LI_ICREATE:
	case XFS_LI_RUI:
	case XFS_LI_RUD:
		/* nothing to do in pass 1 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return -EIO;
	}
}

STATIC int
xlog_recover_commit_pass2(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_INODE:
		return xlog_recover_inode_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_EFI:
		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
	case XFS_LI_EFD:
		return xlog_recover_efd_pass2(log, item);
	case XFS_LI_RUI:
		return xlog_recover_rui_pass2(log, item, trans->r_lsn);
	case XFS_LI_RUD:
		return xlog_recover_rud_pass2(log, item);
	case XFS_LI_DQUOT:
		return xlog_recover_dquot_pass2(log, buffer_list, item,
						trans->r_lsn);
	case XFS_LI_ICREATE:
		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
	case XFS_LI_QUOTAOFF:
		/* nothing to do in pass2 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return -EIO;
	}
}

STATIC int
xlog_recover_items_pass2(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct list_head		*buffer_list,
	struct list_head		*item_list)
{
	struct xlog_recover_item	*item;
	int				error = 0;

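	/*
	 * Replay the readahead batch in order; stop at the first failure so
	 * the caller aborts recovery instead of replaying past an error.
	 */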
	list_for_each_entry(item, item_list, ri_list) {
		error = xlog_recover_commit_pass2(log, trans,
					  buffer_list, item);
		if (error)
			return error;
	}

	return error;
}

/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	int				error = 0;
	int				error2;
	int				items_queued = 0;
	struct xlog_recover_item	*item;
	struct xlog_recover_item	*next;
	LIST_HEAD			(buffer_list);
	LIST_HEAD			(ra_list);
	LIST_HEAD			(done_list);

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	hlist_del(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		switch (pass) {
		case XLOG_RECOVER_PASS1:
			error = xlog_recover_commit_pass1(log, trans, item);
			break;
		case XLOG_RECOVER_PASS2:
			xlog_recover_ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						&buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}

			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					&buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	error2 = xfs_buf_delwri_submit(&buffer_list);
	return error ? error : error2;
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	xlog_recover_item_t	*item;
	char			*ptr, *old_ptr;
	int			old_len;

	/*
	 * If the transaction is empty, the header was split across this and the
	 * previous record. Copy the rest of the header.
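	 * The first fragment was copied to the front of r_theader when the
	 * previous record was processed; the len bytes here are the tail of
	 * the header, so they land in the last len bytes of the structure.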
	 */
	if (list_empty(&trans->r_itemq)) {
		ASSERT(len <= sizeof(struct xfs_trans_header));
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			return -EIO;
		}

		xlog_recover_add_item(&trans->r_itemq);
		ptr = (char *)&trans->r_theader +
				sizeof(struct xfs_trans_header) - len;
		memcpy(ptr, dp, len);
		return 0;
	}

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}

/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	char			*ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EIO;
		}

		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			ASSERT(0);
			return -EIO;
		}

		/*
		 * The transaction header can be arbitrarily split across op
		 * records. If we don't have the whole thing here, copy what we
		 * do have and handle the rest in the next record.
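		 * For example, if only the first half of the header is in
		 * this op record, it is copied into r_theader now and
		 * xlog_recover_add_to_cont_trans() fills in the remainder
		 * from the next record.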
		 */
		if (len == sizeof(struct xfs_trans_header))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	    item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return -EIO;
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}

/*
 * Free up any resources allocated by the transaction
 *
 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
 */
STATIC void
xlog_recover_free_trans(
	struct xlog_recover	*trans)
{
	xlog_recover_item_t	*item, *n;
	int			i;

	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		/* Free the regions in the item. */
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kmem_free(item->ri_buf[i].i_addr);
		/* Free the item itself */
		kmem_free(item->ri_buf);
		kmem_free(item);
	}
	/* Free the transaction recover structure */
	kmem_free(trans);
}

/*
 * On error or completion, trans is freed.
 */
STATIC int
xlog_recovery_process_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	unsigned int		len,
	unsigned int		flags,
	int			pass)
{
	int			error = 0;
	bool			freeit = false;

	/* mask off ophdr transaction container flags */
	flags &= ~XLOG_END_TRANS;
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	/*
	 * Callees must not free the trans structure. We'll decide if we need to
	 * free it or not based on the operation being done and its result.
	 */
	switch (flags) {
	/* expected flag values */
	case 0:
	case XLOG_CONTINUE_TRANS:
		error = xlog_recover_add_to_trans(log, trans, dp, len);
		break;
	case XLOG_WAS_CONT_TRANS:
		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
		break;
	case XLOG_COMMIT_TRANS:
		error = xlog_recover_commit_trans(log, trans, pass);
		/* success or fail, we are now done with this transaction.
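		 * The freeit flag defers the free to the common exit path
		 * below so the error handling stays in one place.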
		 */
		freeit = true;
		break;

	/* unexpected flag values */
	case XLOG_UNMOUNT_TRANS:
		/* just skip trans */
		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
		freeit = true;
		break;
	case XLOG_START_TRANS:
	default:
		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
		ASSERT(0);
		error = -EIO;
		break;
	}
	if (error || freeit)
		xlog_recover_free_trans(trans);
	return error;
}

/*
 * Look up the transaction recovery structure associated with the ID in the
 * current ophdr. If the transaction doesn't exist and the start flag is set in
 * the ophdr, then allocate a new transaction for future ID matches to find.
 * Either way, return what we found during the lookup - an existing transaction
 * or nothing.
 */
STATIC struct xlog_recover *
xlog_recover_ophdr_to_trans(
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead)
{
	struct xlog_recover	*trans;
	xlog_tid_t		tid;
	struct hlist_head	*rhp;

	tid = be32_to_cpu(ohead->oh_tid);
	rhp = &rhash[XLOG_RHASH(tid)];
	hlist_for_each_entry(trans, rhp, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}

	/*
	 * skip over non-start transaction headers - we could be
	 * processing slack space before the next transaction starts
	 */
	if (!(ohead->oh_flags & XLOG_START_TRANS))
		return NULL;

	ASSERT(be32_to_cpu(ohead->oh_len) == 0);

	/*
	 * This is a new transaction so allocate a new recovery container to
	 * hold the recovery ops that will follow.
	 */
	trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, rhp);

	/*
	 * Nothing more to do for this ophdr. Items to be added to this new
	 * transaction will be in subsequent ophdr containers.
	 */
	return NULL;
}

STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	char			*dp,
	char			*end,
	int			pass)
{
	struct xlog_recover	*trans;
	unsigned int		len;

	/* Do we understand who wrote this op? */
	if (ohead->oh_clientid != XFS_TRANSACTION &&
	    ohead->oh_clientid != XFS_LOG) {
		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
			__func__, ohead->oh_clientid);
		ASSERT(0);
		return -EIO;
	}

	/*
	 * Check the ophdr contains all the data it is supposed to contain.
	 */
	len = be32_to_cpu(ohead->oh_len);
	if (dp + len > end) {
		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
		WARN_ON(1);
		return -EIO;
	}

	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do, so skip over this ophdr */
		return 0;
	}

	return xlog_recovery_process_trans(log, trans, dp, len,
					   ohead->oh_flags, pass);
}

/*
 * There are two valid states of the r_state field.  0 indicates that the
 * transaction structure is in a normal state.  We have either seen the
 * start of the transaction or the last operation we added was not a partial
 * operation.
 * If the last operation we added to the transaction was a partial operation,
 * we need to mark r_state with XLOG_WAS_CONT_TRANS.
 *
 * NOTE: skip LRs with 0 data length.
 */
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass)
{
	struct xlog_op_header	*ohead;
	char			*end;
	int			num_logops;
	int			error;

	end = dp + be32_to_cpu(rhead->h_len);
	num_logops = be32_to_cpu(rhead->h_num_logops);

	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return -EIO;

	while ((dp < end) && num_logops) {

		ohead = (struct xlog_op_header *)dp;
		dp += sizeof(*ohead);
		ASSERT(dp <= end);

		/* errors will abort recovery */
		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
						   dp, end, pass);
		if (error)
			return error;

		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}

/* Recover the EFI if necessary. */
STATIC int
xlog_recover_process_efi(
	struct xfs_mount		*mp,
	struct xfs_ail			*ailp,
	struct xfs_log_item		*lip)
{
	struct xfs_efi_log_item		*efip;
	int				error;

	/*
	 * Skip EFIs that we've already processed.
	 */
	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
	if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
		return 0;

	spin_unlock(&ailp->xa_lock);
	error = xfs_efi_recover(mp, efip);
	spin_lock(&ailp->xa_lock);

	return error;
}

/* Release the EFI since we're cancelling everything. */
STATIC void
xlog_recover_cancel_efi(
	struct xfs_mount		*mp,
	struct xfs_ail			*ailp,
	struct xfs_log_item		*lip)
{
	struct xfs_efi_log_item		*efip;

	efip = container_of(lip, struct xfs_efi_log_item, efi_item);

	spin_unlock(&ailp->xa_lock);
	xfs_efi_release(efip);
	spin_lock(&ailp->xa_lock);
}

/* Recover the RUI if necessary. */
STATIC int
xlog_recover_process_rui(
	struct xfs_mount		*mp,
	struct xfs_ail			*ailp,
	struct xfs_log_item		*lip)
{
	struct xfs_rui_log_item		*ruip;
	int				error;

	/*
	 * Skip RUIs that we've already processed.
	 */
	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
	if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
		return 0;

	spin_unlock(&ailp->xa_lock);
	error = xfs_rui_recover(mp, ruip);
	spin_lock(&ailp->xa_lock);

	return error;
}

/* Release the RUI since we're cancelling everything. */
STATIC void
xlog_recover_cancel_rui(
	struct xfs_mount		*mp,
	struct xfs_ail			*ailp,
	struct xfs_log_item		*lip)
{
	struct xfs_rui_log_item		*ruip;

	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);

	spin_unlock(&ailp->xa_lock);
	xfs_rui_release(ruip);
	spin_lock(&ailp->xa_lock);
}

/* Is this log item a deferred action intent? */
static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
{
	switch (lip->li_type) {
	case XFS_LI_EFI:
	case XFS_LI_RUI:
		return true;
	default:
		return false;
	}
}

/*
 * When this is called, all of the log intent items which did not have
 * corresponding log done items should be in the AIL.
 * What we do now is update the data structures associated with each one.
 *
 * Since we process the log intent items in normal transactions, they
 * will be removed at some point after the commit.  This prevents us
 * from just walking down the list processing each one.  We'll use a
 * flag in the intent item to skip those that we've already processed
 * and use the AIL iteration mechanism's generation count to try to
 * speed this up at least a bit.
 *
 * When we start, we know that the intents are the only things in the
 * AIL.  As we process them, however, other items are added to the
 * AIL.
 */
STATIC int
xlog_recover_process_intents(
	struct xlog		*log)
{
	struct xfs_log_item	*lip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;
	xfs_lsn_t		last_lsn;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an intent.
		 * There should be no intents left in the AIL now.
		 */
		if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(!xlog_item_is_intent(lip));
#endif
			break;
		}

		/*
		 * We should never see a redo item with an LSN higher than
		 * the last transaction we found in the log at the start
		 * of recovery.
		 */
		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);

		switch (lip->li_type) {
		case XFS_LI_EFI:
			error = xlog_recover_process_efi(log->l_mp, ailp, lip);
			break;
		case XFS_LI_RUI:
			error = xlog_recover_process_rui(log->l_mp, ailp, lip);
			break;
		}
		if (error)
			goto out;
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
out:
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}

/*
 * A cancel occurs when the mount has failed and we're bailing out.
 * Release all pending log intent items so they don't pin the AIL.
 */
STATIC int
xlog_recover_cancel_intents(
	struct xlog		*log)
{
	struct xfs_log_item	*lip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an intent.
		 * There should be no intents left in the AIL now.
		 */
		if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(!xlog_item_is_intent(lip));
#endif
			break;
		}

		switch (lip->li_type) {
		case XFS_LI_EFI:
			xlog_recover_cancel_efi(log->l_mp, ailp, lip);
			break;
		case XFS_LI_RUI:
			xlog_recover_cancel_rui(log->l_mp, ailp, lip);
			break;
		}

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}

/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
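 * The bucket head is overwritten with NULLAGINO and the change is logged,
 * cleanly terminating the list instead of leaving it pointing at an
 * unreadable inode.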
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;

	agi = XFS_BUF_TO_AGI(agibp);
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
	return;
}

STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	xfs_agino_t			agino,
	int				bucket)
{
	struct xfs_buf			*ibp;
	struct xfs_dinode		*dip;
	struct xfs_inode		*ip;
	xfs_ino_t			ino;
	int				error;

	ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;

	/*
	 * Get the on disk inode to find the next inode in the bucket.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
	if (error)
		goto fail_iput;

	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(VFS_I(ip)->i_mode != 0);

	/* setup for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);

	/*
	 * Prevent any DMAPI event from being sent when the reference on
	 * the inode is dropped.
	 */
	ip->i_d.di_dmevmask = 0;

	IRELE(ip);
	return agino;

 fail_iput:
	IRELE(ip);
 fail:
	/*
	 * We can't read in the inode this bucket points to, or this inode
	 * is messed up.  Just ditch this bucket of inodes.  We will lose
	 * some inodes and space, but at least we won't hang.
	 *
	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
	 * clear the inode pointer in the bucket.
	 */
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}

/*
 * xlog_recover_process_iunlinks
 *
 * This is called during recovery to process any inodes which we unlinked
 * but did not free when the system crashed.  These inodes will be on the
 * lists in the AGI blocks.  What we do here is scan all the AGIs and fully
 * truncate and free any inodes found on the lists.  Each inode is removed
 * from the lists when it has been fully truncated and is freed.  The
 * freeing of the inode and its removal from the list must be atomic.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_agino_t	agino;
	int		bucket;
	int		error;
	uint		mp_dmevmask;

	mp = log->l_mp;

	/*
	 * Prevent any DMAPI event from being sent while in this function.
	 */
	mp_dmevmask = mp->m_dmevmask;
	mp->m_dmevmask = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
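		 * Each AGI carries the heads of this AG's unlinked inode
		 * buckets.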
		 */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			/*
			 * AGI is b0rked. Don't process it.
			 *
			 * We should probably mark the filesystem as corrupt
			 * after we've recovered all the AGs we can....
			 */
			continue;
		}
		/*
		 * Unlock the buffer so that it can be acquired in the normal
		 * course of the transaction to truncate and free each inode.
		 * Because we are not racing with anyone else here for the AGI
		 * buffer, we don't even need to hold it locked to read the
		 * initial unlinked bucket entries out of the buffer. We keep
		 * a buffer reference, though, so that it stays pinned in
		 * memory while we need the buffer.
		 */
		agi = XFS_BUF_TO_AGI(agibp);
		xfs_buf_unlock(agibp);

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
							agno, agino, bucket);
			}
		}
		xfs_buf_rele(agibp);
	}

	mp->m_dmevmask = mp_dmevmask;
}

STATIC int
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	char			*dp,
	struct xlog		*log)
{
	int			i, j, k;

	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}

	return 0;
}

/*
 * CRC check, unpack and process a log record.
 */
STATIC int
xlog_recover_process(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass)
{
	int			error;
	__le32			crc;

	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));

	/*
	 * Nothing else to do if this is a CRC verification pass. Just return
	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
	 * sets h_crc to 0 so we must consider this valid even on v5 supers.
	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
	 * know precisely what failed.
	 */
	if (pass == XLOG_RECOVER_CRCPASS) {
		if (rhead->h_crc && crc != rhead->h_crc)
			return -EFSBADCRC;
		return 0;
	}

	/*
	 * We're in the normal recovery path. Issue a warning if and only if the
	 * CRC in the header is non-zero. This is an advisory warning and the
	 * zero CRC check prevents warnings from being emitted when upgrading
	 * the kernel from one that does not add CRCs by default.
	 */
	if (crc != rhead->h_crc) {
		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
			xfs_alert(log->l_mp,
		"log record CRC mismatch: found 0x%x, expected 0x%x.",
				  le32_to_cpu(rhead->h_crc),
				  le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}

		/*
		 * If the filesystem is CRC enabled, this mismatch becomes a
		 * fatal log corruption failure.
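		 * On a v5 (CRC-enabled) filesystem a valid record CRC is
		 * mandatory, so the record contents cannot be trusted and
		 * must not be replayed.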
		 */
		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
			return -EFSCORRUPTED;
	}

	error = xlog_unpack_data(rhead, dp, log);
	if (error)
		return error;

	return xlog_recover_process_data(log, rhash, rhead, dp, pass);
}

STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno)
{
	int			hlen;

	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	if (unlikely(
	    (!rhead->h_version ||
	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return -EIO;
	}

	/* LR body must have data or it wouldn't have been written */
	hlen = be32_to_cpu(rhead->h_len);
	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately.  The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass,
	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_daddr_t		rhead_blk;
	char			*offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size, h_len;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];

	ASSERT(head_blk != tail_blk);
	rhead_blk = 0;

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size.  Use this to tell how many sectors make up the log header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return -ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;

		/*
		 * xfsprogs has a bug where record length is based on lsunit but
		 * h_size (iclog size) is hardcoded to 32k. Now that we
		 * unconditionally CRC verify the unmount record, this means the
		 * log buffer can be too small for the record and cause an
		 * overrun.
		 *
		 * Detect this condition here. Use lsunit for the buffer size as
		 * long as this looks like the mkfs case. Otherwise, return an
		 * error to avoid a buffer overrun.
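		 * For example (illustrative numbers), a log formatted with a
		 * 64k lsunit can contain an unmount record with h_len up to
		 * 64k while h_size still reports the hardcoded 32k, so
		 * h_len > h_size alone does not prove corruption.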
		 */
		h_size = be32_to_cpu(rhead->h_size);
		h_len = be32_to_cpu(rhead->h_len);
		if (h_len > h_size) {
			if (h_len <= log->l_mp->m_logbsize &&
			    be32_to_cpu(rhead->h_num_logops) == 1) {
				xfs_warn(log->l_mp,
		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
					 h_size, log->l_mp->m_logbsize);
				h_size = log->l_mp->m_logbsize;
			} else
				return -EFSCORRUPTED;
		}

		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return -ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return -ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	blk_no = rhead_blk = tail_blk;
	if (tail_blk > head_blk) {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery.
		 */
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp->b_addr;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_offset(log, 0,
						wrapped_hblks, hbp,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				error = xlog_bread(log, blk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp->b_addr;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				error = xlog_bread_offset(log, 0,
						bblks - split_bblks, dbp,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_recover_process(log, rhash, rhead, offset,
						     pass);
			if (error)
				goto bread_err2;

			blk_no += bblks;
			rhead_blk = blk_no;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;
		rhead_blk = blk_no;
	}

	/* read first part of physical log */
	while (blk_no < head_blk) {
		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
		if (error)
			goto bread_err2;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, blk_no);
		if (error)
			goto bread_err2;

		/* blocks in data section */
		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
				   &offset);
		if (error)
			goto bread_err2;

		error = xlog_recover_process(log, rhash, rhead, offset, pass);
		if (error)
			goto bread_err2;

		blk_no += bblks + hblks;
		rhead_blk = blk_no;
	}

 bread_err2:
	xlog_put_bp(dbp);
 bread_err1:
	xlog_put_bp(hbp);

	if (error && first_bad)
		*first_bad = rhead_blk;

	return error;
}

/*
 * Do the recovery of the log.  We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log.  The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled.  The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error, i;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
						 sizeof(struct list_head),
						 KM_SLEEP);
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);

	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1, NULL);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table);
		log->l_buf_cancel_table = NULL;
		return error;
	}
	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete, free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2, NULL);
#ifdef DEBUG
	if (!error) {
		int	i;

		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
	}
#endif	/* DEBUG */

	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;

	return error;
}

/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	struct xfs_mount *mp = log->l_mp;
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		return -EIO;
	}

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * frees or iunlinks, we can free up the entire log and set the tail_lsn
	 * to be the last_sync_lsn.  This was set in xlog_find_tail to be the
	 * lsn of the last known good LR on disk.  If there are extent frees
	 * or iunlinks they will have some entries in the AIL; so we look at
	 * the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(mp);

	/*
	 * Now that we've finished replaying all buffer and inode
	 * updates, re-read in the superblock and reverify it.
	 */
	bp = xfs_getsb(mp, 0);
	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
	ASSERT(!(bp->b_flags & XBF_WRITE));
	bp->b_flags |= XBF_READ;
	bp->b_ops = &xfs_sb_buf_ops;

	error = xfs_buf_submit_wait(bp);
	if (error) {
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buf_ioerror_alert(bp, __func__);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	xfs_buf_relse(bp);

	/* re-initialise in-core superblock and geometry structures */
	xfs_reinit_percpu_counters(mp);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
		return error;
	}
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
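 *
 * The log head and tail are located first; recovery runs only if they
 * differ, i.e. if the log is dirty.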
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;

	/*
	 * The superblock was read before the log was available and thus the LSN
	 * could not be verified. Check the superblock LSN against the current
	 * LSN now that it's known.
	 */
	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
		return -EINVAL;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts.  note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Version 5 superblock log feature mask validation. We know the
		 * log is dirty so check if there are any unknown log features
		 * in what we need to recover. If there are unknown features
		 * (e.g. unsupported transactions), then simply reject the
		 * attempt at recovery before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			xfs_warn(log->l_mp,
"The log can not be fully and/or safely recovered by this kernel.");
			xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
			return -EINVAL;
		}

		/*
		 * Delay log recovery if the debug hook is set. This is debug
		 * instrumentation to coordinate simulation of I/O failures with
		 * log recovery.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}

/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.
	 * Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists.  At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;
		error = xlog_recover_process_intents(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover intents");
			return error;
		}

		/*
		 * Sync the log to get all the intents out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the intents out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}

int
xlog_recover_cancel(
	struct xlog	*log)
{
	int		error = 0;

	if (log->l_flags & XLOG_RECOVERY_NEEDED)
		error = xlog_recover_cancel_intents(log);

	return error;
}

#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
#endif	/* DEBUG */