/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_icreate_item.h"

/* Need all the magic numbers and buffer ops structures from these headers */
#include "xfs_symlink.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_attr_leaf.h"
#include "xfs_attr_remote.h"

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog	*);
#else
#define	xlog_recover_check_summary(log)
#endif

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */
static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}
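/*
 * Editor's note (not part of the original source): the code below works
 * in units of "basic blocks" of BBSIZE (512) bytes.  A minimal sketch of
 * the conversions used throughout this file, assuming the standard XFS
 * macros from xfs_types.h:
 *
 *	int bytes = BBTOB(nbblks);	// nbblks << BBSHIFT, i.e. * 512
 *	int bb    = BTOBB(bytes);	// round up: (bytes + 511) >> 9
 *
 * So a 4096 byte filesystem block is 8 basic blocks, and a log record of
 * h_len bytes occupies BTOBB(h_len) basic blocks.
 */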
/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	xfs_caddr_t	*offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}
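/*
 * Editor's sketch (not in the original source) of how the helpers above
 * cooperate, assuming a log with l_sectBBsize == 8, i.e. 4k log sectors:
 *
 *	xfs_caddr_t	offset;
 *
 *	error = xlog_bread(log, 7, 1, bp, &offset);
 *
 * xlog_bread_noalign() widens the single-block read to the aligned
 * sector [0, 8) so the device sees a sector-sized, sector-aligned I/O,
 * and xlog_align() then returns the caller's block within the buffer:
 *
 *	offset == bp->b_addr + BBTOB(7 & (8 - 1))
 */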
/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	xfs_caddr_t	offset)
{
	xfs_caddr_t	orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_force_shutdown(bp->b_target->bt_mount,
					SHUTDOWN_META_IO_ERROR);
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	xfs_caddr_t	offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
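/*
 * Editor's illustration (not in the original source) of the binary
 * search above.  Suppose a 10-block log stamped:
 *
 *	block:  0  1  2  3  4  5  6  7  8  9
 *	cycle:  2  2  2  2  1  1  1  1  1  1
 *
 * and we search for the first block with cycle == 1 starting from
 * first_blk = 0, *last_blk = 9.  The loop maintains the invariant that
 * the cycle at first_blk differs from 'cycle' while the cycle at
 * end_blk equals it, and halves the interval each pass:
 *
 *	mid = 4 -> cycle 1 -> end_blk = 4
 *	mid = 2 -> cycle 2 -> first_blk = 2
 *	mid = 3 -> cycle 2 -> first_blk = 3
 *
 * first_blk and end_blk are now adjacent and *last_blk = 4 is returned.
 * As the comment above notes, torn writes mean the result is only
 * approximate, which is why callers re-verify with
 * xlog_find_verify_cycle().
 */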
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	xfs_caddr_t	buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}
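/*
 * Editor's note on the allocation strategy above, with concrete numbers
 * (assumptions, not from the original source): with nbblks == 2048 and
 * l_sectBBsize == 8, bufblks starts at 1 << ffs(2048) == 4096.  If
 * xlog_get_bp() cannot allocate that much, the loop retries with 2048,
 * 1024, ... and only gives up with ENOMEM once the buffer would be
 * smaller than one log sector (8 blocks), the minimum unit we can read.
 */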
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = XFS_ERROR(EIO);
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = -1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
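/*
 * Editor's worked example for the xhdrs calculation in
 * xlog_find_verify_log_record() above (the numbers are illustrative):
 * a v2 log with a 256k iclog size has h_size == 262144 and
 * XLOG_HEADER_CYCLE_SIZE == 32768, so h_size / XLOG_HEADER_CYCLE_SIZE
 * == 8 with no remainder, and the record is preceded by 8 header
 * blocks.  A v1 log always uses a single header block, hence the
 * xhdrs == 1 in the else branch.
 */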
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	} else if (error) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *        v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
			goto bp_err;
		} else if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							start_blk, &new_blk,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
				goto bp_err;
			} else if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}
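/*
 * Editor's note (not in the original source): the special value
 * head_blk == log_bbnum used above is folded back to 0 on return
 * because the log is circular, so "one past the last block" and
 * "block 0" name the same position.  E.g. in a log of 1000 basic
 * blocks that is entirely stamped with cycle 5 and has no cycle-4
 * hole, the search leaves head_blk == 1000 and the caller sees
 * *return_head_blk == 0.
 */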
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_buf_t		*bp;
	int			error, i, found;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	xfs_lsn_t		tail_lsn;
	int			hblks;

	found = 0;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards looking for log record header block
	 */
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto done;

		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			found = 1;
			break;
		}
	}
	/*
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log.  XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
	 * the previous code.
	 */
	if (!found) {
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto done;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				found = 2;
				break;
			}
		}
	}
	if (!found) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		ASSERT(0);
		return XFS_ERROR(EIO);
	}

	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (found == 2)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));

	/*
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = (i + hblks + (int)
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
	tail_lsn = atomic64_read(&log->l_tail_lsn);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			goto done;

		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			/*
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			 */
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
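/*
 * Editor's sketch of the LSN encoding relied on above (standard XFS
 * macros; the values are illustrative, not from the original source):
 *
 *	xfs_lsn_t lsn = xlog_assign_lsn(cycle, block);
 *	// lsn == ((xfs_lsn_t)cycle << 32) | block
 *
 *	uint cycle = CYCLE_LSN(lsn);	// high 32 bits
 *	int  block = BLOCK_LSN(lsn);	// low 32 bits
 *
 * This is why the record header's h_tail_lsn can be converted to a tail
 * block number with a single BLOCK_LSN() in xlog_find_tail(): an LSN is
 * just a (cycle, basic block) pair packed into 64 bits.
 */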
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	-1 => use *blk_no as the first block of the log
 *	>0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return -1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		return XFS_ERROR(EINVAL);
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the
	 * maximum is not chosen to mean anything special.  XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					(int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	if ((error = xlog_find_verify_log_record(log, start_blk,
				&last_blk, 0)) == -1) {
		error = XFS_ERROR(EIO);
		goto bp_err;
	} else if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
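/*
 * Editor's sketch (assumed, illustrative values) of what one of these
 * stamped blocks looks like: clearing with cycle 4, tail at (5, 120),
 * block 700 would be filled by
 *
 *	xlog_add_record(log, buf, 4, 700, 5, 120);
 *
 * leaving a BBSIZE block whose only meaningful contents are the header
 * fields set above: the log record magic, h_cycle == 4,
 * h_lsn == xlog_assign_lsn(4, 700), h_tail_lsn == xlog_assign_lsn(5, 120),
 * the format and the filesystem uuid.  Recovery later treats such a
 * block as an empty record header.
 */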
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
						  bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_put_bp:
	xlog_put_bp(bp);
	return error;
}
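/*
 * Editor's worked example for the wrap-around case handled below
 * (illustrative numbers): with a 1000 block log, head at
 * (cycle 5, block 900), tail at (cycle 5, block 120) and
 * max_distance == 220, the first write stamps blocks 900..999 with
 * cycle 4, and the second write stamps blocks 0..119 with cycle 5,
 * giving the "n ... | n - 1 ..." picture described below.
 */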
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}

/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

STATIC xlog_recover_t *
xlog_recover_find_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid)
{
	xlog_recover_t		*trans;

	hlist_for_each_entry(trans, head, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}
	return NULL;
}

STATIC void
xlog_recover_new_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = lsn;
	INIT_LIST_HEAD(&trans->r_itemq);

	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, head);
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len); /* d, s, l */
		return 0;
	}
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len); /* d, s, l */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len); /* d, s, l */
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
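/*
 * Editor's note (an assumption based on the log format structures, not
 * stated in this file): the "first 32 bits" trick above works because,
 * for example, struct xfs_inode_log_format begins
 *
 *	__uint16_t	ilf_type;	// item type, matches ITEM_TYPE()
 *	__uint16_t	ilf_size;	// number of regions in the item
 *
 * so any region of at least 4 bytes is enough to read both the item
 * type and the region count, whichever log item type it really is.
 */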
/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using
 *	   the ICREATE transactions, this means XFS_LI_ICREATE objects need
 *	   to get treated the same as inode allocation buffers as they create
 *	   and initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are
 *	   replayed. This ensures that inodes are completely flushed to the
 *	   inode buffer in a "free" state before we remove the unlinked inode
 *	   list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
	}
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return 0;
}
/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}
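/*
 * Editor's example (not in the original source): if a transaction in
 * the log cancels the same buffer twice (say blkno 100, len 1), pass 1
 * leaves a single xfs_buf_cancel entry with bc_refcount == 2 in the
 * bucket returned by XLOG_BUF_CANCEL_BUCKET(log, 100).  Pass 2 then
 * decrements the count once per cancel record it meets (see
 * xlog_check_buffer_cancelled() below), so the entry survives exactly
 * until the last cancellation, after which a re-use of those blocks is
 * replayed normally.
 */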
1821 */ 1822 if (flags & XFS_BLF_CANCEL) { 1823 if (--bcp->bc_refcount == 0) { 1824 list_del(&bcp->bc_list); 1825 kmem_free(bcp); 1826 } 1827 } 1828 return 1; 1829 } 1830 1831 /* 1832 * Perform recovery for a buffer full of inodes. In these buffers, the only 1833 * data which should be recovered is that which corresponds to the 1834 * di_next_unlinked pointers in the on disk inode structures. The rest of the 1835 * data for the inodes is always logged through the inodes themselves rather 1836 * than the inode buffer and is recovered in xlog_recover_inode_pass2(). 1837 * 1838 * The only time when buffers full of inodes are fully recovered is when the 1839 * buffer is full of newly allocated inodes. In this case the buffer will 1840 * not be marked as an inode buffer and so will be sent to 1841 * xlog_recover_do_reg_buffer() below during recovery. 1842 */ 1843 STATIC int 1844 xlog_recover_do_inode_buffer( 1845 struct xfs_mount *mp, 1846 xlog_recover_item_t *item, 1847 struct xfs_buf *bp, 1848 xfs_buf_log_format_t *buf_f) 1849 { 1850 int i; 1851 int item_index = 0; 1852 int bit = 0; 1853 int nbits = 0; 1854 int reg_buf_offset = 0; 1855 int reg_buf_bytes = 0; 1856 int next_unlinked_offset; 1857 int inodes_per_buf; 1858 xfs_agino_t *logged_nextp; 1859 xfs_agino_t *buffer_nextp; 1860 1861 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); 1862 1863 /* 1864 * Post recovery validation only works properly on CRC enabled 1865 * filesystems. 1866 */ 1867 if (xfs_sb_version_hascrc(&mp->m_sb)) 1868 bp->b_ops = &xfs_inode_buf_ops; 1869 1870 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog; 1871 for (i = 0; i < inodes_per_buf; i++) { 1872 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + 1873 offsetof(xfs_dinode_t, di_next_unlinked); 1874 1875 while (next_unlinked_offset >= 1876 (reg_buf_offset + reg_buf_bytes)) { 1877 /* 1878 * The next di_next_unlinked field is beyond 1879 * the current logged region. Find the next 1880 * logged region that contains or is beyond 1881 * the current di_next_unlinked field. 1882 */ 1883 bit += nbits; 1884 bit = xfs_next_bit(buf_f->blf_data_map, 1885 buf_f->blf_map_size, bit); 1886 1887 /* 1888 * If there are no more logged regions in the 1889 * buffer, then we're done. 1890 */ 1891 if (bit == -1) 1892 return 0; 1893 1894 nbits = xfs_contig_bits(buf_f->blf_data_map, 1895 buf_f->blf_map_size, bit); 1896 ASSERT(nbits > 0); 1897 reg_buf_offset = bit << XFS_BLF_SHIFT; 1898 reg_buf_bytes = nbits << XFS_BLF_SHIFT; 1899 item_index++; 1900 } 1901 1902 /* 1903 * If the current logged region starts after the current 1904 * di_next_unlinked field, then move on to the next 1905 * di_next_unlinked field. 1906 */ 1907 if (next_unlinked_offset < reg_buf_offset) 1908 continue; 1909 1910 ASSERT(item->ri_buf[item_index].i_addr != NULL); 1911 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); 1912 ASSERT((reg_buf_offset + reg_buf_bytes) <= 1913 BBTOB(bp->b_io_length)); 1914 1915 /* 1916 * The current logged region contains a copy of the 1917 * current di_next_unlinked field. Extract its value 1918 * and copy it to the buffer copy. 1919 */ 1920 logged_nextp = item->ri_buf[item_index].i_addr + 1921 next_unlinked_offset - reg_buf_offset; 1922 if (unlikely(*logged_nextp == 0)) { 1923 xfs_alert(mp, 1924 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). 
" 1925 "Trying to replay bad (0) inode di_next_unlinked field.", 1926 item, bp); 1927 XFS_ERROR_REPORT("xlog_recover_do_inode_buf", 1928 XFS_ERRLEVEL_LOW, mp); 1929 return XFS_ERROR(EFSCORRUPTED); 1930 } 1931 1932 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, 1933 next_unlinked_offset); 1934 *buffer_nextp = *logged_nextp; 1935 1936 /* 1937 * If necessary, recalculate the CRC in the on-disk inode. We 1938 * have to leave the inode in a consistent state for whoever 1939 * reads it next.... 1940 */ 1941 xfs_dinode_calc_crc(mp, (struct xfs_dinode *) 1942 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); 1943 1944 } 1945 1946 return 0; 1947 } 1948 1949 /* 1950 * Validate the recovered buffer is of the correct type and attach the 1951 * appropriate buffer operations to them for writeback. Magic numbers are in a 1952 * few places: 1953 * the first 16 bits of the buffer (inode buffer, dquot buffer), 1954 * the first 32 bits of the buffer (most blocks), 1955 * inside a struct xfs_da_blkinfo at the start of the buffer. 1956 */ 1957 static void 1958 xlog_recovery_validate_buf_type( 1959 struct xfs_mount *mp, 1960 struct xfs_buf *bp, 1961 xfs_buf_log_format_t *buf_f) 1962 { 1963 struct xfs_da_blkinfo *info = bp->b_addr; 1964 __uint32_t magic32; 1965 __uint16_t magic16; 1966 __uint16_t magicda; 1967 1968 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); 1969 magic16 = be16_to_cpu(*(__be16*)bp->b_addr); 1970 magicda = be16_to_cpu(info->magic); 1971 switch (xfs_blft_from_flags(buf_f)) { 1972 case XFS_BLFT_BTREE_BUF: 1973 switch (magic32) { 1974 case XFS_ABTB_CRC_MAGIC: 1975 case XFS_ABTC_CRC_MAGIC: 1976 case XFS_ABTB_MAGIC: 1977 case XFS_ABTC_MAGIC: 1978 bp->b_ops = &xfs_allocbt_buf_ops; 1979 break; 1980 case XFS_IBT_CRC_MAGIC: 1981 case XFS_IBT_MAGIC: 1982 bp->b_ops = &xfs_inobt_buf_ops; 1983 break; 1984 case XFS_BMAP_CRC_MAGIC: 1985 case XFS_BMAP_MAGIC: 1986 bp->b_ops = &xfs_bmbt_buf_ops; 1987 break; 1988 default: 1989 xfs_warn(mp, "Bad btree block magic!"); 1990 ASSERT(0); 1991 break; 1992 } 1993 break; 1994 case XFS_BLFT_AGF_BUF: 1995 if (magic32 != XFS_AGF_MAGIC) { 1996 xfs_warn(mp, "Bad AGF block magic!"); 1997 ASSERT(0); 1998 break; 1999 } 2000 bp->b_ops = &xfs_agf_buf_ops; 2001 break; 2002 case XFS_BLFT_AGFL_BUF: 2003 if (!xfs_sb_version_hascrc(&mp->m_sb)) 2004 break; 2005 if (magic32 != XFS_AGFL_MAGIC) { 2006 xfs_warn(mp, "Bad AGFL block magic!"); 2007 ASSERT(0); 2008 break; 2009 } 2010 bp->b_ops = &xfs_agfl_buf_ops; 2011 break; 2012 case XFS_BLFT_AGI_BUF: 2013 if (magic32 != XFS_AGI_MAGIC) { 2014 xfs_warn(mp, "Bad AGI block magic!"); 2015 ASSERT(0); 2016 break; 2017 } 2018 bp->b_ops = &xfs_agi_buf_ops; 2019 break; 2020 case XFS_BLFT_UDQUOT_BUF: 2021 case XFS_BLFT_PDQUOT_BUF: 2022 case XFS_BLFT_GDQUOT_BUF: 2023 #ifdef CONFIG_XFS_QUOTA 2024 if (magic16 != XFS_DQUOT_MAGIC) { 2025 xfs_warn(mp, "Bad DQUOT block magic!"); 2026 ASSERT(0); 2027 break; 2028 } 2029 bp->b_ops = &xfs_dquot_buf_ops; 2030 #else 2031 xfs_alert(mp, 2032 "Trying to recover dquots without QUOTA support built in!"); 2033 ASSERT(0); 2034 #endif 2035 break; 2036 case XFS_BLFT_DINO_BUF: 2037 /* 2038 * we get here with inode allocation buffers, not buffers that 2039 * track unlinked list changes. 
2040 */ 2041 if (magic16 != XFS_DINODE_MAGIC) { 2042 xfs_warn(mp, "Bad INODE block magic!"); 2043 ASSERT(0); 2044 break; 2045 } 2046 bp->b_ops = &xfs_inode_buf_ops; 2047 break; 2048 case XFS_BLFT_SYMLINK_BUF: 2049 if (magic32 != XFS_SYMLINK_MAGIC) { 2050 xfs_warn(mp, "Bad symlink block magic!"); 2051 ASSERT(0); 2052 break; 2053 } 2054 bp->b_ops = &xfs_symlink_buf_ops; 2055 break; 2056 case XFS_BLFT_DIR_BLOCK_BUF: 2057 if (magic32 != XFS_DIR2_BLOCK_MAGIC && 2058 magic32 != XFS_DIR3_BLOCK_MAGIC) { 2059 xfs_warn(mp, "Bad dir block magic!"); 2060 ASSERT(0); 2061 break; 2062 } 2063 bp->b_ops = &xfs_dir3_block_buf_ops; 2064 break; 2065 case XFS_BLFT_DIR_DATA_BUF: 2066 if (magic32 != XFS_DIR2_DATA_MAGIC && 2067 magic32 != XFS_DIR3_DATA_MAGIC) { 2068 xfs_warn(mp, "Bad dir data magic!"); 2069 ASSERT(0); 2070 break; 2071 } 2072 bp->b_ops = &xfs_dir3_data_buf_ops; 2073 break; 2074 case XFS_BLFT_DIR_FREE_BUF: 2075 if (magic32 != XFS_DIR2_FREE_MAGIC && 2076 magic32 != XFS_DIR3_FREE_MAGIC) { 2077 xfs_warn(mp, "Bad dir3 free magic!"); 2078 ASSERT(0); 2079 break; 2080 } 2081 bp->b_ops = &xfs_dir3_free_buf_ops; 2082 break; 2083 case XFS_BLFT_DIR_LEAF1_BUF: 2084 if (magicda != XFS_DIR2_LEAF1_MAGIC && 2085 magicda != XFS_DIR3_LEAF1_MAGIC) { 2086 xfs_warn(mp, "Bad dir leaf1 magic!"); 2087 ASSERT(0); 2088 break; 2089 } 2090 bp->b_ops = &xfs_dir3_leaf1_buf_ops; 2091 break; 2092 case XFS_BLFT_DIR_LEAFN_BUF: 2093 if (magicda != XFS_DIR2_LEAFN_MAGIC && 2094 magicda != XFS_DIR3_LEAFN_MAGIC) { 2095 xfs_warn(mp, "Bad dir leafn magic!"); 2096 ASSERT(0); 2097 break; 2098 } 2099 bp->b_ops = &xfs_dir3_leafn_buf_ops; 2100 break; 2101 case XFS_BLFT_DA_NODE_BUF: 2102 if (magicda != XFS_DA_NODE_MAGIC && 2103 magicda != XFS_DA3_NODE_MAGIC) { 2104 xfs_warn(mp, "Bad da node magic!"); 2105 ASSERT(0); 2106 break; 2107 } 2108 bp->b_ops = &xfs_da3_node_buf_ops; 2109 break; 2110 case XFS_BLFT_ATTR_LEAF_BUF: 2111 if (magicda != XFS_ATTR_LEAF_MAGIC && 2112 magicda != XFS_ATTR3_LEAF_MAGIC) { 2113 xfs_warn(mp, "Bad attr leaf magic!"); 2114 ASSERT(0); 2115 break; 2116 } 2117 bp->b_ops = &xfs_attr3_leaf_buf_ops; 2118 break; 2119 case XFS_BLFT_ATTR_RMT_BUF: 2120 if (!xfs_sb_version_hascrc(&mp->m_sb)) 2121 break; 2122 if (magic32 != XFS_ATTR3_RMT_MAGIC) { 2123 xfs_warn(mp, "Bad attr remote magic!"); 2124 ASSERT(0); 2125 break; 2126 } 2127 bp->b_ops = &xfs_attr3_rmt_buf_ops; 2128 break; 2129 case XFS_BLFT_SB_BUF: 2130 if (magic32 != XFS_SB_MAGIC) { 2131 xfs_warn(mp, "Bad SB block magic!"); 2132 ASSERT(0); 2133 break; 2134 } 2135 bp->b_ops = &xfs_sb_buf_ops; 2136 break; 2137 default: 2138 xfs_warn(mp, "Unknown buffer type %d!", 2139 xfs_blft_from_flags(buf_f)); 2140 break; 2141 } 2142 } 2143 2144 /* 2145 * Perform a 'normal' buffer recovery. Each logged region of the 2146 * buffer should be copied over the corresponding region in the 2147 * given buffer. The bitmap in the buf log format structure indicates 2148 * where to place the logged data. 
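 *
 * As a worked example: each map bit covers one XFS_BLF_CHUNK
 * (1 << XFS_BLF_SHIFT, i.e. 128) bytes of the buffer, so a run found by
 * xfs_next_bit()/xfs_contig_bits() of bit == 2, nbits == 3 describes
 * buffer bytes [256, 640) and is copied with:
 *
 *	memcpy(xfs_buf_offset(bp, 2 << XFS_BLF_SHIFT),	/* dest: 256 */
 *	       item->ri_buf[i].i_addr,			/* logged copy */
 *	       3 << XFS_BLF_SHIFT);			/* 384 bytes */
 *
 * (values chosen for illustration; the loop below does exactly this.)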
2149 */ 2150 STATIC void 2151 xlog_recover_do_reg_buffer( 2152 struct xfs_mount *mp, 2153 xlog_recover_item_t *item, 2154 struct xfs_buf *bp, 2155 xfs_buf_log_format_t *buf_f) 2156 { 2157 int i; 2158 int bit; 2159 int nbits; 2160 int error; 2161 2162 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); 2163 2164 bit = 0; 2165 i = 1; /* 0 is the buf format structure */ 2166 while (1) { 2167 bit = xfs_next_bit(buf_f->blf_data_map, 2168 buf_f->blf_map_size, bit); 2169 if (bit == -1) 2170 break; 2171 nbits = xfs_contig_bits(buf_f->blf_data_map, 2172 buf_f->blf_map_size, bit); 2173 ASSERT(nbits > 0); 2174 ASSERT(item->ri_buf[i].i_addr != NULL); 2175 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); 2176 ASSERT(BBTOB(bp->b_io_length) >= 2177 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT)); 2178 2179 /* 2180 * The dirty regions logged in the buffer, even though 2181 * contiguous, may span multiple chunks. This is because the 2182 * dirty region may span a physical page boundary in a buffer 2183 * and hence be split into two separate vectors for writing into 2184 * the log. Hence we need to trim nbits back to the length of 2185 * the current region being copied out of the log. 2186 */ 2187 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT)) 2188 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT; 2189 2190 /* 2191 * Do a sanity check if this is a dquot buffer. Just checking 2192 * the first dquot in the buffer should do. XXX This is 2193 * probably a good thing to do for other buf types also. 2194 */ 2195 error = 0; 2196 if (buf_f->blf_flags & 2197 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 2198 if (item->ri_buf[i].i_addr == NULL) { 2199 xfs_alert(mp, 2200 "XFS: NULL dquot in %s.", __func__); 2201 goto next; 2202 } 2203 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) { 2204 xfs_alert(mp, 2205 "XFS: dquot too small (%d) in %s.", 2206 item->ri_buf[i].i_len, __func__); 2207 goto next; 2208 } 2209 error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr, 2210 -1, 0, XFS_QMOPT_DOWARN, 2211 "dquot_buf_recover"); 2212 if (error) 2213 goto next; 2214 } 2215 2216 memcpy(xfs_buf_offset(bp, 2217 (uint)bit << XFS_BLF_SHIFT), /* dest */ 2218 item->ri_buf[i].i_addr, /* source */ 2219 nbits<<XFS_BLF_SHIFT); /* length */ 2220 next: 2221 i++; 2222 bit += nbits; 2223 } 2224 2225 /* Shouldn't be any more regions */ 2226 ASSERT(i == item->ri_total); 2227 2228 /* 2229 * We can only do post recovery validation on items on CRC enabled 2230 * filesystems as we need to know when the buffer was written to be able 2231 * to determine if we should have replayed the item. If we replay old 2232 * metadata over a newer buffer, then it will enter a temporarily 2233 * inconsistent state resulting in verification failures. Hence for now 2234 * just avoid the verification stage for non-CRC filesystems. 2235 */ 2236 if (xfs_sb_version_hascrc(&mp->m_sb)) 2237 xlog_recovery_validate_buf_type(mp, bp, buf_f); 2238 } 2239 2240 /* 2241 * Do some primitive error checking on ondisk dquot data structures. 2242 */ 2243 int 2244 xfs_qm_dqcheck( 2245 struct xfs_mount *mp, 2246 xfs_disk_dquot_t *ddq, 2247 xfs_dqid_t id, 2248 uint type, /* used only when XFS_QMOPT_DQREPAIR is set */ 2249 uint flags, 2250 char *str) 2251 { 2252 xfs_dqblk_t *d = (xfs_dqblk_t *)ddq; 2253 int errs = 0; 2254 2255 /* 2256 * We can encounter an uninitialized dquot buffer for 2 reasons: 2257 * 1. If we crash while deleting the quotainode(s), and those blks got 2258 * used for user data.
This is because we take the path of regular 2259 * file deletion; however, the size field of quotainodes is never 2260 * updated, so all the tricks that we play in itruncate_finish 2261 * don't quite matter. 2262 * 2263 * 2. We don't replay the quota buffers when there's a quotaoff logitem. 2264 * But the allocation will be replayed so we'll end up with an 2265 * uninitialized quota block. 2266 * 2267 * This is all fine; things are still consistent, and we haven't lost 2268 * any quota information. Just don't complain about bad dquot blks. 2269 */ 2270 if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) { 2271 if (flags & XFS_QMOPT_DOWARN) 2272 xfs_alert(mp, 2273 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x", 2274 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC); 2275 errs++; 2276 } 2277 if (ddq->d_version != XFS_DQUOT_VERSION) { 2278 if (flags & XFS_QMOPT_DOWARN) 2279 xfs_alert(mp, 2280 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x", 2281 str, id, ddq->d_version, XFS_DQUOT_VERSION); 2282 errs++; 2283 } 2284 2285 if (ddq->d_flags != XFS_DQ_USER && 2286 ddq->d_flags != XFS_DQ_PROJ && 2287 ddq->d_flags != XFS_DQ_GROUP) { 2288 if (flags & XFS_QMOPT_DOWARN) 2289 xfs_alert(mp, 2290 "%s : XFS dquot ID 0x%x, unknown flags 0x%x", 2291 str, id, ddq->d_flags); 2292 errs++; 2293 } 2294 2295 if (id != -1 && id != be32_to_cpu(ddq->d_id)) { 2296 if (flags & XFS_QMOPT_DOWARN) 2297 xfs_alert(mp, 2298 "%s : ondisk-dquot 0x%p, ID mismatch: " 2299 "0x%x expected, found id 0x%x", 2300 str, ddq, id, be32_to_cpu(ddq->d_id)); 2301 errs++; 2302 } 2303 2304 if (!errs && ddq->d_id) { 2305 if (ddq->d_blk_softlimit && 2306 be64_to_cpu(ddq->d_bcount) > 2307 be64_to_cpu(ddq->d_blk_softlimit)) { 2308 if (!ddq->d_btimer) { 2309 if (flags & XFS_QMOPT_DOWARN) 2310 xfs_alert(mp, 2311 "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED", 2312 str, (int)be32_to_cpu(ddq->d_id), ddq); 2313 errs++; 2314 } 2315 } 2316 if (ddq->d_ino_softlimit && 2317 be64_to_cpu(ddq->d_icount) > 2318 be64_to_cpu(ddq->d_ino_softlimit)) { 2319 if (!ddq->d_itimer) { 2320 if (flags & XFS_QMOPT_DOWARN) 2321 xfs_alert(mp, 2322 "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED", 2323 str, (int)be32_to_cpu(ddq->d_id), ddq); 2324 errs++; 2325 } 2326 } 2327 if (ddq->d_rtb_softlimit && 2328 be64_to_cpu(ddq->d_rtbcount) > 2329 be64_to_cpu(ddq->d_rtb_softlimit)) { 2330 if (!ddq->d_rtbtimer) { 2331 if (flags & XFS_QMOPT_DOWARN) 2332 xfs_alert(mp, 2333 "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED", 2334 str, (int)be32_to_cpu(ddq->d_id), ddq); 2335 errs++; 2336 } 2337 } 2338 } 2339 2340 if (!errs || !(flags & XFS_QMOPT_DQREPAIR)) 2341 return errs; 2342 2343 if (flags & XFS_QMOPT_DOWARN) 2344 xfs_notice(mp, "Re-initializing dquot ID 0x%x", id); 2345 2346 /* 2347 * Typically, a repair is only requested by quotacheck. 2348 */ 2349 ASSERT(id != -1); 2350 ASSERT(flags & XFS_QMOPT_DQREPAIR); 2351 memset(d, 0, sizeof(xfs_dqblk_t)); 2352 2353 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); 2354 d->dd_diskdq.d_version = XFS_DQUOT_VERSION; 2355 d->dd_diskdq.d_flags = type; 2356 d->dd_diskdq.d_id = cpu_to_be32(id); 2357 2358 if (xfs_sb_version_hascrc(&mp->m_sb)) { 2359 uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid); 2360 xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk), 2361 XFS_DQUOT_CRC_OFF); 2362 } 2363 2364 return errs; 2365 } 2366 2367 /* 2368 * Perform a dquot buffer recovery. 2369 * Simple algorithm: if we have found a QUOTAOFF logitem of the same type 2370 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2371 * Else, treat it as a regular buffer and do recovery. 2372 */ 2373 STATIC void 2374 xlog_recover_do_dquot_buffer( 2375 struct xfs_mount *mp, 2376 struct xlog *log, 2377 struct xlog_recover_item *item, 2378 struct xfs_buf *bp, 2379 struct xfs_buf_log_format *buf_f) 2380 { 2381 uint type; 2382 2383 trace_xfs_log_recover_buf_dquot_buf(log, buf_f); 2384 2385 /* 2386 * Filesystems are required to send in quota flags at mount time. 2387 */ 2388 if (mp->m_qflags == 0) { 2389 return; 2390 } 2391 2392 type = 0; 2393 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) 2394 type |= XFS_DQ_USER; 2395 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF) 2396 type |= XFS_DQ_PROJ; 2397 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF) 2398 type |= XFS_DQ_GROUP; 2399 /* 2400 * Quotas of this type were turned off, so ignore this buffer. 2401 */ 2402 if (log->l_quotaoffs_flag & type) 2403 return; 2404 2405 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2406 } 2407 2408 /* 2409 * This routine replays a modification made to a buffer at runtime. 2410 * There are actually two types of buffer, regular and inode, which 2411 * are handled differently. For inode buffers we only recover 2412 * a specific set of data from them, namely 2413 * the inode di_next_unlinked fields. This is because all other inode 2414 * data is actually logged via inode records and any data we replay 2415 * here which overlaps that may be stale. 2416 * 2417 * When meta-data buffers are freed at run time we log a buffer item 2418 * with the XFS_BLF_CANCEL bit set to indicate that previous copies 2419 * of the buffer in the log should not be replayed at recovery time. 2420 * This is so that if the blocks covered by the buffer are reused for 2421 * file data before we crash we don't end up replaying old, freed 2422 * meta-data into a user's file. 2423 * 2424 * To handle the cancellation of buffer log items, we make two passes 2425 * over the log during recovery. During the first we build a table of 2426 * those buffers which have been cancelled, and during the second we 2427 * only replay those buffers which do not have corresponding cancel 2428 * records in the table. See xlog_recover_buffer_pass1() above and the 2429 * code below for more details on the implementation of the table of cancel records. 2430 */ 2431 STATIC int 2432 xlog_recover_buffer_pass2( 2433 struct xlog *log, 2434 struct list_head *buffer_list, 2435 struct xlog_recover_item *item) 2436 { 2437 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 2438 xfs_mount_t *mp = log->l_mp; 2439 xfs_buf_t *bp; 2440 int error; 2441 uint buf_flags; 2442 2443 /* 2444 * In this pass we only want to recover those buffers which have 2445 * not been cancelled and are not cancellation buffers themselves.
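 *
 * Concretely, with the pass 1 refcounting described above, a log
 * containing B1(blk 100), C(blk 100), B2(blk 100) in that order
 * replays as:
 *
 *	B1: table entry exists		-> skipped
 *	C:  XFS_BLF_CANCEL set		-> refcount--, entry freed at zero
 *	B2: no table entry remains	-> replayed normally
 *
 * which is how a buffer re-used after its last cancellation still gets
 * its new contents recovered.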
2446 */ 2447 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno, 2448 buf_f->blf_len, buf_f->blf_flags)) { 2449 trace_xfs_log_recover_buf_cancel(log, buf_f); 2450 return 0; 2451 } 2452 2453 trace_xfs_log_recover_buf_recover(log, buf_f); 2454 2455 buf_flags = 0; 2456 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) 2457 buf_flags |= XBF_UNMAPPED; 2458 2459 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, 2460 buf_flags, NULL); 2461 if (!bp) 2462 return XFS_ERROR(ENOMEM); 2463 error = bp->b_error; 2464 if (error) { 2465 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)"); 2466 xfs_buf_relse(bp); 2467 return error; 2468 } 2469 2470 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { 2471 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); 2472 } else if (buf_f->blf_flags & 2473 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 2474 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); 2475 } else { 2476 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2477 } 2478 if (error) { /* release the buffer so it is not leaked on error */ xfs_buf_relse(bp); 2479 return XFS_ERROR(error); } 2480 2481 /* 2482 * Perform delayed write on the buffer. Asynchronous writes will be 2483 * slower when taking into account all the buffers to be flushed. 2484 * 2485 * Also make sure that only inode buffers with good sizes stay in 2486 * the buffer cache. The kernel moves inodes in buffers of 1 block 2487 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode 2488 * buffers in the log can be a different size if the log was generated 2489 * by an older kernel using unclustered inode buffers or a newer kernel 2490 * running with a different inode cluster size. Regardless, if 2491 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE) 2492 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep 2493 * the buffer out of the buffer cache so that the buffer won't 2494 * overlap with future reads of those inodes. 2495 */ 2496 if (XFS_DINODE_MAGIC == 2497 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && 2498 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize, 2499 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { 2500 xfs_buf_stale(bp); 2501 error = xfs_bwrite(bp); 2502 } else { 2503 ASSERT(bp->b_target->bt_mount == mp); 2504 bp->b_iodone = xlog_recover_iodone; 2505 xfs_buf_delwri_queue(bp, buffer_list); 2506 } 2507 2508 xfs_buf_relse(bp); 2509 return error; 2510 } 2511 2512 STATIC int 2513 xlog_recover_inode_pass2( 2514 struct xlog *log, 2515 struct list_head *buffer_list, 2516 struct xlog_recover_item *item) 2517 { 2518 xfs_inode_log_format_t *in_f; 2519 xfs_mount_t *mp = log->l_mp; 2520 xfs_buf_t *bp; 2521 xfs_dinode_t *dip; 2522 int len; 2523 xfs_caddr_t src; 2524 xfs_caddr_t dest; 2525 int error; 2526 int attr_index; 2527 uint fields; 2528 xfs_icdinode_t *dicp; 2529 uint isize; 2530 int need_free = 0; 2531 2532 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { 2533 in_f = item->ri_buf[0].i_addr; 2534 } else { 2535 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP); 2536 need_free = 1; 2537 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); 2538 if (error) 2539 goto error; 2540 } 2541 2542 /* 2543 * Inode buffers can be freed; look out for that case 2544 * and do not replay the inode.
2545 */ 2546 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno, 2547 in_f->ilf_len, 0)) { 2548 error = 0; 2549 trace_xfs_log_recover_inode_cancel(log, in_f); 2550 goto error; 2551 } 2552 trace_xfs_log_recover_inode_recover(log, in_f); 2553 2554 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0, 2555 &xfs_inode_buf_ops); 2556 if (!bp) { 2557 error = ENOMEM; 2558 goto error; 2559 } 2560 error = bp->b_error; 2561 if (error) { 2562 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)"); 2563 xfs_buf_relse(bp); 2564 goto error; 2565 } 2566 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); 2567 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset); 2568 2569 /* 2570 * Make sure the place we're flushing out to really looks 2571 * like an inode! 2572 */ 2573 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) { 2574 xfs_buf_relse(bp); 2575 xfs_alert(mp, 2576 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", 2577 __func__, dip, bp, in_f->ilf_ino); 2578 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", 2579 XFS_ERRLEVEL_LOW, mp); 2580 error = EFSCORRUPTED; 2581 goto error; 2582 } 2583 dicp = item->ri_buf[1].i_addr; 2584 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { 2585 xfs_buf_relse(bp); 2586 xfs_alert(mp, 2587 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld", 2588 __func__, item, in_f->ilf_ino); 2589 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", 2590 XFS_ERRLEVEL_LOW, mp); 2591 error = EFSCORRUPTED; 2592 goto error; 2593 } 2594 2595 /* Skip replay when the on disk inode is newer than the log one */ 2596 if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) { 2597 /* 2598 * Deal with the wrap case, DI_MAX_FLUSH is less 2599 * than smaller numbers 2600 */ 2601 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && 2602 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) { 2603 /* do nothing */ 2604 } else { 2605 xfs_buf_relse(bp); 2606 trace_xfs_log_recover_inode_skip(log, in_f); 2607 error = 0; 2608 goto error; 2609 } 2610 } 2611 /* Take the opportunity to reset the flush iteration count */ 2612 dicp->di_flushiter = 0; 2613 2614 if (unlikely(S_ISREG(dicp->di_mode))) { 2615 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2616 (dicp->di_format != XFS_DINODE_FMT_BTREE)) { 2617 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", 2618 XFS_ERRLEVEL_LOW, mp, dicp); 2619 xfs_buf_relse(bp); 2620 xfs_alert(mp, 2621 "%s: Bad regular inode log record, rec ptr 0x%p, " 2622 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2623 __func__, item, dip, bp, in_f->ilf_ino); 2624 error = EFSCORRUPTED; 2625 goto error; 2626 } 2627 } else if (unlikely(S_ISDIR(dicp->di_mode))) { 2628 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2629 (dicp->di_format != XFS_DINODE_FMT_BTREE) && 2630 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { 2631 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", 2632 XFS_ERRLEVEL_LOW, mp, dicp); 2633 xfs_buf_relse(bp); 2634 xfs_alert(mp, 2635 "%s: Bad dir inode log record, rec ptr 0x%p, " 2636 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2637 __func__, item, dip, bp, in_f->ilf_ino); 2638 error = EFSCORRUPTED; 2639 goto error; 2640 } 2641 } 2642 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){ 2643 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", 2644 XFS_ERRLEVEL_LOW, mp, dicp); 2645 xfs_buf_relse(bp); 2646 xfs_alert(mp, 2647 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " 2648 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", 2649 __func__, item, dip, bp, in_f->ilf_ino, 2650 dicp->di_nextents + 
dicp->di_anextents, 2651 dicp->di_nblocks); 2652 error = EFSCORRUPTED; 2653 goto error; 2654 } 2655 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { 2656 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", 2657 XFS_ERRLEVEL_LOW, mp, dicp); 2658 xfs_buf_relse(bp); 2659 xfs_alert(mp, 2660 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " 2661 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, 2662 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); 2663 error = EFSCORRUPTED; 2664 goto error; 2665 } 2666 isize = xfs_icdinode_size(dicp->di_version); 2667 if (unlikely(item->ri_buf[1].i_len > isize)) { 2668 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", 2669 XFS_ERRLEVEL_LOW, mp, dicp); 2670 xfs_buf_relse(bp); 2671 xfs_alert(mp, 2672 "%s: Bad inode log record length %d, rec ptr 0x%p", 2673 __func__, item->ri_buf[1].i_len, item); 2674 error = EFSCORRUPTED; 2675 goto error; 2676 } 2677 2678 /* The core is in in-core format */ 2679 xfs_dinode_to_disk(dip, dicp); 2680 2681 /* the rest is in on-disk format */ 2682 if (item->ri_buf[1].i_len > isize) { 2683 memcpy((char *)dip + isize, 2684 item->ri_buf[1].i_addr + isize, 2685 item->ri_buf[1].i_len - isize); 2686 } 2687 2688 fields = in_f->ilf_fields; 2689 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) { 2690 case XFS_ILOG_DEV: 2691 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev); 2692 break; 2693 case XFS_ILOG_UUID: 2694 memcpy(XFS_DFORK_DPTR(dip), 2695 &in_f->ilf_u.ilfu_uuid, 2696 sizeof(uuid_t)); 2697 break; 2698 } 2699 2700 if (in_f->ilf_size == 2) 2701 goto write_inode_buffer; 2702 len = item->ri_buf[2].i_len; 2703 src = item->ri_buf[2].i_addr; 2704 ASSERT(in_f->ilf_size <= 4); 2705 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); 2706 ASSERT(!(fields & XFS_ILOG_DFORK) || 2707 (len == in_f->ilf_dsize)); 2708 2709 switch (fields & XFS_ILOG_DFORK) { 2710 case XFS_ILOG_DDATA: 2711 case XFS_ILOG_DEXT: 2712 memcpy(XFS_DFORK_DPTR(dip), src, len); 2713 break; 2714 2715 case XFS_ILOG_DBROOT: 2716 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, 2717 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip), 2718 XFS_DFORK_DSIZE(dip, mp)); 2719 break; 2720 2721 default: 2722 /* 2723 * There are no data fork flags set. 2724 */ 2725 ASSERT((fields & XFS_ILOG_DFORK) == 0); 2726 break; 2727 } 2728 2729 /* 2730 * If we logged any attribute data, recover it. There may or 2731 * may not have been any other non-core data logged in this 2732 * transaction. 2733 */ 2734 if (in_f->ilf_fields & XFS_ILOG_AFORK) { 2735 if (in_f->ilf_fields & XFS_ILOG_DFORK) { 2736 attr_index = 3; 2737 } else { 2738 attr_index = 2; 2739 } 2740 len = item->ri_buf[attr_index].i_len; 2741 src = item->ri_buf[attr_index].i_addr; 2742 ASSERT(len == in_f->ilf_asize); 2743 2744 switch (in_f->ilf_fields & XFS_ILOG_AFORK) { 2745 case XFS_ILOG_ADATA: 2746 case XFS_ILOG_AEXT: 2747 dest = XFS_DFORK_APTR(dip); 2748 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp)); 2749 memcpy(dest, src, len); 2750 break; 2751 2752 case XFS_ILOG_ABROOT: 2753 dest = XFS_DFORK_APTR(dip); 2754 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, 2755 len, (xfs_bmdr_block_t*)dest, 2756 XFS_DFORK_ASIZE(dip, mp)); 2757 break; 2758 2759 default: 2760 xfs_warn(log->l_mp, "%s: Invalid flag", __func__); 2761 ASSERT(0); 2762 xfs_buf_relse(bp); 2763 error = EIO; 2764 goto error; 2765 } 2766 } 2767 2768 write_inode_buffer: 2769 /* re-generate the checksum. 
*/ 2770 xfs_dinode_calc_crc(log->l_mp, dip); 2771 2772 ASSERT(bp->b_target->bt_mount == mp); 2773 bp->b_iodone = xlog_recover_iodone; 2774 xfs_buf_delwri_queue(bp, buffer_list); 2775 xfs_buf_relse(bp); 2776 error: 2777 if (need_free) 2778 kmem_free(in_f); 2779 return XFS_ERROR(error); 2780 } 2781 2782 /* 2783 * Recover QUOTAOFF records. We simply make a note of it in the xlog 2784 * structure, so that we know not to do any dquot item or dquot buffer recovery 2785 * of that type. 2786 */ 2787 STATIC int 2788 xlog_recover_quotaoff_pass1( 2789 struct xlog *log, 2790 struct xlog_recover_item *item) 2791 { 2792 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr; 2793 ASSERT(qoff_f); 2794 2795 /* 2796 * The logitem format's flag tells us if this was user quotaoff, 2797 * group/project quotaoff or both. 2798 */ 2799 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) 2800 log->l_quotaoffs_flag |= XFS_DQ_USER; 2801 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT) 2802 log->l_quotaoffs_flag |= XFS_DQ_PROJ; 2803 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) 2804 log->l_quotaoffs_flag |= XFS_DQ_GROUP; 2805 2806 return (0); 2807 } 2808 2809 /* 2810 * Recover a dquot record 2811 */ 2812 STATIC int 2813 xlog_recover_dquot_pass2( 2814 struct xlog *log, 2815 struct list_head *buffer_list, 2816 struct xlog_recover_item *item) 2817 { 2818 xfs_mount_t *mp = log->l_mp; 2819 xfs_buf_t *bp; 2820 struct xfs_disk_dquot *ddq, *recddq; 2821 int error; 2822 xfs_dq_logformat_t *dq_f; 2823 uint type; 2824 2825 2826 /* 2827 * Filesystems are required to send in quota flags at mount time. 2828 */ 2829 if (mp->m_qflags == 0) 2830 return (0); 2831 2832 recddq = item->ri_buf[1].i_addr; 2833 if (recddq == NULL) { 2834 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); 2835 return XFS_ERROR(EIO); 2836 } 2837 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { 2838 xfs_alert(log->l_mp, "dquot too small (%d) in %s.", 2839 item->ri_buf[1].i_len, __func__); 2840 return XFS_ERROR(EIO); 2841 } 2842 2843 /* 2844 * Quotas of this type were turned off, so ignore this record. 2845 */ 2846 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); 2847 ASSERT(type); 2848 if (log->l_quotaoffs_flag & type) 2849 return (0); 2850 2851 /* 2852 * At this point we know that quota was _not_ turned off. 2853 * Since the mount flags are not indicating to us otherwise, this 2854 * must mean that quota is on, and the dquot needs to be replayed. 2855 * Remember that we may not have fully recovered the superblock yet, 2856 * so we can't do the usual trick of looking at the SB quota bits. 2857 * 2858 * The other possibility, of course, is that the quota subsystem was 2859 * removed since the last mount - ENOSYS. 2860 */ 2861 dq_f = item->ri_buf[0].i_addr; 2862 ASSERT(dq_f); 2863 error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 2864 "xlog_recover_dquot_pass2 (log copy)"); 2865 if (error) 2866 return XFS_ERROR(EIO); 2867 ASSERT(dq_f->qlf_len == 1); 2868 2869 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno, 2870 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, 2871 NULL); 2872 if (error) 2873 return error; 2874 2875 ASSERT(bp); 2876 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset); 2877 2878 /* 2879 * At least the magic num portion should be on disk because this 2880 * was among a chunk of dquots created earlier, and we did some 2881 * minimal initialization then.
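 *
 * That earlier initialization leaves roughly this on disk (a sketch
 * mirroring the repair path in xfs_qm_dqcheck() above - magic, version,
 * type and id are set, while counters and timers are still zero):
 *
 *	d->dd_diskdq.d_magic   = cpu_to_be16(XFS_DQUOT_MAGIC);
 *	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
 *	d->dd_diskdq.d_flags   = type;
 *	d->dd_diskdq.d_id      = cpu_to_be32(id);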
2882 */ 2883 error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 2884 "xlog_recover_dquot_pass2"); 2885 if (error) { 2886 xfs_buf_relse(bp); 2887 return XFS_ERROR(EIO); 2888 } 2889 2890 memcpy(ddq, recddq, item->ri_buf[1].i_len); 2891 if (xfs_sb_version_hascrc(&mp->m_sb)) { 2892 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk), 2893 XFS_DQUOT_CRC_OFF); 2894 } 2895 2896 ASSERT(dq_f->qlf_size == 2); 2897 ASSERT(bp->b_target->bt_mount == mp); 2898 bp->b_iodone = xlog_recover_iodone; 2899 xfs_buf_delwri_queue(bp, buffer_list); 2900 xfs_buf_relse(bp); 2901 2902 return (0); 2903 } 2904 2905 /* 2906 * This routine is called to create an in-core extent free intent 2907 * item from the efi format structure which was logged on disk. 2908 * It allocates an in-core efi, copies the extents from the format 2909 * structure into it, and adds the efi to the AIL with the given 2910 * LSN. 2911 */ 2912 STATIC int 2913 xlog_recover_efi_pass2( 2914 struct xlog *log, 2915 struct xlog_recover_item *item, 2916 xfs_lsn_t lsn) 2917 { 2918 int error; 2919 xfs_mount_t *mp = log->l_mp; 2920 xfs_efi_log_item_t *efip; 2921 xfs_efi_log_format_t *efi_formatp; 2922 2923 efi_formatp = item->ri_buf[0].i_addr; 2924 2925 efip = xfs_efi_init(mp, efi_formatp->efi_nextents); 2926 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]), 2927 &(efip->efi_format)))) { 2928 xfs_efi_item_free(efip); 2929 return error; 2930 } 2931 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents); 2932 2933 spin_lock(&log->l_ailp->xa_lock); 2934 /* 2935 * xfs_trans_ail_update() drops the AIL lock. 2936 */ 2937 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn); 2938 return 0; 2939 } 2940 2941 2942 /* 2943 * This routine is called when an efd format structure is found in 2944 * a committed transaction in the log. Its purpose is to cancel 2945 * the corresponding efi if it was still in the log. To do this 2946 * it searches the AIL for the efi with an id equal to that in the 2947 * efd format structure. If we find it, we remove the efi from the 2948 * AIL and free it. 2949 */ 2950 STATIC int 2951 xlog_recover_efd_pass2( 2952 struct xlog *log, 2953 struct xlog_recover_item *item) 2954 { 2955 xfs_efd_log_format_t *efd_formatp; 2956 xfs_efi_log_item_t *efip = NULL; 2957 xfs_log_item_t *lip; 2958 __uint64_t efi_id; 2959 struct xfs_ail_cursor cur; 2960 struct xfs_ail *ailp = log->l_ailp; 2961 2962 efd_formatp = item->ri_buf[0].i_addr; 2963 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + 2964 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || 2965 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + 2966 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t))))); 2967 efi_id = efd_formatp->efd_efi_id; 2968 2969 /* 2970 * Search for the efi with the id in the efd format structure 2971 * in the AIL. 2972 */ 2973 spin_lock(&ailp->xa_lock); 2974 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 2975 while (lip != NULL) { 2976 if (lip->li_type == XFS_LI_EFI) { 2977 efip = (xfs_efi_log_item_t *)lip; 2978 if (efip->efi_format.efi_id == efi_id) { 2979 /* 2980 * xfs_trans_ail_delete() drops the 2981 * AIL lock.
2982 */ 2983 xfs_trans_ail_delete(ailp, lip, 2984 SHUTDOWN_CORRUPT_INCORE); 2985 xfs_efi_item_free(efip); 2986 spin_lock(&ailp->xa_lock); 2987 break; 2988 } 2989 } 2990 lip = xfs_trans_ail_cursor_next(ailp, &cur); 2991 } 2992 xfs_trans_ail_cursor_done(ailp, &cur); 2993 spin_unlock(&ailp->xa_lock); 2994 2995 return 0; 2996 } 2997 2998 /* 2999 * This routine is called when an inode create format structure is found in a 3000 * committed transaction in the log. It's purpose is to initialise the inodes 3001 * being allocated on disk. This requires us to get inode cluster buffers that 3002 * match the range to be intialised, stamped with inode templates and written 3003 * by delayed write so that subsequent modifications will hit the cached buffer 3004 * and only need writing out at the end of recovery. 3005 */ 3006 STATIC int 3007 xlog_recover_do_icreate_pass2( 3008 struct xlog *log, 3009 struct list_head *buffer_list, 3010 xlog_recover_item_t *item) 3011 { 3012 struct xfs_mount *mp = log->l_mp; 3013 struct xfs_icreate_log *icl; 3014 xfs_agnumber_t agno; 3015 xfs_agblock_t agbno; 3016 unsigned int count; 3017 unsigned int isize; 3018 xfs_agblock_t length; 3019 3020 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; 3021 if (icl->icl_type != XFS_LI_ICREATE) { 3022 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); 3023 return EINVAL; 3024 } 3025 3026 if (icl->icl_size != 1) { 3027 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); 3028 return EINVAL; 3029 } 3030 3031 agno = be32_to_cpu(icl->icl_ag); 3032 if (agno >= mp->m_sb.sb_agcount) { 3033 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); 3034 return EINVAL; 3035 } 3036 agbno = be32_to_cpu(icl->icl_agbno); 3037 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { 3038 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); 3039 return EINVAL; 3040 } 3041 isize = be32_to_cpu(icl->icl_isize); 3042 if (isize != mp->m_sb.sb_inodesize) { 3043 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); 3044 return EINVAL; 3045 } 3046 count = be32_to_cpu(icl->icl_count); 3047 if (!count) { 3048 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); 3049 return EINVAL; 3050 } 3051 length = be32_to_cpu(icl->icl_length); 3052 if (!length || length >= mp->m_sb.sb_agblocks) { 3053 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); 3054 return EINVAL; 3055 } 3056 3057 /* existing allocation is fixed value */ 3058 ASSERT(count == XFS_IALLOC_INODES(mp)); 3059 ASSERT(length == XFS_IALLOC_BLOCKS(mp)); 3060 if (count != XFS_IALLOC_INODES(mp) || 3061 length != XFS_IALLOC_BLOCKS(mp)) { 3062 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2"); 3063 return EINVAL; 3064 } 3065 3066 /* 3067 * Inode buffers can be freed. Do not replay the inode initialisation as 3068 * we could be overwriting something written after this inode buffer was 3069 * cancelled. 3070 * 3071 * XXX: we need to iterate all buffers and only init those that are not 3072 * cancelled. I think that a more fine grained factoring of 3073 * xfs_ialloc_inode_init may be appropriate here to enable this to be 3074 * done easily. 
3075 */ 3076 if (xlog_check_buffer_cancelled(log, 3077 XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0)) 3078 return 0; 3079 3080 xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length, 3081 be32_to_cpu(icl->icl_gen)); 3082 return 0; 3083 } 3084 3085 /* 3086 * Free up any resources allocated by the transaction 3087 * 3088 * Remember that EFIs, EFDs, and IUNLINKs are handled later. 3089 */ 3090 STATIC void 3091 xlog_recover_free_trans( 3092 struct xlog_recover *trans) 3093 { 3094 xlog_recover_item_t *item, *n; 3095 int i; 3096 3097 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { 3098 /* Free the regions in the item. */ 3099 list_del(&item->ri_list); 3100 for (i = 0; i < item->ri_cnt; i++) 3101 kmem_free(item->ri_buf[i].i_addr); 3102 /* Free the item itself */ 3103 kmem_free(item->ri_buf); 3104 kmem_free(item); 3105 } 3106 /* Free the transaction recover structure */ 3107 kmem_free(trans); 3108 } 3109 3110 STATIC int 3111 xlog_recover_commit_pass1( 3112 struct xlog *log, 3113 struct xlog_recover *trans, 3114 struct xlog_recover_item *item) 3115 { 3116 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); 3117 3118 switch (ITEM_TYPE(item)) { 3119 case XFS_LI_BUF: 3120 return xlog_recover_buffer_pass1(log, item); 3121 case XFS_LI_QUOTAOFF: 3122 return xlog_recover_quotaoff_pass1(log, item); 3123 case XFS_LI_INODE: 3124 case XFS_LI_EFI: 3125 case XFS_LI_EFD: 3126 case XFS_LI_DQUOT: 3127 case XFS_LI_ICREATE: 3128 /* nothing to do in pass 1 */ 3129 return 0; 3130 default: 3131 xfs_warn(log->l_mp, "%s: invalid item type (%d)", 3132 __func__, ITEM_TYPE(item)); 3133 ASSERT(0); 3134 return XFS_ERROR(EIO); 3135 } 3136 } 3137 3138 STATIC int 3139 xlog_recover_commit_pass2( 3140 struct xlog *log, 3141 struct xlog_recover *trans, 3142 struct list_head *buffer_list, 3143 struct xlog_recover_item *item) 3144 { 3145 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); 3146 3147 switch (ITEM_TYPE(item)) { 3148 case XFS_LI_BUF: 3149 return xlog_recover_buffer_pass2(log, buffer_list, item); 3150 case XFS_LI_INODE: 3151 return xlog_recover_inode_pass2(log, buffer_list, item); 3152 case XFS_LI_EFI: 3153 return xlog_recover_efi_pass2(log, item, trans->r_lsn); 3154 case XFS_LI_EFD: 3155 return xlog_recover_efd_pass2(log, item); 3156 case XFS_LI_DQUOT: 3157 return xlog_recover_dquot_pass2(log, buffer_list, item); 3158 case XFS_LI_ICREATE: 3159 return xlog_recover_do_icreate_pass2(log, buffer_list, item); 3160 case XFS_LI_QUOTAOFF: 3161 /* nothing to do in pass2 */ 3162 return 0; 3163 default: 3164 xfs_warn(log->l_mp, "%s: invalid item type (%d)", 3165 __func__, ITEM_TYPE(item)); 3166 ASSERT(0); 3167 return XFS_ERROR(EIO); 3168 } 3169 } 3170 3171 /* 3172 * Perform the transaction. 3173 * 3174 * If the transaction modifies a buffer or inode, do it now. Otherwise, 3175 * EFIs and EFDs get queued up by adding entries into the AIL for them. 
3176 */ 3177 STATIC int 3178 xlog_recover_commit_trans( 3179 struct xlog *log, 3180 struct xlog_recover *trans, 3181 int pass) 3182 { 3183 int error = 0, error2; 3184 xlog_recover_item_t *item; 3185 LIST_HEAD (buffer_list); 3186 3187 hlist_del(&trans->r_list); 3188 3189 error = xlog_recover_reorder_trans(log, trans, pass); 3190 if (error) 3191 return error; 3192 3193 list_for_each_entry(item, &trans->r_itemq, ri_list) { 3194 switch (pass) { 3195 case XLOG_RECOVER_PASS1: 3196 error = xlog_recover_commit_pass1(log, trans, item); 3197 break; 3198 case XLOG_RECOVER_PASS2: 3199 error = xlog_recover_commit_pass2(log, trans, 3200 &buffer_list, item); 3201 break; 3202 default: 3203 ASSERT(0); 3204 } 3205 3206 if (error) 3207 goto out; 3208 } 3209 3210 xlog_recover_free_trans(trans); 3211 3212 out: 3213 error2 = xfs_buf_delwri_submit(&buffer_list); 3214 return error ? error : error2; 3215 } 3216 3217 STATIC int 3218 xlog_recover_unmount_trans( 3219 struct xlog *log, 3220 struct xlog_recover *trans) 3221 { 3222 /* Do nothing now */ 3223 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); 3224 return 0; 3225 } 3226 3227 /* 3228 * There are two valid states of the r_state field. 0 indicates that the 3229 * transaction structure is in a normal state. We have either seen the 3230 * start of the transaction or the last operation we added was not a partial 3231 * operation. If the last operation we added to the transaction was a 3232 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS. 3233 * 3234 * NOTE: skip LRs with 0 data length. 3235 */ 3236 STATIC int 3237 xlog_recover_process_data( 3238 struct xlog *log, 3239 struct hlist_head rhash[], 3240 struct xlog_rec_header *rhead, 3241 xfs_caddr_t dp, 3242 int pass) 3243 { 3244 xfs_caddr_t lp; 3245 int num_logops; 3246 xlog_op_header_t *ohead; 3247 xlog_recover_t *trans; 3248 xlog_tid_t tid; 3249 int error; 3250 unsigned long hash; 3251 uint flags; 3252 3253 lp = dp + be32_to_cpu(rhead->h_len); 3254 num_logops = be32_to_cpu(rhead->h_num_logops); 3255 3256 /* check the log format matches our own - else we can't recover */ 3257 if (xlog_header_check_recover(log->l_mp, rhead)) 3258 return (XFS_ERROR(EIO)); 3259 3260 while ((dp < lp) && num_logops) { 3261 ASSERT(dp + sizeof(xlog_op_header_t) <= lp); 3262 ohead = (xlog_op_header_t *)dp; 3263 dp += sizeof(xlog_op_header_t); 3264 if (ohead->oh_clientid != XFS_TRANSACTION && 3265 ohead->oh_clientid != XFS_LOG) { 3266 xfs_warn(log->l_mp, "%s: bad clientid 0x%x", 3267 __func__, ohead->oh_clientid); 3268 ASSERT(0); 3269 return (XFS_ERROR(EIO)); 3270 } 3271 tid = be32_to_cpu(ohead->oh_tid); 3272 hash = XLOG_RHASH(tid); 3273 trans = xlog_recover_find_tid(&rhash[hash], tid); 3274 if (trans == NULL) { /* not found; add new tid */ 3275 if (ohead->oh_flags & XLOG_START_TRANS) 3276 xlog_recover_new_tid(&rhash[hash], tid, 3277 be64_to_cpu(rhead->h_lsn)); 3278 } else { 3279 if (dp + be32_to_cpu(ohead->oh_len) > lp) { 3280 xfs_warn(log->l_mp, "%s: bad length 0x%x", 3281 __func__, be32_to_cpu(ohead->oh_len)); 3282 WARN_ON(1); 3283 return (XFS_ERROR(EIO)); 3284 } 3285 flags = ohead->oh_flags & ~XLOG_END_TRANS; 3286 if (flags & XLOG_WAS_CONT_TRANS) 3287 flags &= ~XLOG_CONTINUE_TRANS; 3288 switch (flags) { 3289 case XLOG_COMMIT_TRANS: 3290 error = xlog_recover_commit_trans(log, 3291 trans, pass); 3292 break; 3293 case XLOG_UNMOUNT_TRANS: 3294 error = xlog_recover_unmount_trans(log, trans); 3295 break; 3296 case XLOG_WAS_CONT_TRANS: 3297 error = xlog_recover_add_to_cont_trans(log, 3298 trans, dp, 3299 
be32_to_cpu(ohead->oh_len)); 3300 break; 3301 case XLOG_START_TRANS: 3302 xfs_warn(log->l_mp, "%s: bad transaction", 3303 __func__); 3304 ASSERT(0); 3305 error = XFS_ERROR(EIO); 3306 break; 3307 case 0: 3308 case XLOG_CONTINUE_TRANS: 3309 error = xlog_recover_add_to_trans(log, trans, 3310 dp, be32_to_cpu(ohead->oh_len)); 3311 break; 3312 default: 3313 xfs_warn(log->l_mp, "%s: bad flag 0x%x", 3314 __func__, flags); 3315 ASSERT(0); 3316 error = XFS_ERROR(EIO); 3317 break; 3318 } 3319 if (error) 3320 return error; 3321 } 3322 dp += be32_to_cpu(ohead->oh_len); 3323 num_logops--; 3324 } 3325 return 0; 3326 } 3327 3328 /* 3329 * Process an extent free intent item that was recovered from 3330 * the log. We need to free the extents that it describes. 3331 */ 3332 STATIC int 3333 xlog_recover_process_efi( 3334 xfs_mount_t *mp, 3335 xfs_efi_log_item_t *efip) 3336 { 3337 xfs_efd_log_item_t *efdp; 3338 xfs_trans_t *tp; 3339 int i; 3340 int error = 0; 3341 xfs_extent_t *extp; 3342 xfs_fsblock_t startblock_fsb; 3343 3344 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)); 3345 3346 /* 3347 * First check the validity of the extents described by the 3348 * EFI. If any are bad, then assume that all are bad and 3349 * just toss the EFI. 3350 */ 3351 for (i = 0; i < efip->efi_format.efi_nextents; i++) { 3352 extp = &(efip->efi_format.efi_extents[i]); 3353 startblock_fsb = XFS_BB_TO_FSB(mp, 3354 XFS_FSB_TO_DADDR(mp, extp->ext_start)); 3355 if ((startblock_fsb == 0) || 3356 (extp->ext_len == 0) || 3357 (startblock_fsb >= mp->m_sb.sb_dblocks) || 3358 (extp->ext_len >= mp->m_sb.sb_agblocks)) { 3359 /* 3360 * This will pull the EFI from the AIL and 3361 * free the memory associated with it. 3362 */ 3363 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); 3364 xfs_efi_release(efip, efip->efi_format.efi_nextents); 3365 return XFS_ERROR(EIO); 3366 } 3367 } 3368 3369 tp = xfs_trans_alloc(mp, 0); 3370 error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0); 3371 if (error) 3372 goto abort_error; 3373 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); 3374 3375 for (i = 0; i < efip->efi_format.efi_nextents; i++) { 3376 extp = &(efip->efi_format.efi_extents[i]); 3377 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len); 3378 if (error) 3379 goto abort_error; 3380 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start, 3381 extp->ext_len); 3382 } 3383 3384 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); 3385 error = xfs_trans_commit(tp, 0); 3386 return error; 3387 3388 abort_error: 3389 xfs_trans_cancel(tp, XFS_TRANS_ABORT); 3390 return error; 3391 } 3392 3393 /* 3394 * When this is called, all of the EFIs which did not have 3395 * corresponding EFDs should be in the AIL. What we do now 3396 * is free the extents associated with each one. 3397 * 3398 * Since we process the EFIs in normal transactions, they 3399 * will be removed at some point after the commit. This prevents 3400 * us from just walking down the list processing each one. 3401 * We'll use a flag in the EFI to skip those that we've already 3402 * processed and use the AIL iteration mechanism's generation 3403 * count to try to speed this up at least a bit. 3404 * 3405 * When we start, we know that the EFIs are the only things in 3406 * the AIL. As we process them, however, other items are added 3407 * to the AIL. Since everything added to the AIL must come after 3408 * everything already in the AIL, we stop processing as soon as 3409 * we see something other than an EFI in the AIL. 
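 *
 * (This works because AIL insertion is LSN ordered: the EFIs were added
 * at their recovered commit LSNs, and the extent-free transactions we
 * run below commit at LSNs beyond the recovered log head, so their
 * items always sort after every EFI already present.)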
3410 */ 3411 STATIC int 3412 xlog_recover_process_efis( 3413 struct xlog *log) 3414 { 3415 xfs_log_item_t *lip; 3416 xfs_efi_log_item_t *efip; 3417 int error = 0; 3418 struct xfs_ail_cursor cur; 3419 struct xfs_ail *ailp; 3420 3421 ailp = log->l_ailp; 3422 spin_lock(&ailp->xa_lock); 3423 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 3424 while (lip != NULL) { 3425 /* 3426 * We're done when we see something other than an EFI. 3427 * There should be no EFIs left in the AIL now. 3428 */ 3429 if (lip->li_type != XFS_LI_EFI) { 3430 #ifdef DEBUG 3431 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) 3432 ASSERT(lip->li_type != XFS_LI_EFI); 3433 #endif 3434 break; 3435 } 3436 3437 /* 3438 * Skip EFIs that we've already processed. 3439 */ 3440 efip = (xfs_efi_log_item_t *)lip; 3441 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) { 3442 lip = xfs_trans_ail_cursor_next(ailp, &cur); 3443 continue; 3444 } 3445 3446 spin_unlock(&ailp->xa_lock); 3447 error = xlog_recover_process_efi(log->l_mp, efip); 3448 spin_lock(&ailp->xa_lock); 3449 if (error) 3450 goto out; 3451 lip = xfs_trans_ail_cursor_next(ailp, &cur); 3452 } 3453 out: 3454 xfs_trans_ail_cursor_done(ailp, &cur); 3455 spin_unlock(&ailp->xa_lock); 3456 return error; 3457 } 3458 3459 /* 3460 * This routine performs a transaction to null out a bad inode pointer 3461 * in an agi unlinked inode hash bucket. 3462 */ 3463 STATIC void 3464 xlog_recover_clear_agi_bucket( 3465 xfs_mount_t *mp, 3466 xfs_agnumber_t agno, 3467 int bucket) 3468 { 3469 xfs_trans_t *tp; 3470 xfs_agi_t *agi; 3471 xfs_buf_t *agibp; 3472 int offset; 3473 int error; 3474 3475 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET); 3476 error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 3477 0, 0, 0); 3478 if (error) 3479 goto out_abort; 3480 3481 error = xfs_read_agi(mp, tp, agno, &agibp); 3482 if (error) 3483 goto out_abort; 3484 3485 agi = XFS_BUF_TO_AGI(agibp); 3486 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); 3487 offset = offsetof(xfs_agi_t, agi_unlinked) + 3488 (sizeof(xfs_agino_t) * bucket); 3489 xfs_trans_log_buf(tp, agibp, offset, 3490 (offset + sizeof(xfs_agino_t) - 1)); 3491 3492 error = xfs_trans_commit(tp, 0); 3493 if (error) 3494 goto out_error; 3495 return; 3496 3497 out_abort: 3498 xfs_trans_cancel(tp, XFS_TRANS_ABORT); 3499 out_error: 3500 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno); 3501 return; 3502 } 3503 3504 STATIC xfs_agino_t 3505 xlog_recover_process_one_iunlink( 3506 struct xfs_mount *mp, 3507 xfs_agnumber_t agno, 3508 xfs_agino_t agino, 3509 int bucket) 3510 { 3511 struct xfs_buf *ibp; 3512 struct xfs_dinode *dip; 3513 struct xfs_inode *ip; 3514 xfs_ino_t ino; 3515 int error; 3516 3517 ino = XFS_AGINO_TO_INO(mp, agno, agino); 3518 error = xfs_iget(mp, NULL, ino, 0, 0, &ip); 3519 if (error) 3520 goto fail; 3521 3522 /* 3523 * Get the on disk inode to find the next inode in the bucket. 3524 */ 3525 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0); 3526 if (error) 3527 goto fail_iput; 3528 3529 ASSERT(ip->i_d.di_nlink == 0); 3530 ASSERT(ip->i_d.di_mode != 0); 3531 3532 /* setup for the next pass */ 3533 agino = be32_to_cpu(dip->di_next_unlinked); 3534 xfs_buf_relse(ibp); 3535 3536 /* 3537 * Prevent any DMAPI event from being sent when the reference on 3538 * the inode is dropped. 
3539 */ 3540 ip->i_d.di_dmevmask = 0; 3541 3542 IRELE(ip); 3543 return agino; 3544 3545 fail_iput: 3546 IRELE(ip); 3547 fail: 3548 /* 3549 * We can't read in the inode this bucket points to, or this inode 3550 * is messed up. Just ditch this bucket of inodes. We will lose 3551 * some inodes and space, but at least we won't hang. 3552 * 3553 * Call xlog_recover_clear_agi_bucket() to perform a transaction to 3554 * clear the inode pointer in the bucket. 3555 */ 3556 xlog_recover_clear_agi_bucket(mp, agno, bucket); 3557 return NULLAGINO; 3558 } 3559 3560 /* 3561 * xlog_iunlink_recover 3562 * 3563 * This is called during recovery to process any inodes which 3564 * we unlinked but not freed when the system crashed. These 3565 * inodes will be on the lists in the AGI blocks. What we do 3566 * here is scan all the AGIs and fully truncate and free any 3567 * inodes found on the lists. Each inode is removed from the 3568 * lists when it has been fully truncated and is freed. The 3569 * freeing of the inode and its removal from the list must be 3570 * atomic. 3571 */ 3572 STATIC void 3573 xlog_recover_process_iunlinks( 3574 struct xlog *log) 3575 { 3576 xfs_mount_t *mp; 3577 xfs_agnumber_t agno; 3578 xfs_agi_t *agi; 3579 xfs_buf_t *agibp; 3580 xfs_agino_t agino; 3581 int bucket; 3582 int error; 3583 uint mp_dmevmask; 3584 3585 mp = log->l_mp; 3586 3587 /* 3588 * Prevent any DMAPI event from being sent while in this function. 3589 */ 3590 mp_dmevmask = mp->m_dmevmask; 3591 mp->m_dmevmask = 0; 3592 3593 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 3594 /* 3595 * Find the agi for this ag. 3596 */ 3597 error = xfs_read_agi(mp, NULL, agno, &agibp); 3598 if (error) { 3599 /* 3600 * AGI is b0rked. Don't process it. 3601 * 3602 * We should probably mark the filesystem as corrupt 3603 * after we've recovered all the ag's we can.... 3604 */ 3605 continue; 3606 } 3607 /* 3608 * Unlock the buffer so that it can be acquired in the normal 3609 * course of the transaction to truncate and free each inode. 3610 * Because we are not racing with anyone else here for the AGI 3611 * buffer, we don't even need to hold it locked to read the 3612 * initial unlinked bucket entries out of the buffer. We keep 3613 * buffer reference though, so that it stays pinned in memory 3614 * while we need the buffer. 3615 */ 3616 agi = XFS_BUF_TO_AGI(agibp); 3617 xfs_buf_unlock(agibp); 3618 3619 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { 3620 agino = be32_to_cpu(agi->agi_unlinked[bucket]); 3621 while (agino != NULLAGINO) { 3622 agino = xlog_recover_process_one_iunlink(mp, 3623 agno, agino, bucket); 3624 } 3625 } 3626 xfs_buf_rele(agibp); 3627 } 3628 3629 mp->m_dmevmask = mp_dmevmask; 3630 } 3631 3632 /* 3633 * Upack the log buffer data and crc check it. If the check fails, issue a 3634 * warning if and only if the CRC in the header is non-zero. This makes the 3635 * check an advisory warning, and the zero CRC check will prevent failure 3636 * warnings from being emitted when upgrading the kernel from one that does not 3637 * add CRCs by default. 
3638 * 3639 * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log 3640 * corruption failure 3641 */ 3642 STATIC int 3643 xlog_unpack_data_crc( 3644 struct xlog_rec_header *rhead, 3645 xfs_caddr_t dp, 3646 struct xlog *log) 3647 { 3648 __le32 crc; 3649 3650 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len)); 3651 if (crc != rhead->h_crc) { 3652 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) { 3653 xfs_alert(log->l_mp, 3654 "log record CRC mismatch: found 0x%x, expected 0x%x.\n", 3655 le32_to_cpu(rhead->h_crc), 3656 le32_to_cpu(crc)); 3657 xfs_hex_dump(dp, 32); 3658 } 3659 3660 /* 3661 * If we've detected a log record corruption, then we can't 3662 * recover past this point. Abort recovery if we are enforcing 3663 * CRC protection by punting an error back up the stack. 3664 */ 3665 if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) 3666 return EFSCORRUPTED; 3667 } 3668 3669 return 0; 3670 } 3671 3672 STATIC int 3673 xlog_unpack_data( 3674 struct xlog_rec_header *rhead, 3675 xfs_caddr_t dp, 3676 struct xlog *log) 3677 { 3678 int i, j, k; 3679 int error; 3680 3681 error = xlog_unpack_data_crc(rhead, dp, log); 3682 if (error) 3683 return error; 3684 3685 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && 3686 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { 3687 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i]; 3688 dp += BBSIZE; 3689 } 3690 3691 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 3692 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead; 3693 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { 3694 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3695 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3696 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; 3697 dp += BBSIZE; 3698 } 3699 } 3700 3701 return 0; 3702 } 3703 3704 STATIC int 3705 xlog_valid_rec_header( 3706 struct xlog *log, 3707 struct xlog_rec_header *rhead, 3708 xfs_daddr_t blkno) 3709 { 3710 int hlen; 3711 3712 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) { 3713 XFS_ERROR_REPORT("xlog_valid_rec_header(1)", 3714 XFS_ERRLEVEL_LOW, log->l_mp); 3715 return XFS_ERROR(EFSCORRUPTED); 3716 } 3717 if (unlikely( 3718 (!rhead->h_version || 3719 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { 3720 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", 3721 __func__, be32_to_cpu(rhead->h_version)); 3722 return XFS_ERROR(EIO); 3723 } 3724 3725 /* LR body must have data or it wouldn't have been written */ 3726 hlen = be32_to_cpu(rhead->h_len); 3727 if (unlikely( hlen <= 0 || hlen > INT_MAX )) { 3728 XFS_ERROR_REPORT("xlog_valid_rec_header(2)", 3729 XFS_ERRLEVEL_LOW, log->l_mp); 3730 return XFS_ERROR(EFSCORRUPTED); 3731 } 3732 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) { 3733 XFS_ERROR_REPORT("xlog_valid_rec_header(3)", 3734 XFS_ERRLEVEL_LOW, log->l_mp); 3735 return XFS_ERROR(EFSCORRUPTED); 3736 } 3737 return 0; 3738 } 3739 3740 /* 3741 * Read the log from tail to head and process the log records found. 3742 * Handle the two cases where the tail and head are in the same cycle 3743 * and where the active portion of the log wraps around the end of 3744 * the physical log separately. The pass parameter is passed through 3745 * to the routines called to process the data and is not looked at 3746 * here. 
3747 */ 3748 STATIC int 3749 xlog_do_recovery_pass( 3750 struct xlog *log, 3751 xfs_daddr_t head_blk, 3752 xfs_daddr_t tail_blk, 3753 int pass) 3754 { 3755 xlog_rec_header_t *rhead; 3756 xfs_daddr_t blk_no; 3757 xfs_caddr_t offset; 3758 xfs_buf_t *hbp, *dbp; 3759 int error = 0, h_size; 3760 int bblks, split_bblks; 3761 int hblks, split_hblks, wrapped_hblks; 3762 struct hlist_head rhash[XLOG_RHASH_SIZE]; 3763 3764 ASSERT(head_blk != tail_blk); 3765 3766 /* 3767 * Read the header of the tail block and get the iclog buffer size from 3768 * h_size. Use this to tell how many sectors make up the log header. 3769 */ 3770 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 3771 /* 3772 * When using variable length iclogs, read first sector of 3773 * iclog header and extract the header size from it. Get a 3774 * new hbp that is the correct size. 3775 */ 3776 hbp = xlog_get_bp(log, 1); 3777 if (!hbp) 3778 return ENOMEM; 3779 3780 error = xlog_bread(log, tail_blk, 1, hbp, &offset); 3781 if (error) 3782 goto bread_err1; 3783 3784 rhead = (xlog_rec_header_t *)offset; 3785 error = xlog_valid_rec_header(log, rhead, tail_blk); 3786 if (error) 3787 goto bread_err1; 3788 h_size = be32_to_cpu(rhead->h_size); 3789 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) && 3790 (h_size > XLOG_HEADER_CYCLE_SIZE)) { 3791 hblks = h_size / XLOG_HEADER_CYCLE_SIZE; 3792 if (h_size % XLOG_HEADER_CYCLE_SIZE) 3793 hblks++; 3794 xlog_put_bp(hbp); 3795 hbp = xlog_get_bp(log, hblks); 3796 } else { 3797 hblks = 1; 3798 } 3799 } else { 3800 ASSERT(log->l_sectBBsize == 1); 3801 hblks = 1; 3802 hbp = xlog_get_bp(log, 1); 3803 h_size = XLOG_BIG_RECORD_BSIZE; 3804 } 3805 3806 if (!hbp) 3807 return ENOMEM; 3808 dbp = xlog_get_bp(log, BTOBB(h_size)); 3809 if (!dbp) { 3810 xlog_put_bp(hbp); 3811 return ENOMEM; 3812 } 3813 3814 memset(rhash, 0, sizeof(rhash)); 3815 if (tail_blk <= head_blk) { 3816 for (blk_no = tail_blk; blk_no < head_blk; ) { 3817 error = xlog_bread(log, blk_no, hblks, hbp, &offset); 3818 if (error) 3819 goto bread_err2; 3820 3821 rhead = (xlog_rec_header_t *)offset; 3822 error = xlog_valid_rec_header(log, rhead, blk_no); 3823 if (error) 3824 goto bread_err2; 3825 3826 /* blocks in data section */ 3827 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3828 error = xlog_bread(log, blk_no + hblks, bblks, dbp, 3829 &offset); 3830 if (error) 3831 goto bread_err2; 3832 3833 error = xlog_unpack_data(rhead, offset, log); 3834 if (error) 3835 goto bread_err2; 3836 3837 error = xlog_recover_process_data(log, 3838 rhash, rhead, offset, pass); 3839 if (error) 3840 goto bread_err2; 3841 blk_no += bblks + hblks; 3842 } 3843 } else { 3844 /* 3845 * Perform recovery around the end of the physical log. 3846 * When the head is not on the same cycle number as the tail, 3847 * we can't do a sequential recovery as above. 
3848 */ 3849 blk_no = tail_blk; 3850 while (blk_no < log->l_logBBsize) { 3851 /* 3852 * Check for header wrapping around physical end-of-log 3853 */ 3854 offset = hbp->b_addr; 3855 split_hblks = 0; 3856 wrapped_hblks = 0; 3857 if (blk_no + hblks <= log->l_logBBsize) { 3858 /* Read header in one read */ 3859 error = xlog_bread(log, blk_no, hblks, hbp, 3860 &offset); 3861 if (error) 3862 goto bread_err2; 3863 } else { 3864 /* This LR is split across physical log end */ 3865 if (blk_no != log->l_logBBsize) { 3866 /* some data before physical log end */ 3867 ASSERT(blk_no <= INT_MAX); 3868 split_hblks = log->l_logBBsize - (int)blk_no; 3869 ASSERT(split_hblks > 0); 3870 error = xlog_bread(log, blk_no, 3871 split_hblks, hbp, 3872 &offset); 3873 if (error) 3874 goto bread_err2; 3875 } 3876 3877 /* 3878 * Note: this black magic still works with 3879 * large sector sizes (non-512) only because: 3880 * - we increased the buffer size originally 3881 * by 1 sector giving us enough extra space 3882 * for the second read; 3883 * - the log start is guaranteed to be sector 3884 * aligned; 3885 * - we read the log end (LR header start) 3886 * _first_, then the log start (LR header end) 3887 * - order is important. 3888 */ 3889 wrapped_hblks = hblks - split_hblks; 3890 error = xlog_bread_offset(log, 0, 3891 wrapped_hblks, hbp, 3892 offset + BBTOB(split_hblks)); 3893 if (error) 3894 goto bread_err2; 3895 } 3896 rhead = (xlog_rec_header_t *)offset; 3897 error = xlog_valid_rec_header(log, rhead, 3898 split_hblks ? blk_no : 0); 3899 if (error) 3900 goto bread_err2; 3901 3902 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3903 blk_no += hblks; 3904 3905 /* Read in data for log record */ 3906 if (blk_no + bblks <= log->l_logBBsize) { 3907 error = xlog_bread(log, blk_no, bblks, dbp, 3908 &offset); 3909 if (error) 3910 goto bread_err2; 3911 } else { 3912 /* This log record is split across the 3913 * physical end of log */ 3914 offset = dbp->b_addr; 3915 split_bblks = 0; 3916 if (blk_no != log->l_logBBsize) { 3917 /* some data is before the physical 3918 * end of log */ 3919 ASSERT(!wrapped_hblks); 3920 ASSERT(blk_no <= INT_MAX); 3921 split_bblks = 3922 log->l_logBBsize - (int)blk_no; 3923 ASSERT(split_bblks > 0); 3924 error = xlog_bread(log, blk_no, 3925 split_bblks, dbp, 3926 &offset); 3927 if (error) 3928 goto bread_err2; 3929 } 3930 3931 /* 3932 * Note: this black magic still works with 3933 * large sector sizes (non-512) only because: 3934 * - we increased the buffer size originally 3935 * by 1 sector giving us enough extra space 3936 * for the second read; 3937 * - the log start is guaranteed to be sector 3938 * aligned; 3939 * - we read the log end (LR header start) 3940 * _first_, then the log start (LR header end) 3941 * - order is important. 
3942 */ 3943 error = xlog_bread_offset(log, 0, 3944 bblks - split_bblks, dbp, 3945 offset + BBTOB(split_bblks)); 3946 if (error) 3947 goto bread_err2; 3948 } 3949 3950 error = xlog_unpack_data(rhead, offset, log); 3951 if (error) 3952 goto bread_err2; 3953 3954 error = xlog_recover_process_data(log, rhash, 3955 rhead, offset, pass); 3956 if (error) 3957 goto bread_err2; 3958 blk_no += bblks; 3959 } 3960 3961 ASSERT(blk_no >= log->l_logBBsize); 3962 blk_no -= log->l_logBBsize; 3963 3964 /* read first part of physical log */ 3965 while (blk_no < head_blk) { 3966 error = xlog_bread(log, blk_no, hblks, hbp, &offset); 3967 if (error) 3968 goto bread_err2; 3969 3970 rhead = (xlog_rec_header_t *)offset; 3971 error = xlog_valid_rec_header(log, rhead, blk_no); 3972 if (error) 3973 goto bread_err2; 3974 3975 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3976 error = xlog_bread(log, blk_no+hblks, bblks, dbp, 3977 &offset); 3978 if (error) 3979 goto bread_err2; 3980 3981 error = xlog_unpack_data(rhead, offset, log); 3982 if (error) 3983 goto bread_err2; 3984 3985 error = xlog_recover_process_data(log, rhash, 3986 rhead, offset, pass); 3987 if (error) 3988 goto bread_err2; 3989 blk_no += bblks + hblks; 3990 } 3991 } 3992 3993 bread_err2: 3994 xlog_put_bp(dbp); 3995 bread_err1: 3996 xlog_put_bp(hbp); 3997 return error; 3998 } 3999 4000 /* 4001 * Do the recovery of the log. We actually do this in two phases. 4002 * The two passes are necessary in order to implement the function 4003 * of cancelling a record written into the log. The first pass 4004 * determines those things which have been cancelled, and the 4005 * second pass replays log items normally except for those which 4006 * have been cancelled. The handling of the replay and cancellations 4007 * takes place in the log item type specific routines. 4008 * 4009 * The table of items which have cancel records in the log is allocated 4010 * and freed at this level, since only here do we know when all of 4011 * the log recovery has been completed. 4012 */ 4013 STATIC int 4014 xlog_do_log_recovery( 4015 struct xlog *log, 4016 xfs_daddr_t head_blk, 4017 xfs_daddr_t tail_blk) 4018 { 4019 int error, i; 4020 4021 ASSERT(head_blk != tail_blk); 4022 4023 /* 4024 * First do a pass to find all of the cancelled buf log items. 4025 * Store them in the buf_cancel_table for use in the second pass. 4026 */ 4027 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE * 4028 sizeof(struct list_head), 4029 KM_SLEEP); 4030 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 4031 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]); 4032 4033 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 4034 XLOG_RECOVER_PASS1); 4035 if (error != 0) { 4036 kmem_free(log->l_buf_cancel_table); 4037 log->l_buf_cancel_table = NULL; 4038 return error; 4039 } 4040 /* 4041 * Then do a second pass to actually recover the items in the log. 4042 * When it is complete free the table of buf cancel items. 
4043 */ 4044 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 4045 XLOG_RECOVER_PASS2); 4046 #ifdef DEBUG 4047 if (!error) { 4048 int i; 4049 4050 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 4051 ASSERT(list_empty(&log->l_buf_cancel_table[i])); 4052 } 4053 #endif /* DEBUG */ 4054 4055 kmem_free(log->l_buf_cancel_table); 4056 log->l_buf_cancel_table = NULL; 4057 4058 return error; 4059 } 4060 4061 /* 4062 * Do the actual recovery 4063 */ 4064 STATIC int 4065 xlog_do_recover( 4066 struct xlog *log, 4067 xfs_daddr_t head_blk, 4068 xfs_daddr_t tail_blk) 4069 { 4070 int error; 4071 xfs_buf_t *bp; 4072 xfs_sb_t *sbp; 4073 4074 /* 4075 * First replay the images in the log. 4076 */ 4077 error = xlog_do_log_recovery(log, head_blk, tail_blk); 4078 if (error) 4079 return error; 4080 4081 /* 4082 * If IO errors happened during recovery, bail out. 4083 */ 4084 if (XFS_FORCED_SHUTDOWN(log->l_mp)) { 4085 return (EIO); 4086 } 4087 4088 /* 4089 * We now update the tail_lsn since much of the recovery has completed 4090 * and there may be space available to use. If there were no extent 4091 * or iunlinks, we can free up the entire log and set the tail_lsn to 4092 * be the last_sync_lsn. This was set in xlog_find_tail to be the 4093 * lsn of the last known good LR on disk. If there are extent frees 4094 * or iunlinks they will have some entries in the AIL; so we look at 4095 * the AIL to determine how to set the tail_lsn. 4096 */ 4097 xlog_assign_tail_lsn(log->l_mp); 4098 4099 /* 4100 * Now that we've finished replaying all buffer and inode 4101 * updates, re-read in the superblock and reverify it. 4102 */ 4103 bp = xfs_getsb(log->l_mp, 0); 4104 XFS_BUF_UNDONE(bp); 4105 ASSERT(!(XFS_BUF_ISWRITE(bp))); 4106 XFS_BUF_READ(bp); 4107 XFS_BUF_UNASYNC(bp); 4108 bp->b_ops = &xfs_sb_buf_ops; 4109 xfsbdstrat(log->l_mp, bp); 4110 error = xfs_buf_iowait(bp); 4111 if (error) { 4112 xfs_buf_ioerror_alert(bp, __func__); 4113 ASSERT(0); 4114 xfs_buf_relse(bp); 4115 return error; 4116 } 4117 4118 /* Convert superblock from on-disk format */ 4119 sbp = &log->l_mp->m_sb; 4120 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); 4121 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC); 4122 ASSERT(xfs_sb_good_version(sbp)); 4123 xfs_buf_relse(bp); 4124 4125 /* We've re-read the superblock so re-initialize per-cpu counters */ 4126 xfs_icsb_reinit_counters(log->l_mp); 4127 4128 xlog_recover_check_summary(log); 4129 4130 /* Normal transactions can now occur */ 4131 log->l_flags &= ~XLOG_ACTIVE_RECOVERY; 4132 return 0; 4133 } 4134 4135 /* 4136 * Perform recovery and re-initialize some log variables in xlog_find_tail. 4137 * 4138 * Return error or zero. 4139 */ 4140 int 4141 xlog_recover( 4142 struct xlog *log) 4143 { 4144 xfs_daddr_t head_blk, tail_blk; 4145 int error; 4146 4147 /* find the tail of the log */ 4148 if ((error = xlog_find_tail(log, &head_blk, &tail_blk))) 4149 return error; 4150 4151 if (tail_blk != head_blk) { 4152 /* There used to be a comment here: 4153 * 4154 * disallow recovery on read-only mounts. note -- mount 4155 * checks for ENOSPC and turns it into an intelligent 4156 * error message. 4157 * ...but this is no longer true. Now, unless you specify 4158 * NORECOVERY (in which case this function would never be 4159 * called), we just go ahead and recover. We do this all 4160 * under the vfs layer, so we can get away with it unless 4161 * the device itself is read-only, in which case we fail. 
4162 */ 4163 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) { 4164 return error; 4165 } 4166 4167 /* 4168 * Version 5 superblock log feature mask validation. We know the 4169 * log is dirty so check if there are any unknown log features 4170 * in what we need to recover. If there are unknown features 4171 * (e.g. unsupported transactions, then simply reject the 4172 * attempt at recovery before touching anything. 4173 */ 4174 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 && 4175 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb, 4176 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) { 4177 xfs_warn(log->l_mp, 4178 "Superblock has unknown incompatible log features (0x%x) enabled.\n" 4179 "The log can not be fully and/or safely recovered by this kernel.\n" 4180 "Please recover the log on a kernel that supports the unknown features.", 4181 (log->l_mp->m_sb.sb_features_log_incompat & 4182 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)); 4183 return EINVAL; 4184 } 4185 4186 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", 4187 log->l_mp->m_logname ? log->l_mp->m_logname 4188 : "internal"); 4189 4190 error = xlog_do_recover(log, head_blk, tail_blk); 4191 log->l_flags |= XLOG_RECOVERY_NEEDED; 4192 } 4193 return error; 4194 } 4195 4196 /* 4197 * In the first part of recovery we replay inodes and buffers and build 4198 * up the list of extent free items which need to be processed. Here 4199 * we process the extent free items and clean up the on disk unlinked 4200 * inode lists. This is separated from the first part of recovery so 4201 * that the root and real-time bitmap inodes can be read in from disk in 4202 * between the two stages. This is necessary so that we can free space 4203 * in the real-time portion of the file system. 4204 */ 4205 int 4206 xlog_recover_finish( 4207 struct xlog *log) 4208 { 4209 /* 4210 * Now we're ready to do the transactions needed for the 4211 * rest of recovery. Start with completing all the extent 4212 * free intent records and then process the unlinked inode 4213 * lists. At this point, we essentially run in normal mode 4214 * except that we're still performing recovery actions 4215 * rather than accepting new requests. 4216 */ 4217 if (log->l_flags & XLOG_RECOVERY_NEEDED) { 4218 int error; 4219 error = xlog_recover_process_efis(log); 4220 if (error) { 4221 xfs_alert(log->l_mp, "Failed to recover EFIs"); 4222 return error; 4223 } 4224 /* 4225 * Sync the log to get all the EFIs out of the AIL. 4226 * This isn't absolutely necessary, but it helps in 4227 * case the unlink transactions would have problems 4228 * pushing the EFIs out of the way. 4229 */ 4230 xfs_log_force(log->l_mp, XFS_LOG_SYNC); 4231 4232 xlog_recover_process_iunlinks(log); 4233 4234 xlog_recover_check_summary(log); 4235 4236 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)", 4237 log->l_mp->m_logname ? log->l_mp->m_logname 4238 : "internal"); 4239 log->l_flags &= ~XLOG_RECOVERY_NEEDED; 4240 } else { 4241 xfs_info(log->l_mp, "Ending clean mount"); 4242 } 4243 return 0; 4244 } 4245 4246 4247 #if defined(DEBUG) 4248 /* 4249 * Read all of the agf and agi counters and check that they 4250 * are consistent with the superblock counters. 
4251 */ 4252 void 4253 xlog_recover_check_summary( 4254 struct xlog *log) 4255 { 4256 xfs_mount_t *mp; 4257 xfs_agf_t *agfp; 4258 xfs_buf_t *agfbp; 4259 xfs_buf_t *agibp; 4260 xfs_agnumber_t agno; 4261 __uint64_t freeblks; 4262 __uint64_t itotal; 4263 __uint64_t ifree; 4264 int error; 4265 4266 mp = log->l_mp; 4267 4268 freeblks = 0LL; 4269 itotal = 0LL; 4270 ifree = 0LL; 4271 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 4272 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp); 4273 if (error) { 4274 xfs_alert(mp, "%s agf read failed agno %d error %d", 4275 __func__, agno, error); 4276 } else { 4277 agfp = XFS_BUF_TO_AGF(agfbp); 4278 freeblks += be32_to_cpu(agfp->agf_freeblks) + 4279 be32_to_cpu(agfp->agf_flcount); 4280 xfs_buf_relse(agfbp); 4281 } 4282 4283 error = xfs_read_agi(mp, NULL, agno, &agibp); 4284 if (error) { 4285 xfs_alert(mp, "%s agi read failed agno %d error %d", 4286 __func__, agno, error); 4287 } else { 4288 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp); 4289 4290 itotal += be32_to_cpu(agi->agi_count); 4291 ifree += be32_to_cpu(agi->agi_freecount); 4292 xfs_buf_relse(agibp); 4293 } 4294 } 4295 } 4296 #endif /* DEBUG */ 4297