1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_sb.h" 14 #include "xfs_mount.h" 15 #include "xfs_defer.h" 16 #include "xfs_inode.h" 17 #include "xfs_trans.h" 18 #include "xfs_log.h" 19 #include "xfs_log_priv.h" 20 #include "xfs_log_recover.h" 21 #include "xfs_trans_priv.h" 22 #include "xfs_alloc.h" 23 #include "xfs_ialloc.h" 24 #include "xfs_trace.h" 25 #include "xfs_icache.h" 26 #include "xfs_error.h" 27 #include "xfs_buf_item.h" 28 #include "xfs_ag.h" 29 30 #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1) 31 32 STATIC int 33 xlog_find_zeroed( 34 struct xlog *, 35 xfs_daddr_t *); 36 STATIC int 37 xlog_clear_stale_blocks( 38 struct xlog *, 39 xfs_lsn_t); 40 #if defined(DEBUG) 41 STATIC void 42 xlog_recover_check_summary( 43 struct xlog *); 44 #else 45 #define xlog_recover_check_summary(log) 46 #endif 47 STATIC int 48 xlog_do_recovery_pass( 49 struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *); 50 51 /* 52 * Sector aligned buffer routines for buffer create/read/write/access 53 */ 54 55 /* 56 * Verify the log-relative block number and length in basic blocks are valid for 57 * an operation involving the given XFS log buffer. Returns true if the fields 58 * are valid, false otherwise. 59 */ 60 static inline bool 61 xlog_verify_bno( 62 struct xlog *log, 63 xfs_daddr_t blk_no, 64 int bbcount) 65 { 66 if (blk_no < 0 || blk_no >= log->l_logBBsize) 67 return false; 68 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize) 69 return false; 70 return true; 71 } 72 73 /* 74 * Allocate a buffer to hold log data. The buffer needs to be able to map to 75 * a range of nbblks basic blocks at any valid offset within the log. 76 */ 77 static char * 78 xlog_alloc_buffer( 79 struct xlog *log, 80 int nbblks) 81 { 82 int align_mask = xfs_buftarg_dma_alignment(log->l_targ); 83 84 /* 85 * Pass log block 0 since we don't have an addr yet, buffer will be 86 * verified on read. 87 */ 88 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) { 89 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", 90 nbblks); 91 return NULL; 92 } 93 94 /* 95 * We do log I/O in units of log sectors (a power-of-2 multiple of the 96 * basic block size), so we round up the requested size to accommodate 97 * the basic blocks required for complete log sectors. 98 * 99 * In addition, the buffer may be used for a non-sector-aligned block 100 * offset, in which case an I/O of the requested size could extend 101 * beyond the end of the buffer. If the requested size is only 1 basic 102 * block it will never straddle a sector boundary, so this won't be an 103 * issue. Nor will this be a problem if the log I/O is done in basic 104 * blocks (sector size 1). But otherwise we extend the buffer by one 105 * extra log sector to ensure there's space to accommodate this 106 * possibility. 107 */ 108 if (nbblks > 1 && log->l_sectBBsize > 1) 109 nbblks += log->l_sectBBsize; 110 nbblks = round_up(nbblks, log->l_sectBBsize); 111 return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO); 112 } 113 114 /* 115 * Return the address of the start of the given block number's data 116 * in a log buffer. The buffer covers a log sector-aligned region. 
117 */ 118 static inline unsigned int 119 xlog_align( 120 struct xlog *log, 121 xfs_daddr_t blk_no) 122 { 123 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1)); 124 } 125 126 static int 127 xlog_do_io( 128 struct xlog *log, 129 xfs_daddr_t blk_no, 130 unsigned int nbblks, 131 char *data, 132 unsigned int op) 133 { 134 int error; 135 136 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) { 137 xfs_warn(log->l_mp, 138 "Invalid log block/length (0x%llx, 0x%x) for buffer", 139 blk_no, nbblks); 140 return -EFSCORRUPTED; 141 } 142 143 blk_no = round_down(blk_no, log->l_sectBBsize); 144 nbblks = round_up(nbblks, log->l_sectBBsize); 145 ASSERT(nbblks > 0); 146 147 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no, 148 BBTOB(nbblks), data, op); 149 if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) { 150 xfs_alert(log->l_mp, 151 "log recovery %s I/O error at daddr 0x%llx len %d error %d", 152 op == REQ_OP_WRITE ? "write" : "read", 153 blk_no, nbblks, error); 154 } 155 return error; 156 } 157 158 STATIC int 159 xlog_bread_noalign( 160 struct xlog *log, 161 xfs_daddr_t blk_no, 162 int nbblks, 163 char *data) 164 { 165 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ); 166 } 167 168 STATIC int 169 xlog_bread( 170 struct xlog *log, 171 xfs_daddr_t blk_no, 172 int nbblks, 173 char *data, 174 char **offset) 175 { 176 int error; 177 178 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ); 179 if (!error) 180 *offset = data + xlog_align(log, blk_no); 181 return error; 182 } 183 184 STATIC int 185 xlog_bwrite( 186 struct xlog *log, 187 xfs_daddr_t blk_no, 188 int nbblks, 189 char *data) 190 { 191 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE); 192 } 193 194 #ifdef DEBUG 195 /* 196 * dump debug superblock and log record information 197 */ 198 STATIC void 199 xlog_header_check_dump( 200 xfs_mount_t *mp, 201 xlog_rec_header_t *head) 202 { 203 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d", 204 __func__, &mp->m_sb.sb_uuid, XLOG_FMT); 205 xfs_debug(mp, " log : uuid = %pU, fmt = %d", 206 &head->h_fs_uuid, be32_to_cpu(head->h_fmt)); 207 } 208 #else 209 #define xlog_header_check_dump(mp, head) 210 #endif 211 212 /* 213 * check log record header for recovery 214 */ 215 STATIC int 216 xlog_header_check_recover( 217 xfs_mount_t *mp, 218 xlog_rec_header_t *head) 219 { 220 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); 221 222 /* 223 * IRIX doesn't write the h_fmt field and leaves it zeroed 224 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover 225 * a dirty log created in IRIX. 226 */ 227 if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) { 228 xfs_warn(mp, 229 "dirty log written in incompatible format - can't recover"); 230 xlog_header_check_dump(mp, head); 231 return -EFSCORRUPTED; 232 } 233 if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid, 234 &head->h_fs_uuid))) { 235 xfs_warn(mp, 236 "dirty log entry has mismatched uuid - can't recover"); 237 xlog_header_check_dump(mp, head); 238 return -EFSCORRUPTED; 239 } 240 return 0; 241 } 242 243 /* 244 * read the head block of the log and check the header 245 */ 246 STATIC int 247 xlog_header_check_mount( 248 xfs_mount_t *mp, 249 xlog_rec_header_t *head) 250 { 251 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); 252 253 if (uuid_is_null(&head->h_fs_uuid)) { 254 /* 255 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If 256 * h_fs_uuid is null, we assume this log was last mounted 257 * by IRIX and continue. 
258 */ 259 xfs_warn(mp, "null uuid in log - IRIX style log"); 260 } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid, 261 &head->h_fs_uuid))) { 262 xfs_warn(mp, "log has mismatched uuid - can't recover"); 263 xlog_header_check_dump(mp, head); 264 return -EFSCORRUPTED; 265 } 266 return 0; 267 } 268 269 /* 270 * This routine finds (to an approximation) the first block in the physical 271 * log which contains the given cycle. It uses a binary search algorithm. 272 * Note that the algorithm can not be perfect because the disk will not 273 * necessarily be perfect. 274 */ 275 STATIC int 276 xlog_find_cycle_start( 277 struct xlog *log, 278 char *buffer, 279 xfs_daddr_t first_blk, 280 xfs_daddr_t *last_blk, 281 uint cycle) 282 { 283 char *offset; 284 xfs_daddr_t mid_blk; 285 xfs_daddr_t end_blk; 286 uint mid_cycle; 287 int error; 288 289 end_blk = *last_blk; 290 mid_blk = BLK_AVG(first_blk, end_blk); 291 while (mid_blk != first_blk && mid_blk != end_blk) { 292 error = xlog_bread(log, mid_blk, 1, buffer, &offset); 293 if (error) 294 return error; 295 mid_cycle = xlog_get_cycle(offset); 296 if (mid_cycle == cycle) 297 end_blk = mid_blk; /* last_half_cycle == mid_cycle */ 298 else 299 first_blk = mid_blk; /* first_half_cycle == mid_cycle */ 300 mid_blk = BLK_AVG(first_blk, end_blk); 301 } 302 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) || 303 (mid_blk == end_blk && mid_blk-1 == first_blk)); 304 305 *last_blk = end_blk; 306 307 return 0; 308 } 309 310 /* 311 * Check that a range of blocks does not contain stop_on_cycle_no. 312 * Fill in *new_blk with the block offset where such a block is 313 * found, or with -1 (an invalid block number) if there is no such 314 * block in the range. The scan needs to occur from front to back 315 * and the pointer into the region must be updated since a later 316 * routine will need to perform another test. 317 */ 318 STATIC int 319 xlog_find_verify_cycle( 320 struct xlog *log, 321 xfs_daddr_t start_blk, 322 int nbblks, 323 uint stop_on_cycle_no, 324 xfs_daddr_t *new_blk) 325 { 326 xfs_daddr_t i, j; 327 uint cycle; 328 char *buffer; 329 xfs_daddr_t bufblks; 330 char *buf = NULL; 331 int error = 0; 332 333 /* 334 * Greedily allocate a buffer big enough to handle the full 335 * range of basic blocks we'll be examining. If that fails, 336 * try a smaller size. We need to be able to read at least 337 * a log sector, or we're out of luck. 
338 */ 339 bufblks = 1 << ffs(nbblks); 340 while (bufblks > log->l_logBBsize) 341 bufblks >>= 1; 342 while (!(buffer = xlog_alloc_buffer(log, bufblks))) { 343 bufblks >>= 1; 344 if (bufblks < log->l_sectBBsize) 345 return -ENOMEM; 346 } 347 348 for (i = start_blk; i < start_blk + nbblks; i += bufblks) { 349 int bcount; 350 351 bcount = min(bufblks, (start_blk + nbblks - i)); 352 353 error = xlog_bread(log, i, bcount, buffer, &buf); 354 if (error) 355 goto out; 356 357 for (j = 0; j < bcount; j++) { 358 cycle = xlog_get_cycle(buf); 359 if (cycle == stop_on_cycle_no) { 360 *new_blk = i+j; 361 goto out; 362 } 363 364 buf += BBSIZE; 365 } 366 } 367 368 *new_blk = -1; 369 370 out: 371 kmem_free(buffer); 372 return error; 373 } 374 375 static inline int 376 xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh) 377 { 378 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 379 int h_size = be32_to_cpu(rh->h_size); 380 381 if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) && 382 h_size > XLOG_HEADER_CYCLE_SIZE) 383 return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE); 384 } 385 return 1; 386 } 387 388 /* 389 * Potentially backup over partial log record write. 390 * 391 * In the typical case, last_blk is the number of the block directly after 392 * a good log record. Therefore, we subtract one to get the block number 393 * of the last block in the given buffer. extra_bblks contains the number 394 * of blocks we would have read on a previous read. This happens when the 395 * last log record is split over the end of the physical log. 396 * 397 * extra_bblks is the number of blocks potentially verified on a previous 398 * call to this routine. 399 */ 400 STATIC int 401 xlog_find_verify_log_record( 402 struct xlog *log, 403 xfs_daddr_t start_blk, 404 xfs_daddr_t *last_blk, 405 int extra_bblks) 406 { 407 xfs_daddr_t i; 408 char *buffer; 409 char *offset = NULL; 410 xlog_rec_header_t *head = NULL; 411 int error = 0; 412 int smallmem = 0; 413 int num_blks = *last_blk - start_blk; 414 int xhdrs; 415 416 ASSERT(start_blk != 0 || *last_blk != start_blk); 417 418 buffer = xlog_alloc_buffer(log, num_blks); 419 if (!buffer) { 420 buffer = xlog_alloc_buffer(log, 1); 421 if (!buffer) 422 return -ENOMEM; 423 smallmem = 1; 424 } else { 425 error = xlog_bread(log, start_blk, num_blks, buffer, &offset); 426 if (error) 427 goto out; 428 offset += ((num_blks - 1) << BBSHIFT); 429 } 430 431 for (i = (*last_blk) - 1; i >= 0; i--) { 432 if (i < start_blk) { 433 /* valid log record not found */ 434 xfs_warn(log->l_mp, 435 "Log inconsistent (didn't find previous header)"); 436 ASSERT(0); 437 error = -EFSCORRUPTED; 438 goto out; 439 } 440 441 if (smallmem) { 442 error = xlog_bread(log, i, 1, buffer, &offset); 443 if (error) 444 goto out; 445 } 446 447 head = (xlog_rec_header_t *)offset; 448 449 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) 450 break; 451 452 if (!smallmem) 453 offset -= BBSIZE; 454 } 455 456 /* 457 * We hit the beginning of the physical log & still no header. Return 458 * to caller. If caller can handle a return of -1, then this routine 459 * will be called again for the end of the physical log. 460 */ 461 if (i == -1) { 462 error = 1; 463 goto out; 464 } 465 466 /* 467 * We have the final block of the good log (the first block 468 * of the log record _before_ the head. So we check the uuid. 469 */ 470 if ((error = xlog_header_check_mount(log->l_mp, head))) 471 goto out; 472 473 /* 474 * We may have found a log record header before we expected one. 
475 * last_blk will be the 1st block # with a given cycle #. We may end 476 * up reading an entire log record. In this case, we don't want to 477 * reset last_blk. Only when last_blk points in the middle of a log 478 * record do we update last_blk. 479 */ 480 xhdrs = xlog_logrec_hblks(log, head); 481 482 if (*last_blk - i + extra_bblks != 483 BTOBB(be32_to_cpu(head->h_len)) + xhdrs) 484 *last_blk = i; 485 486 out: 487 kmem_free(buffer); 488 return error; 489 } 490 491 /* 492 * Head is defined to be the point of the log where the next log write 493 * could go. This means that incomplete LR writes at the end are 494 * eliminated when calculating the head. We aren't guaranteed that previous 495 * LR have complete transactions. We only know that a cycle number of 496 * current cycle number -1 won't be present in the log if we start writing 497 * from our current block number. 498 * 499 * last_blk contains the block number of the first block with a given 500 * cycle number. 501 * 502 * Return: zero if normal, non-zero if error. 503 */ 504 STATIC int 505 xlog_find_head( 506 struct xlog *log, 507 xfs_daddr_t *return_head_blk) 508 { 509 char *buffer; 510 char *offset; 511 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk; 512 int num_scan_bblks; 513 uint first_half_cycle, last_half_cycle; 514 uint stop_on_cycle; 515 int error, log_bbnum = log->l_logBBsize; 516 517 /* Is the end of the log device zeroed? */ 518 error = xlog_find_zeroed(log, &first_blk); 519 if (error < 0) { 520 xfs_warn(log->l_mp, "empty log check failed"); 521 return error; 522 } 523 if (error == 1) { 524 *return_head_blk = first_blk; 525 526 /* Is the whole lot zeroed? */ 527 if (!first_blk) { 528 /* Linux XFS shouldn't generate totally zeroed logs - 529 * mkfs etc write a dummy unmount record to a fresh 530 * log so we can store the uuid in there 531 */ 532 xfs_warn(log->l_mp, "totally zeroed log"); 533 } 534 535 return 0; 536 } 537 538 first_blk = 0; /* get cycle # of 1st block */ 539 buffer = xlog_alloc_buffer(log, 1); 540 if (!buffer) 541 return -ENOMEM; 542 543 error = xlog_bread(log, 0, 1, buffer, &offset); 544 if (error) 545 goto out_free_buffer; 546 547 first_half_cycle = xlog_get_cycle(offset); 548 549 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */ 550 error = xlog_bread(log, last_blk, 1, buffer, &offset); 551 if (error) 552 goto out_free_buffer; 553 554 last_half_cycle = xlog_get_cycle(offset); 555 ASSERT(last_half_cycle != 0); 556 557 /* 558 * If the 1st half cycle number is equal to the last half cycle number, 559 * then the entire log is stamped with the same cycle number. In this 560 * case, head_blk can't be set to zero (which makes sense). The below 561 * math doesn't work out properly with head_blk equal to zero. Instead, 562 * we set it to log_bbnum which is an invalid block number, but this 563 * value makes the math correct. If head_blk doesn't changed through 564 * all the tests below, *head_blk is set to zero at the very end rather 565 * than log_bbnum. In a sense, log_bbnum and zero are the same block 566 * in a circular file. 567 */ 568 if (first_half_cycle == last_half_cycle) { 569 /* 570 * In this case we believe that the entire log should have 571 * cycle number last_half_cycle. We need to scan backwards 572 * from the end verifying that there are no holes still 573 * containing last_half_cycle - 1. If we find such a hole, 574 * then the start of that hole will be the new head. The 575 * simple case looks like 576 * x | x ... 
| x - 1 | x 577 * Another case that fits this picture would be 578 * x | x + 1 | x ... | x 579 * In this case the head really is somewhere at the end of the 580 * log, as one of the latest writes at the beginning was 581 * incomplete. 582 * One more case is 583 * x | x + 1 | x ... | x - 1 | x 584 * This is really the combination of the above two cases, and 585 * the head has to end up at the start of the x-1 hole at the 586 * end of the log. 587 * 588 * In the 256k log case, we will read from the beginning to the 589 * end of the log and search for cycle numbers equal to x-1. 590 * We don't worry about the x+1 blocks that we encounter, 591 * because we know that they cannot be the head since the log 592 * started with x. 593 */ 594 head_blk = log_bbnum; 595 stop_on_cycle = last_half_cycle - 1; 596 } else { 597 /* 598 * In this case we want to find the first block with cycle 599 * number matching last_half_cycle. We expect the log to be 600 * some variation on 601 * x + 1 ... | x ... | x 602 * The first block with cycle number x (last_half_cycle) will 603 * be where the new head belongs. First we do a binary search 604 * for the first occurrence of last_half_cycle. The binary 605 * search may not be totally accurate, so then we scan back 606 * from there looking for occurrences of last_half_cycle before 607 * us. If that backwards scan wraps around the beginning of 608 * the log, then we look for occurrences of last_half_cycle - 1 609 * at the end of the log. The cases we're looking for look 610 * like 611 * v binary search stopped here 612 * x + 1 ... | x | x + 1 | x ... | x 613 * ^ but we want to locate this spot 614 * or 615 * <---------> less than scan distance 616 * x + 1 ... | x ... | x - 1 | x 617 * ^ we want to locate this spot 618 */ 619 stop_on_cycle = last_half_cycle; 620 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk, 621 last_half_cycle); 622 if (error) 623 goto out_free_buffer; 624 } 625 626 /* 627 * Now validate the answer. Scan back some number of maximum possible 628 * blocks and make sure each one has the expected cycle number. The 629 * maximum is determined by the total possible amount of buffering 630 * in the in-core log. The following number can be made tighter if 631 * we actually look at the block size of the filesystem. 632 */ 633 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log)); 634 if (head_blk >= num_scan_bblks) { 635 /* 636 * We are guaranteed that the entire check can be performed 637 * in one buffer. 638 */ 639 start_blk = head_blk - num_scan_bblks; 640 if ((error = xlog_find_verify_cycle(log, 641 start_blk, num_scan_bblks, 642 stop_on_cycle, &new_blk))) 643 goto out_free_buffer; 644 if (new_blk != -1) 645 head_blk = new_blk; 646 } else { /* need to read 2 parts of log */ 647 /* 648 * We are going to scan backwards in the log in two parts. 649 * First we scan the physical end of the log. In this part 650 * of the log, we are looking for blocks with cycle number 651 * last_half_cycle - 1. 652 * If we find one, then we know that the log starts there, as 653 * we've found a hole that didn't get written in going around 654 * the end of the physical log. The simple case for this is 655 * x + 1 ... | x ... | x - 1 | x 656 * <---------> less than scan distance 657 * If all of the blocks at the end of the log have cycle number 658 * last_half_cycle, then we check the blocks at the start of 659 * the log looking for occurrences of last_half_cycle. 
If we 660 * find one, then our current estimate for the location of the 661 * first occurrence of last_half_cycle is wrong and we move 662 * back to the hole we've found. This case looks like 663 * x + 1 ... | x | x + 1 | x ... 664 * ^ binary search stopped here 665 * Another case we need to handle that only occurs in 256k 666 * logs is 667 * x + 1 ... | x ... | x+1 | x ... 668 * ^ binary search stops here 669 * In a 256k log, the scan at the end of the log will see the 670 * x + 1 blocks. We need to skip past those since that is 671 * certainly not the head of the log. By searching for 672 * last_half_cycle-1 we accomplish that. 673 */ 674 ASSERT(head_blk <= INT_MAX && 675 (xfs_daddr_t) num_scan_bblks >= head_blk); 676 start_blk = log_bbnum - (num_scan_bblks - head_blk); 677 if ((error = xlog_find_verify_cycle(log, start_blk, 678 num_scan_bblks - (int)head_blk, 679 (stop_on_cycle - 1), &new_blk))) 680 goto out_free_buffer; 681 if (new_blk != -1) { 682 head_blk = new_blk; 683 goto validate_head; 684 } 685 686 /* 687 * Scan beginning of log now. The last part of the physical 688 * log is good. This scan needs to verify that it doesn't find 689 * the last_half_cycle. 690 */ 691 start_blk = 0; 692 ASSERT(head_blk <= INT_MAX); 693 if ((error = xlog_find_verify_cycle(log, 694 start_blk, (int)head_blk, 695 stop_on_cycle, &new_blk))) 696 goto out_free_buffer; 697 if (new_blk != -1) 698 head_blk = new_blk; 699 } 700 701 validate_head: 702 /* 703 * Now we need to make sure head_blk is not pointing to a block in 704 * the middle of a log record. 705 */ 706 num_scan_bblks = XLOG_REC_SHIFT(log); 707 if (head_blk >= num_scan_bblks) { 708 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */ 709 710 /* start ptr at last block ptr before head_blk */ 711 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); 712 if (error == 1) 713 error = -EIO; 714 if (error) 715 goto out_free_buffer; 716 } else { 717 start_blk = 0; 718 ASSERT(head_blk <= INT_MAX); 719 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); 720 if (error < 0) 721 goto out_free_buffer; 722 if (error == 1) { 723 /* We hit the beginning of the log during our search */ 724 start_blk = log_bbnum - (num_scan_bblks - head_blk); 725 new_blk = log_bbnum; 726 ASSERT(start_blk <= INT_MAX && 727 (xfs_daddr_t) log_bbnum-start_blk >= 0); 728 ASSERT(head_blk <= INT_MAX); 729 error = xlog_find_verify_log_record(log, start_blk, 730 &new_blk, (int)head_blk); 731 if (error == 1) 732 error = -EIO; 733 if (error) 734 goto out_free_buffer; 735 if (new_blk != log_bbnum) 736 head_blk = new_blk; 737 } else if (error) 738 goto out_free_buffer; 739 } 740 741 kmem_free(buffer); 742 if (head_blk == log_bbnum) 743 *return_head_blk = 0; 744 else 745 *return_head_blk = head_blk; 746 /* 747 * When returning here, we have a good block number. Bad block 748 * means that during a previous crash, we didn't have a clean break 749 * from cycle number N to cycle number N-1. In this case, we need 750 * to find the first block with cycle number N-1. 751 */ 752 return 0; 753 754 out_free_buffer: 755 kmem_free(buffer); 756 if (error) 757 xfs_warn(log->l_mp, "failed to find log head"); 758 return error; 759 } 760 761 /* 762 * Seek backwards in the log for log record headers. 763 * 764 * Given a starting log block, walk backwards until we find the provided number 765 * of records or hit the provided tail block. The return value is the number of 766 * records encountered or a negative error code. 
The log block and buffer 767 * pointer of the last record seen are returned in rblk and rhead respectively. 768 */ 769 STATIC int 770 xlog_rseek_logrec_hdr( 771 struct xlog *log, 772 xfs_daddr_t head_blk, 773 xfs_daddr_t tail_blk, 774 int count, 775 char *buffer, 776 xfs_daddr_t *rblk, 777 struct xlog_rec_header **rhead, 778 bool *wrapped) 779 { 780 int i; 781 int error; 782 int found = 0; 783 char *offset = NULL; 784 xfs_daddr_t end_blk; 785 786 *wrapped = false; 787 788 /* 789 * Walk backwards from the head block until we hit the tail or the first 790 * block in the log. 791 */ 792 end_blk = head_blk > tail_blk ? tail_blk : 0; 793 for (i = (int) head_blk - 1; i >= end_blk; i--) { 794 error = xlog_bread(log, i, 1, buffer, &offset); 795 if (error) 796 goto out_error; 797 798 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { 799 *rblk = i; 800 *rhead = (struct xlog_rec_header *) offset; 801 if (++found == count) 802 break; 803 } 804 } 805 806 /* 807 * If we haven't hit the tail block or the log record header count, 808 * start looking again from the end of the physical log. Note that 809 * callers can pass head == tail if the tail is not yet known. 810 */ 811 if (tail_blk >= head_blk && found != count) { 812 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) { 813 error = xlog_bread(log, i, 1, buffer, &offset); 814 if (error) 815 goto out_error; 816 817 if (*(__be32 *)offset == 818 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { 819 *wrapped = true; 820 *rblk = i; 821 *rhead = (struct xlog_rec_header *) offset; 822 if (++found == count) 823 break; 824 } 825 } 826 } 827 828 return found; 829 830 out_error: 831 return error; 832 } 833 834 /* 835 * Seek forward in the log for log record headers. 836 * 837 * Given head and tail blocks, walk forward from the tail block until we find 838 * the provided number of records or hit the head block. The return value is the 839 * number of records encountered or a negative error code. The log block and 840 * buffer pointer of the last record seen are returned in rblk and rhead 841 * respectively. 842 */ 843 STATIC int 844 xlog_seek_logrec_hdr( 845 struct xlog *log, 846 xfs_daddr_t head_blk, 847 xfs_daddr_t tail_blk, 848 int count, 849 char *buffer, 850 xfs_daddr_t *rblk, 851 struct xlog_rec_header **rhead, 852 bool *wrapped) 853 { 854 int i; 855 int error; 856 int found = 0; 857 char *offset = NULL; 858 xfs_daddr_t end_blk; 859 860 *wrapped = false; 861 862 /* 863 * Walk forward from the tail block until we hit the head or the last 864 * block in the log. 865 */ 866 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1; 867 for (i = (int) tail_blk; i <= end_blk; i++) { 868 error = xlog_bread(log, i, 1, buffer, &offset); 869 if (error) 870 goto out_error; 871 872 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { 873 *rblk = i; 874 *rhead = (struct xlog_rec_header *) offset; 875 if (++found == count) 876 break; 877 } 878 } 879 880 /* 881 * If we haven't hit the head block or the log record header count, 882 * start looking again from the start of the physical log. 
883 */ 884 if (tail_blk > head_blk && found != count) { 885 for (i = 0; i < (int) head_blk; i++) { 886 error = xlog_bread(log, i, 1, buffer, &offset); 887 if (error) 888 goto out_error; 889 890 if (*(__be32 *)offset == 891 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { 892 *wrapped = true; 893 *rblk = i; 894 *rhead = (struct xlog_rec_header *) offset; 895 if (++found == count) 896 break; 897 } 898 } 899 } 900 901 return found; 902 903 out_error: 904 return error; 905 } 906 907 /* 908 * Calculate distance from head to tail (i.e., unused space in the log). 909 */ 910 static inline int 911 xlog_tail_distance( 912 struct xlog *log, 913 xfs_daddr_t head_blk, 914 xfs_daddr_t tail_blk) 915 { 916 if (head_blk < tail_blk) 917 return tail_blk - head_blk; 918 919 return tail_blk + (log->l_logBBsize - head_blk); 920 } 921 922 /* 923 * Verify the log tail. This is particularly important when torn or incomplete 924 * writes have been detected near the front of the log and the head has been 925 * walked back accordingly. 926 * 927 * We also have to handle the case where the tail was pinned and the head 928 * blocked behind the tail right before a crash. If the tail had been pushed 929 * immediately prior to the crash and the subsequent checkpoint was only 930 * partially written, it's possible it overwrote the last referenced tail in the 931 * log with garbage. This is not a coherency problem because the tail must have 932 * been pushed before it can be overwritten, but appears as log corruption to 933 * recovery because we have no way to know the tail was updated if the 934 * subsequent checkpoint didn't write successfully. 935 * 936 * Therefore, CRC check the log from tail to head. If a failure occurs and the 937 * offending record is within max iclog bufs from the head, walk the tail 938 * forward and retry until a valid tail is found or corruption is detected out 939 * of the range of a possible overwrite. 940 */ 941 STATIC int 942 xlog_verify_tail( 943 struct xlog *log, 944 xfs_daddr_t head_blk, 945 xfs_daddr_t *tail_blk, 946 int hsize) 947 { 948 struct xlog_rec_header *thead; 949 char *buffer; 950 xfs_daddr_t first_bad; 951 int error = 0; 952 bool wrapped; 953 xfs_daddr_t tmp_tail; 954 xfs_daddr_t orig_tail = *tail_blk; 955 956 buffer = xlog_alloc_buffer(log, 1); 957 if (!buffer) 958 return -ENOMEM; 959 960 /* 961 * Make sure the tail points to a record (returns positive count on 962 * success). 963 */ 964 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer, 965 &tmp_tail, &thead, &wrapped); 966 if (error < 0) 967 goto out; 968 if (*tail_blk != tmp_tail) 969 *tail_blk = tmp_tail; 970 971 /* 972 * Run a CRC check from the tail to the head. We can't just check 973 * MAX_ICLOGS records past the tail because the tail may point to stale 974 * blocks cleared during the search for the head/tail. These blocks are 975 * overwritten with zero-length records and thus record count is not a 976 * reliable indicator of the iclog state before a crash. 977 */ 978 first_bad = 0; 979 error = xlog_do_recovery_pass(log, head_blk, *tail_blk, 980 XLOG_RECOVER_CRCPASS, &first_bad); 981 while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) { 982 int tail_distance; 983 984 /* 985 * Is corruption within range of the head? If so, retry from 986 * the next record. Otherwise return an error. 
987 */ 988 tail_distance = xlog_tail_distance(log, head_blk, first_bad); 989 if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize)) 990 break; 991 992 /* skip to the next record; returns positive count on success */ 993 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, 994 buffer, &tmp_tail, &thead, &wrapped); 995 if (error < 0) 996 goto out; 997 998 *tail_blk = tmp_tail; 999 first_bad = 0; 1000 error = xlog_do_recovery_pass(log, head_blk, *tail_blk, 1001 XLOG_RECOVER_CRCPASS, &first_bad); 1002 } 1003 1004 if (!error && *tail_blk != orig_tail) 1005 xfs_warn(log->l_mp, 1006 "Tail block (0x%llx) overwrite detected. Updated to 0x%llx", 1007 orig_tail, *tail_blk); 1008 out: 1009 kmem_free(buffer); 1010 return error; 1011 } 1012 1013 /* 1014 * Detect and trim torn writes from the head of the log. 1015 * 1016 * Storage without sector atomicity guarantees can result in torn writes in the 1017 * log in the event of a crash. Our only means to detect this scenario is via 1018 * CRC verification. While we can't always be certain that CRC verification 1019 * failure is due to a torn write vs. an unrelated corruption, we do know that 1020 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at 1021 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of 1022 * the log and treat failures in this range as torn writes as a matter of 1023 * policy. In the event of CRC failure, the head is walked back to the last good 1024 * record in the log and the tail is updated from that record and verified. 1025 */ 1026 STATIC int 1027 xlog_verify_head( 1028 struct xlog *log, 1029 xfs_daddr_t *head_blk, /* in/out: unverified head */ 1030 xfs_daddr_t *tail_blk, /* out: tail block */ 1031 char *buffer, 1032 xfs_daddr_t *rhead_blk, /* start blk of last record */ 1033 struct xlog_rec_header **rhead, /* ptr to last record */ 1034 bool *wrapped) /* last rec. wraps phys. log */ 1035 { 1036 struct xlog_rec_header *tmp_rhead; 1037 char *tmp_buffer; 1038 xfs_daddr_t first_bad; 1039 xfs_daddr_t tmp_rhead_blk; 1040 int found; 1041 int error; 1042 bool tmp_wrapped; 1043 1044 /* 1045 * Check the head of the log for torn writes. Search backwards from the 1046 * head until we hit the tail or the maximum number of log record I/Os 1047 * that could have been in flight at one time. Use a temporary buffer so 1048 * we don't trash the rhead/buffer pointers from the caller. 1049 */ 1050 tmp_buffer = xlog_alloc_buffer(log, 1); 1051 if (!tmp_buffer) 1052 return -ENOMEM; 1053 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk, 1054 XLOG_MAX_ICLOGS, tmp_buffer, 1055 &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped); 1056 kmem_free(tmp_buffer); 1057 if (error < 0) 1058 return error; 1059 1060 /* 1061 * Now run a CRC verification pass over the records starting at the 1062 * block found above to the current head. If a CRC failure occurs, the 1063 * log block of the first bad record is saved in first_bad. 1064 */ 1065 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk, 1066 XLOG_RECOVER_CRCPASS, &first_bad); 1067 if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) { 1068 /* 1069 * We've hit a potential torn write. Reset the error and warn 1070 * about it. 1071 */ 1072 error = 0; 1073 xfs_warn(log->l_mp, 1074 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.", 1075 first_bad, *head_blk); 1076 1077 /* 1078 * Get the header block and buffer pointer for the last good 1079 * record before the bad record. 
1080 * 1081 * Note that xlog_find_tail() clears the blocks at the new head 1082 * (i.e., the records with invalid CRC) if the cycle number 1083 * matches the current cycle. 1084 */ 1085 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, 1086 buffer, rhead_blk, rhead, wrapped); 1087 if (found < 0) 1088 return found; 1089 if (found == 0) /* XXX: right thing to do here? */ 1090 return -EIO; 1091 1092 /* 1093 * Reset the head block to the starting block of the first bad 1094 * log record and set the tail block based on the last good 1095 * record. 1096 * 1097 * Bail out if the updated head/tail match as this indicates 1098 * possible corruption outside of the acceptable 1099 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair... 1100 */ 1101 *head_blk = first_bad; 1102 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn)); 1103 if (*head_blk == *tail_blk) { 1104 ASSERT(0); 1105 return 0; 1106 } 1107 } 1108 if (error) 1109 return error; 1110 1111 return xlog_verify_tail(log, *head_blk, tail_blk, 1112 be32_to_cpu((*rhead)->h_size)); 1113 } 1114 1115 /* 1116 * We need to make sure we handle log wrapping properly, so we can't use the 1117 * calculated logbno directly. Make sure it wraps to the correct bno inside the 1118 * log. 1119 * 1120 * The log is limited to 32 bit sizes, so we use the appropriate modulus 1121 * operation here and cast it back to a 64 bit daddr on return. 1122 */ 1123 static inline xfs_daddr_t 1124 xlog_wrap_logbno( 1125 struct xlog *log, 1126 xfs_daddr_t bno) 1127 { 1128 int mod; 1129 1130 div_s64_rem(bno, log->l_logBBsize, &mod); 1131 return mod; 1132 } 1133 1134 /* 1135 * Check whether the head of the log points to an unmount record. In other 1136 * words, determine whether the log is clean. If so, update the in-core state 1137 * appropriately. 1138 */ 1139 static int 1140 xlog_check_unmount_rec( 1141 struct xlog *log, 1142 xfs_daddr_t *head_blk, 1143 xfs_daddr_t *tail_blk, 1144 struct xlog_rec_header *rhead, 1145 xfs_daddr_t rhead_blk, 1146 char *buffer, 1147 bool *clean) 1148 { 1149 struct xlog_op_header *op_head; 1150 xfs_daddr_t umount_data_blk; 1151 xfs_daddr_t after_umount_blk; 1152 int hblks; 1153 int error; 1154 char *offset; 1155 1156 *clean = false; 1157 1158 /* 1159 * Look for unmount record. If we find it, then we know there was a 1160 * clean unmount. Since 'i' could be the last block in the physical 1161 * log, we convert to a log block before comparing to the head_blk. 1162 * 1163 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks() 1164 * below. We won't want to clear the unmount record if there is one, so 1165 * we pass the lsn of the unmount record rather than the block after it. 1166 */ 1167 hblks = xlog_logrec_hblks(log, rhead); 1168 after_umount_blk = xlog_wrap_logbno(log, 1169 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len))); 1170 1171 if (*head_blk == after_umount_blk && 1172 be32_to_cpu(rhead->h_num_logops) == 1) { 1173 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks); 1174 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset); 1175 if (error) 1176 return error; 1177 1178 op_head = (struct xlog_op_header *)offset; 1179 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) { 1180 /* 1181 * Set tail and last sync so that newly written log 1182 * records will point recovery to after the current 1183 * unmount record. 
1184 */ 1185 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1186 log->l_curr_cycle, after_umount_blk); 1187 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1188 log->l_curr_cycle, after_umount_blk); 1189 *tail_blk = after_umount_blk; 1190 1191 *clean = true; 1192 } 1193 } 1194 1195 return 0; 1196 } 1197 1198 static void 1199 xlog_set_state( 1200 struct xlog *log, 1201 xfs_daddr_t head_blk, 1202 struct xlog_rec_header *rhead, 1203 xfs_daddr_t rhead_blk, 1204 bool bump_cycle) 1205 { 1206 /* 1207 * Reset log values according to the state of the log when we 1208 * crashed. In the case where head_blk == 0, we bump curr_cycle 1209 * one because the next write starts a new cycle rather than 1210 * continuing the cycle of the last good log record. At this 1211 * point we have guaranteed that all partial log records have been 1212 * accounted for. Therefore, we know that the last good log record 1213 * written was complete and ended exactly on the end boundary 1214 * of the physical log. 1215 */ 1216 log->l_prev_block = rhead_blk; 1217 log->l_curr_block = (int)head_blk; 1218 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); 1219 if (bump_cycle) 1220 log->l_curr_cycle++; 1221 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn)); 1222 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); 1223 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle, 1224 BBTOB(log->l_curr_block)); 1225 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle, 1226 BBTOB(log->l_curr_block)); 1227 } 1228 1229 /* 1230 * Find the sync block number or the tail of the log. 1231 * 1232 * This will be the block number of the last record to have its 1233 * associated buffers synced to disk. Every log record header has 1234 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy 1235 * to get a sync block number. The only concern is to figure out which 1236 * log record header to believe. 1237 * 1238 * The following algorithm uses the log record header with the largest 1239 * lsn. The entire log record does not need to be valid. We only care 1240 * that the header is valid. 1241 * 1242 * We could speed up search by using current head_blk buffer, but it is not 1243 * available. 1244 */ 1245 STATIC int 1246 xlog_find_tail( 1247 struct xlog *log, 1248 xfs_daddr_t *head_blk, 1249 xfs_daddr_t *tail_blk) 1250 { 1251 xlog_rec_header_t *rhead; 1252 char *offset = NULL; 1253 char *buffer; 1254 int error; 1255 xfs_daddr_t rhead_blk; 1256 xfs_lsn_t tail_lsn; 1257 bool wrapped = false; 1258 bool clean = false; 1259 1260 /* 1261 * Find previous log record 1262 */ 1263 if ((error = xlog_find_head(log, head_blk))) 1264 return error; 1265 ASSERT(*head_blk < INT_MAX); 1266 1267 buffer = xlog_alloc_buffer(log, 1); 1268 if (!buffer) 1269 return -ENOMEM; 1270 if (*head_blk == 0) { /* special case */ 1271 error = xlog_bread(log, 0, 1, buffer, &offset); 1272 if (error) 1273 goto done; 1274 1275 if (xlog_get_cycle(offset) == 0) { 1276 *tail_blk = 0; 1277 /* leave all other log inited values alone */ 1278 goto done; 1279 } 1280 } 1281 1282 /* 1283 * Search backwards through the log looking for the log record header 1284 * block. This wraps all the way back around to the head so something is 1285 * seriously wrong if we can't find it. 
1286 */ 1287 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer, 1288 &rhead_blk, &rhead, &wrapped); 1289 if (error < 0) 1290 goto done; 1291 if (!error) { 1292 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); 1293 error = -EFSCORRUPTED; 1294 goto done; 1295 } 1296 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn)); 1297 1298 /* 1299 * Set the log state based on the current head record. 1300 */ 1301 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped); 1302 tail_lsn = atomic64_read(&log->l_tail_lsn); 1303 1304 /* 1305 * Look for an unmount record at the head of the log. This sets the log 1306 * state to determine whether recovery is necessary. 1307 */ 1308 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead, 1309 rhead_blk, buffer, &clean); 1310 if (error) 1311 goto done; 1312 1313 /* 1314 * Verify the log head if the log is not clean (e.g., we have anything 1315 * but an unmount record at the head). This uses CRC verification to 1316 * detect and trim torn writes. If discovered, CRC failures are 1317 * considered torn writes and the log head is trimmed accordingly. 1318 * 1319 * Note that we can only run CRC verification when the log is dirty 1320 * because there's no guarantee that the log data behind an unmount 1321 * record is compatible with the current architecture. 1322 */ 1323 if (!clean) { 1324 xfs_daddr_t orig_head = *head_blk; 1325 1326 error = xlog_verify_head(log, head_blk, tail_blk, buffer, 1327 &rhead_blk, &rhead, &wrapped); 1328 if (error) 1329 goto done; 1330 1331 /* update in-core state again if the head changed */ 1332 if (*head_blk != orig_head) { 1333 xlog_set_state(log, *head_blk, rhead, rhead_blk, 1334 wrapped); 1335 tail_lsn = atomic64_read(&log->l_tail_lsn); 1336 error = xlog_check_unmount_rec(log, head_blk, tail_blk, 1337 rhead, rhead_blk, buffer, 1338 &clean); 1339 if (error) 1340 goto done; 1341 } 1342 } 1343 1344 /* 1345 * Note that the unmount was clean. If the unmount was not clean, we 1346 * need to know this to rebuild the superblock counters from the perag 1347 * headers if we have a filesystem using non-persistent counters. 1348 */ 1349 if (clean) 1350 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN; 1351 1352 /* 1353 * Make sure that there are no blocks in front of the head 1354 * with the same cycle number as the head. This can happen 1355 * because we allow multiple outstanding log writes concurrently, 1356 * and the later writes might make it out before earlier ones. 1357 * 1358 * We use the lsn from before modifying it so that we'll never 1359 * overwrite the unmount record after a clean unmount. 1360 * 1361 * Do this only if we are going to recover the filesystem 1362 * 1363 * NOTE: This used to say "if (!readonly)" 1364 * However on Linux, we can & do recover a read-only filesystem. 1365 * We only skip recovery if NORECOVERY is specified on mount, 1366 * in which case we would not be here. 1367 * 1368 * But... if the -device- itself is readonly, just skip this. 1369 * We can't recover this device anyway, so it won't matter. 1370 */ 1371 if (!xfs_readonly_buftarg(log->l_targ)) 1372 error = xlog_clear_stale_blocks(log, tail_lsn); 1373 1374 done: 1375 kmem_free(buffer); 1376 1377 if (error) 1378 xfs_warn(log->l_mp, "failed to locate log tail"); 1379 return error; 1380 } 1381 1382 /* 1383 * Is the log zeroed at all? 1384 * 1385 * The last binary search should be changed to perform an X block read 1386 * once X becomes small enough. You can then search linearly through 1387 * the X blocks. 
This will cut down on the number of reads we need to do. 1388 * 1389 * If the log is partially zeroed, this routine will pass back the blkno 1390 * of the first block with cycle number 0. It won't have a complete LR 1391 * preceding it. 1392 * 1393 * Return: 1394 * 0 => the log is completely written to 1395 * 1 => use *blk_no as the first block of the log 1396 * <0 => error has occurred 1397 */ 1398 STATIC int 1399 xlog_find_zeroed( 1400 struct xlog *log, 1401 xfs_daddr_t *blk_no) 1402 { 1403 char *buffer; 1404 char *offset; 1405 uint first_cycle, last_cycle; 1406 xfs_daddr_t new_blk, last_blk, start_blk; 1407 xfs_daddr_t num_scan_bblks; 1408 int error, log_bbnum = log->l_logBBsize; 1409 1410 *blk_no = 0; 1411 1412 /* check totally zeroed log */ 1413 buffer = xlog_alloc_buffer(log, 1); 1414 if (!buffer) 1415 return -ENOMEM; 1416 error = xlog_bread(log, 0, 1, buffer, &offset); 1417 if (error) 1418 goto out_free_buffer; 1419 1420 first_cycle = xlog_get_cycle(offset); 1421 if (first_cycle == 0) { /* completely zeroed log */ 1422 *blk_no = 0; 1423 kmem_free(buffer); 1424 return 1; 1425 } 1426 1427 /* check partially zeroed log */ 1428 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset); 1429 if (error) 1430 goto out_free_buffer; 1431 1432 last_cycle = xlog_get_cycle(offset); 1433 if (last_cycle != 0) { /* log completely written to */ 1434 kmem_free(buffer); 1435 return 0; 1436 } 1437 1438 /* we have a partially zeroed log */ 1439 last_blk = log_bbnum-1; 1440 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0); 1441 if (error) 1442 goto out_free_buffer; 1443 1444 /* 1445 * Validate the answer. Because there is no way to guarantee that 1446 * the entire log is made up of log records which are the same size, 1447 * we scan over the defined maximum blocks. At this point, the maximum 1448 * is not chosen to mean anything special. XXXmiken 1449 */ 1450 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); 1451 ASSERT(num_scan_bblks <= INT_MAX); 1452 1453 if (last_blk < num_scan_bblks) 1454 num_scan_bblks = last_blk; 1455 start_blk = last_blk - num_scan_bblks; 1456 1457 /* 1458 * We search for any instances of cycle number 0 that occur before 1459 * our current estimate of the head. What we're trying to detect is 1460 * 1 ... | 0 | 1 | 0... 1461 * ^ binary search ends here 1462 */ 1463 if ((error = xlog_find_verify_cycle(log, start_blk, 1464 (int)num_scan_bblks, 0, &new_blk))) 1465 goto out_free_buffer; 1466 if (new_blk != -1) 1467 last_blk = new_blk; 1468 1469 /* 1470 * Potentially backup over partial log record write. We don't need 1471 * to search the end of the log because we know it is zero. 1472 */ 1473 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0); 1474 if (error == 1) 1475 error = -EIO; 1476 if (error) 1477 goto out_free_buffer; 1478 1479 *blk_no = last_blk; 1480 out_free_buffer: 1481 kmem_free(buffer); 1482 if (error) 1483 return error; 1484 return 1; 1485 } 1486 1487 /* 1488 * These are simple subroutines used by xlog_clear_stale_blocks() below 1489 * to initialize a buffer full of empty log record headers and write 1490 * them into the log. 
1491 */ 1492 STATIC void 1493 xlog_add_record( 1494 struct xlog *log, 1495 char *buf, 1496 int cycle, 1497 int block, 1498 int tail_cycle, 1499 int tail_block) 1500 { 1501 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf; 1502 1503 memset(buf, 0, BBSIZE); 1504 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); 1505 recp->h_cycle = cpu_to_be32(cycle); 1506 recp->h_version = cpu_to_be32( 1507 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); 1508 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block)); 1509 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block)); 1510 recp->h_fmt = cpu_to_be32(XLOG_FMT); 1511 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); 1512 } 1513 1514 STATIC int 1515 xlog_write_log_records( 1516 struct xlog *log, 1517 int cycle, 1518 int start_block, 1519 int blocks, 1520 int tail_cycle, 1521 int tail_block) 1522 { 1523 char *offset; 1524 char *buffer; 1525 int balign, ealign; 1526 int sectbb = log->l_sectBBsize; 1527 int end_block = start_block + blocks; 1528 int bufblks; 1529 int error = 0; 1530 int i, j = 0; 1531 1532 /* 1533 * Greedily allocate a buffer big enough to handle the full 1534 * range of basic blocks to be written. If that fails, try 1535 * a smaller size. We need to be able to write at least a 1536 * log sector, or we're out of luck. 1537 */ 1538 bufblks = 1 << ffs(blocks); 1539 while (bufblks > log->l_logBBsize) 1540 bufblks >>= 1; 1541 while (!(buffer = xlog_alloc_buffer(log, bufblks))) { 1542 bufblks >>= 1; 1543 if (bufblks < sectbb) 1544 return -ENOMEM; 1545 } 1546 1547 /* We may need to do a read at the start to fill in part of 1548 * the buffer in the starting sector not covered by the first 1549 * write below. 1550 */ 1551 balign = round_down(start_block, sectbb); 1552 if (balign != start_block) { 1553 error = xlog_bread_noalign(log, start_block, 1, buffer); 1554 if (error) 1555 goto out_free_buffer; 1556 1557 j = start_block - balign; 1558 } 1559 1560 for (i = start_block; i < end_block; i += bufblks) { 1561 int bcount, endcount; 1562 1563 bcount = min(bufblks, end_block - start_block); 1564 endcount = bcount - j; 1565 1566 /* We may need to do a read at the end to fill in part of 1567 * the buffer in the final sector not covered by the write. 1568 * If this is the same sector as the above read, skip it. 1569 */ 1570 ealign = round_down(end_block, sectbb); 1571 if (j == 0 && (start_block + endcount > ealign)) { 1572 error = xlog_bread_noalign(log, ealign, sectbb, 1573 buffer + BBTOB(ealign - start_block)); 1574 if (error) 1575 break; 1576 1577 } 1578 1579 offset = buffer + xlog_align(log, start_block); 1580 for (; j < endcount; j++) { 1581 xlog_add_record(log, offset, cycle, i+j, 1582 tail_cycle, tail_block); 1583 offset += BBSIZE; 1584 } 1585 error = xlog_bwrite(log, start_block, endcount, buffer); 1586 if (error) 1587 break; 1588 start_block += endcount; 1589 j = 0; 1590 } 1591 1592 out_free_buffer: 1593 kmem_free(buffer); 1594 return error; 1595 } 1596 1597 /* 1598 * This routine is called to blow away any incomplete log writes out 1599 * in front of the log head. We do this so that we won't become confused 1600 * if we come up, write only a little bit more, and then crash again. 1601 * If we leave the partial log records out there, this situation could 1602 * cause us to think those partial writes are valid blocks since they 1603 * have the current cycle number. We get rid of them by overwriting them 1604 * with empty log records with the old cycle number rather than the 1605 * current one. 
1606 * 1607 * The tail lsn is passed in rather than taken from 1608 * the log so that we will not write over the unmount record after a 1609 * clean unmount in a 512 block log. Doing so would leave the log without 1610 * any valid log records in it until a new one was written. If we crashed 1611 * during that time we would not be able to recover. 1612 */ 1613 STATIC int 1614 xlog_clear_stale_blocks( 1615 struct xlog *log, 1616 xfs_lsn_t tail_lsn) 1617 { 1618 int tail_cycle, head_cycle; 1619 int tail_block, head_block; 1620 int tail_distance, max_distance; 1621 int distance; 1622 int error; 1623 1624 tail_cycle = CYCLE_LSN(tail_lsn); 1625 tail_block = BLOCK_LSN(tail_lsn); 1626 head_cycle = log->l_curr_cycle; 1627 head_block = log->l_curr_block; 1628 1629 /* 1630 * Figure out the distance between the new head of the log 1631 * and the tail. We want to write over any blocks beyond the 1632 * head that we may have written just before the crash, but 1633 * we don't want to overwrite the tail of the log. 1634 */ 1635 if (head_cycle == tail_cycle) { 1636 /* 1637 * The tail is behind the head in the physical log, 1638 * so the distance from the head to the tail is the 1639 * distance from the head to the end of the log plus 1640 * the distance from the beginning of the log to the 1641 * tail. 1642 */ 1643 if (XFS_IS_CORRUPT(log->l_mp, 1644 head_block < tail_block || 1645 head_block >= log->l_logBBsize)) 1646 return -EFSCORRUPTED; 1647 tail_distance = tail_block + (log->l_logBBsize - head_block); 1648 } else { 1649 /* 1650 * The head is behind the tail in the physical log, 1651 * so the distance from the head to the tail is just 1652 * the tail block minus the head block. 1653 */ 1654 if (XFS_IS_CORRUPT(log->l_mp, 1655 head_block >= tail_block || 1656 head_cycle != tail_cycle + 1)) 1657 return -EFSCORRUPTED; 1658 tail_distance = tail_block - head_block; 1659 } 1660 1661 /* 1662 * If the head is right up against the tail, we can't clear 1663 * anything. 1664 */ 1665 if (tail_distance <= 0) { 1666 ASSERT(tail_distance == 0); 1667 return 0; 1668 } 1669 1670 max_distance = XLOG_TOTAL_REC_SHIFT(log); 1671 /* 1672 * Take the smaller of the maximum amount of outstanding I/O 1673 * we could have and the distance to the tail to clear out. 1674 * We take the smaller so that we don't overwrite the tail and 1675 * we don't waste all day writing from the head to the tail 1676 * for no reason. 1677 */ 1678 max_distance = min(max_distance, tail_distance); 1679 1680 if ((head_block + max_distance) <= log->l_logBBsize) { 1681 /* 1682 * We can stomp all the blocks we need to without 1683 * wrapping around the end of the log. Just do it 1684 * in a single write. Use the cycle number of the 1685 * current cycle minus one so that the log will look like: 1686 * n ... | n - 1 ... 1687 */ 1688 error = xlog_write_log_records(log, (head_cycle - 1), 1689 head_block, max_distance, tail_cycle, 1690 tail_block); 1691 if (error) 1692 return error; 1693 } else { 1694 /* 1695 * We need to wrap around the end of the physical log in 1696 * order to clear all the blocks. Do it in two separate 1697 * I/Os. The first write should be from the head to the 1698 * end of the physical log, and it should use the current 1699 * cycle number minus one just like above. 
1700 */ 1701 distance = log->l_logBBsize - head_block; 1702 error = xlog_write_log_records(log, (head_cycle - 1), 1703 head_block, distance, tail_cycle, 1704 tail_block); 1705 1706 if (error) 1707 return error; 1708 1709 /* 1710 * Now write the blocks at the start of the physical log. 1711 * This writes the remainder of the blocks we want to clear. 1712 * It uses the current cycle number since we're now on the 1713 * same cycle as the head so that we get: 1714 * n ... n ... | n - 1 ... 1715 * ^^^^^ blocks we're writing 1716 */ 1717 distance = max_distance - (log->l_logBBsize - head_block); 1718 error = xlog_write_log_records(log, head_cycle, 0, distance, 1719 tail_cycle, tail_block); 1720 if (error) 1721 return error; 1722 } 1723 1724 return 0; 1725 } 1726 1727 /* 1728 * Release the recovered intent item in the AIL that matches the given intent 1729 * type and intent id. 1730 */ 1731 void 1732 xlog_recover_release_intent( 1733 struct xlog *log, 1734 unsigned short intent_type, 1735 uint64_t intent_id) 1736 { 1737 struct xfs_ail_cursor cur; 1738 struct xfs_log_item *lip; 1739 struct xfs_ail *ailp = log->l_ailp; 1740 1741 spin_lock(&ailp->ail_lock); 1742 for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL; 1743 lip = xfs_trans_ail_cursor_next(ailp, &cur)) { 1744 if (lip->li_type != intent_type) 1745 continue; 1746 if (!lip->li_ops->iop_match(lip, intent_id)) 1747 continue; 1748 1749 spin_unlock(&ailp->ail_lock); 1750 lip->li_ops->iop_release(lip); 1751 spin_lock(&ailp->ail_lock); 1752 break; 1753 } 1754 1755 xfs_trans_ail_cursor_done(&cur); 1756 spin_unlock(&ailp->ail_lock); 1757 } 1758 1759 /****************************************************************************** 1760 * 1761 * Log recover routines 1762 * 1763 ****************************************************************************** 1764 */ 1765 static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = { 1766 &xlog_buf_item_ops, 1767 &xlog_inode_item_ops, 1768 &xlog_dquot_item_ops, 1769 &xlog_quotaoff_item_ops, 1770 &xlog_icreate_item_ops, 1771 &xlog_efi_item_ops, 1772 &xlog_efd_item_ops, 1773 &xlog_rui_item_ops, 1774 &xlog_rud_item_ops, 1775 &xlog_cui_item_ops, 1776 &xlog_cud_item_ops, 1777 &xlog_bui_item_ops, 1778 &xlog_bud_item_ops, 1779 }; 1780 1781 static const struct xlog_recover_item_ops * 1782 xlog_find_item_ops( 1783 struct xlog_recover_item *item) 1784 { 1785 unsigned int i; 1786 1787 for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++) 1788 if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type) 1789 return xlog_recover_item_ops[i]; 1790 1791 return NULL; 1792 } 1793 1794 /* 1795 * Sort the log items in the transaction. 1796 * 1797 * The ordering constraints are defined by the inode allocation and unlink 1798 * behaviour. The rules are: 1799 * 1800 * 1. Every item is only logged once in a given transaction. Hence it 1801 * represents the last logged state of the item. Hence ordering is 1802 * dependent on the order in which operations need to be performed so 1803 * required initial conditions are always met. 1804 * 1805 * 2. Cancelled buffers are recorded in pass 1 in a separate table and 1806 * there's nothing to replay from them so we can simply cull them 1807 * from the transaction. However, we can't do that until after we've 1808 * replayed all the other items because they may be dependent on the 1809 * cancelled buffer and replaying the cancelled buffer can remove it 1810 * form the cancelled buffer table. Hence they have tobe done last. 1811 * 1812 * 3. 
Inode allocation buffers must be replayed before inode items that 1813 * read the buffer and replay changes into it. For filesystems using the 1814 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get 1815 * treated the same as inode allocation buffers as they create and 1816 * initialise the buffers directly. 1817 * 1818 * 4. Inode unlink buffers must be replayed after inode items are replayed. 1819 * This ensures that inodes are completely flushed to the inode buffer 1820 * in a "free" state before we remove the unlinked inode list pointer. 1821 * 1822 * Hence the ordering needs to be inode allocation buffers first, inode items 1823 * second, inode unlink buffers third and cancelled buffers last. 1824 * 1825 * But there's a problem with that - we can't tell an inode allocation buffer 1826 * apart from a regular buffer, so we can't separate them. We can, however, 1827 * tell an inode unlink buffer from the others, and so we can separate them out 1828 * from all the other buffers and move them to last. 1829 * 1830 * Hence, 4 lists, in order from head to tail: 1831 * - buffer_list for all buffers except cancelled/inode unlink buffers 1832 * - item_list for all non-buffer items 1833 * - inode_buffer_list for inode unlink buffers 1834 * - cancel_list for the cancelled buffers 1835 * 1836 * Note that we add objects to the tail of the lists so that first-to-last 1837 * ordering is preserved within the lists. Adding objects to the head of the 1838 * list means when we traverse from the head we walk them in last-to-first 1839 * order. For cancelled buffers and inode unlink buffers this doesn't matter, 1840 * but for all other items there may be specific ordering that we need to 1841 * preserve. 1842 */ 1843 STATIC int 1844 xlog_recover_reorder_trans( 1845 struct xlog *log, 1846 struct xlog_recover *trans, 1847 int pass) 1848 { 1849 struct xlog_recover_item *item, *n; 1850 int error = 0; 1851 LIST_HEAD(sort_list); 1852 LIST_HEAD(cancel_list); 1853 LIST_HEAD(buffer_list); 1854 LIST_HEAD(inode_buffer_list); 1855 LIST_HEAD(item_list); 1856 1857 list_splice_init(&trans->r_itemq, &sort_list); 1858 list_for_each_entry_safe(item, n, &sort_list, ri_list) { 1859 enum xlog_recover_reorder fate = XLOG_REORDER_ITEM_LIST; 1860 1861 item->ri_ops = xlog_find_item_ops(item); 1862 if (!item->ri_ops) { 1863 xfs_warn(log->l_mp, 1864 "%s: unrecognized type of log operation (%d)", 1865 __func__, ITEM_TYPE(item)); 1866 ASSERT(0); 1867 /* 1868 * return the remaining items back to the transaction 1869 * item list so they can be freed in caller. 
1870 */ 1871 if (!list_empty(&sort_list)) 1872 list_splice_init(&sort_list, &trans->r_itemq); 1873 error = -EFSCORRUPTED; 1874 break; 1875 } 1876 1877 if (item->ri_ops->reorder) 1878 fate = item->ri_ops->reorder(item); 1879 1880 switch (fate) { 1881 case XLOG_REORDER_BUFFER_LIST: 1882 list_move_tail(&item->ri_list, &buffer_list); 1883 break; 1884 case XLOG_REORDER_CANCEL_LIST: 1885 trace_xfs_log_recover_item_reorder_head(log, 1886 trans, item, pass); 1887 list_move(&item->ri_list, &cancel_list); 1888 break; 1889 case XLOG_REORDER_INODE_BUFFER_LIST: 1890 list_move(&item->ri_list, &inode_buffer_list); 1891 break; 1892 case XLOG_REORDER_ITEM_LIST: 1893 trace_xfs_log_recover_item_reorder_tail(log, 1894 trans, item, pass); 1895 list_move_tail(&item->ri_list, &item_list); 1896 break; 1897 } 1898 } 1899 1900 ASSERT(list_empty(&sort_list)); 1901 if (!list_empty(&buffer_list)) 1902 list_splice(&buffer_list, &trans->r_itemq); 1903 if (!list_empty(&item_list)) 1904 list_splice_tail(&item_list, &trans->r_itemq); 1905 if (!list_empty(&inode_buffer_list)) 1906 list_splice_tail(&inode_buffer_list, &trans->r_itemq); 1907 if (!list_empty(&cancel_list)) 1908 list_splice_tail(&cancel_list, &trans->r_itemq); 1909 return error; 1910 } 1911 1912 void 1913 xlog_buf_readahead( 1914 struct xlog *log, 1915 xfs_daddr_t blkno, 1916 uint len, 1917 const struct xfs_buf_ops *ops) 1918 { 1919 if (!xlog_is_buffer_cancelled(log, blkno, len)) 1920 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops); 1921 } 1922 1923 STATIC int 1924 xlog_recover_items_pass2( 1925 struct xlog *log, 1926 struct xlog_recover *trans, 1927 struct list_head *buffer_list, 1928 struct list_head *item_list) 1929 { 1930 struct xlog_recover_item *item; 1931 int error = 0; 1932 1933 list_for_each_entry(item, item_list, ri_list) { 1934 trace_xfs_log_recover_item_recover(log, trans, item, 1935 XLOG_RECOVER_PASS2); 1936 1937 if (item->ri_ops->commit_pass2) 1938 error = item->ri_ops->commit_pass2(log, buffer_list, 1939 item, trans->r_lsn); 1940 if (error) 1941 return error; 1942 } 1943 1944 return error; 1945 } 1946 1947 /* 1948 * Perform the transaction. 1949 * 1950 * If the transaction modifies a buffer or inode, do it now. Otherwise, 1951 * EFIs and EFDs get queued up by adding entries into the AIL for them. 
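 *
 * (Clarifying note on the pass 2 path below: items are not recovered one at
 * a time. They are first moved to a local ra_list so that any ->ra_pass2
 * readahead can be issued, and once XLOG_RECOVER_COMMIT_QUEUE_MAX items have
 * queued up they are recovered in a batch by xlog_recover_items_pass2();
 * whatever remains in ra_list is flushed after the loop.)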
1952 */ 1953 STATIC int 1954 xlog_recover_commit_trans( 1955 struct xlog *log, 1956 struct xlog_recover *trans, 1957 int pass, 1958 struct list_head *buffer_list) 1959 { 1960 int error = 0; 1961 int items_queued = 0; 1962 struct xlog_recover_item *item; 1963 struct xlog_recover_item *next; 1964 LIST_HEAD (ra_list); 1965 LIST_HEAD (done_list); 1966 1967 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100 1968 1969 hlist_del_init(&trans->r_list); 1970 1971 error = xlog_recover_reorder_trans(log, trans, pass); 1972 if (error) 1973 return error; 1974 1975 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) { 1976 trace_xfs_log_recover_item_recover(log, trans, item, pass); 1977 1978 switch (pass) { 1979 case XLOG_RECOVER_PASS1: 1980 if (item->ri_ops->commit_pass1) 1981 error = item->ri_ops->commit_pass1(log, item); 1982 break; 1983 case XLOG_RECOVER_PASS2: 1984 if (item->ri_ops->ra_pass2) 1985 item->ri_ops->ra_pass2(log, item); 1986 list_move_tail(&item->ri_list, &ra_list); 1987 items_queued++; 1988 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) { 1989 error = xlog_recover_items_pass2(log, trans, 1990 buffer_list, &ra_list); 1991 list_splice_tail_init(&ra_list, &done_list); 1992 items_queued = 0; 1993 } 1994 1995 break; 1996 default: 1997 ASSERT(0); 1998 } 1999 2000 if (error) 2001 goto out; 2002 } 2003 2004 out: 2005 if (!list_empty(&ra_list)) { 2006 if (!error) 2007 error = xlog_recover_items_pass2(log, trans, 2008 buffer_list, &ra_list); 2009 list_splice_tail_init(&ra_list, &done_list); 2010 } 2011 2012 if (!list_empty(&done_list)) 2013 list_splice_init(&done_list, &trans->r_itemq); 2014 2015 return error; 2016 } 2017 2018 STATIC void 2019 xlog_recover_add_item( 2020 struct list_head *head) 2021 { 2022 struct xlog_recover_item *item; 2023 2024 item = kmem_zalloc(sizeof(struct xlog_recover_item), 0); 2025 INIT_LIST_HEAD(&item->ri_list); 2026 list_add_tail(&item->ri_list, head); 2027 } 2028 2029 STATIC int 2030 xlog_recover_add_to_cont_trans( 2031 struct xlog *log, 2032 struct xlog_recover *trans, 2033 char *dp, 2034 int len) 2035 { 2036 struct xlog_recover_item *item; 2037 char *ptr, *old_ptr; 2038 int old_len; 2039 2040 /* 2041 * If the transaction is empty, the header was split across this and the 2042 * previous record. Copy the rest of the header. 2043 */ 2044 if (list_empty(&trans->r_itemq)) { 2045 ASSERT(len <= sizeof(struct xfs_trans_header)); 2046 if (len > sizeof(struct xfs_trans_header)) { 2047 xfs_warn(log->l_mp, "%s: bad header length", __func__); 2048 return -EFSCORRUPTED; 2049 } 2050 2051 xlog_recover_add_item(&trans->r_itemq); 2052 ptr = (char *)&trans->r_theader + 2053 sizeof(struct xfs_trans_header) - len; 2054 memcpy(ptr, dp, len); 2055 return 0; 2056 } 2057 2058 /* take the tail entry */ 2059 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, 2060 ri_list); 2061 2062 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; 2063 old_len = item->ri_buf[item->ri_cnt-1].i_len; 2064 2065 ptr = krealloc(old_ptr, len + old_len, GFP_KERNEL | __GFP_NOFAIL); 2066 memcpy(&ptr[old_len], dp, len); 2067 item->ri_buf[item->ri_cnt-1].i_len += len; 2068 item->ri_buf[item->ri_cnt-1].i_addr = ptr; 2069 trace_xfs_log_recover_item_add_cont(log, trans, item, 0); 2070 return 0; 2071 } 2072 2073 /* 2074 * The next region to add is the start of a new region. It could be 2075 * a whole region or it could be the first part of a new region. Because 2076 * of this, the assumption here is that the type and size fields of all 2077 * format structures fit into the first 32 bits of the structure. 
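 *
 * (Illustrative aside, not from the original text: struct xfs_inode_log_format
 * and struct xfs_buf_log_format, for instance, both start with a 16 bit type
 * field followed by a 16 bit size field, so the cast to struct
 * xfs_inode_log_format below only relies on those leading 32 bits.)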
2078 * 2079 * This works because all regions must be 32 bit aligned. Therefore, we 2080 * either have both fields or we have neither field. In the case we have 2081 * neither field, the data part of the region is zero length. We only have 2082 * a log_op_header and can throw away the header since a new one will appear 2083 * later. If we have at least 4 bytes, then we can determine how many regions 2084 * will appear in the current log item. 2085 */ 2086 STATIC int 2087 xlog_recover_add_to_trans( 2088 struct xlog *log, 2089 struct xlog_recover *trans, 2090 char *dp, 2091 int len) 2092 { 2093 struct xfs_inode_log_format *in_f; /* any will do */ 2094 struct xlog_recover_item *item; 2095 char *ptr; 2096 2097 if (!len) 2098 return 0; 2099 if (list_empty(&trans->r_itemq)) { 2100 /* we need to catch log corruptions here */ 2101 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { 2102 xfs_warn(log->l_mp, "%s: bad header magic number", 2103 __func__); 2104 ASSERT(0); 2105 return -EFSCORRUPTED; 2106 } 2107 2108 if (len > sizeof(struct xfs_trans_header)) { 2109 xfs_warn(log->l_mp, "%s: bad header length", __func__); 2110 ASSERT(0); 2111 return -EFSCORRUPTED; 2112 } 2113 2114 /* 2115 * The transaction header can be arbitrarily split across op 2116 * records. If we don't have the whole thing here, copy what we 2117 * do have and handle the rest in the next record. 2118 */ 2119 if (len == sizeof(struct xfs_trans_header)) 2120 xlog_recover_add_item(&trans->r_itemq); 2121 memcpy(&trans->r_theader, dp, len); 2122 return 0; 2123 } 2124 2125 ptr = kmem_alloc(len, 0); 2126 memcpy(ptr, dp, len); 2127 in_f = (struct xfs_inode_log_format *)ptr; 2128 2129 /* take the tail entry */ 2130 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, 2131 ri_list); 2132 if (item->ri_total != 0 && 2133 item->ri_total == item->ri_cnt) { 2134 /* tail item is in use, get a new one */ 2135 xlog_recover_add_item(&trans->r_itemq); 2136 item = list_entry(trans->r_itemq.prev, 2137 struct xlog_recover_item, ri_list); 2138 } 2139 2140 if (item->ri_total == 0) { /* first region to be added */ 2141 if (in_f->ilf_size == 0 || 2142 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) { 2143 xfs_warn(log->l_mp, 2144 "bad number of regions (%d) in inode log format", 2145 in_f->ilf_size); 2146 ASSERT(0); 2147 kmem_free(ptr); 2148 return -EFSCORRUPTED; 2149 } 2150 2151 item->ri_total = in_f->ilf_size; 2152 item->ri_buf = 2153 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t), 2154 0); 2155 } 2156 2157 if (item->ri_total <= item->ri_cnt) { 2158 xfs_warn(log->l_mp, 2159 "log item region count (%d) overflowed size (%d)", 2160 item->ri_cnt, item->ri_total); 2161 ASSERT(0); 2162 kmem_free(ptr); 2163 return -EFSCORRUPTED; 2164 } 2165 2166 /* Description region is ri_buf[0] */ 2167 item->ri_buf[item->ri_cnt].i_addr = ptr; 2168 item->ri_buf[item->ri_cnt].i_len = len; 2169 item->ri_cnt++; 2170 trace_xfs_log_recover_item_add(log, trans, item, 0); 2171 return 0; 2172 } 2173 2174 /* 2175 * Free up any resources allocated by the transaction 2176 * 2177 * Remember that EFIs, EFDs, and IUNLINKs are handled later. 2178 */ 2179 STATIC void 2180 xlog_recover_free_trans( 2181 struct xlog_recover *trans) 2182 { 2183 struct xlog_recover_item *item, *n; 2184 int i; 2185 2186 hlist_del_init(&trans->r_list); 2187 2188 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { 2189 /* Free the regions in the item. 
*/ 2190 list_del(&item->ri_list); 2191 for (i = 0; i < item->ri_cnt; i++) 2192 kmem_free(item->ri_buf[i].i_addr); 2193 /* Free the item itself */ 2194 kmem_free(item->ri_buf); 2195 kmem_free(item); 2196 } 2197 /* Free the transaction recover structure */ 2198 kmem_free(trans); 2199 } 2200 2201 /* 2202 * On error or completion, trans is freed. 2203 */ 2204 STATIC int 2205 xlog_recovery_process_trans( 2206 struct xlog *log, 2207 struct xlog_recover *trans, 2208 char *dp, 2209 unsigned int len, 2210 unsigned int flags, 2211 int pass, 2212 struct list_head *buffer_list) 2213 { 2214 int error = 0; 2215 bool freeit = false; 2216 2217 /* mask off ophdr transaction container flags */ 2218 flags &= ~XLOG_END_TRANS; 2219 if (flags & XLOG_WAS_CONT_TRANS) 2220 flags &= ~XLOG_CONTINUE_TRANS; 2221 2222 /* 2223 * Callees must not free the trans structure. We'll decide if we need to 2224 * free it or not based on the operation being done and its result. 2225 */ 2226 switch (flags) { 2227 /* expected flag values */ 2228 case 0: 2229 case XLOG_CONTINUE_TRANS: 2230 error = xlog_recover_add_to_trans(log, trans, dp, len); 2231 break; 2232 case XLOG_WAS_CONT_TRANS: 2233 error = xlog_recover_add_to_cont_trans(log, trans, dp, len); 2234 break; 2235 case XLOG_COMMIT_TRANS: 2236 error = xlog_recover_commit_trans(log, trans, pass, 2237 buffer_list); 2238 /* success or fail, we are now done with this transaction. */ 2239 freeit = true; 2240 break; 2241 2242 /* unexpected flag values */ 2243 case XLOG_UNMOUNT_TRANS: 2244 /* just skip trans */ 2245 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); 2246 freeit = true; 2247 break; 2248 case XLOG_START_TRANS: 2249 default: 2250 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags); 2251 ASSERT(0); 2252 error = -EFSCORRUPTED; 2253 break; 2254 } 2255 if (error || freeit) 2256 xlog_recover_free_trans(trans); 2257 return error; 2258 } 2259 2260 /* 2261 * Lookup the transaction recovery structure associated with the ID in the 2262 * current ophdr. If the transaction doesn't exist and the start flag is set in 2263 * the ophdr, then allocate a new transaction for future ID matches to find. 2264 * Either way, return what we found during the lookup - an existing transaction 2265 * or nothing. 2266 */ 2267 STATIC struct xlog_recover * 2268 xlog_recover_ophdr_to_trans( 2269 struct hlist_head rhash[], 2270 struct xlog_rec_header *rhead, 2271 struct xlog_op_header *ohead) 2272 { 2273 struct xlog_recover *trans; 2274 xlog_tid_t tid; 2275 struct hlist_head *rhp; 2276 2277 tid = be32_to_cpu(ohead->oh_tid); 2278 rhp = &rhash[XLOG_RHASH(tid)]; 2279 hlist_for_each_entry(trans, rhp, r_list) { 2280 if (trans->r_log_tid == tid) 2281 return trans; 2282 } 2283 2284 /* 2285 * skip over non-start transaction headers - we could be 2286 * processing slack space before the next transaction starts 2287 */ 2288 if (!(ohead->oh_flags & XLOG_START_TRANS)) 2289 return NULL; 2290 2291 ASSERT(be32_to_cpu(ohead->oh_len) == 0); 2292 2293 /* 2294 * This is a new transaction so allocate a new recovery container to 2295 * hold the recovery ops that will follow. 2296 */ 2297 trans = kmem_zalloc(sizeof(struct xlog_recover), 0); 2298 trans->r_log_tid = tid; 2299 trans->r_lsn = be64_to_cpu(rhead->h_lsn); 2300 INIT_LIST_HEAD(&trans->r_itemq); 2301 INIT_HLIST_NODE(&trans->r_list); 2302 hlist_add_head(&trans->r_list, rhp); 2303 2304 /* 2305 * Nothing more to do for this ophdr. Items to be added to this new 2306 * transaction will be in subsequent ophdr containers.
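 *
 * (Clarifying aside: interleaved transactions are kept apart by hashing the
 * ophdr tid into one of the rhash[] buckets via XLOG_RHASH() above; later
 * ophdrs carrying the same tid find this xlog_recover structure again by
 * walking that bucket's hlist at the top of this function.)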
2307 */ 2308 return NULL; 2309 } 2310 2311 STATIC int 2312 xlog_recover_process_ophdr( 2313 struct xlog *log, 2314 struct hlist_head rhash[], 2315 struct xlog_rec_header *rhead, 2316 struct xlog_op_header *ohead, 2317 char *dp, 2318 char *end, 2319 int pass, 2320 struct list_head *buffer_list) 2321 { 2322 struct xlog_recover *trans; 2323 unsigned int len; 2324 int error; 2325 2326 /* Do we understand who wrote this op? */ 2327 if (ohead->oh_clientid != XFS_TRANSACTION && 2328 ohead->oh_clientid != XFS_LOG) { 2329 xfs_warn(log->l_mp, "%s: bad clientid 0x%x", 2330 __func__, ohead->oh_clientid); 2331 ASSERT(0); 2332 return -EFSCORRUPTED; 2333 } 2334 2335 /* 2336 * Check the ophdr contains all the data it is supposed to contain. 2337 */ 2338 len = be32_to_cpu(ohead->oh_len); 2339 if (dp + len > end) { 2340 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len); 2341 WARN_ON(1); 2342 return -EFSCORRUPTED; 2343 } 2344 2345 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead); 2346 if (!trans) { 2347 /* nothing to do, so skip over this ophdr */ 2348 return 0; 2349 } 2350 2351 /* 2352 * The recovered buffer queue is drained only once we know that all 2353 * recovery items for the current LSN have been processed. This is 2354 * required because: 2355 * 2356 * - Buffer write submission updates the metadata LSN of the buffer. 2357 * - Log recovery skips items with a metadata LSN >= the current LSN of 2358 * the recovery item. 2359 * - Separate recovery items against the same metadata buffer can share 2360 * a current LSN. I.e., consider that the LSN of a recovery item is 2361 * defined as the starting LSN of the first record in which its 2362 * transaction appears, that a record can hold multiple transactions, 2363 * and/or that a transaction can span multiple records. 2364 * 2365 * In other words, we are allowed to submit a buffer from log recovery 2366 * once per current LSN. Otherwise, we may incorrectly skip recovery 2367 * items and cause corruption. 2368 * 2369 * We don't know up front whether buffers are updated multiple times per 2370 * LSN. Therefore, track the current LSN of each commit log record as it 2371 * is processed and drain the queue when it changes. Use commit records 2372 * because they are ordered correctly by the logging code. 2373 */ 2374 if (log->l_recovery_lsn != trans->r_lsn && 2375 ohead->oh_flags & XLOG_COMMIT_TRANS) { 2376 error = xfs_buf_delwri_submit(buffer_list); 2377 if (error) 2378 return error; 2379 log->l_recovery_lsn = trans->r_lsn; 2380 } 2381 2382 return xlog_recovery_process_trans(log, trans, dp, len, 2383 ohead->oh_flags, pass, buffer_list); 2384 } 2385 2386 /* 2387 * There are two valid states of the r_state field. 0 indicates that the 2388 * transaction structure is in a normal state. We have either seen the 2389 * start of the transaction or the last operation we added was not a partial 2390 * operation. If the last operation we added to the transaction was a 2391 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS. 2392 * 2393 * NOTE: skip LRs with 0 data length. 
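 *
 * (Worked illustration, added for clarity: if the last op of a transaction in
 * one log record was only partially written, the ophdr that carries the rest
 * of it in the next record is marked XLOG_WAS_CONT_TRANS;
 * xlog_recovery_process_trans() then appends that payload to the tail item
 * via xlog_recover_add_to_cont_trans() rather than starting a new region.)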
2394 */ 2395 STATIC int 2396 xlog_recover_process_data( 2397 struct xlog *log, 2398 struct hlist_head rhash[], 2399 struct xlog_rec_header *rhead, 2400 char *dp, 2401 int pass, 2402 struct list_head *buffer_list) 2403 { 2404 struct xlog_op_header *ohead; 2405 char *end; 2406 int num_logops; 2407 int error; 2408 2409 end = dp + be32_to_cpu(rhead->h_len); 2410 num_logops = be32_to_cpu(rhead->h_num_logops); 2411 2412 /* check the log format matches our own - else we can't recover */ 2413 if (xlog_header_check_recover(log->l_mp, rhead)) 2414 return -EIO; 2415 2416 trace_xfs_log_recover_record(log, rhead, pass); 2417 while ((dp < end) && num_logops) { 2418 2419 ohead = (struct xlog_op_header *)dp; 2420 dp += sizeof(*ohead); 2421 ASSERT(dp <= end); 2422 2423 /* errors will abort recovery */ 2424 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead, 2425 dp, end, pass, buffer_list); 2426 if (error) 2427 return error; 2428 2429 dp += be32_to_cpu(ohead->oh_len); 2430 num_logops--; 2431 } 2432 return 0; 2433 } 2434 2435 /* Take all the collected deferred ops and finish them in order. */ 2436 static int 2437 xlog_finish_defer_ops( 2438 struct xfs_mount *mp, 2439 struct list_head *capture_list) 2440 { 2441 struct xfs_defer_capture *dfc, *next; 2442 struct xfs_trans *tp; 2443 struct xfs_inode *ip; 2444 int error = 0; 2445 2446 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) { 2447 struct xfs_trans_res resv; 2448 2449 /* 2450 * Create a new transaction reservation from the captured 2451 * information. Set logcount to 1 to force the new transaction 2452 * to regrant every roll so that we can make forward progress 2453 * in recovery no matter how full the log might be. 2454 */ 2455 resv.tr_logres = dfc->dfc_logres; 2456 resv.tr_logcount = 1; 2457 resv.tr_logflags = XFS_TRANS_PERM_LOG_RES; 2458 2459 error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres, 2460 dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp); 2461 if (error) { 2462 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); 2463 return error; 2464 } 2465 2466 /* 2467 * Transfer to this new transaction all the dfops we captured 2468 * from recovering a single intent item. 2469 */ 2470 list_del_init(&dfc->dfc_list); 2471 xfs_defer_ops_continue(dfc, tp, &ip); 2472 2473 error = xfs_trans_commit(tp); 2474 if (ip) { 2475 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2476 xfs_irele(ip); 2477 } 2478 if (error) 2479 return error; 2480 } 2481 2482 ASSERT(list_empty(capture_list)); 2483 return 0; 2484 } 2485 2486 /* Release all the captured defer ops and capture structures in this list. */ 2487 static void 2488 xlog_abort_defer_ops( 2489 struct xfs_mount *mp, 2490 struct list_head *capture_list) 2491 { 2492 struct xfs_defer_capture *dfc; 2493 struct xfs_defer_capture *next; 2494 2495 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) { 2496 list_del_init(&dfc->dfc_list); 2497 xfs_defer_ops_release(mp, dfc); 2498 } 2499 } 2500 /* 2501 * When this is called, all of the log intent items which did not have 2502 * corresponding log done items should be in the AIL. What we do now 2503 * is update the data structures associated with each one. 2504 * 2505 * Since we process the log intent items in normal transactions, they 2506 * will be removed at some point after the commit. This prevents us 2507 * from just walking down the list processing each one. We'll use a 2508 * flag in the intent item to skip those that we've already processed 2509 * and use the AIL iteration mechanism's generation count to try to 2510 * speed this up at least a bit. 
2511 * 2512 * When we start, we know that the intents are the only things in the 2513 * AIL. As we process them, however, other items are added to the 2514 * AIL. 2515 */ 2516 STATIC int 2517 xlog_recover_process_intents( 2518 struct xlog *log) 2519 { 2520 LIST_HEAD(capture_list); 2521 struct xfs_ail_cursor cur; 2522 struct xfs_log_item *lip; 2523 struct xfs_ail *ailp; 2524 int error = 0; 2525 #if defined(DEBUG) || defined(XFS_WARN) 2526 xfs_lsn_t last_lsn; 2527 #endif 2528 2529 ailp = log->l_ailp; 2530 spin_lock(&ailp->ail_lock); 2531 #if defined(DEBUG) || defined(XFS_WARN) 2532 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block); 2533 #endif 2534 for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 2535 lip != NULL; 2536 lip = xfs_trans_ail_cursor_next(ailp, &cur)) { 2537 /* 2538 * We're done when we see something other than an intent. 2539 * There should be no intents left in the AIL now. 2540 */ 2541 if (!xlog_item_is_intent(lip)) { 2542 #ifdef DEBUG 2543 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) 2544 ASSERT(!xlog_item_is_intent(lip)); 2545 #endif 2546 break; 2547 } 2548 2549 /* 2550 * We should never see a redo item with a LSN higher than 2551 * the last transaction we found in the log at the start 2552 * of recovery. 2553 */ 2554 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0); 2555 2556 /* 2557 * NOTE: If your intent processing routine can create more 2558 * deferred ops, you /must/ attach them to the capture list in 2559 * the recover routine or else those subsequent intents will be 2560 * replayed in the wrong order! 2561 */ 2562 spin_unlock(&ailp->ail_lock); 2563 error = lip->li_ops->iop_recover(lip, &capture_list); 2564 spin_lock(&ailp->ail_lock); 2565 if (error) { 2566 trace_xlog_intent_recovery_failed(log->l_mp, error, 2567 lip->li_ops->iop_recover); 2568 break; 2569 } 2570 } 2571 2572 xfs_trans_ail_cursor_done(&cur); 2573 spin_unlock(&ailp->ail_lock); 2574 if (error) 2575 goto err; 2576 2577 error = xlog_finish_defer_ops(log->l_mp, &capture_list); 2578 if (error) 2579 goto err; 2580 2581 return 0; 2582 err: 2583 xlog_abort_defer_ops(log->l_mp, &capture_list); 2584 return error; 2585 } 2586 2587 /* 2588 * A cancel occurs when the mount has failed and we're bailing out. 2589 * Release all pending log intent items so they don't pin the AIL. 2590 */ 2591 STATIC void 2592 xlog_recover_cancel_intents( 2593 struct xlog *log) 2594 { 2595 struct xfs_log_item *lip; 2596 struct xfs_ail_cursor cur; 2597 struct xfs_ail *ailp; 2598 2599 ailp = log->l_ailp; 2600 spin_lock(&ailp->ail_lock); 2601 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 2602 while (lip != NULL) { 2603 /* 2604 * We're done when we see something other than an intent. 2605 * There should be no intents left in the AIL now. 2606 */ 2607 if (!xlog_item_is_intent(lip)) { 2608 #ifdef DEBUG 2609 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) 2610 ASSERT(!xlog_item_is_intent(lip)); 2611 #endif 2612 break; 2613 } 2614 2615 spin_unlock(&ailp->ail_lock); 2616 lip->li_ops->iop_release(lip); 2617 spin_lock(&ailp->ail_lock); 2618 lip = xfs_trans_ail_cursor_next(ailp, &cur); 2619 } 2620 2621 xfs_trans_ail_cursor_done(&cur); 2622 spin_unlock(&ailp->ail_lock); 2623 } 2624 2625 /* 2626 * This routine performs a transaction to null out a bad inode pointer 2627 * in an agi unlinked inode hash bucket. 
2628 */ 2629 STATIC void 2630 xlog_recover_clear_agi_bucket( 2631 xfs_mount_t *mp, 2632 xfs_agnumber_t agno, 2633 int bucket) 2634 { 2635 xfs_trans_t *tp; 2636 xfs_agi_t *agi; 2637 struct xfs_buf *agibp; 2638 int offset; 2639 int error; 2640 2641 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp); 2642 if (error) 2643 goto out_error; 2644 2645 error = xfs_read_agi(mp, tp, agno, &agibp); 2646 if (error) 2647 goto out_abort; 2648 2649 agi = agibp->b_addr; 2650 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); 2651 offset = offsetof(xfs_agi_t, agi_unlinked) + 2652 (sizeof(xfs_agino_t) * bucket); 2653 xfs_trans_log_buf(tp, agibp, offset, 2654 (offset + sizeof(xfs_agino_t) - 1)); 2655 2656 error = xfs_trans_commit(tp); 2657 if (error) 2658 goto out_error; 2659 return; 2660 2661 out_abort: 2662 xfs_trans_cancel(tp); 2663 out_error: 2664 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno); 2665 return; 2666 } 2667 2668 STATIC xfs_agino_t 2669 xlog_recover_process_one_iunlink( 2670 struct xfs_mount *mp, 2671 xfs_agnumber_t agno, 2672 xfs_agino_t agino, 2673 int bucket) 2674 { 2675 struct xfs_buf *ibp; 2676 struct xfs_dinode *dip; 2677 struct xfs_inode *ip; 2678 xfs_ino_t ino; 2679 int error; 2680 2681 ino = XFS_AGINO_TO_INO(mp, agno, agino); 2682 error = xfs_iget(mp, NULL, ino, 0, 0, &ip); 2683 if (error) 2684 goto fail; 2685 2686 /* 2687 * Get the on disk inode to find the next inode in the bucket. 2688 */ 2689 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &ibp); 2690 if (error) 2691 goto fail_iput; 2692 dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset); 2693 2694 xfs_iflags_clear(ip, XFS_IRECOVERY); 2695 ASSERT(VFS_I(ip)->i_nlink == 0); 2696 ASSERT(VFS_I(ip)->i_mode != 0); 2697 2698 /* setup for the next pass */ 2699 agino = be32_to_cpu(dip->di_next_unlinked); 2700 xfs_buf_relse(ibp); 2701 2702 xfs_irele(ip); 2703 return agino; 2704 2705 fail_iput: 2706 xfs_irele(ip); 2707 fail: 2708 /* 2709 * We can't read in the inode this bucket points to, or this inode 2710 * is messed up. Just ditch this bucket of inodes. We will lose 2711 * some inodes and space, but at least we won't hang. 2712 * 2713 * Call xlog_recover_clear_agi_bucket() to perform a transaction to 2714 * clear the inode pointer in the bucket. 2715 */ 2716 xlog_recover_clear_agi_bucket(mp, agno, bucket); 2717 return NULLAGINO; 2718 } 2719 2720 /* 2721 * Recover AGI unlinked lists 2722 * 2723 * This is called during recovery to process any inodes which we unlinked but 2724 * not freed when the system crashed. These inodes will be on the lists in the 2725 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free 2726 * any inodes found on the lists. Each inode is removed from the lists when it 2727 * has been fully truncated and is freed. The freeing of the inode and its 2728 * removal from the list must be atomic. 2729 * 2730 * If everything we touch in the agi processing loop is already in memory, this 2731 * loop can hold the cpu for a long time. It runs without lock contention, 2732 * memory allocation contention, the need wait for IO, etc, and so will run 2733 * until we either run out of inodes to process, run low on memory or we run out 2734 * of log space. 2735 * 2736 * This behaviour is bad for latency on single CPU and non-preemptible kernels, 2737 * and can prevent other filesystem work (such as CIL pushes) from running. This 2738 * can lead to deadlocks if the recovery process runs out of log reservation 2739 * space. 
Hence we need to yield the CPU when there is other kernel work 2740 * scheduled on this CPU to ensure other scheduled work can run without undue 2741 * latency. 2742 */ 2743 STATIC void 2744 xlog_recover_process_iunlinks( 2745 struct xlog *log) 2746 { 2747 struct xfs_mount *mp = log->l_mp; 2748 struct xfs_perag *pag; 2749 xfs_agnumber_t agno; 2750 struct xfs_agi *agi; 2751 struct xfs_buf *agibp; 2752 xfs_agino_t agino; 2753 int bucket; 2754 int error; 2755 2756 for_each_perag(mp, agno, pag) { 2757 error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp); 2758 if (error) { 2759 /* 2760 * AGI is b0rked. Don't process it. 2761 * 2762 * We should probably mark the filesystem as corrupt 2763 * after we've recovered all the ag's we can.... 2764 */ 2765 continue; 2766 } 2767 /* 2768 * Unlock the buffer so that it can be acquired in the normal 2769 * course of the transaction to truncate and free each inode. 2770 * Because we are not racing with anyone else here for the AGI 2771 * buffer, we don't even need to hold it locked to read the 2772 * initial unlinked bucket entries out of the buffer. We keep 2773 * a buffer reference though, so that it stays pinned in memory 2774 * while we need the buffer. 2775 */ 2776 agi = agibp->b_addr; 2777 xfs_buf_unlock(agibp); 2778 2779 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { 2780 agino = be32_to_cpu(agi->agi_unlinked[bucket]); 2781 while (agino != NULLAGINO) { 2782 agino = xlog_recover_process_one_iunlink(mp, 2783 pag->pag_agno, agino, bucket); 2784 cond_resched(); 2785 } 2786 } 2787 xfs_buf_rele(agibp); 2788 } 2789 } 2790 2791 STATIC void 2792 xlog_unpack_data( 2793 struct xlog_rec_header *rhead, 2794 char *dp, 2795 struct xlog *log) 2796 { 2797 int i, j, k; 2798 2799 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && 2800 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { 2801 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i]; 2802 dp += BBSIZE; 2803 } 2804 2805 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 2806 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead; 2807 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { 2808 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 2809 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 2810 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; 2811 dp += BBSIZE; 2812 } 2813 } 2814 } 2815 2816 /* 2817 * CRC check, unpack and process a log record. 2818 */ 2819 STATIC int 2820 xlog_recover_process( 2821 struct xlog *log, 2822 struct hlist_head rhash[], 2823 struct xlog_rec_header *rhead, 2824 char *dp, 2825 int pass, 2826 struct list_head *buffer_list) 2827 { 2828 __le32 old_crc = rhead->h_crc; 2829 __le32 crc; 2830 2831 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len)); 2832 2833 /* 2834 * Nothing else to do if this is a CRC verification pass. Just return 2835 * if this is a record with a non-zero crc. Unfortunately, mkfs always 2836 * sets old_crc to 0 so we must consider this valid even on v5 supers. 2837 * Otherwise, return EFSBADCRC on failure so the callers up the stack 2838 * know precisely what failed. 2839 */ 2840 if (pass == XLOG_RECOVER_CRCPASS) { 2841 if (old_crc && crc != old_crc) 2842 return -EFSBADCRC; 2843 return 0; 2844 } 2845 2846 /* 2847 * We're in the normal recovery path. Issue a warning if and only if the 2848 * CRC in the header is non-zero. This is an advisory warning and the 2849 * zero CRC check prevents warnings from being emitted when upgrading 2850 * the kernel from one that does not add CRCs by default.
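 *
 * (Summary of the cases handled below, added for clarity:
 *  - CRC matches:                               nothing to report.
 *  - Mismatch, old_crc == 0, no CRC feature:    tolerated silently (old mkfs).
 *  - Mismatch with old_crc != 0 or CRC enabled: warn and hex dump part of the
 *                                               record data.
 *  - Mismatch on a CRC-enabled filesystem:      fatal, -EFSCORRUPTED.)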
2851 */ 2852 if (crc != old_crc) { 2853 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) { 2854 xfs_alert(log->l_mp, 2855 "log record CRC mismatch: found 0x%x, expected 0x%x.", 2856 le32_to_cpu(old_crc), 2857 le32_to_cpu(crc)); 2858 xfs_hex_dump(dp, 32); 2859 } 2860 2861 /* 2862 * If the filesystem is CRC enabled, this mismatch becomes a 2863 * fatal log corruption failure. 2864 */ 2865 if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) { 2866 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); 2867 return -EFSCORRUPTED; 2868 } 2869 } 2870 2871 xlog_unpack_data(rhead, dp, log); 2872 2873 return xlog_recover_process_data(log, rhash, rhead, dp, pass, 2874 buffer_list); 2875 } 2876 2877 STATIC int 2878 xlog_valid_rec_header( 2879 struct xlog *log, 2880 struct xlog_rec_header *rhead, 2881 xfs_daddr_t blkno, 2882 int bufsize) 2883 { 2884 int hlen; 2885 2886 if (XFS_IS_CORRUPT(log->l_mp, 2887 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) 2888 return -EFSCORRUPTED; 2889 if (XFS_IS_CORRUPT(log->l_mp, 2890 (!rhead->h_version || 2891 (be32_to_cpu(rhead->h_version) & 2892 (~XLOG_VERSION_OKBITS))))) { 2893 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", 2894 __func__, be32_to_cpu(rhead->h_version)); 2895 return -EFSCORRUPTED; 2896 } 2897 2898 /* 2899 * LR body must have data (or it wouldn't have been written) 2900 * and h_len must not be greater than LR buffer size. 2901 */ 2902 hlen = be32_to_cpu(rhead->h_len); 2903 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize)) 2904 return -EFSCORRUPTED; 2905 2906 if (XFS_IS_CORRUPT(log->l_mp, 2907 blkno > log->l_logBBsize || blkno > INT_MAX)) 2908 return -EFSCORRUPTED; 2909 return 0; 2910 } 2911 2912 /* 2913 * Read the log from tail to head and process the log records found. 2914 * Handle the two cases where the tail and head are in the same cycle 2915 * and where the active portion of the log wraps around the end of 2916 * the physical log separately. The pass parameter is passed through 2917 * to the routines called to process the data and is not looked at 2918 * here. 2919 */ 2920 STATIC int 2921 xlog_do_recovery_pass( 2922 struct xlog *log, 2923 xfs_daddr_t head_blk, 2924 xfs_daddr_t tail_blk, 2925 int pass, 2926 xfs_daddr_t *first_bad) /* out: first bad log rec */ 2927 { 2928 xlog_rec_header_t *rhead; 2929 xfs_daddr_t blk_no, rblk_no; 2930 xfs_daddr_t rhead_blk; 2931 char *offset; 2932 char *hbp, *dbp; 2933 int error = 0, h_size, h_len; 2934 int error2 = 0; 2935 int bblks, split_bblks; 2936 int hblks, split_hblks, wrapped_hblks; 2937 int i; 2938 struct hlist_head rhash[XLOG_RHASH_SIZE]; 2939 LIST_HEAD (buffer_list); 2940 2941 ASSERT(head_blk != tail_blk); 2942 blk_no = rhead_blk = tail_blk; 2943 2944 for (i = 0; i < XLOG_RHASH_SIZE; i++) 2945 INIT_HLIST_HEAD(&rhash[i]); 2946 2947 /* 2948 * Read the header of the tail block and get the iclog buffer size from 2949 * h_size. Use this to tell how many sectors make up the log header. 2950 */ 2951 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 2952 /* 2953 * When using variable length iclogs, read first sector of 2954 * iclog header and extract the header size from it. Get a 2955 * new hbp that is the correct size. 2956 */ 2957 hbp = xlog_alloc_buffer(log, 1); 2958 if (!hbp) 2959 return -ENOMEM; 2960 2961 error = xlog_bread(log, tail_blk, 1, hbp, &offset); 2962 if (error) 2963 goto bread_err1; 2964 2965 rhead = (xlog_rec_header_t *)offset; 2966 2967 /* 2968 * xfsprogs has a bug where record length is based on lsunit but 2969 * h_size (iclog size) is hardcoded to 32k. 
Now that we 2970 * unconditionally CRC verify the unmount record, this means the 2971 * log buffer can be too small for the record and cause an 2972 * overrun. 2973 * 2974 * Detect this condition here. Use lsunit for the buffer size as 2975 * long as this looks like the mkfs case. Otherwise, return an 2976 * error to avoid a buffer overrun. 2977 */ 2978 h_size = be32_to_cpu(rhead->h_size); 2979 h_len = be32_to_cpu(rhead->h_len); 2980 if (h_len > h_size && h_len <= log->l_mp->m_logbsize && 2981 rhead->h_num_logops == cpu_to_be32(1)) { 2982 xfs_warn(log->l_mp, 2983 "invalid iclog size (%d bytes), using lsunit (%d bytes)", 2984 h_size, log->l_mp->m_logbsize); 2985 h_size = log->l_mp->m_logbsize; 2986 } 2987 2988 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size); 2989 if (error) 2990 goto bread_err1; 2991 2992 hblks = xlog_logrec_hblks(log, rhead); 2993 if (hblks != 1) { 2994 kmem_free(hbp); 2995 hbp = xlog_alloc_buffer(log, hblks); 2996 } 2997 } else { 2998 ASSERT(log->l_sectBBsize == 1); 2999 hblks = 1; 3000 hbp = xlog_alloc_buffer(log, 1); 3001 h_size = XLOG_BIG_RECORD_BSIZE; 3002 } 3003 3004 if (!hbp) 3005 return -ENOMEM; 3006 dbp = xlog_alloc_buffer(log, BTOBB(h_size)); 3007 if (!dbp) { 3008 kmem_free(hbp); 3009 return -ENOMEM; 3010 } 3011 3012 memset(rhash, 0, sizeof(rhash)); 3013 if (tail_blk > head_blk) { 3014 /* 3015 * Perform recovery around the end of the physical log. 3016 * When the head is not on the same cycle number as the tail, 3017 * we can't do a sequential recovery. 3018 */ 3019 while (blk_no < log->l_logBBsize) { 3020 /* 3021 * Check for header wrapping around physical end-of-log 3022 */ 3023 offset = hbp; 3024 split_hblks = 0; 3025 wrapped_hblks = 0; 3026 if (blk_no + hblks <= log->l_logBBsize) { 3027 /* Read header in one read */ 3028 error = xlog_bread(log, blk_no, hblks, hbp, 3029 &offset); 3030 if (error) 3031 goto bread_err2; 3032 } else { 3033 /* This LR is split across physical log end */ 3034 if (blk_no != log->l_logBBsize) { 3035 /* some data before physical log end */ 3036 ASSERT(blk_no <= INT_MAX); 3037 split_hblks = log->l_logBBsize - (int)blk_no; 3038 ASSERT(split_hblks > 0); 3039 error = xlog_bread(log, blk_no, 3040 split_hblks, hbp, 3041 &offset); 3042 if (error) 3043 goto bread_err2; 3044 } 3045 3046 /* 3047 * Note: this black magic still works with 3048 * large sector sizes (non-512) only because: 3049 * - we increased the buffer size originally 3050 * by 1 sector giving us enough extra space 3051 * for the second read; 3052 * - the log start is guaranteed to be sector 3053 * aligned; 3054 * - we read the log end (LR header start) 3055 * _first_, then the log start (LR header end) 3056 * - order is important. 3057 */ 3058 wrapped_hblks = hblks - split_hblks; 3059 error = xlog_bread_noalign(log, 0, 3060 wrapped_hblks, 3061 offset + BBTOB(split_hblks)); 3062 if (error) 3063 goto bread_err2; 3064 } 3065 rhead = (xlog_rec_header_t *)offset; 3066 error = xlog_valid_rec_header(log, rhead, 3067 split_hblks ? blk_no : 0, h_size); 3068 if (error) 3069 goto bread_err2; 3070 3071 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3072 blk_no += hblks; 3073 3074 /* 3075 * Read the log record data in multiple reads if it 3076 * wraps around the end of the log. Note that if the 3077 * header already wrapped, blk_no could point past the 3078 * end of the log. The record data is contiguous in 3079 * that case. 
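 *
 * (Worked example, for illustration only: with l_logBBsize = 1000,
 * blk_no = 998 and bblks = 6 the record wraps, so the split branch below
 * reads split_bblks = 2 blocks (998-999) into dbp and then pulls the
 * remaining 4 blocks (0-3) in with xlog_bread_noalign() at
 * offset + BBTOB(2).)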
3080 */ 3081 if (blk_no + bblks <= log->l_logBBsize || 3082 blk_no >= log->l_logBBsize) { 3083 rblk_no = xlog_wrap_logbno(log, blk_no); 3084 error = xlog_bread(log, rblk_no, bblks, dbp, 3085 &offset); 3086 if (error) 3087 goto bread_err2; 3088 } else { 3089 /* This log record is split across the 3090 * physical end of log */ 3091 offset = dbp; 3092 split_bblks = 0; 3093 if (blk_no != log->l_logBBsize) { 3094 /* some data is before the physical 3095 * end of log */ 3096 ASSERT(!wrapped_hblks); 3097 ASSERT(blk_no <= INT_MAX); 3098 split_bblks = 3099 log->l_logBBsize - (int)blk_no; 3100 ASSERT(split_bblks > 0); 3101 error = xlog_bread(log, blk_no, 3102 split_bblks, dbp, 3103 &offset); 3104 if (error) 3105 goto bread_err2; 3106 } 3107 3108 /* 3109 * Note: this black magic still works with 3110 * large sector sizes (non-512) only because: 3111 * - we increased the buffer size originally 3112 * by 1 sector giving us enough extra space 3113 * for the second read; 3114 * - the log start is guaranteed to be sector 3115 * aligned; 3116 * - we read the log end (LR header start) 3117 * _first_, then the log start (LR header end) 3118 * - order is important. 3119 */ 3120 error = xlog_bread_noalign(log, 0, 3121 bblks - split_bblks, 3122 offset + BBTOB(split_bblks)); 3123 if (error) 3124 goto bread_err2; 3125 } 3126 3127 error = xlog_recover_process(log, rhash, rhead, offset, 3128 pass, &buffer_list); 3129 if (error) 3130 goto bread_err2; 3131 3132 blk_no += bblks; 3133 rhead_blk = blk_no; 3134 } 3135 3136 ASSERT(blk_no >= log->l_logBBsize); 3137 blk_no -= log->l_logBBsize; 3138 rhead_blk = blk_no; 3139 } 3140 3141 /* read first part of physical log */ 3142 while (blk_no < head_blk) { 3143 error = xlog_bread(log, blk_no, hblks, hbp, &offset); 3144 if (error) 3145 goto bread_err2; 3146 3147 rhead = (xlog_rec_header_t *)offset; 3148 error = xlog_valid_rec_header(log, rhead, blk_no, h_size); 3149 if (error) 3150 goto bread_err2; 3151 3152 /* blocks in data section */ 3153 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3154 error = xlog_bread(log, blk_no+hblks, bblks, dbp, 3155 &offset); 3156 if (error) 3157 goto bread_err2; 3158 3159 error = xlog_recover_process(log, rhash, rhead, offset, pass, 3160 &buffer_list); 3161 if (error) 3162 goto bread_err2; 3163 3164 blk_no += bblks + hblks; 3165 rhead_blk = blk_no; 3166 } 3167 3168 bread_err2: 3169 kmem_free(dbp); 3170 bread_err1: 3171 kmem_free(hbp); 3172 3173 /* 3174 * Submit buffers that have been added from the last record processed, 3175 * regardless of error status. 3176 */ 3177 if (!list_empty(&buffer_list)) 3178 error2 = xfs_buf_delwri_submit(&buffer_list); 3179 3180 if (error && first_bad) 3181 *first_bad = rhead_blk; 3182 3183 /* 3184 * Transactions are freed at commit time but transactions without commit 3185 * records on disk are never committed. Free any that may be left in the 3186 * hash table. 3187 */ 3188 for (i = 0; i < XLOG_RHASH_SIZE; i++) { 3189 struct hlist_node *tmp; 3190 struct xlog_recover *trans; 3191 3192 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list) 3193 xlog_recover_free_trans(trans); 3194 } 3195 3196 return error ? error : error2; 3197 } 3198 3199 /* 3200 * Do the recovery of the log. We actually do this in two phases. 3201 * The two passes are necessary in order to implement the function 3202 * of cancelling a record written into the log. The first pass 3203 * determines those things which have been cancelled, and the 3204 * second pass replays log items normally except for those which 3205 * have been cancelled. 
The handling of the replay and cancellations 3206 * takes place in the log item type specific routines. 3207 * 3208 * The table of items which have cancel records in the log is allocated 3209 * and freed at this level, since only here do we know when all of 3210 * the log recovery has been completed. 3211 */ 3212 STATIC int 3213 xlog_do_log_recovery( 3214 struct xlog *log, 3215 xfs_daddr_t head_blk, 3216 xfs_daddr_t tail_blk) 3217 { 3218 int error, i; 3219 3220 ASSERT(head_blk != tail_blk); 3221 3222 /* 3223 * First do a pass to find all of the cancelled buf log items. 3224 * Store them in the buf_cancel_table for use in the second pass. 3225 */ 3226 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE * 3227 sizeof(struct list_head), 3228 0); 3229 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 3230 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]); 3231 3232 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 3233 XLOG_RECOVER_PASS1, NULL); 3234 if (error != 0) { 3235 kmem_free(log->l_buf_cancel_table); 3236 log->l_buf_cancel_table = NULL; 3237 return error; 3238 } 3239 /* 3240 * Then do a second pass to actually recover the items in the log. 3241 * When it is complete free the table of buf cancel items. 3242 */ 3243 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 3244 XLOG_RECOVER_PASS2, NULL); 3245 #ifdef DEBUG 3246 if (!error) { 3247 int i; 3248 3249 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 3250 ASSERT(list_empty(&log->l_buf_cancel_table[i])); 3251 } 3252 #endif /* DEBUG */ 3253 3254 kmem_free(log->l_buf_cancel_table); 3255 log->l_buf_cancel_table = NULL; 3256 3257 return error; 3258 } 3259 3260 /* 3261 * Do the actual recovery 3262 */ 3263 STATIC int 3264 xlog_do_recover( 3265 struct xlog *log, 3266 xfs_daddr_t head_blk, 3267 xfs_daddr_t tail_blk) 3268 { 3269 struct xfs_mount *mp = log->l_mp; 3270 struct xfs_buf *bp = mp->m_sb_bp; 3271 struct xfs_sb *sbp = &mp->m_sb; 3272 int error; 3273 3274 trace_xfs_log_recover(log, head_blk, tail_blk); 3275 3276 /* 3277 * First replay the images in the log. 3278 */ 3279 error = xlog_do_log_recovery(log, head_blk, tail_blk); 3280 if (error) 3281 return error; 3282 3283 /* 3284 * If IO errors happened during recovery, bail out. 3285 */ 3286 if (XFS_FORCED_SHUTDOWN(mp)) 3287 return -EIO; 3288 3289 /* 3290 * We now update the tail_lsn since much of the recovery has completed 3291 * and there may be space available to use. If there were no extent 3292 * frees or iunlinks, we can free up the entire log and set the tail_lsn to 3293 * be the last_sync_lsn. This was set in xlog_find_tail to be the 3294 * lsn of the last known good LR on disk. If there are extent frees 3295 * or iunlinks they will have some entries in the AIL; so we look at 3296 * the AIL to determine how to set the tail_lsn. 3297 */ 3298 xlog_assign_tail_lsn(mp); 3299 3300 /* 3301 * Now that we've finished replaying all buffer and inode updates, 3302 * re-read the superblock and reverify it.
3303 */ 3304 xfs_buf_lock(bp); 3305 xfs_buf_hold(bp); 3306 error = _xfs_buf_read(bp, XBF_READ); 3307 if (error) { 3308 if (!XFS_FORCED_SHUTDOWN(mp)) { 3309 xfs_buf_ioerror_alert(bp, __this_address); 3310 ASSERT(0); 3311 } 3312 xfs_buf_relse(bp); 3313 return error; 3314 } 3315 3316 /* Convert superblock from on-disk format */ 3317 xfs_sb_from_disk(sbp, bp->b_addr); 3318 xfs_buf_relse(bp); 3319 3320 /* re-initialise in-core superblock and geometry structures */ 3321 xfs_reinit_percpu_counters(mp); 3322 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi); 3323 if (error) { 3324 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error); 3325 return error; 3326 } 3327 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp); 3328 3329 xlog_recover_check_summary(log); 3330 3331 /* Normal transactions can now occur */ 3332 log->l_flags &= ~XLOG_ACTIVE_RECOVERY; 3333 return 0; 3334 } 3335 3336 /* 3337 * Perform recovery and re-initialize some log variables in xlog_find_tail. 3338 * 3339 * Return error or zero. 3340 */ 3341 int 3342 xlog_recover( 3343 struct xlog *log) 3344 { 3345 xfs_daddr_t head_blk, tail_blk; 3346 int error; 3347 3348 /* find the tail of the log */ 3349 error = xlog_find_tail(log, &head_blk, &tail_blk); 3350 if (error) 3351 return error; 3352 3353 /* 3354 * The superblock was read before the log was available and thus the LSN 3355 * could not be verified. Check the superblock LSN against the current 3356 * LSN now that it's known. 3357 */ 3358 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) && 3359 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn)) 3360 return -EINVAL; 3361 3362 if (tail_blk != head_blk) { 3363 /* There used to be a comment here: 3364 * 3365 * disallow recovery on read-only mounts. note -- mount 3366 * checks for ENOSPC and turns it into an intelligent 3367 * error message. 3368 * ...but this is no longer true. Now, unless you specify 3369 * NORECOVERY (in which case this function would never be 3370 * called), we just go ahead and recover. We do this all 3371 * under the vfs layer, so we can get away with it unless 3372 * the device itself is read-only, in which case we fail. 3373 */ 3374 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) { 3375 return error; 3376 } 3377 3378 /* 3379 * Version 5 superblock log feature mask validation. We know the 3380 * log is dirty so check if there are any unknown log features 3381 * in what we need to recover. If there are unknown features 3382 * (e.g. unsupported transactions, then simply reject the 3383 * attempt at recovery before touching anything. 3384 */ 3385 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 && 3386 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb, 3387 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) { 3388 xfs_warn(log->l_mp, 3389 "Superblock has unknown incompatible log features (0x%x) enabled.", 3390 (log->l_mp->m_sb.sb_features_log_incompat & 3391 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)); 3392 xfs_warn(log->l_mp, 3393 "The log can not be fully and/or safely recovered by this kernel."); 3394 xfs_warn(log->l_mp, 3395 "Please recover the log on a kernel that supports the unknown features."); 3396 return -EINVAL; 3397 } 3398 3399 /* 3400 * Delay log recovery if the debug hook is set. This is debug 3401 * instrumentation to coordinate simulation of I/O failures with 3402 * log recovery. 
3403 */ 3404 if (xfs_globals.log_recovery_delay) { 3405 xfs_notice(log->l_mp, 3406 "Delaying log recovery for %d seconds.", 3407 xfs_globals.log_recovery_delay); 3408 msleep(xfs_globals.log_recovery_delay * 1000); 3409 } 3410 3411 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", 3412 log->l_mp->m_logname ? log->l_mp->m_logname 3413 : "internal"); 3414 3415 error = xlog_do_recover(log, head_blk, tail_blk); 3416 log->l_flags |= XLOG_RECOVERY_NEEDED; 3417 } 3418 return error; 3419 } 3420 3421 /* 3422 * In the first part of recovery we replay inodes and buffers and build 3423 * up the list of extent free items which need to be processed. Here 3424 * we process the extent free items and clean up the on disk unlinked 3425 * inode lists. This is separated from the first part of recovery so 3426 * that the root and real-time bitmap inodes can be read in from disk in 3427 * between the two stages. This is necessary so that we can free space 3428 * in the real-time portion of the file system. 3429 */ 3430 int 3431 xlog_recover_finish( 3432 struct xlog *log) 3433 { 3434 /* 3435 * Now we're ready to do the transactions needed for the 3436 * rest of recovery. Start with completing all the extent 3437 * free intent records and then process the unlinked inode 3438 * lists. At this point, we essentially run in normal mode 3439 * except that we're still performing recovery actions 3440 * rather than accepting new requests. 3441 */ 3442 if (log->l_flags & XLOG_RECOVERY_NEEDED) { 3443 int error; 3444 error = xlog_recover_process_intents(log); 3445 if (error) { 3446 /* 3447 * Cancel all the unprocessed intent items now so that 3448 * we don't leave them pinned in the AIL. This can 3449 * cause the AIL to livelock on the pinned item if 3450 * anyone tries to push the AIL (inode reclaim does 3451 * this) before we get around to xfs_log_mount_cancel. 3452 */ 3453 xlog_recover_cancel_intents(log); 3454 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); 3455 xfs_alert(log->l_mp, "Failed to recover intents"); 3456 return error; 3457 } 3458 3459 /* 3460 * Sync the log to get all the intents out of the AIL. 3461 * This isn't absolutely necessary, but it helps in 3462 * case the unlink transactions would have problems 3463 * pushing the intents out of the way. 3464 */ 3465 xfs_log_force(log->l_mp, XFS_LOG_SYNC); 3466 3467 xlog_recover_process_iunlinks(log); 3468 3469 xlog_recover_check_summary(log); 3470 3471 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)", 3472 log->l_mp->m_logname ? log->l_mp->m_logname 3473 : "internal"); 3474 log->l_flags &= ~XLOG_RECOVERY_NEEDED; 3475 } else { 3476 xfs_info(log->l_mp, "Ending clean mount"); 3477 } 3478 return 0; 3479 } 3480 3481 void 3482 xlog_recover_cancel( 3483 struct xlog *log) 3484 { 3485 if (log->l_flags & XLOG_RECOVERY_NEEDED) 3486 xlog_recover_cancel_intents(log); 3487 } 3488 3489 #if defined(DEBUG) 3490 /* 3491 * Read all of the agf and agi counters and check that they 3492 * are consistent with the superblock counters. 
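 *
 * (Note added for clarity: as implemented below, the per-AG free block and
 * inode counts are only accumulated into local totals; no explicit comparison
 * against the superblock counters is performed here any more.)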
3493 */ 3494 STATIC void 3495 xlog_recover_check_summary( 3496 struct xlog *log) 3497 { 3498 struct xfs_mount *mp; 3499 struct xfs_perag *pag; 3500 struct xfs_buf *agfbp; 3501 struct xfs_buf *agibp; 3502 xfs_agnumber_t agno; 3503 uint64_t freeblks; 3504 uint64_t itotal; 3505 uint64_t ifree; 3506 int error; 3507 3508 mp = log->l_mp; 3509 3510 freeblks = 0LL; 3511 itotal = 0LL; 3512 ifree = 0LL; 3513 for_each_perag(mp, agno, pag) { 3514 error = xfs_read_agf(mp, NULL, pag->pag_agno, 0, &agfbp); 3515 if (error) { 3516 xfs_alert(mp, "%s agf read failed agno %d error %d", 3517 __func__, pag->pag_agno, error); 3518 } else { 3519 struct xfs_agf *agfp = agfbp->b_addr; 3520 3521 freeblks += be32_to_cpu(agfp->agf_freeblks) + 3522 be32_to_cpu(agfp->agf_flcount); 3523 xfs_buf_relse(agfbp); 3524 } 3525 3526 error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp); 3527 if (error) { 3528 xfs_alert(mp, "%s agi read failed agno %d error %d", 3529 __func__, pag->pag_agno, error); 3530 } else { 3531 struct xfs_agi *agi = agibp->b_addr; 3532 3533 itotal += be32_to_cpu(agi->agi_count); 3534 ifree += be32_to_cpu(agi->agi_freecount); 3535 xfs_buf_relse(agibp); 3536 } 3537 } 3538 } 3539 #endif /* DEBUG */ 3540