// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"
#include "xfs_health.h"

kmem_zone_t	*xfs_log_ticket_zone;

/* Local miscellaneous function prototypes */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp);

STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(
	struct xlog_in_core	*iclog,
	bool			aborted);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size);
STATIC void
xlog_state_want_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);

STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_regrant_reserve_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);
STATIC void
xlog_ungrant_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);

#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	void			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog		*log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c)
#define xlog_verify_tail_lsn(a,b,c)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);

static void
xlog_grant_sub_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		space -= bytes;
		if (space < 0) {
			space += log->l_logsize;
			cycle--;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}
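
/*
 * A grant head packs a cycle count and a byte offset into a single
 * atomic64_t so that both halves can be updated together without a lock.
 * Worked example for the wrap arithmetic above: with a 1 MiB log, a head
 * of { cycle = 2, space = 0x100 } and a 0x200 byte subtraction, space
 * underflows, so the head wraps backwards to
 * { cycle = 1, space = 0x100000 - 0x100 }. A minimal sketch of a single
 * update step, using the packing helpers from xfs_log_priv.h (this
 * function is illustrative only and is not called from this file):
 */
static inline int64_t
xlog_example_grant_sub(
	int64_t			head_val,
	int			log_size,
	int			bytes)
{
	int	cycle, space;

	xlog_crack_grant_head_val(head_val, &cycle, &space);
	space -= bytes;
	if (space < 0) {
		/* wrapped backwards past the physical start of the log */
		space += log_size;
		cycle--;
	}
	return xlog_assign_grant_head_val(cycle, space);
}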
static void
xlog_grant_add_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int	tmp;
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		tmp = log->l_logsize - space;
		if (tmp > bytes)
			space += bytes;
		else {
			space = bytes - tmp;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

STATIC void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}

static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{
	if (head == &log->l_write_head) {
		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
		return tic->t_unit_res;
	} else {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			return tic->t_unit_res * tic->t_cnt;
		else
			return tic->t_unit_res;
	}
}

STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;

	list_for_each_entry(tic, &head->waiters, t_queue) {
		need_bytes = xlog_ticket_reservation(log, head, tic);
		if (*free_bytes < need_bytes)
			return false;

		*free_bytes -= need_bytes;
		trace_xfs_log_grant_wake_up(log, tic);
		wake_up_process(tic->t_task);
	}

	return true;
}

STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return -EIO;
}
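
/*
 * Worked example for xlog_ticket_reservation() above: a permanent ticket
 * with t_unit_res = 64k and t_cnt = 3 still has three transaction rolls
 * outstanding, so the reserve head must provide 3 * 64k = 192k before
 * the ticket can proceed, while the write head only ever needs the 64k
 * for the transaction currently being written.
 */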
/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us. Wake up the first waiters; if we do not wake
	 * up all the waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}

static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
	tic->t_res_num = 0;
	tic->t_res_arr_sum = 0;
	tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}

	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}

/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}
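
/*
 * A permanent ticket is expected to be reserved once and then regranted
 * between each transaction roll. A hedged sketch of the calling pattern
 * follows (hypothetical caller shown for illustration; in practice
 * xfs_trans_reserve() and xfs_trans_roll() drive these paths):
 */
#if 0
	struct xlog_ticket	*tic = NULL;
	int			error;

	error = xfs_log_reserve(mp, unit_bytes, cnt, &tic, XFS_TRANSACTION,
				true);
	if (!error) {
		/* ... commit the first transaction in the chain ... */
		error = xfs_log_regrant(mp, tic);
	}
#endif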
/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation. By wasting space in each
 * reservation, we prevent over allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	uint8_t			client,
	bool			permanent)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
				KM_SLEEP | KM_MAYFAIL);
	if (!tic)
		return -ENOMEM;

	*ticp = tic;

	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
					    : tic->t_unit_res);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}


/*
 * NOTES:
 *
 *	1. currblock field gets updated at startup and after in-core logs
 *	   are marked with WANT_SYNC.
 */

/*
 * This routine is called when a user of a log manager ticket is done with
 * the reservation. If the ticket was ever used, then a commit record for
 * the associated transaction is written out as a log operation header with
 * no data. The flag XLOG_TIC_INITED is set when the first write occurs with
 * a given ticket. If the ticket was one with a permanent reservation, then
 * a few operations are done differently. Permanent reservation tickets by
 * default don't release the reservation. They just commit the current
 * transaction with the belief that the reservation is still needed. A flag
 * must be passed in before permanent reservations are actually released.
 * When these types of tickets are not released, they need to be set into
 * the inited state again. By doing this, a start record will be written
 * out when the next write occurs.
 */
xfs_lsn_t
xfs_log_done(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	xfs_lsn_t		lsn = 0;

	if (XLOG_FORCED_SHUTDOWN(log) ||
	    /*
	     * If nothing was ever written, don't write out commit record.
	     * If we get an error, just continue and give back the log ticket.
	     */
	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
		lsn = (xfs_lsn_t) -1;
		regrant = false;
	}


	if (!regrant) {
		trace_xfs_log_done_nonperm(log, ticket);

		/*
		 * Release ticket if not permanent reservation or a specific
		 * request has been made to release a permanent reservation.
		 */
		xlog_ungrant_log_space(log, ticket);
	} else {
		trace_xfs_log_done_perm(log, ticket);

		xlog_regrant_reserve_log_space(log, ticket);
		/*
		 * If this ticket was a permanent reservation and we aren't
		 * trying to release it, reset the inited flags; so next time
		 * we write, a start record will be written out.
		 */
		ticket->t_flags |= XLOG_TIC_INITED;
	}

	xfs_log_ticket_put(ticket);
	return lsn;
}

int
xfs_log_release_iclog(
	struct xfs_mount	*mp,
	struct xlog_in_core	*iclog)
{
	if (xlog_state_release_iclog(mp->m_log, iclog)) {
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
		return -EIO;
	}

	return 0;
}

/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	bool		fatal = xfs_sb_version_hascrc(&mp->m_sb);
	int		error = 0;
	int		min_logfsbs;

	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		xfs_notice(mp, "Mounting V%d Filesystem",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
	} else {
		xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(mp->m_log)) {
		error = PTR_ERR(mp->m_log);
		goto out;
	}

	/*
	 * Validate the given log space and drop a critical message via syslog
	 * if the log size is so small that it would lead to unexpected
	 * situations in the transaction log space reservation stage.
	 *
	 * Note: we can't just reject the mount if the validation fails. This
	 * would mean that people would have to downgrade their kernel just to
	 * remedy the situation as there is no way to grow the log (short of
	 * black magic surgery with xfs_db).
	 *
	 * We can, however, reject mounts for CRC format filesystems, as the
	 * mkfs binary being used to make the filesystem should never create a
	 * filesystem with a log that is too small.
	 */
	min_logfsbs = xfs_log_calc_minimum_size(mp);

	if (mp->m_sb.sb_logblocks < min_logfsbs) {
		xfs_warn(mp,
		"Log size %d blocks too small, minimum size is %d blocks",
			 mp->m_sb.sb_logblocks, min_logfsbs);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
		xfs_warn(mp,
		"Log size %d blocks too large, maximum size is %lld blocks",
			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
		error = -EINVAL;
	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
		xfs_warn(mp,
		"log size %lld bytes too large, maximum size is %lld bytes",
			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
			 XFS_MAX_LOG_BYTES);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logsunit > 1 &&
		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
		xfs_warn(mp,
		"log stripe unit %u bytes must be a multiple of block size",
			 mp->m_sb.sb_logsunit);
		error = -EINVAL;
		fatal = true;
	}
	if (error) {
		/*
		 * Log check errors are always fatal on v5; or whenever bad
		 * metadata leads to a crash.
		 */
		if (fatal) {
			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
			ASSERT(0);
			goto out_free_log;
		}
		xfs_crit(mp, "Log size out of supported range.");
		xfs_crit(mp,
"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	mp->m_log->l_ailp = mp->m_ail;

	/*
	 * Skip log recovery on a norecovery mount. Pretend it all
	 * just worked.
	 */
	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(mp->m_log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				 error);
			xlog_recover_cancel(mp->m_log);
			goto out_destroy_ail;
		}
	}

	error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
			       "log");
	if (error)
		goto out_destroy_ail;

	/* Normal transactions can now occur */
	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

	/*
	 * Now the log has been fully initialised and we know where our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(mp->m_log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(mp->m_log);
out:
	return error;
}

/*
 * Finish the recovery of the file system. This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{
	int	error = 0;
	bool	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
	bool	recovered = mp->m_log->l_flags & XLOG_RECOVERY_NEEDED;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
		return 0;
	} else if (readonly) {
		/* Allow unlinked processing to proceed */
		mp->m_flags &= ~XFS_MOUNT_RDONLY;
	}

	/*
	 * During the second phase of log recovery, we need iget and
	 * iput to behave like they do for an active filesystem.
	 * xfs_fs_drop_inode needs to be able to prevent the deletion
	 * of inodes before we're done replaying log items on those
	 * inodes. Turn it off immediately after recovery finishes
	 * so that we don't leak the quota inodes if subsequent mount
	 * activities fail.
	 *
	 * We let all inodes involved in redo item processing end up on
	 * the LRU instead of being evicted immediately so that if we do
	 * something to an unlinked inode, the irele won't cause
	 * premature truncation and freeing of the inode, which results
	 * in log recovery failure. We have to evict the unreferenced
	 * lru inodes after clearing SB_ACTIVE because we don't
	 * otherwise clean up the lru if there's a subsequent failure in
	 * xfs_mountfs, which leads to us leaking the inodes if nothing
	 * else (e.g. quotacheck) references the inodes before the
	 * mount failure occurs.
	 */
	mp->m_super->s_flags |= SB_ACTIVE;
	error = xlog_recover_finish(mp->m_log);
	if (!error)
		xfs_log_work_queue(mp);
	mp->m_super->s_flags &= ~SB_ACTIVE;
	evict_inodes(mp->m_super);

	/*
	 * Drain the buffer LRU after log recovery. This is required for v4
	 * filesystems to avoid leaving around buffers with NULL verifier ops,
	 * but we do it unconditionally to make sure we're always in a clean
	 * cache state after mount.
	 *
	 * Don't push in the error case because the AIL may have pending intents
	 * that aren't removed until recovery is cancelled.
	 */
	if (!error && recovered) {
		xfs_log_force(mp, XFS_LOG_SYNC);
		xfs_ail_push_all_sync(mp->m_ail);
	}
	xfs_wait_buftarg(mp->m_ddev_targp);

	if (readonly)
		mp->m_flags |= XFS_MOUNT_RDONLY;

	return error;
}

/*
 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
void
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{
	xlog_recover_cancel(mp->m_log);
	xfs_log_unmount(mp);
}

/*
 * Final log writes as part of unmount.
 *
 * Mark the filesystem clean as unmount happens. Note that during relocation
 * this routine needs to be executed as part of source-bag while the
 * deallocation must not be done until source-end.
 */

/* Actually write the unmount record to disk. */
static void
xfs_log_write_unmount_record(
	struct xfs_mount	*mp)
{
	/* the data section must be 32 bit size aligned */
	struct xfs_unmount_log_format magic = {
		.magic = XLOG_UNMOUNT_TYPE,
	};
	struct xfs_log_iovec reg = {
		.i_addr = &magic,
		.i_len = sizeof(magic),
		.i_type = XLOG_REG_TYPE_UNMOUNT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	struct xlog		*log = mp->m_log;
	struct xlog_in_core	*iclog;
	struct xlog_ticket	*tic = NULL;
	xfs_lsn_t		lsn;
	uint			flags = XLOG_UNMOUNT_TRANS;
	int			error;

	error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
	if (error)
		goto out_err;

	/*
	 * If we think the summary counters are bad, clear the unmount header
	 * flag in the unmount record so that the summary counters will be
	 * recalculated during log recovery at next mount. Refer to
	 * xlog_check_unmount_rec for more details.
	 */
	if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
			XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
		xfs_alert(mp, "%s: will fix summary counters at next mount",
				__func__);
		flags &= ~XLOG_UNMOUNT_TRANS;
	}

	/* remove inited flag, and account for space used */
	tic->t_flags = 0;
	tic->t_curr_res -= sizeof(magic);
	error = xlog_write(log, &vec, tic, &lsn, NULL, flags);
	/*
	 * At this point, we're umounting anyway, so there's no point in
	 * transitioning log state to IOERROR. Just continue...
	 */
out_err:
	if (error)
		xfs_alert(mp, "%s: unmount record failed", __func__);

	spin_lock(&log->l_icloglock);
	iclog = log->l_iclog;
	atomic_inc(&iclog->ic_refcnt);
	xlog_state_want_sync(log, iclog);
	spin_unlock(&log->l_icloglock);
	error = xlog_state_release_iclog(log, iclog);

	spin_lock(&log->l_icloglock);
	switch (iclog->ic_state) {
	default:
		if (!XLOG_FORCED_SHUTDOWN(log)) {
			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
			break;
		}
		/* fall through */
	case XLOG_STATE_ACTIVE:
	case XLOG_STATE_DIRTY:
		spin_unlock(&log->l_icloglock);
		break;
	}

	if (tic) {
		trace_xfs_log_umount_write(log, tic);
		xlog_ungrant_log_space(log, tic);
		xfs_log_ticket_put(tic);
	}
}

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */

static int
xfs_log_unmount_write(xfs_mount_t *mp)
{
	struct xlog	 *log = mp->m_log;
	xlog_in_core_t	 *iclog;
#ifdef DEBUG
	xlog_in_core_t	 *first_iclog;
#endif
	int		 error;

	/*
	 * Don't write out the unmount record on norecovery mounts, on ro
	 * devices, or if we are doing a forced umount (typically because of
	 * IO errors).
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
	    xfs_readonly_buftarg(log->l_targ)) {
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
		return 0;
	}

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));

#ifdef DEBUG
	first_iclog = iclog = log->l_iclog;
	do {
		if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
			ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
			ASSERT(iclog->ic_offset == 0);
		}
		iclog = iclog->ic_next;
	} while (iclog != first_iclog);
#endif
	if (!XLOG_FORCED_SHUTDOWN(log)) {
		xfs_log_write_unmount_record(mp);
	} else {
		/*
		 * We're already in forced_shutdown mode, so we couldn't
		 * even attempt to write out the unmount transaction.
		 *
		 * Go through the motions of syncing and releasing
		 * the iclog, even though no I/O will actually happen,
		 * because we need to wait for other log I/Os that may already
		 * be in progress. Do this as a separate section of
		 * code so we'll know if we ever get stuck here that
		 * we're in this odd situation of trying to unmount
		 * a file system that went into forced_shutdown as
		 * the result of an unmount.
		 */
		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		atomic_inc(&iclog->ic_refcnt);

		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);
		error = xlog_state_release_iclog(log, iclog);

		spin_lock(&log->l_icloglock);

		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
		      iclog->ic_state == XLOG_STATE_DIRTY ||
		      iclog->ic_state == XLOG_STATE_IOERROR)) {

			xlog_wait(&iclog->ic_force_wait,
				  &log->l_icloglock);
		} else {
			spin_unlock(&log->l_icloglock);
		}
	}

	return error;
}	/* xfs_log_unmount_write */

/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can write the unmount
 * record.
 */
void
xfs_log_quiesce(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_log->l_work);
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
	 * will push it, xfs_wait_buftarg() will not wait for it. Further,
	 * xfs_buf_iowait() cannot be used because it was pushed with the
	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
	 * the IO to complete.
	 */
	xfs_ail_push_all_sync(mp->m_ail);
	xfs_wait_buftarg(mp->m_ddev_targp);
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);

	xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{
	xfs_log_quiesce(mp);

	xfs_trans_ail_destroy(mp);

	xfs_sysfs_del(&mp->m_log->l_kobj);

	xlog_dealloc_log(mp->m_log);
}

void
xfs_log_item_init(
	struct xfs_mount	*mp,
	struct xfs_log_item	*item,
	int			type,
	const struct xfs_item_ops *ops)
{
	item->li_mountp = mp;
	item->li_ailp = mp->m_ail;
	item->li_type = type;
	item->li_ops = ops;
	item->li_lv = NULL;

	INIT_LIST_HEAD(&item->li_ail);
	INIT_LIST_HEAD(&item->li_cil);
	INIT_LIST_HEAD(&item->li_bio_list);
	INIT_LIST_HEAD(&item->li_trans);
}
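
/*
 * Log item constructors embed a struct xfs_log_item in their item and
 * hand it to xfs_log_item_init() above; for example, the buffer item
 * code does, in essence:
 *
 *	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF,
 *			  &xfs_buf_item_ops);
 *
 * after which the item can be attached to a transaction, logged, and
 * tracked in the CIL and AIL through the li_ops callbacks.
 */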
/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	if (XLOG_FORCED_SHUTDOWN(log))
		return;

	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}

/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered. To begin the transition to the idle state firstly the log needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed is the caller
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL. Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
static int
xfs_log_need_covered(xfs_mount_t *mp)
{
	struct xlog	*log = mp->m_log;
	int		needed = 0;

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	if (!xlog_cil_empty(log))
		return 0;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		if (xfs_ail_min_lsn(log->l_ailp))
			break;
		if (!xlog_iclogs_empty(log))
			break;

		needed = 1;
		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
			log->l_covered_state = XLOG_STATE_COVER_DONE;
		else
			log->l_covered_state = XLOG_STATE_COVER_DONE2;
		break;
	default:
		needed = 1;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}

/*
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&mp->m_ail->ail_lock);

	/*
	 * To make sure we always have a valid LSN for the log tail we keep
	 * track of the last LSN which was committed in log->l_last_sync_lsn,
	 * and use that when the AIL was empty.
	 */
	lip = xfs_ail_min(mp->m_ail);
	if (lip)
		tail_lsn = lip->li_lsn;
	else
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
	return tail_lsn;
}

xfs_lsn_t
xlog_assign_tail_lsn(
	struct xfs_mount	*mp)
{
	xfs_lsn_t		tail_lsn;

	spin_lock(&mp->m_ail->ail_lock);
	tail_lsn = xlog_assign_tail_lsn_locked(mp);
	spin_unlock(&mp->m_ail->ail_lock);

	return tail_lsn;
}
/*
 * Return the space in the log between the tail and the head. The head
 * is passed in the cycle/bytes formal parms. In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid. In this case, just return 0 which means there is no space
 * in the log. This works for all places where this function is called
 * with the reserve head. Of course, if the write head were to ever
 * wrap the tail, we should blow up. Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code. XXXmiken
 *
 * This code also handles the case where the reservation head is behind
 * the tail. The details of this case are described below, but the end
 * result is that we return the size of the log as the amount of space left.
 */
STATIC int
xlog_space_left(
	struct xlog	*log,
	atomic64_t	*head)
{
	int		free_bytes;
	int		tail_bytes;
	int		tail_cycle;
	int		head_cycle;
	int		head_bytes;

	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
	tail_bytes = BBTOB(tail_bytes);
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
	else if (tail_cycle + 1 < head_cycle)
		return 0;
	else if (tail_cycle < head_cycle) {
		ASSERT(tail_cycle == (head_cycle - 1));
		free_bytes = tail_bytes - head_bytes;
	} else {
		/*
		 * The reservation head is behind the tail.
		 * In this case we just want to return the size of the
		 * log as the amount of space left.
		 */
		xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
		xfs_alert(log->l_mp,
			  "  tail_cycle = %d, tail_bytes = %d",
			  tail_cycle, tail_bytes);
		xfs_alert(log->l_mp,
			  "  GH   cycle = %d, GH   bytes = %d",
			  head_cycle, head_bytes);
		ASSERT(0);
		free_bytes = log->l_logsize;
	}
	return free_bytes;
}
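
/*
 * Worked example for xlog_space_left(): with a 64 MiB log, a tail at
 * { cycle 5, 48 MiB } and a head at { cycle 6, 16 MiB }, the head is one
 * cycle ahead, so the free space is tail_bytes - head_bytes =
 * 48 MiB - 16 MiB = 32 MiB. If instead both were in cycle 5 with the
 * head at 56 MiB, the free space would be
 * l_logsize - (head_bytes - tail_bytes) = 64 - (56 - 48) = 56 MiB.
 */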
static void
xlog_ioend_work(
	struct work_struct	*work)
{
	struct xlog_in_core	*iclog =
		container_of(work, struct xlog_in_core, ic_end_io_work);
	struct xlog		*log = iclog->ic_log;
	bool			aborted = false;
	int			error;

	error = blk_status_to_errno(iclog->ic_bio.bi_status);
#ifdef DEBUG
	/* treat writes with injected CRC errors as failed */
	if (iclog->ic_fail_crc)
		error = -EIO;
#endif

	/*
	 * Race to shutdown the filesystem if we see an error.
	 */
	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
		xfs_alert(log->l_mp, "log I/O error %d", error);
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
		/*
		 * This flag will be propagated to the trans-committed
		 * callback routines to let them know that the log-commit
		 * didn't succeed.
		 */
		aborted = true;
	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
		aborted = true;
	}

	xlog_state_done_syncing(iclog, aborted);
	bio_uninit(&iclog->ic_bio);

	/*
	 * Drop the lock to signal that we are done. Nothing references the
	 * iclog after this, so an unmount waiting on this lock can now tear it
	 * down safely. As such, it is unsafe to reference the iclog after the
	 * unlock as we could race with it being freed.
	 */
	up(&iclog->ic_sema);
}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */
STATIC void
xlog_get_iclog_buffer_size(
	struct xfs_mount	*mp,
	struct xlog		*log)
{
	if (mp->m_logbufs <= 0)
		mp->m_logbufs = XLOG_MAX_ICLOGS;
	if (mp->m_logbsize <= 0)
		mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;

	log->l_iclog_bufs = mp->m_logbufs;
	log->l_iclog_size = mp->m_logbsize;

	/*
	 * # headers = size / 32k - one header holds cycles from 32k of data.
	 */
	log->l_iclog_heads =
		DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
	log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
}

void
xfs_log_work_queue(
	struct xfs_mount	*mp)
{
	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
static void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	/* dgc: errors ignored - not fatal and nowhere to report them */
	if (xfs_log_need_covered(mp)) {
		/*
		 * Dump a transaction into the log that contains no real change.
		 * This is needed to stamp the current tail LSN into the log
		 * during the covering operation.
		 *
		 * We cannot use an inode here for this - that will push dirty
		 * state back up into the VFS and then periodic inode flushing
		 * will prevent log covering from making progress. Hence we
		 * synchronously log the superblock instead to ensure the
		 * superblock is immediately unpinned and can be written back.
		 */
		xfs_sync_sb(mp, true);
	} else
		xfs_log_force(mp, 0);

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);

	/* queue us up again */
	xfs_log_work_queue(mp);
}
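
/*
 * Worked example for xlog_get_iclog_buffer_size() above: with the
 * default 32kB log buffers (XLOG_BIG_RECORD_BSIZE) one header covers all
 * of the data, so l_iclog_heads = 1 and l_iclog_hsize = 512 bytes. With
 * 256kB buffers, DIV_ROUND_UP(256k, 32k) = 8 headers are needed, giving
 * l_iclog_hsize = 8 << BBSHIFT = 4096 bytes of header per iclog.
 */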
/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur. However,
 * some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{
	struct xlog		*log;
	xlog_rec_header_t	*head;
	xlog_in_core_t		**iclogp;
	xlog_in_core_t		*iclog, *prev_iclog = NULL;
	int			i;
	int			error = -ENOMEM;
	uint			log2_size = 0;

	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp	   = mp;
	log->l_targ	   = log_target;
	log->l_logsize     = BBTOB(num_bblks);
	log->l_logBBstart  = blk_offset;
	log->l_logBBsize   = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);

	log->l_prev_block  = -1;
	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */

	xlog_grant_head_init(&log->l_reserve_head);
	xlog_grant_head_init(&log->l_write_head);

	error = -EFSCORRUPTED;
	if (xfs_sb_version_hassector(&mp->m_sb)) {
		log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				 log2_size, BBSHIFT);
			goto out_free_log;
		}

		log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				 log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* for larger sector sizes, must have v2 or external log */
		if (log2_size && log->l_logBBstart > 0 &&
		    !xfs_sb_version_haslogv2(&mp->m_sb)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				 log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	xlog_get_iclog_buffer_size(mp, log);

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	iclogp = &log->l_iclog;
	/*
	 * The amount of memory to allocate for the iclog structure is
	 * rather funky due to the way the structure is defined. It is
	 * done this way so that we can use different sizes for machines
	 * with different amounts of memory. See the definition of
	 * xlog_in_core_t in xfs_log_priv.h for details.
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i = 0; i < log->l_iclog_bufs; i++) {
		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
				sizeof(struct bio_vec);

		iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
		if (!iclog)
			goto out_free_iclog;

		*iclogp = iclog;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		iclog->ic_data = kmem_alloc_large(log->l_iclog_size,
						  KM_MAYFAIL);
		if (!iclog->ic_data)
			goto out_free_iclog;
#ifdef DEBUG
		log->l_iclog_bak[i] = &iclog->ic_header;
#endif
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		head->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		spin_lock_init(&iclog->ic_callback_lock);
		INIT_LIST_HEAD(&iclog->ic_callbacks);
		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);
		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
		sema_init(&iclog->ic_sema, 1);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI, 0,
			mp->m_fsname);
	if (!log->l_ioend_workqueue)
		goto out_free_iclog;

	error = xlog_cil_init(log);
	if (error)
		goto out_destroy_workqueue;
	return log;

out_destroy_workqueue:
	destroy_workqueue(log->l_ioend_workqueue);
out_free_iclog:
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		kmem_free(iclog->ic_data);
		kmem_free(iclog);
	}
out_free_log:
	kmem_free(log);
out:
	return ERR_PTR(error);
}	/* xlog_alloc_log */


/*
 * Write out the commit record of a transaction associated with the given
 * ticket. Return the lsn of the commit record.
 */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp)
{
	struct xfs_mount	*mp = log->l_mp;
	int			error;
	struct xfs_log_iovec reg = {
		.i_addr = NULL,
		.i_len = 0,
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};

	ASSERT_ALWAYS(iclog);
	error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
			   XLOG_COMMIT_TRANS);
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

/*
 * Push on the buffer cache code if we ever use more than 75% of the on-disk
 * log space. This code pushes on the lsn which would supposedly free up
 * the 25% which we want to leave free. We may need to adopt a policy which
 * pushes on an lsn which is further along in the log once we reach the high
 * water mark. In this manner, we would be creating a low water mark.
 */
STATIC void
xlog_grant_push_ail(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn = 0;
	xfs_lsn_t	last_sync_lsn;
	int		free_blocks;
	int		free_bytes;
	int		threshold_block;
	int		threshold_cycle;
	int		free_threshold;

	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);

	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
	free_blocks = BTOBBT(free_bytes);

	/*
	 * Set the threshold for the minimum number of free blocks in the
	 * log to the maximum of what the caller needs, one quarter of the
	 * log, and 256 blocks.
	 */
	free_threshold = BTOBB(need_bytes);
	free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
	free_threshold = max(free_threshold, 256);
	if (free_blocks >= free_threshold)
		return;

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
						&threshold_block);
	threshold_block += free_threshold;
	if (threshold_block >= log->l_logBBsize) {
		threshold_block -= log->l_logBBsize;
		threshold_cycle += 1;
	}
	threshold_lsn = xlog_assign_lsn(threshold_cycle,
					threshold_block);
	/*
	 * Don't pass in an lsn greater than the lsn of the last
	 * log record known to be on disk. Use a snapshot of the last sync lsn
	 * so that it doesn't change between the compare and the set.
	 */
	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
		threshold_lsn = last_sync_lsn;

	/*
	 * Get the transaction layer to kick the dirty buffers out to
	 * disk asynchronously. No point in trying to do this if
	 * the filesystem is shutting down.
	 */
	if (!XLOG_FORCED_SHUTDOWN(log))
		xfs_ail_push(log->l_ailp, threshold_lsn);
}

/*
 * Stamp cycle number in every block
 */
STATIC void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	__be32			cycle_lsn;
	char			*dp;

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size); i++) {
		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
			break;
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = iclog->ic_data;

		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++)
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
	}
}

/*
 * Calculate the checksum for a log buffer.
 *
 * This is a little more complicated than it should be because the various
 * headers and the actual data are non-contiguous.
 */
__le32
xlog_cksum(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{
	uint32_t		crc;

	/* first generate the crc for the record header ... */
	crc = xfs_start_cksum_update((char *)rhead,
			      sizeof(struct xlog_rec_header),
			      offsetof(struct xlog_rec_header, h_crc));

	/* ... then for additional cycle data for v2 logs ... */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
		int		i;
		int		xheads;

		xheads = size / XLOG_HEADER_CYCLE_SIZE;
		if (size % XLOG_HEADER_CYCLE_SIZE)
			xheads++;

		for (i = 1; i < xheads; i++) {
			crc = crc32c(crc, &xhdr[i].hic_xheader,
				     sizeof(struct xlog_rec_ext_header));
		}
	}
	/* ... and finally for the payload */
	crc = crc32c(crc, dp, size);

	return xfs_end_cksum(crc);
}

static void
xlog_bio_end_io(
	struct bio		*bio)
{
	struct xlog_in_core	*iclog = bio->bi_private;

	queue_work(iclog->ic_log->l_ioend_workqueue,
		   &iclog->ic_end_io_work);
}

static void
xlog_map_iclog_data(
	struct bio		*bio,
	void			*data,
	size_t			count)
{
	do {
		struct page	*page = kmem_to_page(data);
		unsigned int	off = offset_in_page(data);
		size_t		len = min_t(size_t, count, PAGE_SIZE - off);

		WARN_ON_ONCE(bio_add_page(bio, page, len, off) != len);

		data += len;
		count -= len;
	} while (count);
}

STATIC void
xlog_write_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint64_t		bno,
	unsigned int		count,
	bool			need_flush)
{
	ASSERT(bno < log->l_logBBsize);

	/*
	 * We lock the iclogbufs here so that we can serialise against I/O
	 * completion during unmount. We might be processing a shutdown
	 * triggered during unmount, and that can occur asynchronously to the
	 * unmount thread, and hence we need to ensure that completes before
	 * tearing down the iclogbufs. Hence we need to hold the buffer lock
	 * across the log IO to achieve that.
	 */
	down(&iclog->ic_sema);
	if (unlikely(iclog->ic_state & XLOG_STATE_IOERROR)) {
		/*
		 * It would seem logical to return EIO here, but we rely on
		 * the log state machine to propagate I/O errors instead of
		 * doing it here. We kick off the state machine and unlock
		 * the buffer manually, the code needs to be kept in sync
		 * with the I/O completion path.
		 */
		xlog_state_done_syncing(iclog, XFS_LI_ABORTED);
		up(&iclog->ic_sema);
		return;
	}

	iclog->ic_io_size = count;

	bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
	bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
	iclog->ic_bio.bi_private = iclog;
	iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_FUA;
	if (need_flush)
		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;

	xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, iclog->ic_io_size);
	if (is_vmalloc_addr(iclog->ic_data))
		flush_kernel_vmap_range(iclog->ic_data, iclog->ic_io_size);

	/*
	 * If this log buffer would straddle the end of the log we will have
	 * to split it up into two bios, so that we can continue at the start.
	 */
	if (bno + BTOBB(count) > log->l_logBBsize) {
		struct bio *split;

		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
				  GFP_NOIO, &fs_bio_set);
		bio_chain(split, &iclog->ic_bio);
		submit_bio(split);

		/* restart at logical offset zero for the remainder */
		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
	}

	submit_bio(&iclog->ic_bio);
}
/*
 * We need to bump cycle number for the part of the iclog that is
 * written to the start of the log. Watch out for the header magic
 * number case, though.
 */
static void
xlog_split_iclog(
	struct xlog		*log,
	void			*data,
	uint64_t		bno,
	unsigned int		count)
{
	unsigned int		split_offset = BBTOB(log->l_logBBsize - bno);
	unsigned int		i;

	for (i = split_offset; i < count; i += BBSIZE) {
		uint32_t cycle = get_unaligned_be32(data + i);

		if (++cycle == XLOG_HEADER_MAGIC_NUM)
			cycle++;
		put_unaligned_be32(cycle, data + i);
	}
}

static int
xlog_calc_iclog_size(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint32_t		*roundoff)
{
	uint32_t		count_init, count;
	bool			use_lsunit;

	use_lsunit = xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
			log->l_mp->m_sb.sb_logsunit > 1;

	/* Add for LR header */
	count_init = log->l_iclog_hsize + iclog->ic_offset;

	/* Round out the log write size */
	if (use_lsunit) {
		/* we have a v2 stripe unit to use */
		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
	} else {
		count = BBTOB(BTOBB(count_init));
	}

	ASSERT(count >= count_init);
	*roundoff = count - count_init;

	if (use_lsunit)
		ASSERT(*roundoff < log->l_mp->m_sb.sb_logsunit);
	else
		ASSERT(*roundoff < BBTOB(1));
	return count;
}
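
/*
 * Worked example for xlog_calc_iclog_size(): with a 4096 byte header,
 * 9000 bytes of data in the iclog and a 32kB log stripe unit, count_init
 * is 13096 bytes and the write is rounded up to the next stripe unit
 * boundary, so count = 32768 and roundoff = 19672 bytes. Without a
 * stripe unit the write is only rounded up to the next 512 byte basic
 * block: count = 13312 and roundoff = 216.
 */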
/*
 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
 * fashion. Previously, we should have moved the current iclog
 * ptr in the log to point to the next available iclog. This allows further
 * writes to continue while this code syncs out an iclog ready to go.
 * Before an in-core log can be written out, the data section must be scanned
 * to save away the 1st word of each BBSIZE block into the header. We replace
 * it with the current cycle count. Each BBSIZE block is tagged with the
 * cycle count because there is an implicit assumption that drives will
 * guarantee that entire 512 byte blocks get written at once. In other words,
 * we can't have part of a 512 byte block written and part not written. By
 * tagging each block, we will know which blocks are valid when recovering
 * after an unclean shutdown.
 *
 * This routine is single threaded on the iclog. No other thread can be in
 * this routine with the same iclog. Changing contents of iclog can therefore
 * be done without grabbing the state machine lock. Updating the global
 * log will require grabbing the lock though.
 *
 * The entire log manager uses a logical block numbering scheme. Only
 * xlog_write_iclog knows about the fact that the log may not start with
 * block zero on a given device.
 */
STATIC void
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	unsigned int		count;		/* byte count of bwrite */
	unsigned int		roundoff;	/* roundoff to BB or stripe */
	uint64_t		bno;
	unsigned int		size;
	bool			need_flush = true, split = false;

	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);

	count = xlog_calc_iclog_size(log, iclog, &roundoff);

	/* move grant heads by roundoff in sync */
	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);

	/* put cycle number in every block */
	xlog_pack_data(log, iclog, roundoff);

	/* real byte length */
	size = iclog->ic_offset;
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb))
		size += roundoff;
	iclog->ic_header.h_len = cpu_to_be32(size);

	XFS_STATS_INC(log->l_mp, xs_log_writes);
	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));

	bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));

	/* Do we need to split this write into 2 parts? */
	if (bno + BTOBB(count) > log->l_logBBsize) {
		xlog_split_iclog(log, &iclog->ic_header, bno, count);
		split = true;
	}

	/* calculate the checksum */
	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
					    iclog->ic_datap, size);
	/*
	 * Intentionally corrupt the log record CRC based on the error injection
	 * frequency, if defined. This facilitates testing log recovery in the
	 * event of torn writes. Hence, set the IOABORT state to abort the log
	 * write on I/O completion and shutdown the fs. The subsequent mount
	 * detects the bad CRC and attempts to recover.
	 */
#ifdef DEBUG
	if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
		iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
		iclog->ic_fail_crc = true;
		xfs_warn(log->l_mp,
	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
			 be64_to_cpu(iclog->ic_header.h_lsn));
	}
#endif

	/*
	 * Flush the data device before flushing the log to make sure all
	 * metadata written back from the AIL actually made it to disk before
	 * stamping the new log tail LSN into the log buffer. For an external
	 * log we need to issue the flush explicitly, and unfortunately
	 * synchronously here; for an internal log we can simply use the block
	 * layer state machine for preflushes.
	 */
	if (log->l_targ != log->l_mp->m_ddev_targp || split) {
		xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
		need_flush = false;
	}

	xlog_verify_iclog(log, iclog, count);
	xlog_write_iclog(log, iclog, bno, count, need_flush);
}
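
/*
 * Worked example for the split-write case above: in a 1000 block log, an
 * iclog whose LSN maps to bno = 990 with a count of 20 blocks would run
 * 10 blocks past the physical end of the log, so xlog_split_iclog()
 * bumps the cycle stamps in the wrapping 10 blocks and xlog_write_iclog()
 * submits two bios, one for blocks 990-999 and one restarting at block 0.
 */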
1924 */ 1925 iclog = log->l_iclog; 1926 for (i = 0; i < log->l_iclog_bufs; i++) { 1927 down(&iclog->ic_sema); 1928 up(&iclog->ic_sema); 1929 iclog = iclog->ic_next; 1930 } 1931 1932 iclog = log->l_iclog; 1933 for (i = 0; i < log->l_iclog_bufs; i++) { 1934 next_iclog = iclog->ic_next; 1935 kmem_free(iclog->ic_data); 1936 kmem_free(iclog); 1937 iclog = next_iclog; 1938 } 1939 1940 log->l_mp->m_log = NULL; 1941 destroy_workqueue(log->l_ioend_workqueue); 1942 kmem_free(log); 1943 } /* xlog_dealloc_log */ 1944 1945 /* 1946 * Update counters atomically now that memcpy is done. 1947 */ 1948 /* ARGSUSED */ 1949 static inline void 1950 xlog_state_finish_copy( 1951 struct xlog *log, 1952 struct xlog_in_core *iclog, 1953 int record_cnt, 1954 int copy_bytes) 1955 { 1956 spin_lock(&log->l_icloglock); 1957 1958 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); 1959 iclog->ic_offset += copy_bytes; 1960 1961 spin_unlock(&log->l_icloglock); 1962 } /* xlog_state_finish_copy */ 1963 1964 1965 1966 1967 /* 1968 * print out info relating to regions written which consume 1969 * the reservation 1970 */ 1971 void 1972 xlog_print_tic_res( 1973 struct xfs_mount *mp, 1974 struct xlog_ticket *ticket) 1975 { 1976 uint i; 1977 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); 1978 1979 /* match with XLOG_REG_TYPE_* in xfs_log.h */ 1980 #define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str 1981 static char *res_type_str[] = { 1982 REG_TYPE_STR(BFORMAT, "bformat"), 1983 REG_TYPE_STR(BCHUNK, "bchunk"), 1984 REG_TYPE_STR(EFI_FORMAT, "efi_format"), 1985 REG_TYPE_STR(EFD_FORMAT, "efd_format"), 1986 REG_TYPE_STR(IFORMAT, "iformat"), 1987 REG_TYPE_STR(ICORE, "icore"), 1988 REG_TYPE_STR(IEXT, "iext"), 1989 REG_TYPE_STR(IBROOT, "ibroot"), 1990 REG_TYPE_STR(ILOCAL, "ilocal"), 1991 REG_TYPE_STR(IATTR_EXT, "iattr_ext"), 1992 REG_TYPE_STR(IATTR_BROOT, "iattr_broot"), 1993 REG_TYPE_STR(IATTR_LOCAL, "iattr_local"), 1994 REG_TYPE_STR(QFORMAT, "qformat"), 1995 REG_TYPE_STR(DQUOT, "dquot"), 1996 REG_TYPE_STR(QUOTAOFF, "quotaoff"), 1997 REG_TYPE_STR(LRHEADER, "LR header"), 1998 REG_TYPE_STR(UNMOUNT, "unmount"), 1999 REG_TYPE_STR(COMMIT, "commit"), 2000 REG_TYPE_STR(TRANSHDR, "trans header"), 2001 REG_TYPE_STR(ICREATE, "inode create"), 2002 REG_TYPE_STR(RUI_FORMAT, "rui_format"), 2003 REG_TYPE_STR(RUD_FORMAT, "rud_format"), 2004 REG_TYPE_STR(CUI_FORMAT, "cui_format"), 2005 REG_TYPE_STR(CUD_FORMAT, "cud_format"), 2006 REG_TYPE_STR(BUI_FORMAT, "bui_format"), 2007 REG_TYPE_STR(BUD_FORMAT, "bud_format"), 2008 }; 2009 BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1); 2010 #undef REG_TYPE_STR 2011 2012 xfs_warn(mp, "ticket reservation summary:"); 2013 xfs_warn(mp, " unit res = %d bytes", 2014 ticket->t_unit_res); 2015 xfs_warn(mp, " current res = %d bytes", 2016 ticket->t_curr_res); 2017 xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)", 2018 ticket->t_res_arr_sum, ticket->t_res_o_flow); 2019 xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)", 2020 ticket->t_res_num_ophdrs, ophdr_spc); 2021 xfs_warn(mp, " ophdr + reg = %u bytes", 2022 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc); 2023 xfs_warn(mp, " num regions = %u", 2024 ticket->t_res_num); 2025 2026 for (i = 0; i < ticket->t_res_num; i++) { 2027 uint r_type = ticket->t_res_arr[i].r_type; 2028 xfs_warn(mp, "region[%u]: %s - %u bytes", i, 2029 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 
2030 "bad-rtype" : res_type_str[r_type]), 2031 ticket->t_res_arr[i].r_len); 2032 } 2033 } 2034 2035 /* 2036 * Print a summary of the transaction. 2037 */ 2038 void 2039 xlog_print_trans( 2040 struct xfs_trans *tp) 2041 { 2042 struct xfs_mount *mp = tp->t_mountp; 2043 struct xfs_log_item *lip; 2044 2045 /* dump core transaction and ticket info */ 2046 xfs_warn(mp, "transaction summary:"); 2047 xfs_warn(mp, " log res = %d", tp->t_log_res); 2048 xfs_warn(mp, " log count = %d", tp->t_log_count); 2049 xfs_warn(mp, " flags = 0x%x", tp->t_flags); 2050 2051 xlog_print_tic_res(mp, tp->t_ticket); 2052 2053 /* dump each log item */ 2054 list_for_each_entry(lip, &tp->t_items, li_trans) { 2055 struct xfs_log_vec *lv = lip->li_lv; 2056 struct xfs_log_iovec *vec; 2057 int i; 2058 2059 xfs_warn(mp, "log item: "); 2060 xfs_warn(mp, " type = 0x%x", lip->li_type); 2061 xfs_warn(mp, " flags = 0x%lx", lip->li_flags); 2062 if (!lv) 2063 continue; 2064 xfs_warn(mp, " niovecs = %d", lv->lv_niovecs); 2065 xfs_warn(mp, " size = %d", lv->lv_size); 2066 xfs_warn(mp, " bytes = %d", lv->lv_bytes); 2067 xfs_warn(mp, " buf len = %d", lv->lv_buf_len); 2068 2069 /* dump each iovec for the log item */ 2070 vec = lv->lv_iovecp; 2071 for (i = 0; i < lv->lv_niovecs; i++) { 2072 int dumplen = min(vec->i_len, 32); 2073 2074 xfs_warn(mp, " iovec[%d]", i); 2075 xfs_warn(mp, " type = 0x%x", vec->i_type); 2076 xfs_warn(mp, " len = %d", vec->i_len); 2077 xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i); 2078 xfs_hex_dump(vec->i_addr, dumplen); 2079 2080 vec++; 2081 } 2082 } 2083 } 2084 2085 /* 2086 * Calculate the potential space needed by the log vector. Each region gets 2087 * its own xlog_op_header_t and may need to be double word aligned. 2088 */ 2089 static int 2090 xlog_write_calc_vec_length( 2091 struct xlog_ticket *ticket, 2092 struct xfs_log_vec *log_vector) 2093 { 2094 struct xfs_log_vec *lv; 2095 int headers = 0; 2096 int len = 0; 2097 int i; 2098 2099 /* acct for start rec of xact */ 2100 if (ticket->t_flags & XLOG_TIC_INITED) 2101 headers++; 2102 2103 for (lv = log_vector; lv; lv = lv->lv_next) { 2104 /* we don't write ordered log vectors */ 2105 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) 2106 continue; 2107 2108 headers += lv->lv_niovecs; 2109 2110 for (i = 0; i < lv->lv_niovecs; i++) { 2111 struct xfs_log_iovec *vecp = &lv->lv_iovecp[i]; 2112 2113 len += vecp->i_len; 2114 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type); 2115 } 2116 } 2117 2118 ticket->t_res_num_ophdrs += headers; 2119 len += headers * sizeof(struct xlog_op_header); 2120 2121 return len; 2122 } 2123 2124 /* 2125 * If first write for transaction, insert start record We can't be trying to 2126 * commit if we are inited. We can't have any "partial_copy" if we are inited. 
2127 */
2128 static int
2129 xlog_write_start_rec(
2130 struct xlog_op_header *ophdr,
2131 struct xlog_ticket *ticket)
2132 {
2133 if (!(ticket->t_flags & XLOG_TIC_INITED))
2134 return 0;
2135
2136 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2137 ophdr->oh_clientid = ticket->t_clientid;
2138 ophdr->oh_len = 0;
2139 ophdr->oh_flags = XLOG_START_TRANS;
2140 ophdr->oh_res2 = 0;
2141
2142 ticket->t_flags &= ~XLOG_TIC_INITED;
2143
2144 return sizeof(struct xlog_op_header);
2145 }
2146
2147 static xlog_op_header_t *
2148 xlog_write_setup_ophdr(
2149 struct xlog *log,
2150 struct xlog_op_header *ophdr,
2151 struct xlog_ticket *ticket,
2152 uint flags)
2153 {
2154 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2155 ophdr->oh_clientid = ticket->t_clientid;
2156 ophdr->oh_res2 = 0;
2157
2158 /* are we copying a commit or unmount record? */
2159 ophdr->oh_flags = flags;
2160
2161 /*
2162 * We've seen logs corrupted with bad transaction client ids. This
2163 * check makes sure that XFS doesn't write any more of them out. Turn
2164 * a bad clientid into an EIO and shut down the filesystem.
2165 */
2166 switch (ophdr->oh_clientid) {
2167 case XFS_TRANSACTION:
2168 case XFS_VOLUME:
2169 case XFS_LOG:
2170 break;
2171 default:
2172 xfs_warn(log->l_mp,
2173 "Bad XFS transaction clientid 0x%x in ticket "PTR_FMT,
2174 ophdr->oh_clientid, ticket);
2175 return NULL;
2176 }
2177
2178 return ophdr;
2179 }
2180
2181 /*
2182 * Set up the parameters of the region copy into the log. This has
2183 * to handle a region write split across multiple log buffers - this
2184 * state is kept external to this function so that this code can
2185 * be written in an obvious, self-documenting manner.
2186 */
2187 static int
2188 xlog_write_setup_copy(
2189 struct xlog_ticket *ticket,
2190 struct xlog_op_header *ophdr,
2191 int space_available,
2192 int space_required,
2193 int *copy_off,
2194 int *copy_len,
2195 int *last_was_partial_copy,
2196 int *bytes_consumed)
2197 {
2198 int still_to_copy;
2199
2200 still_to_copy = space_required - *bytes_consumed;
2201 *copy_off = *bytes_consumed;
2202
2203 if (still_to_copy <= space_available) {
2204 /* write of region completes here */
2205 *copy_len = still_to_copy;
2206 ophdr->oh_len = cpu_to_be32(*copy_len);
2207 if (*last_was_partial_copy)
2208 ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
2209 *last_was_partial_copy = 0;
2210 *bytes_consumed = 0;
2211 return 0;
2212 }
2213
2214 /* partial write of region, needs extra log op header reservation */
2215 *copy_len = space_available;
2216 ophdr->oh_len = cpu_to_be32(*copy_len);
2217 ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2218 if (*last_was_partial_copy)
2219 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
2220 *bytes_consumed += *copy_len;
2221 (*last_was_partial_copy)++;
2222
2223 /* account for new log op header */
2224 ticket->t_curr_res -= sizeof(struct xlog_op_header);
2225 ticket->t_res_num_ophdrs++;
2226
2227 return sizeof(struct xlog_op_header);
2228 }
2229
2230 static int
2231 xlog_write_copy_finish(
2232 struct xlog *log,
2233 struct xlog_in_core *iclog,
2234 uint flags,
2235 int *record_cnt,
2236 int *data_cnt,
2237 int *partial_copy,
2238 int *partial_copy_len,
2239 int log_offset,
2240 struct xlog_in_core **commit_iclog)
2241 {
2242 if (*partial_copy) {
2243 /*
2244 * This iclog has already been marked WANT_SYNC by
2245 * xlog_state_get_iclog_space.
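 *
 * Illustrative sequence (hypothetical sizes): a 600 byte region with
 * only 200 bytes left in the iclog gets copy_len = 200 and an ophdr
 * flagged XLOG_CONTINUE_TRANS from xlog_write_setup_copy(). We then
 * arrive here with *partial_copy set, flush this iclog out, and
 * xlog_write() loops back to xlog_state_get_iclog_space() for the
 * remaining 400 bytes, which go out under an ophdr flagged
 * XLOG_WAS_CONT_TRANS (plus XLOG_END_TRANS on the final piece).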
2246 */
2247 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2248 *record_cnt = 0;
2249 *data_cnt = 0;
2250 return xlog_state_release_iclog(log, iclog);
2251 }
2252
2253 *partial_copy = 0;
2254 *partial_copy_len = 0;
2255
2256 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
2257 /* no more space in this iclog - push it. */
2258 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2259 *record_cnt = 0;
2260 *data_cnt = 0;
2261
2262 spin_lock(&log->l_icloglock);
2263 xlog_state_want_sync(log, iclog);
2264 spin_unlock(&log->l_icloglock);
2265
2266 if (!commit_iclog)
2267 return xlog_state_release_iclog(log, iclog);
2268 ASSERT(flags & XLOG_COMMIT_TRANS);
2269 *commit_iclog = iclog;
2270 }
2271
2272 return 0;
2273 }
2274
2275 /*
2276 * Write some region out to in-core log
2277 *
2278 * This will be called when writing externally provided regions or when
2279 * writing out a commit record for a given transaction.
2280 *
2281 * General algorithm:
2282 * 1. Find total length of this write. This may include adding to the
2283 * lengths passed in.
2284 * 2. Check whether we violate the ticket's reservation.
2285 * 3. While writing to this iclog
2286 * A. Reserve as much space in this iclog as we can get
2287 * B. If this is first write, save away start lsn
2288 * C. While writing this region:
2289 * 1. If first write of transaction, write start record
2290 * 2. Write log operation header (header per region)
2291 * 3. Find out if we can fit entire region into this iclog
2292 * 4. Potentially, verify destination memcpy ptr
2293 * 5. Memcpy (partial) region
2294 * 6. If partial copy, release iclog; otherwise, continue
2295 * copying more regions into current iclog
2296 * 4. Mark want sync bit (in simulation mode)
2297 * 5. Release iclog for potential flush to on-disk log.
2298 *
2299 * ERRORS:
2300 * 1. Panic if reservation is overrun. This should never happen since
2301 * reservation amounts are generated internal to the filesystem.
2302 * NOTES:
2303 * 1. Tickets are single threaded data structures.
2304 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2305 * syncing routine. When a single log_write region needs to span
2306 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2307 * on all log operation writes which don't contain the end of the
2308 * region. The XLOG_END_TRANS bit is used for the in-core log
2309 * operation which contains the end of the continued log_write region.
2310 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2311 * we don't really know exactly how much space will be used. As a result,
2312 * we don't update ic_offset until the end when we know exactly how many
2313 * bytes have been written out.
2314 */
2315 int
2316 xlog_write(
2317 struct xlog *log,
2318 struct xfs_log_vec *log_vector,
2319 struct xlog_ticket *ticket,
2320 xfs_lsn_t *start_lsn,
2321 struct xlog_in_core **commit_iclog,
2322 uint flags)
2323 {
2324 struct xlog_in_core *iclog = NULL;
2325 struct xfs_log_iovec *vecp;
2326 struct xfs_log_vec *lv;
2327 int len;
2328 int index;
2329 int partial_copy = 0;
2330 int partial_copy_len = 0;
2331 int contwr = 0;
2332 int record_cnt = 0;
2333 int data_cnt = 0;
2334 int error;
2335
2336 *start_lsn = 0;
2337
2338 len = xlog_write_calc_vec_length(ticket, log_vector);
2339
2340 /*
2341 * Region headers and bytes are already accounted for.
2342 * We only need to take into account start records and
2343 * split regions in this function.
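 *
 * For example, the first xlog_write() call for a transaction consumes
 * sizeof(xlog_op_header_t) of t_curr_res below for its start record,
 * and a call that carries XLOG_COMMIT_TRANS consumes another for the
 * commit record's ophdr; the ordinary per-region ophdrs were already
 * charged against the ticket when the regions were reserved, so they
 * are not deducted again here.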
2344 */
2345 if (ticket->t_flags & XLOG_TIC_INITED)
2346 ticket->t_curr_res -= sizeof(xlog_op_header_t);
2347
2348 /*
2349 * Commit record headers need to be accounted for. These
2350 * come in as separate writes so are easy to detect.
2351 */
2352 if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
2353 ticket->t_curr_res -= sizeof(xlog_op_header_t);
2354
2355 if (ticket->t_curr_res < 0) {
2356 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2357 "ctx ticket reservation ran out. Need to up reservation");
2358 xlog_print_tic_res(log->l_mp, ticket);
2359 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
2360 }
2361
2362 index = 0;
2363 lv = log_vector;
2364 vecp = lv->lv_iovecp;
2365 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2366 void *ptr;
2367 int log_offset;
2368
2369 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2370 &contwr, &log_offset);
2371 if (error)
2372 return error;
2373
2374 ASSERT(log_offset <= iclog->ic_size - 1);
2375 ptr = iclog->ic_datap + log_offset;
2376
2377 /* start_lsn is the first lsn written to. That's all we need. */
2378 if (!*start_lsn)
2379 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2380
2381 /*
2382 * This loop writes out as many regions as can fit in the amount
2383 * of space which was allocated by xlog_state_get_iclog_space().
2384 */
2385 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2386 struct xfs_log_iovec *reg;
2387 struct xlog_op_header *ophdr;
2388 int start_rec_copy;
2389 int copy_len;
2390 int copy_off;
2391 bool ordered = false;
2392
2393 /* ordered log vectors have no regions to write */
2394 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
2395 ASSERT(lv->lv_niovecs == 0);
2396 ordered = true;
2397 goto next_lv;
2398 }
2399
2400 reg = &vecp[index];
2401 ASSERT(reg->i_len % sizeof(int32_t) == 0);
2402 ASSERT((unsigned long)ptr % sizeof(int32_t) == 0);
2403
2404 start_rec_copy = xlog_write_start_rec(ptr, ticket);
2405 if (start_rec_copy) {
2406 record_cnt++;
2407 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2408 start_rec_copy);
2409 }
2410
2411 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
2412 if (!ophdr)
2413 return -EIO;
2414
2415 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2416 sizeof(struct xlog_op_header));
2417
2418 len += xlog_write_setup_copy(ticket, ophdr,
2419 iclog->ic_size-log_offset,
2420 reg->i_len,
2421 &copy_off, &copy_len,
2422 &partial_copy,
2423 &partial_copy_len);
2424 xlog_verify_dest_ptr(log, ptr);
2425
2426 /*
2427 * Copy region.
2428 *
2429 * Unmount records just log an opheader, so can have
2430 * empty payloads with no data region to copy. Hence we
2431 * only copy the payload if the vector says it has data
2432 * to copy.
2433 */
2434 ASSERT(copy_len >= 0);
2435 if (copy_len > 0) {
2436 memcpy(ptr, reg->i_addr + copy_off, copy_len);
2437 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2438 copy_len);
2439 }
2440 copy_len += start_rec_copy + sizeof(xlog_op_header_t);
2441 record_cnt++;
2442 data_cnt += contwr ? copy_len : 0;
2443
2444 error = xlog_write_copy_finish(log, iclog, flags,
2445 &record_cnt, &data_cnt,
2446 &partial_copy,
2447 &partial_copy_len,
2448 log_offset,
2449 commit_iclog);
2450 if (error)
2451 return error;
2452
2453 /*
2454 * if we had a partial copy, we need to get more iclog
2455 * space but we don't want to increment the region
2456 * index because there is still more in this region to
2457 * write.
2458 *
2459 * If we completed writing this region, and we flushed
2460 * the iclog (indicated by resetting of the record
2461 * count), then we also need to get more log space. If
2462 * this was the last record, though, we are done and
2463 * can just return.
2464 */
2465 if (partial_copy)
2466 break;
2467
2468 if (++index == lv->lv_niovecs) {
2469 next_lv:
2470 lv = lv->lv_next;
2471 index = 0;
2472 if (lv)
2473 vecp = lv->lv_iovecp;
2474 }
2475 if (record_cnt == 0 && !ordered) {
2476 if (!lv)
2477 return 0;
2478 break;
2479 }
2480 }
2481 }
2482
2483 ASSERT(len == 0);
2484
2485 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2486 if (!commit_iclog)
2487 return xlog_state_release_iclog(log, iclog);
2488
2489 ASSERT(flags & XLOG_COMMIT_TRANS);
2490 *commit_iclog = iclog;
2491 return 0;
2492 }
2493
2494
2495 /*****************************************************************************
2496 *
2497 * State Machine functions
2498 *
2499 *****************************************************************************
2500 */
2501
2502 /* Clean iclogs starting from the head. This ordering must be
2503 * maintained, so an iclog doesn't become ACTIVE beyond one that
2504 * is SYNCING. This is also required to maintain the notion that we use
2505 * an ordered wait queue to hold off would-be writers to the log when every
2506 * iclog is trying to sync to disk.
2507 *
2508 * State Change: DIRTY -> ACTIVE
2509 */
2510 STATIC void
2511 xlog_state_clean_log(
2512 struct xlog *log)
2513 {
2514 xlog_in_core_t *iclog;
2515 int changed = 0;
2516
2517 iclog = log->l_iclog;
2518 do {
2519 if (iclog->ic_state == XLOG_STATE_DIRTY) {
2520 iclog->ic_state = XLOG_STATE_ACTIVE;
2521 iclog->ic_offset = 0;
2522 ASSERT(list_empty_careful(&iclog->ic_callbacks));
2523 /*
2524 * If the number of ops in this iclog indicates it just
2525 * contains the dummy transaction, we can
2526 * change state into IDLE (the second time around).
2527 * Otherwise we should change the state to indicate
2528 * that a dummy record is NEEDed.
2529 * We don't need to cover the dummy.
2530 */
2531 if (!changed &&
2532 (be32_to_cpu(iclog->ic_header.h_num_logops) ==
2533 XLOG_COVER_OPS)) {
2534 changed = 1;
2535 } else {
2536 /*
2537 * We have two dirty iclogs so start over.
2538 * This could also be because the number of ops
2539 * indicates this is not the dummy going out.
2540 */
2541 changed = 2;
2542 }
2543 iclog->ic_header.h_num_logops = 0;
2544 memset(iclog->ic_header.h_cycle_data, 0,
2545 sizeof(iclog->ic_header.h_cycle_data));
2546 iclog->ic_header.h_lsn = 0;
2547 } else if (iclog->ic_state == XLOG_STATE_ACTIVE)
2548 /* do nothing */;
2549 else
2550 break; /* stop cleaning */
2551 iclog = iclog->ic_next;
2552 } while (iclog != log->l_iclog);
2553
2554 /* log is locked when we are called */
2555 /*
2556 * Change state for the dummy log recording.
2557 * We usually go to NEED. But we go to NEED2 if 'changed' indicates
2558 * we are done writing the dummy record.
2559 * If we are done with the second dummy record (DONE2), then
2560 * we go to IDLE.
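 *
 * Sketch of a typical covering sequence (the NEED -> DONE transitions
 * happen in xfs_log_need_covered(), not here):
 *
 *	NEED  -> DONE	first dummy record committed
 *	DONE  -> NEED2	that dummy's iclog is cleaned here (changed == 1)
 *	NEED2 -> DONE2	second dummy record committed
 *	DONE2 -> IDLE	second dummy's iclog is cleaned here, log covered
 *
 * Seeing anything other than a lone dummy record (changed == 2) drops
 * the state back to NEED and covering starts over.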
2561 */ 2562 if (changed) { 2563 switch (log->l_covered_state) { 2564 case XLOG_STATE_COVER_IDLE: 2565 case XLOG_STATE_COVER_NEED: 2566 case XLOG_STATE_COVER_NEED2: 2567 log->l_covered_state = XLOG_STATE_COVER_NEED; 2568 break; 2569 2570 case XLOG_STATE_COVER_DONE: 2571 if (changed == 1) 2572 log->l_covered_state = XLOG_STATE_COVER_NEED2; 2573 else 2574 log->l_covered_state = XLOG_STATE_COVER_NEED; 2575 break; 2576 2577 case XLOG_STATE_COVER_DONE2: 2578 if (changed == 1) 2579 log->l_covered_state = XLOG_STATE_COVER_IDLE; 2580 else 2581 log->l_covered_state = XLOG_STATE_COVER_NEED; 2582 break; 2583 2584 default: 2585 ASSERT(0); 2586 } 2587 } 2588 } /* xlog_state_clean_log */ 2589 2590 STATIC xfs_lsn_t 2591 xlog_get_lowest_lsn( 2592 struct xlog *log) 2593 { 2594 struct xlog_in_core *iclog = log->l_iclog; 2595 xfs_lsn_t lowest_lsn = 0, lsn; 2596 2597 do { 2598 if (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY)) 2599 continue; 2600 2601 lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2602 if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0) 2603 lowest_lsn = lsn; 2604 } while ((iclog = iclog->ic_next) != log->l_iclog); 2605 2606 return lowest_lsn; 2607 } 2608 2609 STATIC void 2610 xlog_state_do_callback( 2611 struct xlog *log, 2612 bool aborted, 2613 struct xlog_in_core *ciclog) 2614 { 2615 xlog_in_core_t *iclog; 2616 xlog_in_core_t *first_iclog; /* used to know when we've 2617 * processed all iclogs once */ 2618 int flushcnt = 0; 2619 xfs_lsn_t lowest_lsn; 2620 int ioerrors; /* counter: iclogs with errors */ 2621 int loopdidcallbacks; /* flag: inner loop did callbacks*/ 2622 int funcdidcallbacks; /* flag: function did callbacks */ 2623 int repeats; /* for issuing console warnings if 2624 * looping too many times */ 2625 int wake = 0; 2626 2627 spin_lock(&log->l_icloglock); 2628 first_iclog = iclog = log->l_iclog; 2629 ioerrors = 0; 2630 funcdidcallbacks = 0; 2631 repeats = 0; 2632 2633 do { 2634 /* 2635 * Scan all iclogs starting with the one pointed to by the 2636 * log. Reset this starting point each time the log is 2637 * unlocked (during callbacks). 2638 * 2639 * Keep looping through iclogs until one full pass is made 2640 * without running any callbacks. 2641 */ 2642 first_iclog = log->l_iclog; 2643 iclog = log->l_iclog; 2644 loopdidcallbacks = 0; 2645 repeats++; 2646 2647 do { 2648 2649 /* skip all iclogs in the ACTIVE & DIRTY states */ 2650 if (iclog->ic_state & 2651 (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) { 2652 iclog = iclog->ic_next; 2653 continue; 2654 } 2655 2656 /* 2657 * Between marking a filesystem SHUTDOWN and stopping 2658 * the log, we do flush all iclogs to disk (if there 2659 * wasn't a log I/O error). So, we do want things to 2660 * go smoothly in case of just a SHUTDOWN w/o a 2661 * LOG_IO_ERROR. 2662 */ 2663 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { 2664 /* 2665 * Can only perform callbacks in order. Since 2666 * this iclog is not in the DONE_SYNC/ 2667 * DO_CALLBACK state, we skip the rest and 2668 * just try to clean up. If we set our iclog 2669 * to DO_CALLBACK, we will not process it when 2670 * we retry since a previous iclog is in the 2671 * CALLBACK and the state cannot change since 2672 * we are holding the l_icloglock. 
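 *
 * As an example of the ordering this preserves: if iclog A at
 * LSN 100 is still SYNCING while iclog B at LSN 200 has already
 * reached DONE_SYNC, B is parked in DO_CALLBACK and its
 * callbacks are deferred until A completes, so completion
 * callbacks always run in LSN order.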
2673 */
2674 if (!(iclog->ic_state &
2675 (XLOG_STATE_DONE_SYNC |
2676 XLOG_STATE_DO_CALLBACK))) {
2677 if (ciclog && (ciclog->ic_state ==
2678 XLOG_STATE_DONE_SYNC)) {
2679 ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
2680 }
2681 break;
2682 }
2683 /*
2684 * We now have an iclog that is in either the
2685 * DO_CALLBACK or DONE_SYNC states. The other
2686 * states (WANT_SYNC, SYNCING, or CALLBACK) were
2687 * caught by the if statement above and are going
2688 * to be cleaned up by other threads (i.e. we
2689 * aren't doing their callbacks here).
2690 */
2691
2692 /*
2693 * We will do one more check here to see if we
2694 * have chased our tail around.
2695 */
2696
2697 lowest_lsn = xlog_get_lowest_lsn(log);
2698 if (lowest_lsn &&
2699 XFS_LSN_CMP(lowest_lsn,
2700 be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
2701 iclog = iclog->ic_next;
2702 continue; /* Leave this iclog for
2703 * another thread */
2704 }
2705
2706 iclog->ic_state = XLOG_STATE_CALLBACK;
2707
2708
2709 /*
2710 * Completion of an iclog IO does not imply that
2711 * a transaction has completed, as transactions
2712 * can be large enough to span many iclogs. We
2713 * cannot change the tail of the log half way
2714 * through a transaction as this may be the only
2715 * transaction in the log and moving the tail to
2716 * point to the middle of it will prevent
2717 * recovery from finding the start of the
2718 * transaction. Hence we should only update the
2719 * last_sync_lsn if this iclog contains
2720 * transaction completion callbacks on it.
2721 *
2722 * We have to do this before we drop the
2723 * icloglock to ensure we are the only one that
2724 * can update it.
2725 */
2726 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2727 be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
2728 if (!list_empty_careful(&iclog->ic_callbacks))
2729 atomic64_set(&log->l_last_sync_lsn,
2730 be64_to_cpu(iclog->ic_header.h_lsn));
2731
2732 } else
2733 ioerrors++;
2734
2735 spin_unlock(&log->l_icloglock);
2736
2737 /*
2738 * Keep processing entries in the callback list until
2739 * we come around and it is empty. We need to
2740 * atomically see that the list is empty and change the
2741 * state to DIRTY so that we don't miss any more
2742 * callbacks being added.
2743 */
2744 spin_lock(&iclog->ic_callback_lock);
2745 while (!list_empty(&iclog->ic_callbacks)) {
2746 LIST_HEAD(tmp);
2747
2748 list_splice_init(&iclog->ic_callbacks, &tmp);
2749
2750 spin_unlock(&iclog->ic_callback_lock);
2751 xlog_cil_process_committed(&tmp, aborted);
2752 spin_lock(&iclog->ic_callback_lock);
2753 }
2754
2755 loopdidcallbacks++;
2756 funcdidcallbacks++;
2757
2758 spin_lock(&log->l_icloglock);
2759 spin_unlock(&iclog->ic_callback_lock);
2760 if (!(iclog->ic_state & XLOG_STATE_IOERROR))
2761 iclog->ic_state = XLOG_STATE_DIRTY;
2762
2763 /*
2764 * Transition from DIRTY to ACTIVE if applicable.
2765 * NOP if STATE_IOERROR.
2766 */
2767 xlog_state_clean_log(log);
2768
2769 /* wake up threads waiting in xfs_log_force() */
2770 wake_up_all(&iclog->ic_force_wait);
2771
2772 iclog = iclog->ic_next;
2773 } while (first_iclog != iclog);
2774
2775 if (repeats > 5000) {
2776 flushcnt += repeats;
2777 repeats = 0;
2778 xfs_warn(log->l_mp,
2779 "%s: possible infinite loop (%d iterations)",
2780 __func__, flushcnt);
2781 }
2782 } while (!ioerrors && loopdidcallbacks);
2783
2784 #ifdef DEBUG
2785 /*
2786 * Make one last gasp attempt to see if iclogs are being left in limbo.
2787 * If the above loop finds an iclog earlier than the current iclog and
2788 * in one of the syncing states, the current iclog is put into
2789 * DO_CALLBACK and the callbacks are deferred to the completion of the
2790 * earlier iclog. Walk the iclogs in order and make sure that no iclog
2791 * is in DO_CALLBACK unless an earlier iclog is in one of the syncing
2792 * states.
2793 *
2794 * Note that SYNCING|IOABORT is a valid state so we cannot just check
2795 * for ic_state == SYNCING.
2796 */
2797 if (funcdidcallbacks) {
2798 first_iclog = iclog = log->l_iclog;
2799 do {
2800 ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
2801 /*
2802 * Terminate the loop if iclogs are found in states
2803 * which will cause other threads to clean up iclogs.
2804 *
2805 * SYNCING - i/o completion will go through logs
2806 * DONE_SYNC - interrupt thread should be waiting for
2807 * l_icloglock
2808 * IOERROR - give up hope all ye who enter here
2809 */
2810 if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2811 iclog->ic_state & XLOG_STATE_SYNCING ||
2812 iclog->ic_state == XLOG_STATE_DONE_SYNC ||
2813 iclog->ic_state == XLOG_STATE_IOERROR)
2814 break;
2815 iclog = iclog->ic_next;
2816 } while (first_iclog != iclog);
2817 }
2818 #endif
2819
2820 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
2821 wake = 1;
2822 spin_unlock(&log->l_icloglock);
2823
2824 if (wake)
2825 wake_up_all(&log->l_flush_wait);
2826 }
2827
2828
2829 /*
2830 * Finish transitioning this iclog to the dirty state.
2831 *
2832 * Make sure that we completely execute this routine only when this is
2833 * the last call to the iclog. There is a good chance that iclog flushes,
2834 * when we reach the end of the physical log, get turned into 2 separate
2835 * calls to bwrite. Hence, one iclog flush could generate two calls to this
2836 * routine. By using the reference count bwritecnt, we guarantee that only
2837 * the second completion goes through.
2838 *
2839 * Callbacks could take time, so they are done outside the scope of the
2840 * global state machine log lock.
2841 */
2842 STATIC void
2843 xlog_state_done_syncing(
2844 struct xlog_in_core *iclog,
2845 bool aborted)
2846 {
2847 struct xlog *log = iclog->ic_log;
2848
2849 spin_lock(&log->l_icloglock);
2850
2851 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
2852 iclog->ic_state == XLOG_STATE_IOERROR);
2853 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2854
2855 /*
2856 * If we got an error, either on the first buffer, or in the case of
2857 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
2858 * and none should ever be written to disk again.
2859 */
2860 if (iclog->ic_state != XLOG_STATE_IOERROR)
2861 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2862
2863 /*
2864 * Someone could be sleeping prior to writing out the next
2865 * iclog buffer; we wake them all. One will get to do the
2866 * I/O, the others get to wait for the result.
2867 */
2868 wake_up_all(&iclog->ic_write_wait);
2869 spin_unlock(&log->l_icloglock);
2870 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */
2871 } /* xlog_state_done_syncing */
2872
2873
2874 /*
2875 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2876 * sleep. We wait on the flush queue on the head iclog as that should be
2877 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2878 * we will wait here and all new writes will sleep until a sync completes.
2879 *
2880 * The in-core logs are used in a circular fashion.
They are not used 2882 * out-of-order even when an iclog past the head is free. 2883 * 2884 * return: 2885 * * log_offset where xlog_write() can start writing into the in-core 2886 * log's data space. 2887 * * in-core log pointer to which xlog_write() should write. 2888 * * boolean indicating this is a continued write to an in-core log. 2889 * If this is the last write, then the in-core log's offset field 2890 * needs to be incremented, depending on the amount of data which 2891 * is copied. 2892 */ 2893 STATIC int 2894 xlog_state_get_iclog_space( 2895 struct xlog *log, 2896 int len, 2897 struct xlog_in_core **iclogp, 2898 struct xlog_ticket *ticket, 2899 int *continued_write, 2900 int *logoffsetp) 2901 { 2902 int log_offset; 2903 xlog_rec_header_t *head; 2904 xlog_in_core_t *iclog; 2905 int error; 2906 2907 restart: 2908 spin_lock(&log->l_icloglock); 2909 if (XLOG_FORCED_SHUTDOWN(log)) { 2910 spin_unlock(&log->l_icloglock); 2911 return -EIO; 2912 } 2913 2914 iclog = log->l_iclog; 2915 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 2916 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); 2917 2918 /* Wait for log writes to have flushed */ 2919 xlog_wait(&log->l_flush_wait, &log->l_icloglock); 2920 goto restart; 2921 } 2922 2923 head = &iclog->ic_header; 2924 2925 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ 2926 log_offset = iclog->ic_offset; 2927 2928 /* On the 1st write to an iclog, figure out lsn. This works 2929 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are 2930 * committing to. If the offset is set, that's how many blocks 2931 * must be written. 2932 */ 2933 if (log_offset == 0) { 2934 ticket->t_curr_res -= log->l_iclog_hsize; 2935 xlog_tic_add_region(ticket, 2936 log->l_iclog_hsize, 2937 XLOG_REG_TYPE_LRHEADER); 2938 head->h_cycle = cpu_to_be32(log->l_curr_cycle); 2939 head->h_lsn = cpu_to_be64( 2940 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); 2941 ASSERT(log->l_curr_block >= 0); 2942 } 2943 2944 /* If there is enough room to write everything, then do it. Otherwise, 2945 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC 2946 * bit is on, so this will get flushed out. Don't update ic_offset 2947 * until you know exactly how many bytes get copied. Therefore, wait 2948 * until later to update ic_offset. 2949 * 2950 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's 2951 * can fit into remaining data section. 2952 */ 2953 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { 2954 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 2955 2956 /* 2957 * If I'm the only one writing to this iclog, sync it to disk. 2958 * We need to do an atomic compare and decrement here to avoid 2959 * racing with concurrent atomic_dec_and_lock() calls in 2960 * xlog_state_release_iclog() when there is more than one 2961 * reference to the iclog. 2962 */ 2963 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) { 2964 /* we are the only one */ 2965 spin_unlock(&log->l_icloglock); 2966 error = xlog_state_release_iclog(log, iclog); 2967 if (error) 2968 return error; 2969 } else { 2970 spin_unlock(&log->l_icloglock); 2971 } 2972 goto restart; 2973 } 2974 2975 /* Do we have enough room to write the full amount in the remainder 2976 * of this iclog? Or must we continue a write on the next iclog and 2977 * mark this iclog as completely taken? In the case where we switch 2978 * iclogs (to mark it taken), this particular iclog will release/sync 2979 * to disk in xlog_write(). 
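 *
 * For example (hypothetical sizes): with ic_size = 32k and
 * ic_offset = 30k, a 2k write fits, bumps ic_offset to 32k and
 * returns *continued_write = 0, while a 4k write switches the iclog
 * to WANT_SYNC and returns *continued_write = 1, leaving ic_offset
 * untouched until xlog_write() knows how many bytes actually landed
 * here.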
2980 */
2981 if (len <= iclog->ic_size - iclog->ic_offset) {
2982 *continued_write = 0;
2983 iclog->ic_offset += len;
2984 } else {
2985 *continued_write = 1;
2986 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2987 }
2988 *iclogp = iclog;
2989
2990 ASSERT(iclog->ic_offset <= iclog->ic_size);
2991 spin_unlock(&log->l_icloglock);
2992
2993 *logoffsetp = log_offset;
2994 return 0;
2995 } /* xlog_state_get_iclog_space */
2996
2997 /* The first cnt-1 times through here we don't need to
2998 * move the grant write head because the permanent
2999 * reservation has reserved cnt times the unit amount.
3000 * Release part of current permanent unit reservation and
3001 * reset current reservation to be one unit's worth. Also
3002 * move grant reservation head forward.
3003 */
3004 STATIC void
3005 xlog_regrant_reserve_log_space(
3006 struct xlog *log,
3007 struct xlog_ticket *ticket)
3008 {
3009 trace_xfs_log_regrant_reserve_enter(log, ticket);
3010
3011 if (ticket->t_cnt > 0)
3012 ticket->t_cnt--;
3013
3014 xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3015 ticket->t_curr_res);
3016 xlog_grant_sub_space(log, &log->l_write_head.grant,
3017 ticket->t_curr_res);
3018 ticket->t_curr_res = ticket->t_unit_res;
3019 xlog_tic_reset_res(ticket);
3020
3021 trace_xfs_log_regrant_reserve_sub(log, ticket);
3022
3023 /* just return if we still have some of the pre-reserved space */
3024 if (ticket->t_cnt > 0)
3025 return;
3026
3027 xlog_grant_add_space(log, &log->l_reserve_head.grant,
3028 ticket->t_unit_res);
3029
3030 trace_xfs_log_regrant_reserve_exit(log, ticket);
3031
3032 ticket->t_curr_res = ticket->t_unit_res;
3033 xlog_tic_reset_res(ticket);
3034 } /* xlog_regrant_reserve_log_space */
3035
3036
3037 /*
3038 * Give back the space left from a reservation.
3039 *
3040 * All the information we need to make a correct determination of space left
3041 * is present. For non-permanent reservations, things are quite easy. The
3042 * count should have been decremented to zero. We only need to deal with the
3043 * space remaining in the current reservation part of the ticket. If the
3044 * ticket contains a permanent reservation, there may be left over space which
3045 * needs to be released. A count of N means that N-1 refills of the current
3046 * reservation can be done before we need to ask for more space. The first
3047 * one goes to fill up the first current reservation. Once we run out of
3048 * space, the count will stay at zero and the only space remaining will be
3049 * in the current reservation field.
3050 */
3051 STATIC void
3052 xlog_ungrant_log_space(
3053 struct xlog *log,
3054 struct xlog_ticket *ticket)
3055 {
3056 int bytes;
3057
3058 if (ticket->t_cnt > 0)
3059 ticket->t_cnt--;
3060
3061 trace_xfs_log_ungrant_enter(log, ticket);
3062 trace_xfs_log_ungrant_sub(log, ticket);
3063
3064 /*
3065 * If this is a permanent reservation ticket, we may be able to free
3066 * up more space based on the remaining count.
3067 */
3068 bytes = ticket->t_curr_res;
3069 if (ticket->t_cnt > 0) {
3070 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3071 bytes += ticket->t_unit_res*ticket->t_cnt;
3072 }
3073
3074 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3075 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3076
3077 trace_xfs_log_ungrant_exit(log, ticket);
3078
3079 xfs_log_space_wake(log->l_mp);
3080 }
3081
3082 /*
3083 * Flush iclog to disk if this is the last reference to the given iclog and
3084 * the WANT_SYNC bit is set.
3085 *
3086 * When this function is entered, the iclog is not necessarily in the
3087 * WANT_SYNC state. It may be sitting around waiting to get filled.
3088 *
3090 */
3091 STATIC int
3092 xlog_state_release_iclog(
3093 struct xlog *log,
3094 struct xlog_in_core *iclog)
3095 {
3096 int sync = 0; /* do we sync? */
3097
3098 if (iclog->ic_state & XLOG_STATE_IOERROR)
3099 return -EIO;
3100
3101 ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
3102 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
3103 return 0;
3104
3105 if (iclog->ic_state & XLOG_STATE_IOERROR) {
3106 spin_unlock(&log->l_icloglock);
3107 return -EIO;
3108 }
3109 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
3110 iclog->ic_state == XLOG_STATE_WANT_SYNC);
3111
3112 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
3113 /* update tail before writing to iclog */
3114 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
3115 sync++;
3116 iclog->ic_state = XLOG_STATE_SYNCING;
3117 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
3118 xlog_verify_tail_lsn(log, iclog, tail_lsn);
3119 /* cycle incremented when incrementing curr_block */
3120 }
3121 spin_unlock(&log->l_icloglock);
3122
3123 /*
3124 * We let the log lock go, so it's possible that we hit a log I/O
3125 * error or some other SHUTDOWN condition that marks the iclog
3126 * as XLOG_STATE_IOERROR before the bwrite. However, we know that
3127 * this iclog has consistent data, so we ignore IOERROR
3128 * flags after this point.
3129 */
3130 if (sync)
3131 xlog_sync(log, iclog);
3132 return 0;
3133 } /* xlog_state_release_iclog */
3134
3135
3136 /*
3137 * This routine will mark the current iclog in the ring as WANT_SYNC
3138 * and move the current iclog pointer to the next iclog in the ring.
3139 * When this routine is called from xlog_state_get_iclog_space(), the
3140 * exact size of the iclog has not yet been determined; all we know is
3141 * that we have run out of space in this log record.
3142 */
3143 STATIC void
3144 xlog_state_switch_iclogs(
3145 struct xlog *log,
3146 struct xlog_in_core *iclog,
3147 int eventual_size)
3148 {
3149 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3150 if (!eventual_size)
3151 eventual_size = iclog->ic_offset;
3152 iclog->ic_state = XLOG_STATE_WANT_SYNC;
3153 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3154 log->l_prev_block = log->l_curr_block;
3155 log->l_prev_cycle = log->l_curr_cycle;
3156
3157 /* roll log?: ic_offset changed later */
3158 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3159
3160 /* Round up to next log-sunit */
3161 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3162 log->l_mp->m_sb.sb_logsunit > 1) {
3163 uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
3164 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3165 }
3166
3167 if (log->l_curr_block >= log->l_logBBsize) {
3168 /*
3169 * Rewind the current block before the cycle is bumped to make
3170 * sure that the combined LSN never transiently moves forward
3171 * when the log wraps to the next cycle. This is to support the
3172 * unlocked sample of these fields from xlog_valid_lsn(). Most
3173 * other cases should acquire l_icloglock.
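 *
 * Concretely (illustrative numbers): with l_logBBsize == 2000,
 * l_curr_cycle == 7 and a new l_curr_block of 2010, an unlocked
 * reader can observe (7, 2010), then (7, 10) after the rewind, then
 * (8, 10) after the cycle bump. Had the cycle been bumped first, the
 * transient (8, 2010) would compare *ahead* of the final LSN, which
 * is exactly what xlog_valid_lsn() must never see.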
3174 */
3175 log->l_curr_block -= log->l_logBBsize;
3176 ASSERT(log->l_curr_block >= 0);
3177 smp_wmb();
3178 log->l_curr_cycle++;
3179 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3180 log->l_curr_cycle++;
3181 }
3182 ASSERT(iclog == log->l_iclog);
3183 log->l_iclog = iclog->ic_next;
3184 } /* xlog_state_switch_iclogs */
3185
3186 /*
3187 * Write out all data in the in-core log as of this exact moment in time.
3188 *
3189 * Data may be written to the in-core log during this call. However,
3190 * we don't guarantee this data will be written out. A change from past
3191 * implementation means this routine will *not* write out zero length LRs.
3192 *
3193 * Basically, we try and perform an intelligent scan of the in-core logs.
3194 * If we determine there is no flushable data, we just return. There is no
3195 * flushable data if:
3196 *
3197 * 1. the current iclog is active and has no data; the previous iclog
3198 * is in the active or dirty state.
3199 * 2. the current iclog is dirty, and the previous iclog is in the
3200 * active or dirty state.
3201 *
3202 * We may sleep if:
3203 *
3204 * 1. the current iclog is in neither the active nor the dirty state.
3205 * 2. the current iclog is dirty, and the previous iclog is in
3206 * neither the active nor the dirty state.
3207 * 3. the current iclog is active, and there is another thread writing
3208 * to this particular iclog.
3209 * 4. a) the current iclog is active and has no other writers
3210 * b) when we return from flushing out this iclog, it is still
3211 * in neither the active nor the dirty state.
3212 */
3213 int
3214 xfs_log_force(
3215 struct xfs_mount *mp,
3216 uint flags)
3217 {
3218 struct xlog *log = mp->m_log;
3219 struct xlog_in_core *iclog;
3220 xfs_lsn_t lsn;
3221
3222 XFS_STATS_INC(mp, xs_log_force);
3223 trace_xfs_log_force(mp, 0, _RET_IP_);
3224
3225 xlog_cil_force(log);
3226
3227 spin_lock(&log->l_icloglock);
3228 iclog = log->l_iclog;
3229 if (iclog->ic_state & XLOG_STATE_IOERROR)
3230 goto out_error;
3231
3232 if (iclog->ic_state == XLOG_STATE_DIRTY ||
3233 (iclog->ic_state == XLOG_STATE_ACTIVE &&
3234 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3235 /*
3236 * If the head is dirty or (active and empty), then we need to
3237 * look at the previous iclog.
3238 *
3239 * If the previous iclog is active or dirty we are done. There
3240 * is nothing to sync out. Otherwise, we attach ourselves to the
3241 * previous iclog and go to sleep.
3242 */
3243 iclog = iclog->ic_prev;
3244 if (iclog->ic_state == XLOG_STATE_ACTIVE ||
3245 iclog->ic_state == XLOG_STATE_DIRTY)
3246 goto out_unlock;
3247 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3248 if (atomic_read(&iclog->ic_refcnt) == 0) {
3249 /*
3250 * We are the only one with access to this iclog.
3251 *
3252 * Flush it out now. There should be a roundoff of zero
3253 * to show that someone has already taken care of the
3254 * roundoff from the previous sync.
3255 */
3256 atomic_inc(&iclog->ic_refcnt);
3257 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3258 xlog_state_switch_iclogs(log, iclog, 0);
3259 spin_unlock(&log->l_icloglock);
3260
3261 if (xlog_state_release_iclog(log, iclog))
3262 return -EIO;
3263
3264 spin_lock(&log->l_icloglock);
3265 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
3266 iclog->ic_state == XLOG_STATE_DIRTY)
3267 goto out_unlock;
3268 } else {
3269 /*
3270 * Someone else is writing to this iclog.
3271 *
3272 * Use its call to flush out the data. However, the
3273 * other thread may not force out this LR, so we mark
3274 * it WANT_SYNC.
3275 */ 3276 xlog_state_switch_iclogs(log, iclog, 0); 3277 } 3278 } else { 3279 /* 3280 * If the head iclog is not active nor dirty, we just attach 3281 * ourselves to the head and go to sleep if necessary. 3282 */ 3283 ; 3284 } 3285 3286 if (!(flags & XFS_LOG_SYNC)) 3287 goto out_unlock; 3288 3289 if (iclog->ic_state & XLOG_STATE_IOERROR) 3290 goto out_error; 3291 XFS_STATS_INC(mp, xs_log_force_sleep); 3292 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3293 if (iclog->ic_state & XLOG_STATE_IOERROR) 3294 return -EIO; 3295 return 0; 3296 3297 out_unlock: 3298 spin_unlock(&log->l_icloglock); 3299 return 0; 3300 out_error: 3301 spin_unlock(&log->l_icloglock); 3302 return -EIO; 3303 } 3304 3305 static int 3306 __xfs_log_force_lsn( 3307 struct xfs_mount *mp, 3308 xfs_lsn_t lsn, 3309 uint flags, 3310 int *log_flushed, 3311 bool already_slept) 3312 { 3313 struct xlog *log = mp->m_log; 3314 struct xlog_in_core *iclog; 3315 3316 spin_lock(&log->l_icloglock); 3317 iclog = log->l_iclog; 3318 if (iclog->ic_state & XLOG_STATE_IOERROR) 3319 goto out_error; 3320 3321 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { 3322 iclog = iclog->ic_next; 3323 if (iclog == log->l_iclog) 3324 goto out_unlock; 3325 } 3326 3327 if (iclog->ic_state == XLOG_STATE_DIRTY) 3328 goto out_unlock; 3329 3330 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3331 /* 3332 * We sleep here if we haven't already slept (e.g. this is the 3333 * first time we've looked at the correct iclog buf) and the 3334 * buffer before us is going to be sync'ed. The reason for this 3335 * is that if we are doing sync transactions here, by waiting 3336 * for the previous I/O to complete, we can allow a few more 3337 * transactions into this iclog before we close it down. 3338 * 3339 * Otherwise, we mark the buffer WANT_SYNC, and bump up the 3340 * refcnt so we can release the log (which drops the ref count). 3341 * The state switch keeps new transaction commits from using 3342 * this buffer. When the current commits finish writing into 3343 * the buffer, the refcount will drop to zero and the buffer 3344 * will go out then. 3345 */ 3346 if (!already_slept && 3347 (iclog->ic_prev->ic_state & 3348 (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) { 3349 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); 3350 3351 XFS_STATS_INC(mp, xs_log_force_sleep); 3352 3353 xlog_wait(&iclog->ic_prev->ic_write_wait, 3354 &log->l_icloglock); 3355 return -EAGAIN; 3356 } 3357 atomic_inc(&iclog->ic_refcnt); 3358 xlog_state_switch_iclogs(log, iclog, 0); 3359 spin_unlock(&log->l_icloglock); 3360 if (xlog_state_release_iclog(log, iclog)) 3361 return -EIO; 3362 if (log_flushed) 3363 *log_flushed = 1; 3364 spin_lock(&log->l_icloglock); 3365 } 3366 3367 if (!(flags & XFS_LOG_SYNC) || 3368 (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) 3369 goto out_unlock; 3370 3371 if (iclog->ic_state & XLOG_STATE_IOERROR) 3372 goto out_error; 3373 3374 XFS_STATS_INC(mp, xs_log_force_sleep); 3375 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3376 if (iclog->ic_state & XLOG_STATE_IOERROR) 3377 return -EIO; 3378 return 0; 3379 3380 out_unlock: 3381 spin_unlock(&log->l_icloglock); 3382 return 0; 3383 out_error: 3384 spin_unlock(&log->l_icloglock); 3385 return -EIO; 3386 } 3387 3388 /* 3389 * Force the in-core log to disk for a specific LSN. 3390 * 3391 * Find in-core log with lsn. 3392 * If it is in the DIRTY state, just return. 3393 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC 3394 * state and go to sleep or return. 
3395 * If it is in any other state, go to sleep or return.
3396 *
3397 * Synchronous forces are implemented with a wait queue. All callers trying
3398 * to force a given lsn to disk must wait on the queue attached to the
3399 * specific in-core log. When the given in-core log finally completes its
3400 * write to disk, that thread will wake up all threads waiting on the queue.
3401 */
3402 int
3403 xfs_log_force_lsn(
3404 struct xfs_mount *mp,
3405 xfs_lsn_t lsn,
3406 uint flags,
3407 int *log_flushed)
3408 {
3409 int ret;
3410 ASSERT(lsn != 0);
3411
3412 XFS_STATS_INC(mp, xs_log_force);
3413 trace_xfs_log_force(mp, lsn, _RET_IP_);
3414
3415 lsn = xlog_cil_force_lsn(mp->m_log, lsn);
3416 if (lsn == NULLCOMMITLSN)
3417 return 0;
3418
3419 ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false);
3420 if (ret == -EAGAIN)
3421 ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true);
3422 return ret;
3423 }
3424
3425 /*
3426 * Called when we want to mark the current iclog as being ready to sync to
3427 * disk.
3428 */
3429 STATIC void
3430 xlog_state_want_sync(
3431 struct xlog *log,
3432 struct xlog_in_core *iclog)
3433 {
3434 assert_spin_locked(&log->l_icloglock);
3435
3436 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3437 xlog_state_switch_iclogs(log, iclog, 0);
3438 } else {
3439 ASSERT(iclog->ic_state &
3440 (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
3441 }
3442 }
3443
3444
3445 /*****************************************************************************
3446 *
3447 * TICKET functions
3448 *
3449 *****************************************************************************
3450 */
3451
3452 /*
3453 * Free a used ticket when its refcount falls to zero.
3454 */
3455 void
3456 xfs_log_ticket_put(
3457 xlog_ticket_t *ticket)
3458 {
3459 ASSERT(atomic_read(&ticket->t_ref) > 0);
3460 if (atomic_dec_and_test(&ticket->t_ref))
3461 kmem_zone_free(xfs_log_ticket_zone, ticket);
3462 }
3463
3464 xlog_ticket_t *
3465 xfs_log_ticket_get(
3466 xlog_ticket_t *ticket)
3467 {
3468 ASSERT(atomic_read(&ticket->t_ref) > 0);
3469 atomic_inc(&ticket->t_ref);
3470 return ticket;
3471 }
3472
3473 /*
3474 * Figure out the total log space unit (in bytes) that would be
3475 * required for a log ticket.
3476 */
3477 int
3478 xfs_log_calc_unit_res(
3479 struct xfs_mount *mp,
3480 int unit_bytes)
3481 {
3482 struct xlog *log = mp->m_log;
3483 int iclog_space;
3484 uint num_headers;
3485
3486 /*
3487 * Permanent reservations have up to 'cnt'-1 active log operations
3488 * in the log. A unit in this case is the amount of space for one
3489 * of these log operations. Normal reservations have a cnt of 1
3490 * and their unit amount is the total amount of space required.
3491 *
3492 * The following lines of code account for non-transaction data
3493 * which occupy space in the on-disk log.
3494 *
3495 * Normal form of a transaction is:
3496 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3497 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3498 *
3499 * We need to account for all the leadup data and trailer data
3500 * around the transaction data.
3501 * And then we need to account for the worst case in terms of using
3502 * more space.
3503 * The worst case will happen if:
3504 * - the placement of the transaction happens to be such that the
3505 * roundoff is at its maximum
3506 * - the transaction data is synced before the commit record is synced
3507 * i.e.
<transaction-data><roundoff> | <commit-rec><roundoff> 3508 * Therefore the commit record is in its own Log Record. 3509 * This can happen as the commit record is called with its 3510 * own region to xlog_write(). 3511 * This then means that in the worst case, roundoff can happen for 3512 * the commit-rec as well. 3513 * The commit-rec is smaller than padding in this scenario and so it is 3514 * not added separately. 3515 */ 3516 3517 /* for trans header */ 3518 unit_bytes += sizeof(xlog_op_header_t); 3519 unit_bytes += sizeof(xfs_trans_header_t); 3520 3521 /* for start-rec */ 3522 unit_bytes += sizeof(xlog_op_header_t); 3523 3524 /* 3525 * for LR headers - the space for data in an iclog is the size minus 3526 * the space used for the headers. If we use the iclog size, then we 3527 * undercalculate the number of headers required. 3528 * 3529 * Furthermore - the addition of op headers for split-recs might 3530 * increase the space required enough to require more log and op 3531 * headers, so take that into account too. 3532 * 3533 * IMPORTANT: This reservation makes the assumption that if this 3534 * transaction is the first in an iclog and hence has the LR headers 3535 * accounted to it, then the remaining space in the iclog is 3536 * exclusively for this transaction. i.e. if the transaction is larger 3537 * than the iclog, it will be the only thing in that iclog. 3538 * Fundamentally, this means we must pass the entire log vector to 3539 * xlog_write to guarantee this. 3540 */ 3541 iclog_space = log->l_iclog_size - log->l_iclog_hsize; 3542 num_headers = howmany(unit_bytes, iclog_space); 3543 3544 /* for split-recs - ophdrs added when data split over LRs */ 3545 unit_bytes += sizeof(xlog_op_header_t) * num_headers; 3546 3547 /* add extra header reservations if we overrun */ 3548 while (!num_headers || 3549 howmany(unit_bytes, iclog_space) > num_headers) { 3550 unit_bytes += sizeof(xlog_op_header_t); 3551 num_headers++; 3552 } 3553 unit_bytes += log->l_iclog_hsize * num_headers; 3554 3555 /* for commit-rec LR header - note: padding will subsume the ophdr */ 3556 unit_bytes += log->l_iclog_hsize; 3557 3558 /* for roundoff padding for transaction data and one for commit record */ 3559 if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) { 3560 /* log su roundoff */ 3561 unit_bytes += 2 * mp->m_sb.sb_logsunit; 3562 } else { 3563 /* BB roundoff */ 3564 unit_bytes += 2 * BBSIZE; 3565 } 3566 3567 return unit_bytes; 3568 } 3569 3570 /* 3571 * Allocate and initialise a new log ticket. 
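 *
 * Worked example for the reservation gross-up (hypothetical geometry:
 * 32k iclogs, 512 byte LR headers, no v2 stripe unit, 12 byte ophdrs,
 * 16 byte transaction header), for unit_bytes = 4096 passed to
 * xfs_log_calc_unit_res():
 *
 *	4096 + 12 + 16	trans ophdr + trans header
 *	     + 12	start record ophdr
 *	     + 12	one split-rec ophdr (num_headers == 1)
 *	     + 512	one LR header
 *	     + 512	commit record LR header
 *	     + 1024	2 x BBSIZE roundoff (data + commit record)
 *	     = 6196	bytes charged as t_unit_res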
3572 */
3573 struct xlog_ticket *
3574 xlog_ticket_alloc(
3575 struct xlog *log,
3576 int unit_bytes,
3577 int cnt,
3578 char client,
3579 bool permanent,
3580 xfs_km_flags_t alloc_flags)
3581 {
3582 struct xlog_ticket *tic;
3583 int unit_res;
3584
3585 tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
3586 if (!tic)
3587 return NULL;
3588
3589 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
3590
3591 atomic_set(&tic->t_ref, 1);
3592 tic->t_task = current;
3593 INIT_LIST_HEAD(&tic->t_queue);
3594 tic->t_unit_res = unit_res;
3595 tic->t_curr_res = unit_res;
3596 tic->t_cnt = cnt;
3597 tic->t_ocnt = cnt;
3598 tic->t_tid = prandom_u32();
3599 tic->t_clientid = client;
3600 tic->t_flags = XLOG_TIC_INITED;
3601 if (permanent)
3602 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3603
3604 xlog_tic_reset_res(tic);
3605
3606 return tic;
3607 }
3608
3609
3610 /******************************************************************************
3611 *
3612 * Log debug routines
3613 *
3614 ******************************************************************************
3615 */
3616 #if defined(DEBUG)
3617 /*
3618 * Make sure that the destination ptr is within the valid data region of
3619 * one of the iclogs. This uses backup pointers stored in a different
3620 * part of the log in case we trash the log structure.
3621 */
3622 STATIC void
3623 xlog_verify_dest_ptr(
3624 struct xlog *log,
3625 void *ptr)
3626 {
3627 int i;
3628 int good_ptr = 0;
3629
3630 for (i = 0; i < log->l_iclog_bufs; i++) {
3631 if (ptr >= log->l_iclog_bak[i] &&
3632 ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3633 good_ptr++;
3634 }
3635
3636 if (!good_ptr)
3637 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3638 }
3639
3640 /*
3641 * Check to make sure the grant write head didn't just overlap the tail. If
3642 * the cycles are the same, we can't be overlapping. Otherwise, make sure that
3643 * the cycles differ by exactly one and check the byte count.
3644 *
3645 * This check is run unlocked, so can give false positives. Rather than assert
3646 * on failures, use a warn-once flag and a panic tag to allow the admin to
3647 * determine if they want to panic the machine when such an error occurs. For
3648 * debug kernels this will have the same effect as using an assert but, unlike
3649 * an assert, it can be turned off at runtime.
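 *
 * For example (illustrative values): with the tail at
 * (cycle 4, block 100) and the write head cracked to
 * (cycle 5, space 60000), the cycles differ by exactly one, so the
 * head has wrapped; since 60000 > BBTOB(100), the head's byte count
 * has passed the tail block and the overlap warning fires.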
3650 */ 3651 STATIC void 3652 xlog_verify_grant_tail( 3653 struct xlog *log) 3654 { 3655 int tail_cycle, tail_blocks; 3656 int cycle, space; 3657 3658 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); 3659 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); 3660 if (tail_cycle != cycle) { 3661 if (cycle - 1 != tail_cycle && 3662 !(log->l_flags & XLOG_TAIL_WARN)) { 3663 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3664 "%s: cycle - 1 != tail_cycle", __func__); 3665 log->l_flags |= XLOG_TAIL_WARN; 3666 } 3667 3668 if (space > BBTOB(tail_blocks) && 3669 !(log->l_flags & XLOG_TAIL_WARN)) { 3670 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3671 "%s: space > BBTOB(tail_blocks)", __func__); 3672 log->l_flags |= XLOG_TAIL_WARN; 3673 } 3674 } 3675 } 3676 3677 /* check if it will fit */ 3678 STATIC void 3679 xlog_verify_tail_lsn( 3680 struct xlog *log, 3681 struct xlog_in_core *iclog, 3682 xfs_lsn_t tail_lsn) 3683 { 3684 int blocks; 3685 3686 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { 3687 blocks = 3688 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); 3689 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) 3690 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3691 } else { 3692 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); 3693 3694 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) 3695 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); 3696 3697 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; 3698 if (blocks < BTOBB(iclog->ic_offset) + 1) 3699 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3700 } 3701 } /* xlog_verify_tail_lsn */ 3702 3703 /* 3704 * Perform a number of checks on the iclog before writing to disk. 3705 * 3706 * 1. Make sure the iclogs are still circular 3707 * 2. Make sure we have a good magic number 3708 * 3. Make sure we don't have magic numbers in the data 3709 * 4. Check fields of each log operation header for: 3710 * A. Valid client identifier 3711 * B. tid ptr value falls in valid ptr space (user space code) 3712 * C. Length in log record header is correct according to the 3713 * individual operation headers within record. 3714 * 5. When a bwrite will occur within 5 blocks of the front of the physical 3715 * log, check the preceding blocks of the physical log to make sure all 3716 * the cycle numbers agree with the current cycle number. 
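 *
 * Note for check 4: xlog_pack_data() has already replaced the first
 * four bytes of every 512 byte block with the cycle number, so any
 * ophdr field that starts exactly on a block boundary must be read
 * back from h_cycle_data[] (or the extended headers) instead of from
 * the data area itself - that is what the field_offset tests below
 * are for.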
/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *	   individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *    log, check the preceding blocks of the physical log to make sure all
 *    the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count)
{
	xlog_op_header_t	*ophead;
	xlog_in_core_t		*icptr;
	xlog_in_core_2_t	*xhdr;
	void			*base_ptr, *ptr, *p;
	ptrdiff_t		field_offset;
	uint8_t			clientid;
	int			len, i, j, k, op_len;
	int			idx;

	/* check validity of iclog pointers */
	spin_lock(&log->l_icloglock);
	icptr = log->l_iclog;
	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
		ASSERT(icptr);

	if (icptr != log->l_iclog)
		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
	spin_unlock(&log->l_icloglock);

	/* check log magic numbers */
	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

	base_ptr = ptr = &iclog->ic_header;
	p = &iclog->ic_header;
	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			xfs_emerg(log->l_mp, "%s: unexpected magic num",
				__func__);
	}

	/* check fields */
	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	base_ptr = ptr = iclog->ic_datap;
	ophead = ptr;
	xhdr = iclog->ic_data;
	for (i = 0; i < len; i++) {
		ophead = ptr;

		/* clientid is only 1 byte */
		p = &ophead->oh_clientid;
		field_offset = p - base_ptr;
		if (field_offset & 0x1ff) {
			clientid = ophead->oh_clientid;
		} else {
			idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				clientid = xlog_get_client_id(
					xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				clientid = xlog_get_client_id(
					iclog->ic_header.h_cycle_data[idx]);
			}
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
			xfs_warn(log->l_mp,
				"%s: invalid clientid %d op "PTR_FMT" offset 0x%lx",
				__func__, clientid, ophead,
				(unsigned long)field_offset);

		/* check length */
		p = &ophead->oh_len;
		field_offset = p - base_ptr;
		if (field_offset & 0x1ff) {
			op_len = be32_to_cpu(ophead->oh_len);
		} else {
			idx = BTOBBT((uintptr_t)&ophead->oh_len -
				    (uintptr_t)iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
	}
} /* xlog_verify_iclog */
#endif

/*
 * Mark all iclogs IOERROR.  l_icloglock is held by the caller.
 */
STATIC int
xlog_state_ioerror(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog, *ic;

	iclog = log->l_iclog;
	if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
		/*
		 * Mark all the incore logs IOERROR.
		 * From now on, no further log flushes will be issued or
		 * completed.
		 */
		ic = iclog;
		do {
			ic->ic_state = XLOG_STATE_IOERROR;
			ic = ic->ic_next;
		} while (ic != iclog);
		return 0;
	}
	/*
	 * Return non-zero if the state transition has already happened.
	 */
	return 1;
}
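/*
 * Illustrative sketch, not part of the original source: the do/while idiom
 * used by xlog_state_ioerror() above (and elsewhere in this file) to walk
 * the circular iclog ring exactly once.  Counting iclogs in the IOERROR
 * state is an invented example; like the function above, it assumes the
 * caller holds l_icloglock so the ring cannot change underneath it.
 */
#if 0
static int
example_count_ioerror_iclogs(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog = log->l_iclog;
	int			count = 0;

	do {
		if (iclog->ic_state & XLOG_STATE_IOERROR)
			count++;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);

	return count;
}
#endif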
/*
 * This is called from xfs_force_shutdown, when we're forcibly
 * shutting down the filesystem, typically because of an IO error.
 * Our main objectives here are to make sure that:
 *	a. if !logerror, flush the logs to disk. Anything modified
 *	   after this is ignored.
 *	b. the filesystem gets marked 'SHUTDOWN' for all interested
 *	   parties to find out, 'atomically'.
 *	c. those who're sleeping on log reservations, pinned objects and
 *	   other resources get woken up, and are told the bad news.
 *	d. nothing new gets queued up after (b) and (c) are done.
 *
 * Note: for the !logerror case we need to flush the regions held in memory
 * out to disk first. This needs to be done before the log is marked as
 * shutdown, otherwise the iclog writes will fail.
 */
int
xfs_log_force_umount(
	struct xfs_mount	*mp,
	int			logerror)
{
	struct xlog	*log;
	int		retval;

	log = mp->m_log;

	/*
	 * If this happens during log recovery, don't worry about
	 * locking; the log isn't open for business yet.
	 */
	if (!log ||
	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
		if (mp->m_sb_bp)
			mp->m_sb_bp->b_flags |= XBF_DONE;
		return 0;
	}

	/*
	 * Somebody could've already done the hard work for us.
	 * No need to get locks for this.
	 */
	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
		ASSERT(XLOG_FORCED_SHUTDOWN(log));
		return 1;
	}

	/*
	 * Flush all the completed transactions to disk before marking the
	 * log as being shut down. We need to do this in this order to ensure
	 * that completed operations are safely on disk before we shut down,
	 * and so that we don't have to issue any buffer IO after the
	 * shutdown flags are set.
	 */
	if (!logerror)
		xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Mark the filesystem and the log as being in a shutdown state and
	 * wake everybody up to tell them the bad news.
	 */
	spin_lock(&log->l_icloglock);
	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	/*
	 * Mark the log and the iclogs with IO error flags to prevent any
	 * further log IO from being issued or completed.
	 */
	log->l_flags |= XLOG_IO_ERROR;
	retval = xlog_state_ioerror(log);
	spin_unlock(&log->l_icloglock);

	/*
	 * We don't want anybody waiting for log reservations after this.
	 * That means we have to wake up everybody queued up on reserveq as
	 * well as writeq. In addition, we make sure in
	 * xlog_{re}grant_log_space that we don't enqueue anything once the
	 * SHUTDOWN flag is set, and this action is protected by the grant
	 * locks.
	 */
	xlog_grant_head_wake_all(&log->l_reserve_head);
	xlog_grant_head_wake_all(&log->l_write_head);

	/*
	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push
	 * first as if the log writes were completed. The abort handling in
	 * the log item committed callback functions will do this again under
	 * lock to avoid races.
	 */
	wake_up_all(&log->l_cilp->xc_commit_wait);
	xlog_state_do_callback(log, true, NULL);

#ifdef XFSERRORDEBUG
	{
		xlog_in_core_t	*iclog;

		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		do {
			ASSERT(iclog->ic_callback == 0);
			iclog = iclog->ic_next;
		} while (iclog != log->l_iclog);
		spin_unlock(&log->l_icloglock);
	}
#endif
	/* return non-zero if log IOERROR transition had already happened */
	return retval;
}
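/*
 * Illustrative sketch, not part of the original source: how a caller on an
 * error path might drive the shutdown above.  In practice callers go through
 * xfs_force_shutdown(), as the header comment notes; this is a minimal
 * stand-in.  Passing logerror != 0 skips the pre-shutdown log flush because
 * the log itself is suspect, and a nonzero return just means another thread
 * already performed the transition, per the return convention above.
 */
#if 0
static void
example_shutdown_on_log_error(
	struct xfs_mount	*mp)
{
	if (xfs_log_force_umount(mp, 1))
		xfs_notice(mp, "log shutdown already in progress");
}
#endif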
STATIC int
xlog_iclogs_empty(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/*
		 * Endianness does not matter here, zero is zero in
		 * any language.
		 */
		if (iclog->ic_header.h_num_logops)
			return 0;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return 1;
}

/*
 * Verify that an LSN stamped into a piece of metadata is valid. This is
 * intended for use in read verifiers on v5 superblocks.
 */
bool
xfs_log_check_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn)
{
	struct xlog		*log = mp->m_log;
	bool			valid;

	/*
	 * norecovery mode skips mount-time log processing and
	 * unconditionally resets the in-core LSN. We can't validate in this
	 * mode, but modifications are not allowed anyway, so just return
	 * true.
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
		return true;

	/*
	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This
	 * is handled by recovery and thus safe to ignore here.
	 */
	if (lsn == NULLCOMMITLSN)
		return true;

	valid = xlog_valid_lsn(mp->m_log, lsn);

	/* warn the user about what's gone wrong before verifier failure */
	if (!valid) {
		spin_lock(&log->l_icloglock);
		xfs_warn(mp,
"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
"Please unmount and run xfs_repair (>= v4.3) to resolve.",
			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
			 log->l_curr_cycle, log->l_curr_block);
		spin_unlock(&log->l_icloglock);
	}

	return valid;
}

bool
xfs_log_in_recovery(
	struct xfs_mount	*mp)
{
	struct xlog	*log = mp->m_log;

	return log->l_flags & XLOG_ACTIVE_RECOVERY;
}
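/*
 * Illustrative sketch, not part of the original source: how a v5 read
 * verifier might use xfs_log_check_lsn() above, as its comment suggests.
 * Taking the on-disk LSN as a plain parameter is invented for the example;
 * real verifiers read it from the specific on-disk header they are
 * checking and feed the result into their own corruption reporting.
 */
#if 0
static bool
example_read_verify_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		ondisk_lsn)
{
	/* NULLCOMMITLSN and norecovery mounts are accepted internally */
	return xfs_log_check_lsn(mp, ondisk_lsn);
}
#endif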