// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"
#include "xfs_health.h"

struct kmem_cache	*xfs_log_ticket_cache;

/* Local miscellaneous function prototypes */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(
	struct xlog_in_core	*iclog);
STATIC void xlog_state_do_callback(
	struct xlog		*log);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	void			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog		*log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c)
#define xlog_verify_tail_lsn(a,b)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);

static int
xfs_log_cover(struct xfs_mount *);

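/*
 * Both grant heads pack a (cycle, space) pair into a single atomic64_t so
 * that they can be sampled and updated without taking a lock.  Conceptually
 * (the authoritative helpers live in xfs_log_priv.h):
 *
 *	head_val = ((int64_t)cycle << 32) | space;	- assign
 *	cycle    = head_val >> 32;			- crack
 *	space    = head_val & 0xffffffff;
 *
 * The helpers below update the pair with an atomic64_cmpxchg() loop,
 * wrapping the byte count and stepping the cycle when it crosses the
 * physical end of the log.
 */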
static void
xlog_grant_sub_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		space -= bytes;
		if (space < 0) {
			space += log->l_logsize;
			cycle--;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

static void
xlog_grant_add_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int	tmp;
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		tmp = log->l_logsize - space;
		if (tmp > bytes)
			space += bytes;
		else {
			space = bytes - tmp;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

STATIC void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}

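/*
 * How much space a queued ticket is waiting for.  Only permanent tickets
 * ever sleep on the write head (hence the ASSERT below), and a rolling
 * transaction only needs a single unit of space at a time there.  The
 * reserve head, in contrast, must cover the full count of units up front
 * for a permanent reservation.
 */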
static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{
	if (head == &log->l_write_head) {
		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
		return tic->t_unit_res;
	} else {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			return tic->t_unit_res * tic->t_cnt;
		else
			return tic->t_unit_res;
	}
}

STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;
	bool			woken_task = false;

	list_for_each_entry(tic, &head->waiters, t_queue) {

		/*
		 * There is a chance that the size of the CIL checkpoints in
		 * progress at the last AIL push target calculation resulted in
		 * limiting the target to the log head (l_last_sync_lsn) at the
		 * time. This may not reflect where the log head is now as the
		 * CIL checkpoints may have completed.
		 *
		 * Hence when we are woken here, it may be the head of the log
		 * that has moved rather than the tail. As the tail didn't
		 * move, there still won't be space available for the
		 * reservation we require. However, if the AIL has already
		 * pushed to the target defined by the old log head location,
		 * we will hang here waiting for something else to update the
		 * AIL push target.
		 *
		 * Therefore, if there isn't space to wake the first waiter on
		 * the grant head, we need to push the AIL again to ensure the
		 * target reflects both the current log tail and log head
		 * position before we wait for the tail to move again.
		 */

		need_bytes = xlog_ticket_reservation(log, head, tic);
		if (*free_bytes < need_bytes) {
			if (!woken_task)
				xlog_grant_push_ail(log, need_bytes);
			return false;
		}

		*free_bytes -= need_bytes;
		trace_xfs_log_grant_wake_up(log, tic);
		wake_up_process(tic->t_task);
		woken_task = true;
	}

	return true;
}

STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (xlog_is_shutdown(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (xlog_is_shutdown(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return -EIO;
}

/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!xlog_in_recovery(log));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us. Wake up the first waiters, if we do not wake
	 * up all the waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}

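/*
 * Ticket reservation region accounting: each region written under a ticket
 * is recorded in t_res_arr so that a reservation overrun can be debugged
 * (see xlog_print_tic_res()).  Once the array fills up, further regions are
 * accounted in aggregate via t_res_o_flow.
 */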
static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
	tic->t_res_num = 0;
	tic->t_res_arr_sum = 0;
	tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}

	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}

bool
xfs_log_writable(
	struct xfs_mount	*mp)
{
	/*
	 * Do not write to the log on norecovery mounts, if the data or log
	 * devices are read-only, or if the filesystem is shutdown. Read-only
	 * mounts allow internal writes for log recovery and unmount purposes,
	 * so don't restrict that case.
	 */
	if (xfs_has_norecovery(mp))
		return false;
	if (xfs_readonly_buftarg(mp->m_ddev_targp))
		return false;
	if (xfs_readonly_buftarg(mp->m_log->l_targ))
		return false;
	if (xlog_is_shutdown(mp->m_log))
		return false;
	return true;
}

/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (xlog_is_shutdown(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}

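/*
 * Illustrative life cycle of a reservation: a permanent transaction made
 * with xfs_log_reserve(mp, unit_bytes, cnt, ...) prepays cnt * unit_bytes
 * on both grant heads.  Each transaction roll consumes one of the prepaid
 * counts; once they are used up, every further roll has to wait for a
 * single unit of write grant space via xfs_log_regrant() above.
 */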
/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation. By wasting space in each
 * reservation, we prevent over allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	uint8_t			client,
	bool			permanent)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (xlog_is_shutdown(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent);
	*ticp = tic;

	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
					    : tic->t_unit_res);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}

/*
 * Run all the pending iclog callbacks and wake log force waiters and iclog
 * space waiters so they can process the newly set shutdown state. We really
 * don't care what order we process callbacks here because the log is shut down
 * and so state cannot change on disk anymore.
 *
 * We avoid processing actively referenced iclogs so that we don't run callbacks
 * while the iclog owner might still be preparing the iclog for IO submission.
 * These will be caught by xlog_state_iclog_release() and call this function
 * again to process any callbacks that may have been added to that iclog.
 */
static void
xlog_state_shutdown_callbacks(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog;
	LIST_HEAD(cb_list);

	spin_lock(&log->l_icloglock);
	iclog = log->l_iclog;
	do {
		if (atomic_read(&iclog->ic_refcnt)) {
			/* Reference holder will re-run iclog callbacks. */
			continue;
		}
		list_splice_init(&iclog->ic_callbacks, &cb_list);
		wake_up_all(&iclog->ic_write_wait);
		wake_up_all(&iclog->ic_force_wait);
	} while ((iclog = iclog->ic_next) != log->l_iclog);

	wake_up_all(&log->l_flush_wait);
	spin_unlock(&log->l_icloglock);

	xlog_cil_process_committed(&cb_list);
}

/*
 * Flush iclog to disk if this is the last reference to the given iclog and it
 * is in the WANT_SYNC state.
 *
 * If the caller passes in a non-zero @old_tail_lsn and the current log tail
 * does not match, there may be metadata on disk that must be persisted before
 * this iclog is written.  To satisfy that requirement, set the
 * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
 * log tail value.
 *
 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
 * written to stable storage, and implies that a commit record is contained
 * within the iclog. We need to ensure that the log tail does not move beyond
 * the tail that the first commit record in the iclog ordered against, otherwise
 * correct recovery of that checkpoint becomes dependent on future operations
 * performed on this iclog.
 *
 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
 * current tail into iclog. Once the iclog tail is set, future operations must
 * not modify it, otherwise they potentially violate ordering constraints for
 * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
 * the iclog will get zeroed on activation of the iclog after sync, so we
 * always capture the tail lsn on the iclog on the first NEED_FUA release
 * regardless of the number of active reference counts on this iclog.
 */
int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		old_tail_lsn)
{
	xfs_lsn_t		tail_lsn;
	bool			last_ref;

	lockdep_assert_held(&log->l_icloglock);

	trace_xlog_iclog_release(iclog, _RET_IP_);
	/*
	 * Grabbing the current log tail needs to be atomic w.r.t. the writing
	 * of the tail LSN into the iclog so we guarantee that the log tail does
	 * not move between deciding if a cache flush is required and writing
	 * the LSN into the iclog below.
	 */
	if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
		tail_lsn = xlog_assign_tail_lsn(log->l_mp);

		if (old_tail_lsn && tail_lsn != old_tail_lsn)
			iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;

		if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
		    !iclog->ic_header.h_tail_lsn)
			iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
	}

	last_ref = atomic_dec_and_test(&iclog->ic_refcnt);

	if (xlog_is_shutdown(log)) {
		/*
		 * If there are no more references to this iclog, process the
		 * pending iclog callbacks that were waiting on the release of
		 * this iclog.
		 */
		if (last_ref) {
			spin_unlock(&log->l_icloglock);
			xlog_state_shutdown_callbacks(log);
			spin_lock(&log->l_icloglock);
		}
		return -EIO;
	}

	if (!last_ref)
		return 0;

	if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
		return 0;
	}

	iclog->ic_state = XLOG_STATE_SYNCING;
	if (!iclog->ic_header.h_tail_lsn)
		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
	xlog_verify_tail_lsn(log, iclog);
	trace_xlog_iclog_syncing(iclog, _RET_IP_);

	spin_unlock(&log->l_icloglock);
	xlog_sync(log, iclog);
	spin_lock(&log->l_icloglock);
	return 0;
}

/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	struct xlog	*log;
	bool		fatal = xfs_has_crc(mp);
	int		error = 0;
	int		min_logfsbs;

	if (!xfs_has_norecovery(mp)) {
		xfs_notice(mp, "Mounting V%d Filesystem",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
	} else {
		xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
		ASSERT(xfs_is_readonly(mp));
	}

	log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(log)) {
		error = PTR_ERR(log);
		goto out;
	}
	mp->m_log = log;

	/*
	 * Validate the given log space and drop a critical message via syslog
	 * if the log size is too small, as that would lead to some unexpected
	 * situations in the transaction log space reservation stage.
	 *
	 * Note: we can't just reject the mount if the validation fails. This
	 * would mean that people would have to downgrade their kernel just to
	 * remedy the situation as there is no way to grow the log (short of
	 * black magic surgery with xfs_db).
	 *
	 * We can, however, reject mounts for CRC format filesystems, as the
	 * mkfs binary being used to make the filesystem should never create a
	 * filesystem with a log that is too small.
	 */
	min_logfsbs = xfs_log_calc_minimum_size(mp);

	if (mp->m_sb.sb_logblocks < min_logfsbs) {
		xfs_warn(mp,
		"Log size %d blocks too small, minimum size is %d blocks",
			 mp->m_sb.sb_logblocks, min_logfsbs);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
		xfs_warn(mp,
		"Log size %d blocks too large, maximum size is %lld blocks",
			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
		error = -EINVAL;
	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
		xfs_warn(mp,
		"log size %lld bytes too large, maximum size is %lld bytes",
			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
			 XFS_MAX_LOG_BYTES);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logsunit > 1 &&
		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
		xfs_warn(mp,
		"log stripe unit %u bytes must be a multiple of block size",
			 mp->m_sb.sb_logsunit);
		error = -EINVAL;
		fatal = true;
	}
	if (error) {
		/*
		 * Log check errors are always fatal on v5; or whenever bad
		 * metadata leads to a crash.
		 */
		if (fatal) {
			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
			ASSERT(0);
			goto out_free_log;
		}
		xfs_crit(mp, "Log size out of supported range.");
		xfs_crit(mp,
"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	log->l_ailp = mp->m_ail;

	/*
	 * skip log recovery on a norecovery mount.  pretend it all
	 * just worked.
	 */
	if (!xfs_has_norecovery(mp)) {
		/*
		 * log recovery ignores readonly state and so we need to clear
		 * mount-based read only state so it can write to disk.
		 */
		bool	readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
						      &mp->m_opstate);
		error = xlog_recover(log);
		if (readonly)
			set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				 error);
			xlog_recover_cancel(log);
			goto out_destroy_ail;
		}
	}

	error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
			       "log");
	if (error)
		goto out_destroy_ail;

	/* Normal transactions can now occur */
	clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);

	/*
	 * Now the log has been fully initialised and we know where our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(log);
out:
	return error;
}

/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	bool			readonly;
	int			error = 0;

	if (xfs_has_norecovery(mp)) {
		ASSERT(xfs_is_readonly(mp));
		return 0;
	}

	/*
	 * log recovery ignores readonly state and so we need to clear
	 * mount-based read only state so it can write to disk.
	 */
	readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/*
	 * During the second phase of log recovery, we need iget and
	 * iput to behave like they do for an active filesystem.
	 * xfs_fs_drop_inode needs to be able to prevent the deletion
	 * of inodes before we're done replaying log items on those
	 * inodes.  Turn it off immediately after recovery finishes
	 * so that we don't leak the quota inodes if subsequent mount
	 * activities fail.
	 *
	 * We let all inodes involved in redo item processing end up on
	 * the LRU instead of being evicted immediately so that if we do
	 * something to an unlinked inode, the irele won't cause
	 * premature truncation and freeing of the inode, which results
	 * in log recovery failure.  We have to evict the unreferenced
	 * lru inodes after clearing SB_ACTIVE because we don't
	 * otherwise clean up the lru if there's a subsequent failure in
	 * xfs_mountfs, which leads to us leaking the inodes if nothing
	 * else (e.g. quotacheck) references the inodes before the
	 * mount failure occurs.
	 */
	mp->m_super->s_flags |= SB_ACTIVE;
	xfs_log_work_queue(mp);
	if (xlog_recovery_needed(log))
		error = xlog_recover_finish(log);
	mp->m_super->s_flags &= ~SB_ACTIVE;
	evict_inodes(mp->m_super);

	/*
	 * Drain the buffer LRU after log recovery. This is required for v4
	 * filesystems to avoid leaving around buffers with NULL verifier ops,
	 * but we do it unconditionally to make sure we're always in a clean
	 * cache state after mount.
	 *
	 * Don't push in the error case because the AIL may have pending intents
	 * that aren't removed until recovery is cancelled.
	 */
	if (xlog_recovery_needed(log)) {
		if (!error) {
			xfs_log_force(mp, XFS_LOG_SYNC);
			xfs_ail_push_all_sync(mp->m_ail);
		}
		xfs_notice(mp, "Ending recovery (logdev: %s)",
			   mp->m_logname ? mp->m_logname : "internal");
	} else {
		xfs_info(mp, "Ending clean mount");
	}
	xfs_buftarg_drain(mp->m_ddev_targp);

	clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
	if (readonly)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/* Make sure the log is dead if we're returning failure. */
	ASSERT(!error || xlog_is_shutdown(log));

	return error;
}

/*
 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
void
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{
	xlog_recover_cancel(mp->m_log);
	xfs_log_unmount(mp);
}

/*
 * Flush out the iclog to disk ensuring that device caches are flushed and
 * the iclog hits stable storage before any completion waiters are woken.
 */
static inline int
xlog_force_iclog(
	struct xlog_in_core	*iclog)
{
	atomic_inc(&iclog->ic_refcnt);
	iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
	if (iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
	return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
}

/*
 * Wait for the iclog and all prior iclogs to be written to disk as required by
 * the log force state machine. Waiting on ic_force_wait ensures iclog
 * completions have been ordered and callbacks run before we are woken here,
 * hence guaranteeing that all the iclogs up to this one are on stable storage.
 */
int
xlog_wait_on_iclog(
	struct xlog_in_core	*iclog)
		__releases(iclog->ic_log->l_icloglock)
{
	struct xlog	*log = iclog->ic_log;

	trace_xlog_iclog_wait_on(iclog, _RET_IP_);
	if (!xlog_is_shutdown(log) &&
	    iclog->ic_state != XLOG_STATE_ACTIVE &&
	    iclog->ic_state != XLOG_STATE_DIRTY) {
		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
	} else {
		spin_unlock(&log->l_icloglock);
	}

	if (xlog_is_shutdown(log))
		return -EIO;
	return 0;
}

/*
 * Write out an unmount record using the ticket provided. We have to account for
 * the data space used in the unmount ticket as this write is not done from a
 * transaction context that has already done the accounting for us.
 */
static int
xlog_write_unmount_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{
	struct xfs_unmount_log_format ulf = {
		.magic = XLOG_UNMOUNT_TYPE,
	};
	struct xfs_log_iovec reg = {
		.i_addr = &ulf,
		.i_len = sizeof(ulf),
		.i_type = XLOG_REG_TYPE_UNMOUNT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};

	/* account for space used by record data */
	ticket->t_curr_res -= sizeof(ulf);

	return xlog_write(log, NULL, &vec, ticket, XLOG_UNMOUNT_TRANS);
}

/*
 * Mark the filesystem clean by writing an unmount record to the head of the
 * log.
 */
static void
xlog_unmount_write(
	struct xlog		*log)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xlog_in_core	*iclog;
	struct xlog_ticket	*tic = NULL;
	int			error;

	error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
	if (error)
		goto out_err;

	error = xlog_write_unmount_record(log, tic);
	/*
	 * At this point, we're umounting anyway, so there's no point in
	 * transitioning log state to shutdown. Just continue...
	 */
out_err:
	if (error)
		xfs_alert(mp, "%s: unmount record failed", __func__);

	spin_lock(&log->l_icloglock);
	iclog = log->l_iclog;
	error = xlog_force_iclog(iclog);
	xlog_wait_on_iclog(iclog);

	if (tic) {
		trace_xfs_log_umount_write(log, tic);
		xfs_log_ticket_ungrant(log, tic);
	}
}

static void
xfs_log_unmount_verify_iclog(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog = log->l_iclog;

	do {
		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
		ASSERT(iclog->ic_offset == 0);
	} while ((iclog = iclog->ic_next) != log->l_iclog);
}

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */
static void
xfs_log_unmount_write(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;

	if (!xfs_log_writable(mp))
		return;

	xfs_log_force(mp, XFS_LOG_SYNC);

	if (xlog_is_shutdown(log))
		return;

	/*
	 * If we think the summary counters are bad, avoid writing the unmount
	 * record to force log recovery at next mount, after which the summary
	 * counters will be recalculated.  Refer to xlog_check_unmount_rec for
	 * more details.
	 */
	if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
			XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
		xfs_alert(mp, "%s: will fix summary counters at next mount",
			  __func__);
		return;
	}

	xfs_log_unmount_verify_iclog(log);
	xlog_unmount_write(log);
}

/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can cover the log.
 */
int
xfs_log_quiesce(
	struct xfs_mount	*mp)
{
	/*
	 * Clear log incompat features since we're quiescing the log.  Report
	 * failures, though it's not fatal to have a higher log feature
	 * protection level than the log contents actually require.
	 */
	if (xfs_clear_incompat_log_features(mp)) {
		int error;

		error = xfs_sync_sb(mp, false);
		if (error)
			xfs_warn(mp,
	"Failed to clear log incompat features on quiesce");
	}

	cancel_delayed_work_sync(&mp->m_log->l_work);
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
	 * will push it, xfs_buftarg_wait() will not wait for it. Further,
	 * xfs_buf_iowait() cannot be used because it was pushed with the
	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
	 * the IO to complete.
	 */
	xfs_ail_push_all_sync(mp->m_ail);
	xfs_buftarg_wait(mp->m_ddev_targp);
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);

	return xfs_log_cover(mp);
}

void
xfs_log_clean(
	struct xfs_mount	*mp)
{
	xfs_log_quiesce(mp);
	xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{
	xfs_log_clean(mp);

	xfs_buftarg_drain(mp->m_ddev_targp);

	xfs_trans_ail_destroy(mp);

	xfs_sysfs_del(&mp->m_log->l_kobj);

	xlog_dealloc_log(mp->m_log);
}

void
xfs_log_item_init(
	struct xfs_mount	*mp,
	struct xfs_log_item	*item,
	int			type,
	const struct xfs_item_ops *ops)
{
	item->li_log = mp->m_log;
	item->li_ailp = mp->m_ail;
	item->li_type = type;
	item->li_ops = ops;
	item->li_lv = NULL;

	INIT_LIST_HEAD(&item->li_ail);
	INIT_LIST_HEAD(&item->li_cil);
	INIT_LIST_HEAD(&item->li_bio_list);
	INIT_LIST_HEAD(&item->li_trans);
}

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	if (xlog_is_shutdown(log))
		return;

	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!xlog_in_recovery(log));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!xlog_in_recovery(log));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}

/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered. To begin the transition to the idle state firstly the log needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed, the caller is
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL. Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
static bool
xfs_log_need_covered(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	bool			needed = false;

	if (!xlog_cil_empty(log))
		return false;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		if (xfs_ail_min_lsn(log->l_ailp))
			break;
		if (!xlog_iclogs_empty(log))
			break;

		needed = true;
		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
			log->l_covered_state = XLOG_STATE_COVER_DONE;
		else
			log->l_covered_state = XLOG_STATE_COVER_DONE2;
		break;
	default:
		needed = true;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}

/*
 * Explicitly cover the log. This is similar to background log covering but
 * intended for usage in quiesce codepaths. The caller is responsible for
 * ensuring the log is idle and suitable for covering. The CIL, iclog buffers
 * and AIL must all be empty.
 */
static int
xfs_log_cover(
	struct xfs_mount	*mp)
{
	int			error = 0;
	bool			need_covered;

	ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
		!xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
	       xlog_is_shutdown(mp->m_log));

	if (!xfs_log_writable(mp))
		return 0;

	/*
	 * xfs_log_need_covered() is not idempotent because it progresses the
	 * state machine if the log requires covering. Therefore, we must call
	 * this function once and use the result until we've issued an sb sync.
	 * Do so first to make that abundantly clear.
	 *
	 * Fall into the covering sequence if the log needs covering or the
	 * mount has lazy superblock accounting to sync to disk. The sb sync
	 * used for covering accumulates the in-core counters, so covering
	 * handles this for us.
	 */
	need_covered = xfs_log_need_covered(mp);
	if (!need_covered && !xfs_has_lazysbcount(mp))
		return 0;

	/*
	 * To cover the log, commit the superblock twice (at most) in
	 * independent checkpoints. The first serves as a reference for the
	 * tail pointer. The sync transaction and AIL push empties the AIL and
	 * updates the in-core tail to the LSN of the first checkpoint. The
	 * second commit updates the on-disk tail with the in-core LSN,
	 * covering the log. Push the AIL one more time to leave it empty, as
	 * we found it.
	 */
	do {
		error = xfs_sync_sb(mp, true);
		if (error)
			break;
		xfs_ail_push_all_sync(mp->m_ail);
	} while (xfs_log_need_covered(mp));

	return error;
}

1245 */ 1246 do { 1247 error = xfs_sync_sb(mp, true); 1248 if (error) 1249 break; 1250 xfs_ail_push_all_sync(mp->m_ail); 1251 } while (xfs_log_need_covered(mp)); 1252 1253 return error; 1254 } 1255 1256 /* 1257 * We may be holding the log iclog lock upon entering this routine. 1258 */ 1259 xfs_lsn_t 1260 xlog_assign_tail_lsn_locked( 1261 struct xfs_mount *mp) 1262 { 1263 struct xlog *log = mp->m_log; 1264 struct xfs_log_item *lip; 1265 xfs_lsn_t tail_lsn; 1266 1267 assert_spin_locked(&mp->m_ail->ail_lock); 1268 1269 /* 1270 * To make sure we always have a valid LSN for the log tail we keep 1271 * track of the last LSN which was committed in log->l_last_sync_lsn, 1272 * and use that when the AIL was empty. 1273 */ 1274 lip = xfs_ail_min(mp->m_ail); 1275 if (lip) 1276 tail_lsn = lip->li_lsn; 1277 else 1278 tail_lsn = atomic64_read(&log->l_last_sync_lsn); 1279 trace_xfs_log_assign_tail_lsn(log, tail_lsn); 1280 atomic64_set(&log->l_tail_lsn, tail_lsn); 1281 return tail_lsn; 1282 } 1283 1284 xfs_lsn_t 1285 xlog_assign_tail_lsn( 1286 struct xfs_mount *mp) 1287 { 1288 xfs_lsn_t tail_lsn; 1289 1290 spin_lock(&mp->m_ail->ail_lock); 1291 tail_lsn = xlog_assign_tail_lsn_locked(mp); 1292 spin_unlock(&mp->m_ail->ail_lock); 1293 1294 return tail_lsn; 1295 } 1296 1297 /* 1298 * Return the space in the log between the tail and the head. The head 1299 * is passed in the cycle/bytes formal parms. In the special case where 1300 * the reserve head has wrapped passed the tail, this calculation is no 1301 * longer valid. In this case, just return 0 which means there is no space 1302 * in the log. This works for all places where this function is called 1303 * with the reserve head. Of course, if the write head were to ever 1304 * wrap the tail, we should blow up. Rather than catch this case here, 1305 * we depend on other ASSERTions in other parts of the code. XXXmiken 1306 * 1307 * If reservation head is behind the tail, we have a problem. Warn about it, 1308 * but then treat it as if the log is empty. 1309 * 1310 * If the log is shut down, the head and tail may be invalid or out of whack, so 1311 * shortcut invalidity asserts in this case so that we don't trigger them 1312 * falsely. 1313 */ 1314 STATIC int 1315 xlog_space_left( 1316 struct xlog *log, 1317 atomic64_t *head) 1318 { 1319 int tail_bytes; 1320 int tail_cycle; 1321 int head_cycle; 1322 int head_bytes; 1323 1324 xlog_crack_grant_head(head, &head_cycle, &head_bytes); 1325 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); 1326 tail_bytes = BBTOB(tail_bytes); 1327 if (tail_cycle == head_cycle && head_bytes >= tail_bytes) 1328 return log->l_logsize - (head_bytes - tail_bytes); 1329 if (tail_cycle + 1 < head_cycle) 1330 return 0; 1331 1332 /* Ignore potential inconsistency when shutdown. */ 1333 if (xlog_is_shutdown(log)) 1334 return log->l_logsize; 1335 1336 if (tail_cycle < head_cycle) { 1337 ASSERT(tail_cycle == (head_cycle - 1)); 1338 return tail_bytes - head_bytes; 1339 } 1340 1341 /* 1342 * The reservation head is behind the tail. In this case we just want to 1343 * return the size of the log as the amount of space left. 
STATIC int
xlog_space_left(
	struct xlog	*log,
	atomic64_t	*head)
{
	int		tail_bytes;
	int		tail_cycle;
	int		head_cycle;
	int		head_bytes;

	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
	tail_bytes = BBTOB(tail_bytes);
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		return log->l_logsize - (head_bytes - tail_bytes);
	if (tail_cycle + 1 < head_cycle)
		return 0;

	/* Ignore potential inconsistency when shutdown. */
	if (xlog_is_shutdown(log))
		return log->l_logsize;

	if (tail_cycle < head_cycle) {
		ASSERT(tail_cycle == (head_cycle - 1));
		return tail_bytes - head_bytes;
	}

	/*
	 * The reservation head is behind the tail. In this case we just want
	 * to return the size of the log as the amount of space left.
	 */
	xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
	xfs_alert(log->l_mp, "  tail_cycle = %d, tail_bytes = %d",
		  tail_cycle, tail_bytes);
	xfs_alert(log->l_mp, "  GH cycle = %d, GH bytes = %d",
		  head_cycle, head_bytes);
	ASSERT(0);
	return log->l_logsize;
}


static void
xlog_ioend_work(
	struct work_struct	*work)
{
	struct xlog_in_core	*iclog =
		container_of(work, struct xlog_in_core, ic_end_io_work);
	struct xlog		*log = iclog->ic_log;
	int			error;

	error = blk_status_to_errno(iclog->ic_bio.bi_status);
#ifdef DEBUG
	/* treat writes with injected CRC errors as failed */
	if (iclog->ic_fail_crc)
		error = -EIO;
#endif

	/*
	 * Race to shutdown the filesystem if we see an error.
	 */
	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
		xfs_alert(log->l_mp, "log I/O error %d", error);
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
	}

	xlog_state_done_syncing(iclog);
	bio_uninit(&iclog->ic_bio);

	/*
	 * Drop the lock to signal that we are done. Nothing references the
	 * iclog after this, so an unmount waiting on this lock can now tear it
	 * down safely. As such, it is unsafe to reference the iclog after the
	 * unlock as we could race with it being freed.
	 */
	up(&iclog->ic_sema);
}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */
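/*
 * With the default 32 kB buffers a single header (one basic block) covers
 * the whole iclog, so l_iclog_heads = 1 and l_iclog_hsize = 512 bytes.  A
 * 256 kB buffer needs 8 headers, i.e. 4 kB of header space.
 */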
STATIC void
xlog_get_iclog_buffer_size(
	struct xfs_mount	*mp,
	struct xlog		*log)
{
	if (mp->m_logbufs <= 0)
		mp->m_logbufs = XLOG_MAX_ICLOGS;
	if (mp->m_logbsize <= 0)
		mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;

	log->l_iclog_bufs = mp->m_logbufs;
	log->l_iclog_size = mp->m_logbsize;

	/*
	 * # headers = size / 32k - one header holds cycles from 32k of data.
	 */
	log->l_iclog_heads =
		DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
	log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
}

void
xfs_log_work_queue(
	struct xfs_mount	*mp)
{
	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
			   msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Clear the log incompat flags if we have the opportunity.
 *
 * This only happens if we're about to log the second dummy transaction as part
 * of covering the log and we can get the log incompat feature usage lock.
 */
static inline void
xlog_clear_incompat(
	struct xlog		*log)
{
	struct xfs_mount	*mp = log->l_mp;

	if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
				XFS_SB_FEAT_INCOMPAT_LOG_ALL))
		return;

	if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
		return;

	if (!down_write_trylock(&log->l_incompat_users))
		return;

	xfs_clear_incompat_log_features(mp);
	up_write(&log->l_incompat_users);
}

/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
static void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	/* dgc: errors ignored - not fatal and nowhere to report them */
	if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
		/*
		 * Dump a transaction into the log that contains no real change.
		 * This is needed to stamp the current tail LSN into the log
		 * during the covering operation.
		 *
		 * We cannot use an inode here for this - that will push dirty
		 * state back up into the VFS and then periodic inode flushing
		 * will prevent log covering from making progress. Hence we
		 * synchronously log the superblock instead to ensure the
		 * superblock is immediately unpinned and can be written back.
		 */
		xlog_clear_incompat(log);
		xfs_sync_sb(mp, true);
	} else
		xfs_log_force(mp, 0);

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);

	/* queue us up again */
	xfs_log_work_queue(mp);
}

/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur. However,
 * some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{
	struct xlog		*log;
	xlog_rec_header_t	*head;
	xlog_in_core_t		**iclogp;
	xlog_in_core_t		*iclog, *prev_iclog = NULL;
	int			i;
	int			error = -ENOMEM;
	uint			log2_size = 0;

	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp	   = mp;
	log->l_targ	   = log_target;
	log->l_logsize     = BBTOB(num_bblks);
	log->l_logBBstart  = blk_offset;
	log->l_logBBsize   = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);

	log->l_prev_block  = -1;
	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */

	if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
		log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
	else
		log->l_iclog_roundoff = BBSIZE;

	xlog_grant_head_init(&log->l_reserve_head);
	xlog_grant_head_init(&log->l_write_head);

	error = -EFSCORRUPTED;
	if (xfs_has_sector(mp)) {
		log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				 log2_size, BBSHIFT);
			goto out_free_log;
		}

		log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				 log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* for larger sector sizes, must have v2 or external log */
		if (log2_size && log->l_logBBstart > 0 &&
		    !xfs_has_logv2(mp)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				 log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	init_rwsem(&log->l_incompat_users);

	xlog_get_iclog_buffer_size(mp, log);

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	iclogp = &log->l_iclog;
	/*
	 * The amount of memory to allocate for the iclog structure is
	 * rather funky due to the way the structure is defined.  It is
	 * done this way so that we can use different sizes for machines
	 * with different amounts of memory.  See the definition of
	 * xlog_in_core_t in xfs_log_priv.h for details.
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i = 0; i < log->l_iclog_bufs; i++) {
		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
				sizeof(struct bio_vec);

		iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
		if (!iclog)
			goto out_free_iclog;

		*iclogp = iclog;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		iclog->ic_data = kvzalloc(log->l_iclog_size,
				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
		if (!iclog->ic_data)
			goto out_free_iclog;
#ifdef DEBUG
		log->l_iclog_bak[i] = &iclog->ic_header;
#endif
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		head->h_version = cpu_to_be32(
			xfs_has_logv2(log->l_mp) ? 2 : 1);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		INIT_LIST_HEAD(&iclog->ic_callbacks);
		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);
		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
		sema_init(&iclog->ic_sema, 1);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
				    WQ_HIGHPRI),
			0, mp->m_super->s_id);
	if (!log->l_ioend_workqueue)
		goto out_free_iclog;

	error = xlog_cil_init(log);
	if (error)
		goto out_destroy_workqueue;
	return log;

out_destroy_workqueue:
	destroy_workqueue(log->l_ioend_workqueue);
out_free_iclog:
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		kmem_free(iclog->ic_data);
		kmem_free(iclog);
		if (prev_iclog == log->l_iclog)
			break;
	}
out_free_log:
	kmem_free(log);
out:
	return ERR_PTR(error);
}	/* xlog_alloc_log */

/*
 * Compute the LSN that we'd need to push the log tail towards in order to have
 * (a) enough on-disk log space to log the number of bytes specified, (b) at
 * least 25% of the log space free, and (c) at least 256 blocks free.  If the
 * log free space already meets all three thresholds, this function returns
 * NULLCOMMITLSN.
 */
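/*
 * Worked example: for a 100 MB log (l_logBBsize = 204800 basic blocks) and
 * a 64 kB reservation, free_threshold = max(128, 51200, 256) = 51200
 * blocks, i.e. the AIL is pushed whenever less than a quarter of the log is
 * free.
 */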
xfs_lsn_t
xlog_grant_push_threshold(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn = 0;
	xfs_lsn_t	last_sync_lsn;
	int		free_blocks;
	int		free_bytes;
	int		threshold_block;
	int		threshold_cycle;
	int		free_threshold;

	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);

	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
	free_blocks = BTOBBT(free_bytes);

	/*
	 * Set the threshold for the minimum number of free blocks in the
	 * log to the maximum of what the caller needs, one quarter of the
	 * log, and 256 blocks.
	 */
	free_threshold = BTOBB(need_bytes);
	free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
	free_threshold = max(free_threshold, 256);
	if (free_blocks >= free_threshold)
		return NULLCOMMITLSN;

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
			      &threshold_block);
	threshold_block += free_threshold;
	if (threshold_block >= log->l_logBBsize) {
		threshold_block -= log->l_logBBsize;
		threshold_cycle += 1;
	}
	threshold_lsn = xlog_assign_lsn(threshold_cycle,
					threshold_block);
	/*
	 * Don't pass in an lsn greater than the lsn of the last
	 * log record known to be on disk. Use a snapshot of the last sync lsn
	 * so that it doesn't change between the compare and the set.
	 */
	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
		threshold_lsn = last_sync_lsn;

	return threshold_lsn;
}

/*
 * Push the tail of the log if we need to do so to maintain the free log space
 * thresholds set out by xlog_grant_push_threshold. We may need to adopt a
 * policy which pushes on an lsn which is further along in the log once we
 * reach the high water mark. In this manner, we would be creating a low water
 * mark.
 */
STATIC void
xlog_grant_push_ail(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn;

	threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
	if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
		return;

	/*
	 * Get the transaction layer to kick the dirty buffers out to
	 * disk asynchronously. No point in trying to do this if
	 * the filesystem is shutting down.
	 */
	xfs_ail_push(log->l_ailp, threshold_lsn);
}

/*
 * Stamp cycle number in every block
 */
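/*
 * Example: each log record header carries the original first word of 64
 * basic blocks (32 kB) of data, so a v2 log with 64 kB iclogs uses two
 * headers - cycle words for blocks 0-63 live in h_cycle_data of the main
 * header and those for blocks 64-127 in the extended header's
 * xh_cycle_data.  The j/k arithmetic below picks the header and slot for
 * basic block i.
 */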
STATIC void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	__be32			cycle_lsn;
	char			*dp;

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size); i++) {
		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
			break;
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (xfs_has_logv2(log->l_mp)) {
		xlog_in_core_2_t *xhdr = iclog->ic_data;

		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++)
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
	}
}

/*
 * Calculate the checksum for a log buffer.
 *
 * This is a little more complicated than it should be because the various
 * headers and the actual data are non-contiguous.
 */
__le32
xlog_cksum(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{
	uint32_t		crc;

	/* first generate the crc for the record header ... */
	crc = xfs_start_cksum_update((char *)rhead,
			      sizeof(struct xlog_rec_header),
			      offsetof(struct xlog_rec_header, h_crc));

	/* ... then for additional cycle data for v2 logs ... */
	if (xfs_has_logv2(log->l_mp)) {
		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
		int		i;
		int		xheads;

		xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);

		for (i = 1; i < xheads; i++) {
			crc = crc32c(crc, &xhdr[i].hic_xheader,
				     sizeof(struct xlog_rec_ext_header));
		}
	}

	/* ... and finally for the payload */
	crc = crc32c(crc, dp, size);

	return xfs_end_cksum(crc);
}

/* Completion runs in bio context; punt the real work to the log's ioend workqueue. */
static void
xlog_bio_end_io(
	struct bio		*bio)
{
	struct xlog_in_core	*iclog = bio->bi_private;

	queue_work(iclog->ic_log->l_ioend_workqueue,
		   &iclog->ic_end_io_work);
}

/* Map the (possibly vmalloc'd) iclog data buffer into the bio, page by page. */
static int
xlog_map_iclog_data(
	struct bio		*bio,
	void			*data,
	size_t			count)
{
	do {
		struct page	*page = kmem_to_page(data);
		unsigned int	off = offset_in_page(data);
		size_t		len = min_t(size_t, count, PAGE_SIZE - off);

		if (bio_add_page(bio, page, len, off) != len)
			return -EIO;

		data += len;
		count -= len;
	} while (count);

	return 0;
}

STATIC void
xlog_write_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint64_t		bno,
	unsigned int		count)
{
	ASSERT(bno < log->l_logBBsize);
	trace_xlog_iclog_write(iclog, _RET_IP_);

	/*
	 * We lock the iclogbufs here so that we can serialise against I/O
	 * completion during unmount.  We might be processing a shutdown
	 * triggered during unmount, and that can occur asynchronously to the
	 * unmount thread, and hence we need to ensure that completes before
	 * tearing down the iclogbufs.  Hence we need to hold the buffer lock
	 * across the log IO to achieve that.
	 */
	down(&iclog->ic_sema);
	if (xlog_is_shutdown(log)) {
		/*
		 * It would seem logical to return EIO here, but we rely on
		 * the log state machine to propagate I/O errors instead of
		 * doing it here.  We kick off the state machine and unlock
		 * the buffer manually, the code needs to be kept in sync
		 * with the I/O completion path.
		 */
		xlog_state_done_syncing(iclog);
		up(&iclog->ic_sema);
		return;
	}

	/*
	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer that there
	 * are more IOs coming immediately after this one. This prevents the
	 * block layer writeback throttle from throttling log writes behind
	 * background metadata writeback and causing priority inversions.
	 */
	bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
		 howmany(count, PAGE_SIZE),
		 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
	iclog->ic_bio.bi_private = iclog;

	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
		/*
		 * For external log devices, we also need to flush the data
		 * device cache first to ensure all metadata writeback covered
		 * by the LSN in this iclog is on stable storage. This is slow,
		 * but it *must* complete before we issue the external log IO.
		 */
		if (log->l_targ != log->l_mp->m_ddev_targp)
			blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
	}
	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
		iclog->ic_bio.bi_opf |= REQ_FUA;

	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);

	if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
		return;
	}
	if (is_vmalloc_addr(iclog->ic_data))
		flush_kernel_vmap_range(iclog->ic_data, count);

	/*
	 * If this log buffer would straddle the end of the log we will have
	 * to split it up into two bios, so that we can continue at the start.
	 */
	if (bno + BTOBB(count) > log->l_logBBsize) {
		struct bio *split;

		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
				  GFP_NOIO, &fs_bio_set);
		bio_chain(split, &iclog->ic_bio);
		submit_bio(split);

		/* restart at logical offset zero for the remainder */
		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
	}

	submit_bio(&iclog->ic_bio);
}

/*
 * We need to bump cycle number for the part of the iclog that is
 * written to the start of the log. Watch out for the header magic
 * number case, though.
 */
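/*
 * Example: if an iclog stamped with cycle 7 wraps, the basic blocks that
 * land at the physical start of the log belong to cycle 8, so their cycle
 * words are incremented here; a bumped value that would collide with
 * XLOG_HEADER_MAGIC_NUM is skipped over.
 */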
1940 /* 1941 * We need to bump the cycle number for the part of the iclog that is 1942 * written to the start of the log. Watch out for the header magic 1943 * number case, though. 1944 */ 1945 static void 1946 xlog_split_iclog( 1947 struct xlog *log, 1948 void *data, 1949 uint64_t bno, 1950 unsigned int count) 1951 { 1952 unsigned int split_offset = BBTOB(log->l_logBBsize - bno); 1953 unsigned int i; 1954 1955 for (i = split_offset; i < count; i += BBSIZE) { 1956 uint32_t cycle = get_unaligned_be32(data + i); 1957 1958 if (++cycle == XLOG_HEADER_MAGIC_NUM) 1959 cycle++; 1960 put_unaligned_be32(cycle, data + i); 1961 } 1962 } 1963 1964 static int 1965 xlog_calc_iclog_size( 1966 struct xlog *log, 1967 struct xlog_in_core *iclog, 1968 uint32_t *roundoff) 1969 { 1970 uint32_t count_init, count; 1971 1972 /* Add for LR header */ 1973 count_init = log->l_iclog_hsize + iclog->ic_offset; 1974 count = roundup(count_init, log->l_iclog_roundoff); 1975 1976 *roundoff = count - count_init; 1977 1978 ASSERT(count >= count_init); 1979 ASSERT(*roundoff < log->l_iclog_roundoff); 1980 return count; 1981 } 1982 1983 /* 1984 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous 1985 * fashion. Before this call, the current iclog ptr in the log should already 1986 * have been moved to point to the next available iclog. This allows further 1987 * writes to continue while this code syncs out an iclog ready to go. 1988 * Before an in-core log can be written out, the data section must be scanned 1989 * to save away the 1st word of each BBSIZE block into the header. We replace 1990 * it with the current cycle count. Each BBSIZE block is tagged with the 1991 * cycle count because there is an implicit assumption that drives will 1992 * guarantee that entire 512 byte blocks get written at once. In other words, 1993 * we can't have part of a 512 byte block written and part not written. By 1994 * tagging each block, we will know which blocks are valid when recovering 1995 * after an unclean shutdown. 1996 * 1997 * This routine is single threaded on the iclog. No other thread can be in 1998 * this routine with the same iclog. Changing the contents of the iclog can 1999 * therefore be done without grabbing the state machine lock. Updating the global 2000 * log will require grabbing the lock though. 2001 * 2002 * The entire log manager uses a logical block numbering scheme. Only 2003 * xlog_write_iclog knows about the fact that the log may not start with 2004 * block zero on a given device. 2005 */ 2006 STATIC void 2007 xlog_sync( 2008 struct xlog *log, 2009 struct xlog_in_core *iclog) 2010 { 2011 unsigned int count; /* byte count of bwrite */ 2012 unsigned int roundoff; /* roundoff to BB or stripe */ 2013 uint64_t bno; 2014 unsigned int size; 2015 2016 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 2017 trace_xlog_iclog_sync(iclog, _RET_IP_); 2018 2019 count = xlog_calc_iclog_size(log, iclog, &roundoff); 2020 2021 /* move grant heads by roundoff in sync */ 2022 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); 2023 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); 2024 2025 /* put cycle number in every block */ 2026 xlog_pack_data(log, iclog, roundoff); 2027 2028 /* real byte length */ 2029 size = iclog->ic_offset; 2030 if (xfs_has_logv2(log->l_mp)) 2031 size += roundoff; 2032 iclog->ic_header.h_len = cpu_to_be32(size); 2033 2034 XFS_STATS_INC(log->l_mp, xs_log_writes); 2035 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); 2036 2037 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)); 2038 2039 /* Do we need to split this write into 2 parts?
*/ 2040 if (bno + BTOBB(count) > log->l_logBBsize) 2041 xlog_split_iclog(log, &iclog->ic_header, bno, count); 2042 2043 /* calculate the checksum */ 2044 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, 2045 iclog->ic_datap, size); 2046 /* 2047 * Intentionally corrupt the log record CRC based on the error injection 2048 * frequency, if defined. This facilitates testing log recovery in the 2049 * event of torn writes. Hence, set the IOABORT state to abort the log 2050 * write on I/O completion and shutdown the fs. The subsequent mount 2051 * detects the bad CRC and attempts to recover. 2052 */ 2053 #ifdef DEBUG 2054 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { 2055 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA); 2056 iclog->ic_fail_crc = true; 2057 xfs_warn(log->l_mp, 2058 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.", 2059 be64_to_cpu(iclog->ic_header.h_lsn)); 2060 } 2061 #endif 2062 xlog_verify_iclog(log, iclog, count); 2063 xlog_write_iclog(log, iclog, bno, count); 2064 } 2065 2066 /* 2067 * Deallocate a log structure 2068 */ 2069 STATIC void 2070 xlog_dealloc_log( 2071 struct xlog *log) 2072 { 2073 xlog_in_core_t *iclog, *next_iclog; 2074 int i; 2075 2076 xlog_cil_destroy(log); 2077 2078 /* 2079 * Cycle all the iclogbuf locks to make sure all log IO completion 2080 * is done before we tear down these buffers. 2081 */ 2082 iclog = log->l_iclog; 2083 for (i = 0; i < log->l_iclog_bufs; i++) { 2084 down(&iclog->ic_sema); 2085 up(&iclog->ic_sema); 2086 iclog = iclog->ic_next; 2087 } 2088 2089 iclog = log->l_iclog; 2090 for (i = 0; i < log->l_iclog_bufs; i++) { 2091 next_iclog = iclog->ic_next; 2092 kmem_free(iclog->ic_data); 2093 kmem_free(iclog); 2094 iclog = next_iclog; 2095 } 2096 2097 log->l_mp->m_log = NULL; 2098 destroy_workqueue(log->l_ioend_workqueue); 2099 kmem_free(log); 2100 } 2101 2102 /* 2103 * Update counters atomically now that the memcpy is done.
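 */

/*
 * Aside on the teardown above: xlog_dealloc_log() takes and immediately
 * releases each iclog's ic_sema purely to drain I/O, because down() cannot
 * succeed until the completion path (which holds the semaphore across the
 * log write) has called up(). A minimal sketch of the idiom:
 */
static inline void
xlog_example_drain_iclog_io(
	struct xlog_in_core	*iclog)
{
	down(&iclog->ic_sema);	/* blocks until in-flight log I/O completes */
	up(&iclog->ic_sema);	/* we never needed to hold it, only to wait */
}

/*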
2104 */ 2105 static inline void 2106 xlog_state_finish_copy( 2107 struct xlog *log, 2108 struct xlog_in_core *iclog, 2109 int record_cnt, 2110 int copy_bytes) 2111 { 2112 lockdep_assert_held(&log->l_icloglock); 2113 2114 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); 2115 iclog->ic_offset += copy_bytes; 2116 } 2117 2118 /* 2119 * print out info relating to regions written which consume 2120 * the reservation 2121 */ 2122 void 2123 xlog_print_tic_res( 2124 struct xfs_mount *mp, 2125 struct xlog_ticket *ticket) 2126 { 2127 uint i; 2128 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); 2129 2130 /* match with XLOG_REG_TYPE_* in xfs_log.h */ 2131 #define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str 2132 static char *res_type_str[] = { 2133 REG_TYPE_STR(BFORMAT, "bformat"), 2134 REG_TYPE_STR(BCHUNK, "bchunk"), 2135 REG_TYPE_STR(EFI_FORMAT, "efi_format"), 2136 REG_TYPE_STR(EFD_FORMAT, "efd_format"), 2137 REG_TYPE_STR(IFORMAT, "iformat"), 2138 REG_TYPE_STR(ICORE, "icore"), 2139 REG_TYPE_STR(IEXT, "iext"), 2140 REG_TYPE_STR(IBROOT, "ibroot"), 2141 REG_TYPE_STR(ILOCAL, "ilocal"), 2142 REG_TYPE_STR(IATTR_EXT, "iattr_ext"), 2143 REG_TYPE_STR(IATTR_BROOT, "iattr_broot"), 2144 REG_TYPE_STR(IATTR_LOCAL, "iattr_local"), 2145 REG_TYPE_STR(QFORMAT, "qformat"), 2146 REG_TYPE_STR(DQUOT, "dquot"), 2147 REG_TYPE_STR(QUOTAOFF, "quotaoff"), 2148 REG_TYPE_STR(LRHEADER, "LR header"), 2149 REG_TYPE_STR(UNMOUNT, "unmount"), 2150 REG_TYPE_STR(COMMIT, "commit"), 2151 REG_TYPE_STR(TRANSHDR, "trans header"), 2152 REG_TYPE_STR(ICREATE, "inode create"), 2153 REG_TYPE_STR(RUI_FORMAT, "rui_format"), 2154 REG_TYPE_STR(RUD_FORMAT, "rud_format"), 2155 REG_TYPE_STR(CUI_FORMAT, "cui_format"), 2156 REG_TYPE_STR(CUD_FORMAT, "cud_format"), 2157 REG_TYPE_STR(BUI_FORMAT, "bui_format"), 2158 REG_TYPE_STR(BUD_FORMAT, "bud_format"), 2159 }; 2160 BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1); 2161 #undef REG_TYPE_STR 2162 2163 xfs_warn(mp, "ticket reservation summary:"); 2164 xfs_warn(mp, " unit res = %d bytes", 2165 ticket->t_unit_res); 2166 xfs_warn(mp, " current res = %d bytes", 2167 ticket->t_curr_res); 2168 xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)", 2169 ticket->t_res_arr_sum, ticket->t_res_o_flow); 2170 xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)", 2171 ticket->t_res_num_ophdrs, ophdr_spc); 2172 xfs_warn(mp, " ophdr + reg = %u bytes", 2173 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc); 2174 xfs_warn(mp, " num regions = %u", 2175 ticket->t_res_num); 2176 2177 for (i = 0; i < ticket->t_res_num; i++) { 2178 uint r_type = ticket->t_res_arr[i].r_type; 2179 xfs_warn(mp, "region[%u]: %s - %u bytes", i, 2180 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 2181 "bad-rtype" : res_type_str[r_type]), 2182 ticket->t_res_arr[i].r_len); 2183 } 2184 } 2185 2186 /* 2187 * Print a summary of the transaction. 
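 */

/*
 * A note on the table technique used by xlog_print_tic_res() above: the
 * designated initializers key each string by its XLOG_REG_TYPE_* value and
 * BUILD_BUG_ON() proves at compile time that no type is missing. The same
 * idiom in miniature, with purely hypothetical names:
 */
enum xlog_example_kind { XLOG_EX_FIRST, XLOG_EX_SECOND, XLOG_EX_KIND_MAX };

static inline const char *
xlog_example_kind_name(
	enum xlog_example_kind	kind)
{
	static const char *names[] = {
		[XLOG_EX_FIRST] = "first",
		[XLOG_EX_SECOND] = "second",
	};
	BUILD_BUG_ON(ARRAY_SIZE(names) != XLOG_EX_KIND_MAX);

	/* range check before indexing, as the region dump loop does */
	return kind >= XLOG_EX_KIND_MAX ? "bad-kind" : names[kind];
}

/*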
2188 */ 2189 void 2190 xlog_print_trans( 2191 struct xfs_trans *tp) 2192 { 2193 struct xfs_mount *mp = tp->t_mountp; 2194 struct xfs_log_item *lip; 2195 2196 /* dump core transaction and ticket info */ 2197 xfs_warn(mp, "transaction summary:"); 2198 xfs_warn(mp, " log res = %d", tp->t_log_res); 2199 xfs_warn(mp, " log count = %d", tp->t_log_count); 2200 xfs_warn(mp, " flags = 0x%x", tp->t_flags); 2201 2202 xlog_print_tic_res(mp, tp->t_ticket); 2203 2204 /* dump each log item */ 2205 list_for_each_entry(lip, &tp->t_items, li_trans) { 2206 struct xfs_log_vec *lv = lip->li_lv; 2207 struct xfs_log_iovec *vec; 2208 int i; 2209 2210 xfs_warn(mp, "log item: "); 2211 xfs_warn(mp, " type = 0x%x", lip->li_type); 2212 xfs_warn(mp, " flags = 0x%lx", lip->li_flags); 2213 if (!lv) 2214 continue; 2215 xfs_warn(mp, " niovecs = %d", lv->lv_niovecs); 2216 xfs_warn(mp, " size = %d", lv->lv_size); 2217 xfs_warn(mp, " bytes = %d", lv->lv_bytes); 2218 xfs_warn(mp, " buf len = %d", lv->lv_buf_len); 2219 2220 /* dump each iovec for the log item */ 2221 vec = lv->lv_iovecp; 2222 for (i = 0; i < lv->lv_niovecs; i++) { 2223 int dumplen = min(vec->i_len, 32); 2224 2225 xfs_warn(mp, " iovec[%d]", i); 2226 xfs_warn(mp, " type = 0x%x", vec->i_type); 2227 xfs_warn(mp, " len = %d", vec->i_len); 2228 xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i); 2229 xfs_hex_dump(vec->i_addr, dumplen); 2230 2231 vec++; 2232 } 2233 } 2234 } 2235 2236 /* 2237 * Calculate the potential space needed by the log vector. We may need a start 2238 * record, and each region gets its own struct xlog_op_header and may need to be 2239 * double word aligned. 2240 */ 2241 static int 2242 xlog_write_calc_vec_length( 2243 struct xlog_ticket *ticket, 2244 struct xfs_log_vec *log_vector, 2245 uint optype) 2246 { 2247 struct xfs_log_vec *lv; 2248 int headers = 0; 2249 int len = 0; 2250 int i; 2251 2252 if (optype & XLOG_START_TRANS) 2253 headers++; 2254 2255 for (lv = log_vector; lv; lv = lv->lv_next) { 2256 /* we don't write ordered log vectors */ 2257 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) 2258 continue; 2259 2260 headers += lv->lv_niovecs; 2261 2262 for (i = 0; i < lv->lv_niovecs; i++) { 2263 struct xfs_log_iovec *vecp = &lv->lv_iovecp[i]; 2264 2265 len += vecp->i_len; 2266 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type); 2267 } 2268 } 2269 2270 ticket->t_res_num_ophdrs += headers; 2271 len += headers * sizeof(struct xlog_op_header); 2272 2273 return len; 2274 } 2275 2276 static void 2277 xlog_write_start_rec( 2278 struct xlog_op_header *ophdr, 2279 struct xlog_ticket *ticket) 2280 { 2281 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2282 ophdr->oh_clientid = ticket->t_clientid; 2283 ophdr->oh_len = 0; 2284 ophdr->oh_flags = XLOG_START_TRANS; 2285 ophdr->oh_res2 = 0; 2286 } 2287 2288 static xlog_op_header_t * 2289 xlog_write_setup_ophdr( 2290 struct xlog *log, 2291 struct xlog_op_header *ophdr, 2292 struct xlog_ticket *ticket, 2293 uint flags) 2294 { 2295 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2296 ophdr->oh_clientid = ticket->t_clientid; 2297 ophdr->oh_res2 = 0; 2298 2299 /* are we copying a commit or unmount record? */ 2300 ophdr->oh_flags = flags; 2301 2302 /* 2303 * We've seen logs corrupted with bad transaction client ids. This 2304 * check makes sure that XFS doesn't write them out. Turn a bad client 2305 * id into an EIO and shut down the filesystem.
2306 */ 2307 switch (ophdr->oh_clientid) { 2308 case XFS_TRANSACTION: 2309 case XFS_VOLUME: 2310 case XFS_LOG: 2311 break; 2312 default: 2313 xfs_warn(log->l_mp, 2314 "Bad XFS transaction clientid 0x%x in ticket "PTR_FMT, 2315 ophdr->oh_clientid, ticket); 2316 return NULL; 2317 } 2318 2319 return ophdr; 2320 } 2321 2322 /* 2323 * Set up the parameters of the region copy into the log. This has 2324 * to handle region writes split across multiple log buffers - this 2325 * state is kept external to this function so that this code can 2326 * be written in an obvious, self-documenting manner. 2327 */ 2328 static int 2329 xlog_write_setup_copy( 2330 struct xlog_ticket *ticket, 2331 struct xlog_op_header *ophdr, 2332 int space_available, 2333 int space_required, 2334 int *copy_off, 2335 int *copy_len, 2336 int *last_was_partial_copy, 2337 int *bytes_consumed) 2338 { 2339 int still_to_copy; 2340 2341 still_to_copy = space_required - *bytes_consumed; 2342 *copy_off = *bytes_consumed; 2343 2344 if (still_to_copy <= space_available) { 2345 /* write of region completes here */ 2346 *copy_len = still_to_copy; 2347 ophdr->oh_len = cpu_to_be32(*copy_len); 2348 if (*last_was_partial_copy) 2349 ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); 2350 *last_was_partial_copy = 0; 2351 *bytes_consumed = 0; 2352 return 0; 2353 } 2354 2355 /* partial write of region, needs extra log op header reservation */ 2356 *copy_len = space_available; 2357 ophdr->oh_len = cpu_to_be32(*copy_len); 2358 ophdr->oh_flags |= XLOG_CONTINUE_TRANS; 2359 if (*last_was_partial_copy) 2360 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS; 2361 *bytes_consumed += *copy_len; 2362 (*last_was_partial_copy)++; 2363 2364 /* account for new log op header */ 2365 ticket->t_curr_res -= sizeof(struct xlog_op_header); 2366 ticket->t_res_num_ophdrs++; 2367 2368 return sizeof(struct xlog_op_header); 2369 } 2370 2371 static int 2372 xlog_write_copy_finish( 2373 struct xlog *log, 2374 struct xlog_in_core *iclog, 2375 uint flags, 2376 int *record_cnt, 2377 int *data_cnt, 2378 int *partial_copy, 2379 int *partial_copy_len, 2380 int log_offset) 2381 { 2382 int error; 2383 2384 if (*partial_copy) { 2385 /* 2386 * This iclog has already been marked WANT_SYNC by 2387 * xlog_state_get_iclog_space. 2388 */ 2389 spin_lock(&log->l_icloglock); 2390 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2391 *record_cnt = 0; 2392 *data_cnt = 0; 2393 goto release_iclog; 2394 } 2395 2396 *partial_copy = 0; 2397 *partial_copy_len = 0; 2398 2399 if (iclog->ic_size - log_offset > sizeof(xlog_op_header_t)) 2400 return 0; 2401 2402 /* no more space in this iclog - push it. */ 2403 spin_lock(&log->l_icloglock); 2404 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2405 *record_cnt = 0; 2406 *data_cnt = 0; 2407 2408 if (iclog->ic_state == XLOG_STATE_ACTIVE) 2409 xlog_state_switch_iclogs(log, iclog, 0); 2410 else 2411 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || 2412 xlog_is_shutdown(log)); 2413 release_iclog: 2414 error = xlog_state_release_iclog(log, iclog, 0); 2415 spin_unlock(&log->l_icloglock); 2416 return error; 2417 } 2418 2419 /* 2420 * Write some region out to in-core log 2421 * 2422 * This will be called when writing externally provided regions or when 2423 * writing out a commit record for a given transaction. 2424 * 2425 * General algorithm: 2426 * 1. Find total length of this write. This may include adding to the 2427 * lengths passed in. 2428 * 2. Check whether we violate the ticket's reservation. 2429 * 3.
While writing to this iclog 2430 * A. Reserve as much space in this iclog as we can get 2431 * B. If this is first write, save away start lsn 2432 * C. While writing this region: 2433 * 1. If first write of transaction, write start record 2434 * 2. Write log operation header (header per region) 2435 * 3. Find out if we can fit entire region into this iclog 2436 * 4. Potentially, verify destination memcpy ptr 2437 * 5. Memcpy (partial) region 2438 * 6. If partial copy, release iclog; otherwise, continue 2439 * copying more regions into current iclog 2440 * 4. Mark want sync bit (in simulation mode) 2441 * 5. Release iclog for potential flush to on-disk log. 2442 * 2443 * ERRORS: 2444 * 1. Panic if reservation is overrun. This should never happen since 2445 * reservation amounts are generated internally by the filesystem. 2446 * NOTES: 2447 * 1. Tickets are single threaded data structures. 2448 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the 2449 * syncing routine. When a single log_write region needs to span 2450 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set 2451 * on all log operation writes which don't contain the end of the 2452 * region. The XLOG_END_TRANS bit is used for the in-core log 2453 * operation which contains the end of the continued log_write region. 2454 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, 2455 * we don't really know exactly how much space will be used. As a result, 2456 * we don't update ic_offset until the end when we know exactly how many 2457 * bytes have been written out. 2458 */ 2459 int 2460 xlog_write( 2461 struct xlog *log, 2462 struct xfs_cil_ctx *ctx, 2463 struct xfs_log_vec *log_vector, 2464 struct xlog_ticket *ticket, 2465 uint optype) 2466 { 2467 struct xlog_in_core *iclog = NULL; 2468 struct xfs_log_vec *lv = log_vector; 2469 struct xfs_log_iovec *vecp = lv->lv_iovecp; 2470 int index = 0; 2471 int len; 2472 int partial_copy = 0; 2473 int partial_copy_len = 0; 2474 int contwr = 0; 2475 int record_cnt = 0; 2476 int data_cnt = 0; 2477 int error = 0; 2478 2479 /* 2480 * If this is a commit or unmount transaction, we don't need a start 2481 * record to be written. We do, however, have to account for the 2482 * commit or unmount header that gets written. Hence we always have 2483 * to account for an extra xlog_op_header here. 2484 */ 2485 ticket->t_curr_res -= sizeof(struct xlog_op_header); 2486 if (ticket->t_curr_res < 0) { 2487 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 2488 "ctx ticket reservation ran out. Need to up reservation"); 2489 xlog_print_tic_res(log->l_mp, ticket); 2490 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); 2491 } 2492 2493 len = xlog_write_calc_vec_length(ticket, log_vector, optype); 2494 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2495 void *ptr; 2496 int log_offset; 2497 2498 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, 2499 &contwr, &log_offset); 2500 if (error) 2501 return error; 2502 2503 ASSERT(log_offset <= iclog->ic_size - 1); 2504 ptr = iclog->ic_datap + log_offset; 2505 2506 /* 2507 * If we have a context pointer, pass it the first iclog we are 2508 * writing to so it can record state needed for iclog write 2509 * ordering. 2510 */ 2511 if (ctx) { 2512 xlog_cil_set_ctx_write_state(ctx, iclog); 2513 ctx = NULL; 2514 } 2515 2516 /* 2517 * This loop writes out as many regions as can fit in the amount 2518 * of space which was allocated by xlog_state_get_iclog_space().
2519 */ 2520 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2521 struct xfs_log_iovec *reg; 2522 struct xlog_op_header *ophdr; 2523 int copy_len; 2524 int copy_off; 2525 bool ordered = false; 2526 bool wrote_start_rec = false; 2527 2528 /* ordered log vectors have no regions to write */ 2529 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { 2530 ASSERT(lv->lv_niovecs == 0); 2531 ordered = true; 2532 goto next_lv; 2533 } 2534 2535 reg = &vecp[index]; 2536 ASSERT(reg->i_len % sizeof(int32_t) == 0); 2537 ASSERT((unsigned long)ptr % sizeof(int32_t) == 0); 2538 2539 /* 2540 * Before we start formatting log vectors, we need to 2541 * write a start record. Only do this for the first 2542 * iclog we write to. 2543 */ 2544 if (optype & XLOG_START_TRANS) { 2545 xlog_write_start_rec(ptr, ticket); 2546 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2547 sizeof(struct xlog_op_header)); 2548 optype &= ~XLOG_START_TRANS; 2549 wrote_start_rec = true; 2550 } 2551 2552 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, optype); 2553 if (!ophdr) 2554 return -EIO; 2555 2556 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2557 sizeof(struct xlog_op_header)); 2558 2559 len += xlog_write_setup_copy(ticket, ophdr, 2560 iclog->ic_size-log_offset, 2561 reg->i_len, 2562 &copy_off, &copy_len, 2563 &partial_copy, 2564 &partial_copy_len); 2565 xlog_verify_dest_ptr(log, ptr); 2566 2567 /* 2568 * Copy region. 2569 * 2570 * Unmount records just log an opheader, so can have 2571 * empty payloads with no data region to copy. Hence we 2572 * only copy the payload if the vector says it has data 2573 * to copy. 2574 */ 2575 ASSERT(copy_len >= 0); 2576 if (copy_len > 0) { 2577 memcpy(ptr, reg->i_addr + copy_off, copy_len); 2578 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2579 copy_len); 2580 } 2581 copy_len += sizeof(struct xlog_op_header); 2582 record_cnt++; 2583 if (wrote_start_rec) { 2584 copy_len += sizeof(struct xlog_op_header); 2585 record_cnt++; 2586 } 2587 data_cnt += contwr ? copy_len : 0; 2588 2589 error = xlog_write_copy_finish(log, iclog, optype, 2590 &record_cnt, &data_cnt, 2591 &partial_copy, 2592 &partial_copy_len, 2593 log_offset); 2594 if (error) 2595 return error; 2596 2597 /* 2598 * if we had a partial copy, we need to get more iclog 2599 * space but we don't want to increment the region 2600 * index because there is still more in this region to 2601 * write. 2602 * 2603 * If we completed writing this region, and we flushed 2604 * the iclog (indicated by resetting of the record 2605 * count), then we also need to get more log space. If 2606 * this was the last record, though, we are done and 2607 * can just return.
2608 */ 2609 if (partial_copy) 2610 break; 2611 2612 if (++index == lv->lv_niovecs) { 2613 next_lv: 2614 lv = lv->lv_next; 2615 index = 0; 2616 if (lv) 2617 vecp = lv->lv_iovecp; 2618 } 2619 if (record_cnt == 0 && !ordered) { 2620 if (!lv) 2621 return 0; 2622 break; 2623 } 2624 } 2625 } 2626 2627 ASSERT(len == 0); 2628 2629 spin_lock(&log->l_icloglock); 2630 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); 2631 error = xlog_state_release_iclog(log, iclog, 0); 2632 spin_unlock(&log->l_icloglock); 2633 2634 return error; 2635 } 2636 2637 static void 2638 xlog_state_activate_iclog( 2639 struct xlog_in_core *iclog, 2640 int *iclogs_changed) 2641 { 2642 ASSERT(list_empty_careful(&iclog->ic_callbacks)); 2643 trace_xlog_iclog_activate(iclog, _RET_IP_); 2644 2645 /* 2646 * If the number of ops in this iclog indicates it just contains the 2647 * dummy transaction, we can change state into IDLE (the second time 2648 * around). Otherwise we should change the state into NEED (a dummy 2649 * record is still needed); we don't need to cover the dummy. 2650 */ 2651 if (*iclogs_changed == 0 && 2652 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) { 2653 *iclogs_changed = 1; 2654 } else { 2655 /* 2656 * We have two dirty iclogs, so start over. This could also be 2657 * because the number of ops indicates this is not the dummy going out. 2658 */ 2659 *iclogs_changed = 2; 2660 } 2661 2662 iclog->ic_state = XLOG_STATE_ACTIVE; 2663 iclog->ic_offset = 0; 2664 iclog->ic_header.h_num_logops = 0; 2665 memset(iclog->ic_header.h_cycle_data, 0, 2666 sizeof(iclog->ic_header.h_cycle_data)); 2667 iclog->ic_header.h_lsn = 0; 2668 iclog->ic_header.h_tail_lsn = 0; 2669 } 2670 2671 /* 2672 * Loop through all iclogs and mark all iclogs currently marked DIRTY as 2673 * ACTIVE after iclog I/O has completed. 2674 */ 2675 static void 2676 xlog_state_activate_iclogs( 2677 struct xlog *log, 2678 int *iclogs_changed) 2679 { 2680 struct xlog_in_core *iclog = log->l_iclog; 2681 2682 do { 2683 if (iclog->ic_state == XLOG_STATE_DIRTY) 2684 xlog_state_activate_iclog(iclog, iclogs_changed); 2685 /* 2686 * The ordering of marking iclogs ACTIVE must be maintained, so 2687 * an iclog doesn't become ACTIVE beyond one that is SYNCING. 2688 */ 2689 else if (iclog->ic_state != XLOG_STATE_ACTIVE) 2690 break; 2691 } while ((iclog = iclog->ic_next) != log->l_iclog); 2692 } 2693 2694 static int 2695 xlog_covered_state( 2696 int prev_state, 2697 int iclogs_changed) 2698 { 2699 /* 2700 * We go to NEED for any non-covering writes. We go to NEED2 if we just 2701 * wrote the first covering record (DONE). We go to IDLE if we just 2702 * wrote the second covering record (DONE2) and remain in IDLE until a 2703 * non-covering write occurs.
2704 */ 2705 switch (prev_state) { 2706 case XLOG_STATE_COVER_IDLE: 2707 if (iclogs_changed == 1) 2708 return XLOG_STATE_COVER_IDLE; 2709 fallthrough; 2710 case XLOG_STATE_COVER_NEED: 2711 case XLOG_STATE_COVER_NEED2: 2712 break; 2713 case XLOG_STATE_COVER_DONE: 2714 if (iclogs_changed == 1) 2715 return XLOG_STATE_COVER_NEED2; 2716 break; 2717 case XLOG_STATE_COVER_DONE2: 2718 if (iclogs_changed == 1) 2719 return XLOG_STATE_COVER_IDLE; 2720 break; 2721 default: 2722 ASSERT(0); 2723 } 2724 2725 return XLOG_STATE_COVER_NEED; 2726 } 2727 2728 STATIC void 2729 xlog_state_clean_iclog( 2730 struct xlog *log, 2731 struct xlog_in_core *dirty_iclog) 2732 { 2733 int iclogs_changed = 0; 2734 2735 trace_xlog_iclog_clean(dirty_iclog, _RET_IP_); 2736 2737 dirty_iclog->ic_state = XLOG_STATE_DIRTY; 2738 2739 xlog_state_activate_iclogs(log, &iclogs_changed); 2740 wake_up_all(&dirty_iclog->ic_force_wait); 2741 2742 if (iclogs_changed) { 2743 log->l_covered_state = xlog_covered_state(log->l_covered_state, 2744 iclogs_changed); 2745 } 2746 } 2747 2748 STATIC xfs_lsn_t 2749 xlog_get_lowest_lsn( 2750 struct xlog *log) 2751 { 2752 struct xlog_in_core *iclog = log->l_iclog; 2753 xfs_lsn_t lowest_lsn = 0, lsn; 2754 2755 do { 2756 if (iclog->ic_state == XLOG_STATE_ACTIVE || 2757 iclog->ic_state == XLOG_STATE_DIRTY) 2758 continue; 2759 2760 lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2761 if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0) 2762 lowest_lsn = lsn; 2763 } while ((iclog = iclog->ic_next) != log->l_iclog); 2764 2765 return lowest_lsn; 2766 } 2767 2768 /* 2769 * Completion of an iclog IO does not imply that a transaction has completed, as 2770 * transactions can be large enough to span many iclogs. We cannot change the 2771 * tail of the log half way through a transaction as this may be the only 2772 * transaction in the log and moving the tail to point to the middle of it 2773 * will prevent recovery from finding the start of the transaction. Hence we 2774 * should only update the last_sync_lsn if this iclog contains transaction 2775 * completion callbacks on it. 2776 * 2777 * We have to do this before we drop the icloglock to ensure we are the only one 2778 * that can update it. 2779 * 2780 * If we are moving the last_sync_lsn forwards, we also need to ensure we kick 2781 * the reservation grant head pushing. This is due to the fact that the push 2782 * target is bound by the current last_sync_lsn value. Hence if we have a large 2783 * amount of log space bound up in this committing transaction then the 2784 * last_sync_lsn value may be the limiting factor preventing tail pushing from 2785 * freeing space in the log. Hence once we've updated the last_sync_lsn we 2786 * should push the AIL to ensure the push target (and hence the grant head) is 2787 * no longer bound by the old log head location and can move forwards and make 2788 * progress again. 2789 */ 2790 static void 2791 xlog_state_set_callback( 2792 struct xlog *log, 2793 struct xlog_in_core *iclog, 2794 xfs_lsn_t header_lsn) 2795 { 2796 trace_xlog_iclog_callback(iclog, _RET_IP_); 2797 iclog->ic_state = XLOG_STATE_CALLBACK; 2798 2799 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), 2800 header_lsn) <= 0); 2801 2802 if (list_empty_careful(&iclog->ic_callbacks)) 2803 return; 2804 2805 atomic64_set(&log->l_last_sync_lsn, header_lsn); 2806 xlog_grant_push_ail(log, 0); 2807 } 2808 2809 /* 2810 * Return true if we need to stop processing, false to continue to the next 2811 * iclog.
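 */

/*
 * Aside: an LSN packs the cycle number into its upper 32 bits and the basic
 * block number into its lower 32 bits, which is why xlog_get_lowest_lsn()
 * above can order iclogs with XFS_LSN_CMP(). A plain-arithmetic sketch of
 * that comparison, illustrative only (the real helper is shared XFS code):
 */
static inline int
xlog_example_lsn_cmp(
	xfs_lsn_t	lsn1,
	xfs_lsn_t	lsn2)
{
	/* compare cycles first, then block numbers within a cycle */
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2) ? -1 : 1;
	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2) ? -1 : 1;
	return 0;
}

/*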
The caller will need to run callbacks if the iclog is returned in the 2812 * XLOG_STATE_CALLBACK state. 2813 */ 2814 static bool 2815 xlog_state_iodone_process_iclog( 2816 struct xlog *log, 2817 struct xlog_in_core *iclog) 2818 { 2819 xfs_lsn_t lowest_lsn; 2820 xfs_lsn_t header_lsn; 2821 2822 switch (iclog->ic_state) { 2823 case XLOG_STATE_ACTIVE: 2824 case XLOG_STATE_DIRTY: 2825 /* 2826 * Skip all iclogs in the ACTIVE & DIRTY states: 2827 */ 2828 return false; 2829 case XLOG_STATE_DONE_SYNC: 2830 /* 2831 * Now that we have an iclog that is in the DONE_SYNC state, do 2832 * one more check here to see if we have chased our tail around. 2833 * If this is not the lowest lsn iclog, then we will leave it 2834 * for another completion to process. 2835 */ 2836 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2837 lowest_lsn = xlog_get_lowest_lsn(log); 2838 if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0) 2839 return false; 2840 xlog_state_set_callback(log, iclog, header_lsn); 2841 return false; 2842 default: 2843 /* 2844 * Can only perform callbacks in order. Since this iclog is not 2845 * in the DONE_SYNC state, we skip the rest and just try to 2846 * clean up. 2847 */ 2848 return true; 2849 } 2850 } 2851 2852 /* 2853 * Loop over all the iclogs, running attached callbacks on them. Return true if 2854 * we ran any callbacks, indicating that we dropped the icloglock. We don't need 2855 * to handle transient shutdown state here at all because 2856 * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown 2857 * cleanup of the callbacks. 2858 */ 2859 static bool 2860 xlog_state_do_iclog_callbacks( 2861 struct xlog *log) 2862 __releases(&log->l_icloglock) 2863 __acquires(&log->l_icloglock) 2864 { 2865 struct xlog_in_core *first_iclog = log->l_iclog; 2866 struct xlog_in_core *iclog = first_iclog; 2867 bool ran_callback = false; 2868 2869 do { 2870 LIST_HEAD(cb_list); 2871 2872 if (xlog_state_iodone_process_iclog(log, iclog)) 2873 break; 2874 if (iclog->ic_state != XLOG_STATE_CALLBACK) { 2875 iclog = iclog->ic_next; 2876 continue; 2877 } 2878 list_splice_init(&iclog->ic_callbacks, &cb_list); 2879 spin_unlock(&log->l_icloglock); 2880 2881 trace_xlog_iclog_callbacks_start(iclog, _RET_IP_); 2882 xlog_cil_process_committed(&cb_list); 2883 trace_xlog_iclog_callbacks_done(iclog, _RET_IP_); 2884 ran_callback = true; 2885 2886 spin_lock(&log->l_icloglock); 2887 xlog_state_clean_iclog(log, iclog); 2888 iclog = iclog->ic_next; 2889 } while (iclog != first_iclog); 2890 2891 return ran_callback; 2892 } 2893 2894 2895 /* 2896 * Loop running iclog completion callbacks until there are no more iclogs in a 2897 * state that can run callbacks. 2898 */ 2899 STATIC void 2900 xlog_state_do_callback( 2901 struct xlog *log) 2902 { 2903 int flushcnt = 0; 2904 int repeats = 0; 2905 2906 spin_lock(&log->l_icloglock); 2907 while (xlog_state_do_iclog_callbacks(log)) { 2908 if (xlog_is_shutdown(log)) 2909 break; 2910 2911 if (++repeats > 5000) { 2912 flushcnt += repeats; 2913 repeats = 0; 2914 xfs_warn(log->l_mp, 2915 "%s: possible infinite loop (%d iterations)", 2916 __func__, flushcnt); 2917 } 2918 } 2919 2920 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE) 2921 wake_up_all(&log->l_flush_wait); 2922 2923 spin_unlock(&log->l_icloglock); 2924 } 2925 2926 2927 /* 2928 * Finish transitioning this iclog to the dirty state. 2929 * 2930 * Callbacks could take time, so they are done outside the scope of the 2931 * global state machine log lock. 
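 */

/*
 * A sketch of the pattern xlog_state_do_iclog_callbacks() above uses for
 * exactly that: splice the callback list to a private head while holding
 * the lock, drop the lock to run the (potentially slow) callbacks, then
 * retake it. Illustrative only; the real loop also handles state checks
 * and iclog cleanup.
 */
static inline void
xlog_example_run_callbacks_unlocked(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	LIST_HEAD(cb_list);

	spin_lock(&log->l_icloglock);
	list_splice_init(&iclog->ic_callbacks, &cb_list);
	spin_unlock(&log->l_icloglock);

	/* the slow work runs without l_icloglock held */
	xlog_cil_process_committed(&cb_list);
}

/*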
2932 */ 2933 STATIC void 2934 xlog_state_done_syncing( 2935 struct xlog_in_core *iclog) 2936 { 2937 struct xlog *log = iclog->ic_log; 2938 2939 spin_lock(&log->l_icloglock); 2940 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 2941 trace_xlog_iclog_sync_done(iclog, _RET_IP_); 2942 2943 /* 2944 * If we got an error, either on the first buffer, or in the case of 2945 * split log writes, on the second, we shut down the file system and 2946 * no iclogs should ever be attempted to be written to disk again. 2947 */ 2948 if (!xlog_is_shutdown(log)) { 2949 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING); 2950 iclog->ic_state = XLOG_STATE_DONE_SYNC; 2951 } 2952 2953 /* 2954 * Someone could be sleeping prior to writing out the next 2955 * iclog buffer; we wake them all. One will get to do the 2956 * I/O, and the others will wait for the result. 2957 */ 2958 wake_up_all(&iclog->ic_write_wait); 2959 spin_unlock(&log->l_icloglock); 2960 xlog_state_do_callback(log); 2961 } 2962 2963 /* 2964 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must 2965 * sleep. We wait on the flush queue on the head iclog as that should be 2966 * the first iclog to complete flushing. Hence if all iclogs are syncing, 2967 * we will wait here and all new writes will sleep until a sync completes. 2968 * 2969 * The in-core logs are used in a circular fashion. They are not used 2970 * out-of-order even when an iclog past the head is free. 2971 * 2972 * return: 2973 * * log_offset where xlog_write() can start writing into the in-core 2974 * log's data space. 2975 * * in-core log pointer to which xlog_write() should write. 2976 * * boolean indicating this is a continued write to an in-core log. 2977 * If this is the last write, then the in-core log's offset field 2978 * needs to be incremented, depending on the amount of data which 2979 * is copied. 2980 */ 2981 STATIC int 2982 xlog_state_get_iclog_space( 2983 struct xlog *log, 2984 int len, 2985 struct xlog_in_core **iclogp, 2986 struct xlog_ticket *ticket, 2987 int *continued_write, 2988 int *logoffsetp) 2989 { 2990 int log_offset; 2991 xlog_rec_header_t *head; 2992 xlog_in_core_t *iclog; 2993 2994 restart: 2995 spin_lock(&log->l_icloglock); 2996 if (xlog_is_shutdown(log)) { 2997 spin_unlock(&log->l_icloglock); 2998 return -EIO; 2999 } 3000 3001 iclog = log->l_iclog; 3002 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 3003 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); 3004 3005 /* Wait for log writes to have flushed */ 3006 xlog_wait(&log->l_flush_wait, &log->l_icloglock); 3007 goto restart; 3008 } 3009 3010 head = &iclog->ic_header; 3011 3012 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ 3013 log_offset = iclog->ic_offset; 3014 3015 trace_xlog_iclog_get_space(iclog, _RET_IP_); 3016 3017 /* On the 1st write to an iclog, figure out the lsn. This works 3018 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are 3019 * committing to. If the offset is set, that's how many blocks 3020 * must be written. 3021 */ 3022 if (log_offset == 0) { 3023 ticket->t_curr_res -= log->l_iclog_hsize; 3024 xlog_tic_add_region(ticket, 3025 log->l_iclog_hsize, 3026 XLOG_REG_TYPE_LRHEADER); 3027 head->h_cycle = cpu_to_be32(log->l_curr_cycle); 3028 head->h_lsn = cpu_to_be64( 3029 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); 3030 ASSERT(log->l_curr_block >= 0); 3031 } 3032 3033 /* If there is enough room to write everything, then do it. Otherwise, 3034 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC 3035 * bit is on, so this will get flushed out.
Don't update ic_offset 3036 * until you know exactly how many bytes get copied. Therefore, wait 3037 * until later to update ic_offset. 3038 * 3039 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's 3040 * can fit into remaining data section. 3041 */ 3042 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { 3043 int error = 0; 3044 3045 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 3046 3047 /* 3048 * If we are the only one writing to this iclog, sync it to 3049 * disk. We need to do an atomic compare and decrement here to 3050 * avoid racing with concurrent atomic_dec_and_lock() calls in 3051 * xlog_state_release_iclog() when there is more than one 3052 * reference to the iclog. 3053 */ 3054 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) 3055 error = xlog_state_release_iclog(log, iclog, 0); 3056 spin_unlock(&log->l_icloglock); 3057 if (error) 3058 return error; 3059 goto restart; 3060 } 3061 3062 /* Do we have enough room to write the full amount in the remainder 3063 * of this iclog? Or must we continue a write on the next iclog and 3064 * mark this iclog as completely taken? In the case where we switch 3065 * iclogs (to mark it taken), this particular iclog will release/sync 3066 * to disk in xlog_write(). 3067 */ 3068 if (len <= iclog->ic_size - iclog->ic_offset) { 3069 *continued_write = 0; 3070 iclog->ic_offset += len; 3071 } else { 3072 *continued_write = 1; 3073 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 3074 } 3075 *iclogp = iclog; 3076 3077 ASSERT(iclog->ic_offset <= iclog->ic_size); 3078 spin_unlock(&log->l_icloglock); 3079 3080 *logoffsetp = log_offset; 3081 return 0; 3082 } 3083 3084 /* 3085 * The first cnt-1 times a ticket goes through here we don't need to move the 3086 * grant write head because the permanent reservation has reserved cnt times the 3087 * unit amount. Release part of the current permanent unit reservation and reset 3088 * the current reservation to be one unit's worth. Also move the grant reservation 3089 * head forward. 3090 */ 3091 void 3092 xfs_log_ticket_regrant( 3093 struct xlog *log, 3094 struct xlog_ticket *ticket) 3095 { 3096 trace_xfs_log_ticket_regrant(log, ticket); 3097 3098 if (ticket->t_cnt > 0) 3099 ticket->t_cnt--; 3100 3101 xlog_grant_sub_space(log, &log->l_reserve_head.grant, 3102 ticket->t_curr_res); 3103 xlog_grant_sub_space(log, &log->l_write_head.grant, 3104 ticket->t_curr_res); 3105 ticket->t_curr_res = ticket->t_unit_res; 3106 xlog_tic_reset_res(ticket); 3107 3108 trace_xfs_log_ticket_regrant_sub(log, ticket); 3109 3110 /* re-grant a full unit if the pre-reserved count is now exhausted */ 3111 if (!ticket->t_cnt) { 3112 xlog_grant_add_space(log, &log->l_reserve_head.grant, 3113 ticket->t_unit_res); 3114 trace_xfs_log_ticket_regrant_exit(log, ticket); 3115 3116 ticket->t_curr_res = ticket->t_unit_res; 3117 xlog_tic_reset_res(ticket); 3118 } 3119 3120 xfs_log_ticket_put(ticket); 3121 } 3122 3123 /* 3124 * Give back the space left from a reservation. 3125 * 3126 * All the information we need to make a correct determination of space left 3127 * is present. For non-permanent reservations, things are quite easy. The 3128 * count should have been decremented to zero. We only need to deal with the 3129 * space remaining in the current reservation part of the ticket. If the 3130 * ticket contains a permanent reservation, there may be left over space which 3131 * needs to be released. A count of N means that N-1 refills of the current 3132 * reservation can be done before we need to ask for more space.
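 */

/*
 * A worked sketch of that accounting with made-up numbers: a permanent
 * ticket taken with t_unit_res = 64kB and an original count of 3 holds
 * three units of grant space. If, after the decrement at the top of
 * xfs_log_ticket_ungrant(), the ticket still has t_cnt == 2 with
 * t_curr_res == 64kB unused, the space handed back is
 * 64kB + 2 * 64kB = 192kB. The helper below simply restates the math
 * from the function that follows:
 */
static inline int
xlog_example_ungrant_bytes(
	struct xlog_ticket	*ticket)
{
	int	bytes = ticket->t_curr_res;

	/* one full unit for every refill the ticket still had coming */
	if (ticket->t_cnt > 0)
		bytes += ticket->t_unit_res * ticket->t_cnt;
	return bytes;
}

/*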
The first 3133 * one goes to fill up the first current reservation. Once we run out of 3134 * space, the count will stay at zero and the only space remaining will be 3135 * in the current reservation field. 3136 */ 3137 void 3138 xfs_log_ticket_ungrant( 3139 struct xlog *log, 3140 struct xlog_ticket *ticket) 3141 { 3142 int bytes; 3143 3144 trace_xfs_log_ticket_ungrant(log, ticket); 3145 3146 if (ticket->t_cnt > 0) 3147 ticket->t_cnt--; 3148 3149 trace_xfs_log_ticket_ungrant_sub(log, ticket); 3150 3151 /* 3152 * If this is a permanent reservation ticket, we may be able to free 3153 * up more space based on the remaining count. 3154 */ 3155 bytes = ticket->t_curr_res; 3156 if (ticket->t_cnt > 0) { 3157 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); 3158 bytes += ticket->t_unit_res*ticket->t_cnt; 3159 } 3160 3161 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); 3162 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); 3163 3164 trace_xfs_log_ticket_ungrant_exit(log, ticket); 3165 3166 xfs_log_space_wake(log->l_mp); 3167 xfs_log_ticket_put(ticket); 3168 } 3169 3170 /* 3171 * This routine will mark the current iclog in the ring as WANT_SYNC and move 3172 * the current iclog pointer to the next iclog in the ring. 3173 */ 3174 void 3175 xlog_state_switch_iclogs( 3176 struct xlog *log, 3177 struct xlog_in_core *iclog, 3178 int eventual_size) 3179 { 3180 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 3181 assert_spin_locked(&log->l_icloglock); 3182 trace_xlog_iclog_switch(iclog, _RET_IP_); 3183 3184 if (!eventual_size) 3185 eventual_size = iclog->ic_offset; 3186 iclog->ic_state = XLOG_STATE_WANT_SYNC; 3187 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); 3188 log->l_prev_block = log->l_curr_block; 3189 log->l_prev_cycle = log->l_curr_cycle; 3190 3191 /* roll log?: ic_offset changed later */ 3192 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); 3193 3194 /* Round up to next log-sunit */ 3195 if (log->l_iclog_roundoff > BBSIZE) { 3196 uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff); 3197 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); 3198 } 3199 3200 if (log->l_curr_block >= log->l_logBBsize) { 3201 /* 3202 * Rewind the current block before the cycle is bumped to make 3203 * sure that the combined LSN never transiently moves forward 3204 * when the log wraps to the next cycle. This is to support the 3205 * unlocked sample of these fields from xlog_valid_lsn(). Most 3206 * other cases should acquire l_icloglock. 3207 */ 3208 log->l_curr_block -= log->l_logBBsize; 3209 ASSERT(log->l_curr_block >= 0); 3210 smp_wmb(); 3211 log->l_curr_cycle++; 3212 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) 3213 log->l_curr_cycle++; 3214 } 3215 ASSERT(iclog == log->l_iclog); 3216 log->l_iclog = iclog->ic_next; 3217 } 3218 3219 /* 3220 * Force the iclog to disk and check if the iclog has been completed before 3221 * xlog_force_iclog() returns. This can happen on synchronous (e.g. 3222 * pmem) or fast async storage because we drop the icloglock to issue the IO. 3223 * If completion has already occurred, tell the caller so that it can avoid an 3224 * unnecessary wait on the iclog. 
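 */

/*
 * Aside on xlog_state_switch_iclogs() above: on a log with a stripe unit,
 * l_curr_block is rounded up so the next iclog starts on a log-sunit
 * boundary. A sketch of that arithmetic, assuming a 32kB stripe unit:
 * BTOBB(32768) is 64 basic blocks, so a current block of 70 rounds up
 * to 128.
 */
static inline int
xlog_example_align_to_sunit(
	int	curr_block,
	int	iclog_roundoff)
{
	/* only stripe-unit sized roundoff needs block alignment */
	if (iclog_roundoff > BBSIZE)
		return roundup(curr_block, BTOBB(iclog_roundoff));
	return curr_block;
}

/*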
3225 */ 3226 static int 3227 xlog_force_and_check_iclog( 3228 struct xlog_in_core *iclog, 3229 bool *completed) 3230 { 3231 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3232 int error; 3233 3234 *completed = false; 3235 error = xlog_force_iclog(iclog); 3236 if (error) 3237 return error; 3238 3239 /* 3240 * If the iclog has already been completed and reused, the header LSN 3241 * will have been rewritten by completion 3242 */ 3243 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) 3244 *completed = true; 3245 return 0; 3246 } 3247 3248 /* 3249 * Write out all data in the in-core log as of this exact moment in time. 3250 * 3251 * Data may be written to the in-core log during this call. However, 3252 * we don't guarantee this data will be written out. A change from past 3253 * implementation means this routine will *not* write out zero length LRs. 3254 * 3255 * Basically, we try and perform an intelligent scan of the in-core logs. 3256 * If we determine there is no flushable data, we just return. There is no 3257 * flushable data if: 3258 * 3259 * 1. the current iclog is active and has no data; the previous iclog 3260 * is in the active or dirty state. 3261 * 2. the current iclog is dirty, and the previous iclog is in the 3262 * active or dirty state. 3263 * 3264 * We may sleep if: 3265 * 3266 * 1. the current iclog is not in the active nor dirty state. 3267 * 2. the current iclog is dirty, and the previous iclog is not in the 3268 * active nor dirty state. 3269 * 3. the current iclog is active, and there is another thread writing 3270 * to this particular iclog. 3271 * 4. a) the current iclog is active and has no other writers 3272 * b) when we return from flushing out this iclog, it is still 3273 * not in the active nor dirty state. 3274 */ 3275 int 3276 xfs_log_force( 3277 struct xfs_mount *mp, 3278 uint flags) 3279 { 3280 struct xlog *log = mp->m_log; 3281 struct xlog_in_core *iclog; 3282 3283 XFS_STATS_INC(mp, xs_log_force); 3284 trace_xfs_log_force(mp, 0, _RET_IP_); 3285 3286 xlog_cil_force(log); 3287 3288 spin_lock(&log->l_icloglock); 3289 if (xlog_is_shutdown(log)) 3290 goto out_error; 3291 3292 iclog = log->l_iclog; 3293 trace_xlog_iclog_force(iclog, _RET_IP_); 3294 3295 if (iclog->ic_state == XLOG_STATE_DIRTY || 3296 (iclog->ic_state == XLOG_STATE_ACTIVE && 3297 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) { 3298 /* 3299 * If the head is dirty or (active and empty), then we need to 3300 * look at the previous iclog. 3301 * 3302 * If the previous iclog is active or dirty we are done. There 3303 * is nothing to sync out. Otherwise, we attach ourselves to the 3304 * previous iclog and go to sleep. 3305 */ 3306 iclog = iclog->ic_prev; 3307 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3308 if (atomic_read(&iclog->ic_refcnt) == 0) { 3309 /* We have exclusive access to this iclog. */ 3310 bool completed; 3311 3312 if (xlog_force_and_check_iclog(iclog, &completed)) 3313 goto out_error; 3314 3315 if (completed) 3316 goto out_unlock; 3317 } else { 3318 /* 3319 * Someone else is still writing to this iclog, so we 3320 * need to ensure that when they release the iclog it 3321 * gets synced immediately as we may be waiting on it. 3322 */ 3323 xlog_state_switch_iclogs(log, iclog, 0); 3324 } 3325 } 3326 3327 /* 3328 * The iclog we are about to wait on may contain the checkpoint pushed 3329 * by the above xlog_cil_force() call, but it may not have been pushed 3330 * to disk yet.
Like the ACTIVE case above, we need to make sure caches 3331 * are flushed when this iclog is written. 3332 */ 3333 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) 3334 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; 3335 3336 if (flags & XFS_LOG_SYNC) 3337 return xlog_wait_on_iclog(iclog); 3338 out_unlock: 3339 spin_unlock(&log->l_icloglock); 3340 return 0; 3341 out_error: 3342 spin_unlock(&log->l_icloglock); 3343 return -EIO; 3344 } 3345 3346 /* 3347 * Force the log to a specific LSN. 3348 * 3349 * If an iclog with that lsn can be found: 3350 * If it is in the DIRTY state, just return. 3351 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC 3352 * state and go to sleep or return. 3353 * If it is in any other state, go to sleep or return. 3354 * 3355 * Synchronous forces are implemented with a wait queue. All callers trying 3356 * to force a given lsn to disk must wait on the queue attached to the 3357 * specific in-core log. When given in-core log finally completes its write 3358 * to disk, that thread will wake up all threads waiting on the queue. 3359 */ 3360 static int 3361 xlog_force_lsn( 3362 struct xlog *log, 3363 xfs_lsn_t lsn, 3364 uint flags, 3365 int *log_flushed, 3366 bool already_slept) 3367 { 3368 struct xlog_in_core *iclog; 3369 bool completed; 3370 3371 spin_lock(&log->l_icloglock); 3372 if (xlog_is_shutdown(log)) 3373 goto out_error; 3374 3375 iclog = log->l_iclog; 3376 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { 3377 trace_xlog_iclog_force_lsn(iclog, _RET_IP_); 3378 iclog = iclog->ic_next; 3379 if (iclog == log->l_iclog) 3380 goto out_unlock; 3381 } 3382 3383 switch (iclog->ic_state) { 3384 case XLOG_STATE_ACTIVE: 3385 /* 3386 * We sleep here if we haven't already slept (e.g. this is the 3387 * first time we've looked at the correct iclog buf) and the 3388 * buffer before us is going to be sync'ed. The reason for this 3389 * is that if we are doing sync transactions here, by waiting 3390 * for the previous I/O to complete, we can allow a few more 3391 * transactions into this iclog before we close it down. 3392 * 3393 * Otherwise, we mark the buffer WANT_SYNC, and bump up the 3394 * refcnt so we can release the log (which drops the ref count). 3395 * The state switch keeps new transaction commits from using 3396 * this buffer. When the current commits finish writing into 3397 * the buffer, the refcount will drop to zero and the buffer 3398 * will go out then. 3399 */ 3400 if (!already_slept && 3401 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC || 3402 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) { 3403 xlog_wait(&iclog->ic_prev->ic_write_wait, 3404 &log->l_icloglock); 3405 return -EAGAIN; 3406 } 3407 if (xlog_force_and_check_iclog(iclog, &completed)) 3408 goto out_error; 3409 if (log_flushed) 3410 *log_flushed = 1; 3411 if (completed) 3412 goto out_unlock; 3413 break; 3414 case XLOG_STATE_WANT_SYNC: 3415 /* 3416 * This iclog may contain the checkpoint pushed by the 3417 * xlog_cil_force_seq() call, but there are other writers still 3418 * accessing it so it hasn't been pushed to disk yet. Like the 3419 * ACTIVE case above, we need to make sure caches are flushed 3420 * when this iclog is written. 3421 */ 3422 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; 3423 break; 3424 default: 3425 /* 3426 * The entire checkpoint was written by the CIL force and is on 3427 * its way to disk already. It will be stable when it 3428 * completes, so we don't need to manipulate caches here at all. 
3429 * We just need to wait for completion if necessary. 3430 */ 3431 break; 3432 } 3433 3434 if (flags & XFS_LOG_SYNC) 3435 return xlog_wait_on_iclog(iclog); 3436 out_unlock: 3437 spin_unlock(&log->l_icloglock); 3438 return 0; 3439 out_error: 3440 spin_unlock(&log->l_icloglock); 3441 return -EIO; 3442 } 3443 3444 /* 3445 * Force the log to a specific checkpoint sequence. 3446 * 3447 * First force the CIL so that all the required changes have been flushed to the 3448 * iclogs. If the CIL force completed it will return a commit LSN that indicates 3449 * the iclog that needs to be flushed to stable storage. If the caller needs 3450 * a synchronous log force, we will wait on the iclog with the LSN returned by 3451 * xlog_cil_force_seq() to be completed. 3452 */ 3453 int 3454 xfs_log_force_seq( 3455 struct xfs_mount *mp, 3456 xfs_csn_t seq, 3457 uint flags, 3458 int *log_flushed) 3459 { 3460 struct xlog *log = mp->m_log; 3461 xfs_lsn_t lsn; 3462 int ret; 3463 ASSERT(seq != 0); 3464 3465 XFS_STATS_INC(mp, xs_log_force); 3466 trace_xfs_log_force(mp, seq, _RET_IP_); 3467 3468 lsn = xlog_cil_force_seq(log, seq); 3469 if (lsn == NULLCOMMITLSN) 3470 return 0; 3471 3472 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false); 3473 if (ret == -EAGAIN) { 3474 XFS_STATS_INC(mp, xs_log_force_sleep); 3475 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true); 3476 } 3477 return ret; 3478 } 3479 3480 /* 3481 * Free a used ticket when its refcount falls to zero. 3482 */ 3483 void 3484 xfs_log_ticket_put( 3485 xlog_ticket_t *ticket) 3486 { 3487 ASSERT(atomic_read(&ticket->t_ref) > 0); 3488 if (atomic_dec_and_test(&ticket->t_ref)) 3489 kmem_cache_free(xfs_log_ticket_cache, ticket); 3490 } 3491 3492 xlog_ticket_t * 3493 xfs_log_ticket_get( 3494 xlog_ticket_t *ticket) 3495 { 3496 ASSERT(atomic_read(&ticket->t_ref) > 0); 3497 atomic_inc(&ticket->t_ref); 3498 return ticket; 3499 } 3500 3501 /* 3502 * Figure out the total log space unit (in bytes) that would be 3503 * required for a log ticket. 3504 */ 3505 static int 3506 xlog_calc_unit_res( 3507 struct xlog *log, 3508 int unit_bytes) 3509 { 3510 int iclog_space; 3511 uint num_headers; 3512 3513 /* 3514 * Permanent reservations have up to 'cnt'-1 active log operations 3515 * in the log. A unit in this case is the amount of space for one 3516 * of these log operations. Normal reservations have a cnt of 1 3517 * and their unit amount is the total amount of space required. 3518 * 3519 * The following lines of code account for non-transaction data 3520 * which occupy space in the on-disk log. 3521 * 3522 * Normal form of a transaction is: 3523 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> 3524 * and then there are LR hdrs, split-recs and roundoff at end of syncs. 3525 * 3526 * We need to account for all the leadup data and trailer data 3527 * around the transaction data. 3528 * And then we need to account for the worst case in terms of using 3529 * more space. 3530 * The worst case will happen if: 3531 * - the placement of the transaction happens to be such that the 3532 * roundoff is at its maximum 3533 * - the transaction data is synced before the commit record is synced 3534 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> 3535 * Therefore the commit record is in its own Log Record. 3536 * This can happen as the commit record is called with its 3537 * own region to xlog_write(). 3538 * This then means that in the worst case, roundoff can happen for 3539 * the commit-rec as well. 
3540 * The commit-rec is smaller than padding in this scenario and so it is 3541 * not added separately. 3542 */ 3543 3544 /* for trans header */ 3545 unit_bytes += sizeof(xlog_op_header_t); 3546 unit_bytes += sizeof(xfs_trans_header_t); 3547 3548 /* for start-rec */ 3549 unit_bytes += sizeof(xlog_op_header_t); 3550 3551 /* 3552 * for LR headers - the space for data in an iclog is the size minus 3553 * the space used for the headers. If we use the iclog size, then we 3554 * undercalculate the number of headers required. 3555 * 3556 * Furthermore - the addition of op headers for split-recs might 3557 * increase the space required enough to require more log and op 3558 * headers, so take that into account too. 3559 * 3560 * IMPORTANT: This reservation makes the assumption that if this 3561 * transaction is the first in an iclog and hence has the LR headers 3562 * accounted to it, then the remaining space in the iclog is 3563 * exclusively for this transaction. i.e. if the transaction is larger 3564 * than the iclog, it will be the only thing in that iclog. 3565 * Fundamentally, this means we must pass the entire log vector to 3566 * xlog_write to guarantee this. 3567 */ 3568 iclog_space = log->l_iclog_size - log->l_iclog_hsize; 3569 num_headers = howmany(unit_bytes, iclog_space); 3570 3571 /* for split-recs - ophdrs added when data split over LRs */ 3572 unit_bytes += sizeof(xlog_op_header_t) * num_headers; 3573 3574 /* add extra header reservations if we overrun */ 3575 while (!num_headers || 3576 howmany(unit_bytes, iclog_space) > num_headers) { 3577 unit_bytes += sizeof(xlog_op_header_t); 3578 num_headers++; 3579 } 3580 unit_bytes += log->l_iclog_hsize * num_headers; 3581 3582 /* for commit-rec LR header - note: padding will subsume the ophdr */ 3583 unit_bytes += log->l_iclog_hsize; 3584 3585 /* roundoff padding for transaction data and one for commit record */ 3586 unit_bytes += 2 * log->l_iclog_roundoff; 3587 3588 return unit_bytes; 3589 } 3590 3591 int 3592 xfs_log_calc_unit_res( 3593 struct xfs_mount *mp, 3594 int unit_bytes) 3595 { 3596 return xlog_calc_unit_res(mp->m_log, unit_bytes); 3597 } 3598 3599 /* 3600 * Allocate and initialise a new log ticket. 3601 */ 3602 struct xlog_ticket * 3603 xlog_ticket_alloc( 3604 struct xlog *log, 3605 int unit_bytes, 3606 int cnt, 3607 char client, 3608 bool permanent) 3609 { 3610 struct xlog_ticket *tic; 3611 int unit_res; 3612 3613 tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL); 3614 3615 unit_res = xlog_calc_unit_res(log, unit_bytes); 3616 3617 atomic_set(&tic->t_ref, 1); 3618 tic->t_task = current; 3619 INIT_LIST_HEAD(&tic->t_queue); 3620 tic->t_unit_res = unit_res; 3621 tic->t_curr_res = unit_res; 3622 tic->t_cnt = cnt; 3623 tic->t_ocnt = cnt; 3624 tic->t_tid = prandom_u32(); 3625 tic->t_clientid = client; 3626 if (permanent) 3627 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3628 3629 xlog_tic_reset_res(tic); 3630 3631 return tic; 3632 } 3633 3634 #if defined(DEBUG) 3635 /* 3636 * Make sure that the destination ptr is within the valid data region of 3637 * one of the iclogs. This uses backup pointers stored in a different 3638 * part of the log in case we trash the log structure. 
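 */

/*
 * A worked sketch of the xlog_calc_unit_res() overhead accounting above,
 * restated under simplifying assumptions (it deliberately ignores the
 * split-record op headers the real function also adds): the fixed cost on
 * top of the payload is two op headers plus the transaction header, one
 * LR header per iclog touched plus one for the commit record, and worst
 * case roundoff for both the data and the commit record.
 */
static inline int
xlog_example_fixed_overhead(
	struct xlog	*log,
	int		num_lr_headers)
{
	/* trans ophdr + start-rec ophdr, then the transaction header */
	int	bytes = 2 * sizeof(xlog_op_header_t) +
			sizeof(xfs_trans_header_t);

	/* one LR header per iclog used, plus the commit record's LR */
	bytes += log->l_iclog_hsize * (num_lr_headers + 1);

	/* roundoff for the transaction data and for the commit record */
	bytes += 2 * log->l_iclog_roundoff;
	return bytes;
}

/*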
3639 */ 3640 STATIC void 3641 xlog_verify_dest_ptr( 3642 struct xlog *log, 3643 void *ptr) 3644 { 3645 int i; 3646 int good_ptr = 0; 3647 3648 for (i = 0; i < log->l_iclog_bufs; i++) { 3649 if (ptr >= log->l_iclog_bak[i] && 3650 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) 3651 good_ptr++; 3652 } 3653 3654 if (!good_ptr) 3655 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); 3656 } 3657 3658 /* 3659 * Check to make sure the grant write head didn't just overlap the tail. If 3660 * the cycles are the same, we can't be overlapping. Otherwise, make sure that 3661 * the cycles differ by exactly one and check the byte count. 3662 * 3663 * This check is run unlocked, so can give false positives. Rather than assert 3664 * on failures, use a warn-once flag and a panic tag to allow the admin to 3665 * determine if they want to panic the machine when such an error occurs. For 3666 * debug kernels this will have the same effect as using an assert but, unlike 3667 * an assert, it can be turned off at runtime. 3668 */ 3669 STATIC void 3670 xlog_verify_grant_tail( 3671 struct xlog *log) 3672 { 3673 int tail_cycle, tail_blocks; 3674 int cycle, space; 3675 3676 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); 3677 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); 3678 if (tail_cycle != cycle) { 3679 if (cycle - 1 != tail_cycle && 3680 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { 3681 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3682 "%s: cycle - 1 != tail_cycle", __func__); 3683 } 3684 3685 if (space > BBTOB(tail_blocks) && 3686 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { 3687 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3688 "%s: space > BBTOB(tail_blocks)", __func__); 3689 } 3690 } 3691 } 3692 3693 /* check if it will fit */ 3694 STATIC void 3695 xlog_verify_tail_lsn( 3696 struct xlog *log, 3697 struct xlog_in_core *iclog) 3698 { 3699 xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn); 3700 int blocks; 3701 3702 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { 3703 blocks = 3704 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); 3705 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) 3706 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3707 } else { 3708 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); 3709 3710 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) 3711 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); 3712 3713 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; 3714 if (blocks < BTOBB(iclog->ic_offset) + 1) 3715 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3716 } 3717 } 3718 3719 /* 3720 * Perform a number of checks on the iclog before writing to disk. 3721 * 3722 * 1. Make sure the iclogs are still circular 3723 * 2. Make sure we have a good magic number 3724 * 3. Make sure we don't have magic numbers in the data 3725 * 4. Check fields of each log operation header for: 3726 * A. Valid client identifier 3727 * B. tid ptr value falls in valid ptr space (user space code) 3728 * C. Length in log record header is correct according to the 3729 * individual operation headers within record. 3730 * 5. When a bwrite will occur within 5 blocks of the front of the physical 3731 * log, check the preceding blocks of the physical log to make sure all 3732 * the cycle numbers agree with the current cycle number.
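 */

/*
 * Aside for the verifier below: it has to fish saved words back out of the
 * cycle-data arrays that xlog_pack_data() filled in. The first word of
 * every 512 byte block of the record is stashed in the main header until
 * its h_cycle_data slots run out, and after that in extended header j at
 * slot k. A sketch of that index arithmetic:
 */
static inline void
xlog_example_cycle_slot(
	int	block_idx,	/* basic block index within the record */
	int	*hdr,		/* output: 0 is main, j > 0 is extended */
	int	*slot)		/* output: slot within that header */
{
	int	slots_per_hdr = XLOG_HEADER_CYCLE_SIZE / BBSIZE;

	*hdr = block_idx / slots_per_hdr;
	*slot = block_idx % slots_per_hdr;
}

/*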
/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *      A. Valid client identifier
 *      B. tid ptr value falls in valid ptr space (user space code)
 *      C. Length in log record header is correct according to the
 *         individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *    log, check the preceding blocks of the physical log to make sure all
 *    the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(
        struct xlog             *log,
        struct xlog_in_core     *iclog,
        int                     count)
{
        xlog_op_header_t        *ophead;
        xlog_in_core_t          *icptr;
        xlog_in_core_2_t        *xhdr;
        void                    *base_ptr, *ptr, *p;
        ptrdiff_t               field_offset;
        uint8_t                 clientid;
        int                     len, i, j, k, op_len;
        int                     idx;

        /* check validity of iclog pointers */
        spin_lock(&log->l_icloglock);
        icptr = log->l_iclog;
        for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
                ASSERT(icptr);

        if (icptr != log->l_iclog)
                xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
        spin_unlock(&log->l_icloglock);

        /* check log magic numbers */
        if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
                xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

        base_ptr = ptr = &iclog->ic_header;
        p = &iclog->ic_header;
        for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
                if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
                        xfs_emerg(log->l_mp, "%s: unexpected magic num",
                                __func__);
        }

        /* check fields */
        len = be32_to_cpu(iclog->ic_header.h_num_logops);
        base_ptr = ptr = iclog->ic_datap;
        ophead = ptr;
        xhdr = iclog->ic_data;
        for (i = 0; i < len; i++) {
                ophead = ptr;

                /* clientid is only 1 byte */
                p = &ophead->oh_clientid;
                field_offset = p - base_ptr;
                if (field_offset & 0x1ff) {
                        clientid = ophead->oh_clientid;
                } else {
                        idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
                        if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
                                j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
                                k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
                                clientid = xlog_get_client_id(
                                        xhdr[j].hic_xheader.xh_cycle_data[k]);
                        } else {
                                clientid = xlog_get_client_id(
                                        iclog->ic_header.h_cycle_data[idx]);
                        }
                }
                if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
                        xfs_warn(log->l_mp,
                                "%s: invalid clientid %d op "PTR_FMT" offset 0x%lx",
                                __func__, clientid, ophead,
                                (unsigned long)field_offset);

                /* check length */
                p = &ophead->oh_len;
                field_offset = p - base_ptr;
                if (field_offset & 0x1ff) {
                        op_len = be32_to_cpu(ophead->oh_len);
                } else {
                        idx = BTOBBT((uintptr_t)&ophead->oh_len -
                                     (uintptr_t)iclog->ic_datap);
                        if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
                                j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
                                k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
                                op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
                        } else {
                                op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
                        }
                }
                ptr += sizeof(xlog_op_header_t) + op_len;
        }
}
#endif
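/*
 * Note on the field extraction in xlog_verify_iclog() above: before an
 * iclog is written, the first 4 bytes of every 512 byte basic block in
 * the record are replaced with the cycle number and the displaced bytes
 * are stashed in h_cycle_data[] (or, beyond the first 32k, in the
 * xh_cycle_data[] arrays of the extended headers).  A sketch of the
 * lookup for a field that starts exactly on a basic block boundary
 * (i.e. field_offset & 0x1ff == 0):
 *
 *      idx = BTOBBT(offset of field within ic_datap);
 *      if (idx < XLOG_HEADER_CYCLE_SIZE / BBSIZE)
 *              value = h_cycle_data[idx];
 *      else
 *              value = xhdr[j].hic_xheader.xh_cycle_data[k];
 *
 * where j and k index the extended header and its slot respectively.
 */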
/*
 * Perform a forced shutdown on the log. This should be called once and once
 * only by the high level filesystem shutdown code to shut the log subsystem
 * down cleanly.
 *
 * Our main objectives here are to make sure that:
 *      a. if the shutdown was not due to a log IO error, flush the logs to
 *         disk. Anything modified after this is ignored.
 *      b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
 *         parties to find out. Nothing new gets queued after this is done.
 *      c. Tasks sleeping on log reservations, pinned objects and
 *         other resources get woken up.
 *
 * Return true if the shutdown cause was a log IO error and we actually shut
 * the log down.
 */
bool
xlog_force_shutdown(
        struct xlog     *log,
        int             shutdown_flags)
{
        bool            log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);

        /*
         * If this happens during log recovery then we aren't using the runtime
         * log mechanisms yet so there's nothing to shut down.
         */
        if (!log || xlog_in_recovery(log))
                return false;

        ASSERT(!xlog_is_shutdown(log));

        /*
         * Flush all the completed transactions to disk before marking the log
         * as being shut down. We need to do this first as shutting down the
         * log before the force will prevent the log force from flushing the
         * iclogs to disk.
         *
         * Re-entry due to a log IO error shutdown during the log force is
         * prevented by the atomicity of higher level shutdown code.
         */
        if (!log_error)
                xfs_log_force(log->l_mp, XFS_LOG_SYNC);

        /*
         * Atomically set the shutdown state. If the shutdown state is already
         * set, then someone else is performing the shutdown and so we are done
         * here. This should never happen because we should only ever get
         * called once by the first shutdown caller.
         *
         * Many of the log state machine transitions assume that the shutdown
         * state cannot change once they hold the log->l_icloglock. Hence we
         * need to hold that lock here, even though we use the atomic
         * test_and_set_bit() operation to set the shutdown state.
         */
        spin_lock(&log->l_icloglock);
        if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
                spin_unlock(&log->l_icloglock);
                ASSERT(0);
                return false;
        }
        spin_unlock(&log->l_icloglock);

        /*
         * We don't want anybody waiting for log reservations after this. That
         * means we have to wake up everybody queued up on reserveq as well as
         * writeq. In addition, we make sure in xlog_{re}grant_log_space that
         * we don't enqueue anything once the SHUTDOWN flag is set, and this
         * action is protected by the grant locks.
         */
        xlog_grant_head_wake_all(&log->l_reserve_head);
        xlog_grant_head_wake_all(&log->l_write_head);

        /*
         * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
         * as if the log writes were completed. The abort handling in the log
         * item committed callback functions will do this again under lock to
         * avoid races.
         */
        spin_lock(&log->l_cilp->xc_push_lock);
        wake_up_all(&log->l_cilp->xc_start_wait);
        wake_up_all(&log->l_cilp->xc_commit_wait);
        spin_unlock(&log->l_cilp->xc_push_lock);
        xlog_state_shutdown_callbacks(log);

        return log_error;
}

STATIC int
xlog_iclogs_empty(
        struct xlog     *log)
{
        xlog_in_core_t  *iclog;

        iclog = log->l_iclog;
        do {
                /* endianness does not matter here, zero is zero in
                 * any language.
                 */
                if (iclog->ic_header.h_num_logops)
                        return 0;
                iclog = iclog->ic_next;
        } while (iclog != log->l_iclog);
        return 1;
}
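/*
 * Illustrative sketch of the ordering that xlog_force_shutdown() above
 * relies on (not a real call chain; locking and error handling elided):
 *
 *      xfs_log_force(mp, XFS_LOG_SYNC);        flush iclogs to disk first
 *      test_and_set_bit(XLOG_IO_ERROR, ...);   then mark the log shut down
 *      xlog_grant_head_wake_all(...);          then wake all the waiters
 *
 * Swapping the first two steps would render the log force a no-op, as
 * iclog submission is refused once the shutdown state is set.
 */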
/*
 * Verify that an LSN stamped into a piece of metadata is valid. This is
 * intended for use in read verifiers on v5 superblocks.
 */
bool
xfs_log_check_lsn(
        struct xfs_mount        *mp,
        xfs_lsn_t               lsn)
{
        struct xlog             *log = mp->m_log;
        bool                    valid;

        /*
         * norecovery mode skips mount-time log processing and unconditionally
         * resets the in-core LSN. We can't validate in this mode, but
         * modifications are not allowed anyway so just return true.
         */
        if (xfs_has_norecovery(mp))
                return true;

        /*
         * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
         * handled by recovery and thus safe to ignore here.
         */
        if (lsn == NULLCOMMITLSN)
                return true;

        valid = xlog_valid_lsn(mp->m_log, lsn);

        /* warn the user about what's gone wrong before verifier failure */
        if (!valid) {
                spin_lock(&log->l_icloglock);
                xfs_warn(mp,
"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
"Please unmount and run xfs_repair (>= v4.3) to resolve.",
                         CYCLE_LSN(lsn), BLOCK_LSN(lsn),
                         log->l_curr_cycle, log->l_curr_block);
                spin_unlock(&log->l_icloglock);
        }

        return valid;
}

/*
 * Notify the log that we're about to start using a feature that is protected
 * by a log incompat feature flag. This will prevent log covering from
 * clearing those flags.
 */
void
xlog_use_incompat_feat(
        struct xlog     *log)
{
        down_read(&log->l_incompat_users);
}

/* Notify the log that we've finished using log incompat features. */
void
xlog_drop_incompat_feat(
        struct xlog     *log)
{
        up_read(&log->l_incompat_users);
}
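/*
 * Illustrative usage sketch for the two hooks above (hypothetical
 * caller; do_log_incompat_work() is invented for illustration):
 *
 *      xlog_use_incompat_feat(log);    take l_incompat_users for read
 *      error = do_log_incompat_work(); covering can't clear the flags
 *                                      while we hold the rwsem
 *      xlog_drop_incompat_feat(log);   allow covering to proceed again
 *
 * Log covering is expected to take l_incompat_users for write before
 * clearing the on-disk incompat flags, so it cannot race with a reader
 * that still depends on them.
 */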