/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_cksum.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"

kmem_zone_t	*xfs_log_ticket_zone;

/* Local miscellaneous function prototypes */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp);

STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC int
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
STATIC void
xlog_state_do_callback(
	struct xlog		*log,
	int			aborted,
	struct xlog_in_core	*iclog);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size);
STATIC void
xlog_state_want_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);

STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_regrant_reserve_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);
STATIC void
xlog_ungrant_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);

#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	void			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog		*log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count,
	bool			syncing);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);

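/*
 * Grant head manipulation helpers.
 *
 * Background note (summary, not new behaviour): each grant head packs a cycle
 * number and a byte count into a single atomic64_t via
 * xlog_assign_grant_head_val()/xlog_crack_grant_head_val() (xfs_log_priv.h),
 * so the helpers below can advance or retreat it with a lock-free
 * atomic64_cmpxchg() retry loop instead of taking head->lock on every
 * transaction reservation.
 */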
static void
xlog_grant_sub_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		space -= bytes;
		if (space < 0) {
			space += log->l_logsize;
			cycle--;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

static void
xlog_grant_add_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	tmp;
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		tmp = log->l_logsize - space;
		if (tmp > bytes)
			space += bytes;
		else {
			space = bytes - tmp;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

STATIC void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}

static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{
	if (head == &log->l_write_head) {
		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
		return tic->t_unit_res;
	} else {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			return tic->t_unit_res * tic->t_cnt;
		else
			return tic->t_unit_res;
	}
}

STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;

	list_for_each_entry(tic, &head->waiters, t_queue) {
		need_bytes = xlog_ticket_reservation(log, head, tic);
		if (*free_bytes < need_bytes)
			return false;

		*free_bytes -= need_bytes;
		trace_xfs_log_grant_wake_up(log, tic);
		wake_up_process(tic->t_task);
	}

	return true;
}

STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return -EIO;
}
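/*
 * Note on the wait protocol above: a ticket stays on head->waiters until its
 * reservation can be satisfied.  xlog_grant_head_wake() (run when space is
 * freed up or the tail moves) wakes the waiter, which then re-checks
 * xlog_space_left() itself before returning; a forced shutdown aborts the
 * wait with -EIO.
 */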
/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us. Wake up the first waiters, if we do not wake
	 * up all the waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}

static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
	tic->t_res_num = 0;
	tic->t_res_arr_sum = 0;
	tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}

	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}
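/*
 * Note: the per-ticket region accounting above (t_res_arr, t_res_arr_sum,
 * t_res_o_flow) only feeds the reservation overrun report printed by
 * xlog_print_tic_res(); it plays no part in the grant head space accounting.
 */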
/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation.  By wasting space in each
 * reservation, we prevent over allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	__uint8_t		client,
	bool			permanent,
	uint			t_type)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
				KM_SLEEP | KM_MAYFAIL);
	if (!tic)
		return -ENOMEM;

	tic->t_trans_type = t_type;
	*ticp = tic;

	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
					    : tic->t_unit_res);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}
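/*
 * Note the asymmetry with xfs_log_regrant() above: an initial reservation
 * moves both the reserve and the write grant heads, while regranting a
 * permanent ticket only needs to move the write head because the reserve
 * head already covers the remaining ticket count.
 */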
/*
 * NOTES:
 *
 *	1. currblock field gets updated at startup and after in-core logs
 *		marked with WANT_SYNC.
 */

/*
 * This routine is called when a user of a log manager ticket is done with
 * the reservation.  If the ticket was ever used, then a commit record for
 * the associated transaction is written out as a log operation header with
 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 * a given ticket.  If the ticket was one with a permanent reservation, then
 * a few operations are done differently.  Permanent reservation tickets by
 * default don't release the reservation.  They just commit the current
 * transaction with the belief that the reservation is still needed.  A flag
 * must be passed in before permanent reservations are actually released.
 * When tickets of this type are not released, they need to be set into the
 * inited state again.  By doing this, a start record will be written out when
 * the next write occurs.
 */
xfs_lsn_t
xfs_log_done(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	xfs_lsn_t		lsn = 0;

	if (XLOG_FORCED_SHUTDOWN(log) ||
	    /*
	     * If nothing was ever written, don't write out commit record.
	     * If we get an error, just continue and give back the log ticket.
	     */
	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
		lsn = (xfs_lsn_t) -1;
		regrant = false;
	}


	if (!regrant) {
		trace_xfs_log_done_nonperm(log, ticket);

		/*
		 * Release ticket if not permanent reservation or a specific
		 * request has been made to release a permanent reservation.
		 */
		xlog_ungrant_log_space(log, ticket);
	} else {
		trace_xfs_log_done_perm(log, ticket);

		xlog_regrant_reserve_log_space(log, ticket);
		/* If this ticket was a permanent reservation and we aren't
		 * trying to release it, reset the inited flags; so next time
		 * we write, a start record will be written out.
		 */
		ticket->t_flags |= XLOG_TIC_INITED;
	}

	xfs_log_ticket_put(ticket);
	return lsn;
}

/*
 * Attaches a new iclog I/O completion callback routine during
 * transaction commit.  If the log is in error state, a non-zero
 * return code is handed back and the caller is responsible for
 * executing the callback at an appropriate time.
 */
int
xfs_log_notify(
	struct xfs_mount	*mp,
	struct xlog_in_core	*iclog,
	xfs_log_callback_t	*cb)
{
	int	abortflg;

	spin_lock(&iclog->ic_callback_lock);
	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
	if (!abortflg) {
		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
			      (iclog->ic_state == XLOG_STATE_WANT_SYNC));
		cb->cb_next = NULL;
		*(iclog->ic_callback_tail) = cb;
		iclog->ic_callback_tail = &(cb->cb_next);
	}
	spin_unlock(&iclog->ic_callback_lock);
	return abortflg;
}

int
xfs_log_release_iclog(
	struct xfs_mount	*mp,
	struct xlog_in_core	*iclog)
{
	if (xlog_state_release_iclog(mp->m_log, iclog)) {
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
		return -EIO;
	}

	return 0;
}
/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	int		error = 0;
	int		min_logfsbs;

	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		xfs_notice(mp, "Mounting V%d Filesystem",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
	} else {
		xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(mp->m_log)) {
		error = PTR_ERR(mp->m_log);
		goto out;
	}

	/*
	 * Validate the given log space and drop a critical message via syslog
	 * if the log size is too small, as that would lead to some unexpected
	 * situations in the transaction log space reservation stage.
	 *
	 * Note: we can't just reject the mount if the validation fails.  This
	 * would mean that people would have to downgrade their kernel just to
	 * remedy the situation as there is no way to grow the log (short of
	 * black magic surgery with xfs_db).
	 *
	 * We can, however, reject mounts for CRC format filesystems, as the
	 * mkfs binary being used to make the filesystem should never create a
	 * filesystem with a log that is too small.
	 */
	min_logfsbs = xfs_log_calc_minimum_size(mp);

	if (mp->m_sb.sb_logblocks < min_logfsbs) {
		xfs_warn(mp,
		"Log size %d blocks too small, minimum size is %d blocks",
			 mp->m_sb.sb_logblocks, min_logfsbs);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
		xfs_warn(mp,
		"Log size %d blocks too large, maximum size is %lld blocks",
			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
		error = -EINVAL;
	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
		xfs_warn(mp,
		"log size %lld bytes too large, maximum size is %lld bytes",
			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
			 XFS_MAX_LOG_BYTES);
		error = -EINVAL;
	}
	if (error) {
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
			ASSERT(0);
			goto out_free_log;
		}
		xfs_crit(mp, "Log size out of supported range.");
		xfs_crit(mp,
"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	mp->m_log->l_ailp = mp->m_ail;

	/*
	 * skip log recovery on a norecovery mount.  pretend it all
	 * just worked.
	 */
	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(mp->m_log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				error);
			xlog_recover_cancel(mp->m_log);
			goto out_destroy_ail;
		}
	}

	error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
			       "log");
	if (error)
		goto out_destroy_ail;

	/* Normal transactions can now occur */
	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

	/*
	 * Now the log has been fully initialised and we know where our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(mp->m_log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(mp->m_log);
out:
	return error;
}
/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to
 * read in the root and real-time bitmap inodes between calling
 * xfs_log_mount() and here.
 *
 * If we finish recovery successfully, start the background log work. If we
 * are not doing recovery, then we have a RO filesystem and we don't need to
 * start it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{
	int	error = 0;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
		return 0;
	}

	error = xlog_recover_finish(mp->m_log);
	if (!error)
		xfs_log_work_queue(mp);

	return error;
}

/*
 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
int
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{
	int	error;

	error = xlog_recover_cancel(mp->m_log);
	xfs_log_unmount(mp);

	return error;
}

/*
 * Final log writes as part of unmount.
 *
 * Mark the filesystem clean as unmount happens.  Note that during relocation
 * this routine needs to be executed as part of source-bag while the
 * deallocation must not be done until source-end.
 */

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */
int
xfs_log_unmount_write(xfs_mount_t *mp)
{
	struct xlog	 *log = mp->m_log;
	xlog_in_core_t	 *iclog;
#ifdef DEBUG
	xlog_in_core_t	 *first_iclog;
#endif
	xlog_ticket_t	*tic = NULL;
	xfs_lsn_t	 lsn;
	int		 error;

	/*
	 * Don't write out unmount record on read-only mounts.
	 * Or, if we are doing a forced umount (typically because of IO errors).
	 */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));

#ifdef DEBUG
	first_iclog = iclog = log->l_iclog;
	do {
		if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
			ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
			ASSERT(iclog->ic_offset == 0);
		}
		iclog = iclog->ic_next;
	} while (iclog != first_iclog);
#endif
	if (! (XLOG_FORCED_SHUTDOWN(log))) {
		error = xfs_log_reserve(mp, 600, 1, &tic,
					XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
		if (!error) {
			/* the data section must be 32 bit size aligned */
			struct {
			    __uint16_t magic;
			    __uint16_t pad1;
			    __uint32_t pad2; /* may as well make it 64 bits */
			} magic = {
				.magic = XLOG_UNMOUNT_TYPE,
			};
			struct xfs_log_iovec reg = {
				.i_addr = &magic,
				.i_len = sizeof(magic),
				.i_type = XLOG_REG_TYPE_UNMOUNT,
			};
			struct xfs_log_vec vec = {
				.lv_niovecs = 1,
				.lv_iovecp = &reg,
			};

			/* remove inited flag, and account for space used */
			tic->t_flags = 0;
			tic->t_curr_res -= sizeof(magic);
			error = xlog_write(log, &vec, tic, &lsn,
					   NULL, XLOG_UNMOUNT_TRANS);
			/*
			 * At this point, we're unmounting anyway,
			 * so there's no point in transitioning log state
			 * to IOERROR. Just continue...
			 */
		}

		if (error)
			xfs_alert(mp, "%s: unmount record failed", __func__);


		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		atomic_inc(&iclog->ic_refcnt);
		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);
		error = xlog_state_release_iclog(log, iclog);

		spin_lock(&log->l_icloglock);
		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
		      iclog->ic_state == XLOG_STATE_DIRTY)) {
			if (!XLOG_FORCED_SHUTDOWN(log)) {
				xlog_wait(&iclog->ic_force_wait,
							&log->l_icloglock);
			} else {
				spin_unlock(&log->l_icloglock);
			}
		} else {
			spin_unlock(&log->l_icloglock);
		}
		if (tic) {
			trace_xfs_log_umount_write(log, tic);
			xlog_ungrant_log_space(log, tic);
			xfs_log_ticket_put(tic);
		}
	} else {
		/*
		 * We're already in forced_shutdown mode, couldn't
		 * even attempt to write out the unmount transaction.
		 *
		 * Go through the motions of sync'ing and releasing
		 * the iclog, even though no I/O will actually happen,
		 * we need to wait for other log I/Os that may already
		 * be in progress.  Do this as a separate section of
		 * code so we'll know if we ever get stuck here that
		 * we're in this odd situation of trying to unmount
		 * a file system that went into forced_shutdown as
		 * the result of an unmount.
		 */
		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		atomic_inc(&iclog->ic_refcnt);

		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);
		error = xlog_state_release_iclog(log, iclog);

		spin_lock(&log->l_icloglock);

		if (!(iclog->ic_state == XLOG_STATE_ACTIVE
		      || iclog->ic_state == XLOG_STATE_DIRTY
		      || iclog->ic_state == XLOG_STATE_IOERROR)) {

				xlog_wait(&iclog->ic_force_wait,
							&log->l_icloglock);
		} else {
			spin_unlock(&log->l_icloglock);
		}
	}

	return error;
}	/* xfs_log_unmount_write */

/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can write the unmount
 * record.
 */
void
xfs_log_quiesce(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_log->l_work);
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
	 * will push it, xfs_wait_buftarg() will not wait for it. Further,
	 * xfs_buf_iowait() cannot be used because it was pushed with the
	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
	 * the IO to complete.
	 */
	xfs_ail_push_all_sync(mp->m_ail);
	xfs_wait_buftarg(mp->m_ddev_targp);
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);

	xfs_log_unmount_write(mp);
}
/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{
	xfs_log_quiesce(mp);

	xfs_trans_ail_destroy(mp);

	xfs_sysfs_del(&mp->m_log->l_kobj);

	xlog_dealloc_log(mp->m_log);
}

void
xfs_log_item_init(
	struct xfs_mount	*mp,
	struct xfs_log_item	*item,
	int			type,
	const struct xfs_item_ops *ops)
{
	item->li_mountp = mp;
	item->li_ailp = mp->m_ail;
	item->li_type = type;
	item->li_ops = ops;
	item->li_lv = NULL;

	INIT_LIST_HEAD(&item->li_ail);
	INIT_LIST_HEAD(&item->li_cil);
}

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	if (XLOG_FORCED_SHUTDOWN(log))
		return;

	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}

/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered. To begin the transition to the idle state firstly the log needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed, the caller is
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL.  Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
int
xfs_log_need_covered(xfs_mount_t *mp)
{
	struct xlog	*log = mp->m_log;
	int		needed = 0;

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	if (!xlog_cil_empty(log))
		return 0;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		if (xfs_ail_min_lsn(log->l_ailp))
			break;
		if (!xlog_iclogs_empty(log))
			break;

		needed = 1;
		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
			log->l_covered_state = XLOG_STATE_COVER_DONE;
		else
			log->l_covered_state = XLOG_STATE_COVER_DONE2;
		break;
	default:
		needed = 1;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}
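/*
 * Note on the covering state machine used above: covering takes two dummy
 * transactions.  Roughly, the state walks COVER_NEED -> COVER_DONE ->
 * COVER_NEED2 -> COVER_DONE2 -> COVER_IDLE as each dummy is written and the
 * corresponding iclogs are cleaned, which is why xfs_log_need_covered()
 * reports "needed" for both of the NEED states.
 */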
/*
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&mp->m_ail->xa_lock);

	/*
	 * To make sure we always have a valid LSN for the log tail we keep
	 * track of the last LSN which was committed in log->l_last_sync_lsn,
	 * and use that when the AIL was empty.
	 */
	lip = xfs_ail_min(mp->m_ail);
	if (lip)
		tail_lsn = lip->li_lsn;
	else
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
	return tail_lsn;
}

xfs_lsn_t
xlog_assign_tail_lsn(
	struct xfs_mount	*mp)
{
	xfs_lsn_t		tail_lsn;

	spin_lock(&mp->m_ail->xa_lock);
	tail_lsn = xlog_assign_tail_lsn_locked(mp);
	spin_unlock(&mp->m_ail->xa_lock);

	return tail_lsn;
}

/*
 * Return the space in the log between the tail and the head.  The head
 * is passed in the cycle/bytes formal parms.  In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid.  In this case, just return 0 which means there is no space
 * in the log.  This works for all places where this function is called
 * with the reserve head.  Of course, if the write head were to ever
 * wrap the tail, we should blow up.  Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 *
 * This code also handles the case where the reservation head is behind
 * the tail.  The details of this case are described below, but the end
 * result is that we return the size of the log as the amount of space left.
 */
STATIC int
xlog_space_left(
	struct xlog	*log,
	atomic64_t	*head)
{
	int		free_bytes;
	int		tail_bytes;
	int		tail_cycle;
	int		head_cycle;
	int		head_bytes;

	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
	tail_bytes = BBTOB(tail_bytes);
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
	else if (tail_cycle + 1 < head_cycle)
		return 0;
	else if (tail_cycle < head_cycle) {
		ASSERT(tail_cycle == (head_cycle - 1));
		free_bytes = tail_bytes - head_bytes;
	} else {
		/*
		 * The reservation head is behind the tail.
		 * In this case we just want to return the size of the
		 * log as the amount of space left.
		 */
		xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
		xfs_alert(log->l_mp,
			  "  tail_cycle = %d, tail_bytes = %d",
			  tail_cycle, tail_bytes);
		xfs_alert(log->l_mp,
			  "  GH   cycle = %d, GH   bytes = %d",
			  head_cycle, head_bytes);
		ASSERT(0);
		free_bytes = log->l_logsize;
	}
	return free_bytes;
}
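/*
 * Worked example for xlog_space_left(): with a 64 MB log, a tail at
 * cycle 7 / 10 MB and a reserve head at cycle 8 / 2 MB, the head has wrapped
 * exactly once (tail_cycle == head_cycle - 1), so the free space is
 * tail_bytes - head_bytes = 8 MB.
 */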
/*
 * Log function which is called when an io completes.
 *
 * The log manager needs its own routine, in order to control what
 * happens with the buffer after the write completes.
 */
void
xlog_iodone(xfs_buf_t *bp)
{
	struct xlog_in_core	*iclog = bp->b_fspriv;
	struct xlog		*l = iclog->ic_log;
	int			aborted = 0;

	/*
	 * Race to shutdown the filesystem if we see an error or the iclog is
	 * in IOABORT state. The IOABORT state is only set in DEBUG mode to
	 * inject CRC errors into log recovery.
	 */
	if (XFS_TEST_ERROR(bp->b_error, l->l_mp, XFS_ERRTAG_IODONE_IOERR,
			   XFS_RANDOM_IODONE_IOERR) ||
	    iclog->ic_state & XLOG_STATE_IOABORT) {
		if (iclog->ic_state & XLOG_STATE_IOABORT)
			iclog->ic_state &= ~XLOG_STATE_IOABORT;

		xfs_buf_ioerror_alert(bp, __func__);
		xfs_buf_stale(bp);
		xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
		/*
		 * This flag will be propagated to the trans-committed
		 * callback routines to let them know that the log-commit
		 * didn't succeed.
		 */
		aborted = XFS_LI_ABORTED;
	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
		aborted = XFS_LI_ABORTED;
	}

	/* log I/O is always issued ASYNC */
	ASSERT(XFS_BUF_ISASYNC(bp));
	xlog_state_done_syncing(iclog, aborted);

	/*
	 * drop the buffer lock now that we are done. Nothing references
	 * the buffer after this, so an unmount waiting on this lock can now
	 * tear it down safely. As such, it is unsafe to reference the buffer
	 * (bp) after the unlock as we could race with it being freed.
	 */
	xfs_buf_unlock(bp);
}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */
STATIC void
xlog_get_iclog_buffer_size(
	struct xfs_mount	*mp,
	struct xlog		*log)
{
	int size;
	int xhdrs;

	if (mp->m_logbufs <= 0)
		log->l_iclog_bufs = XLOG_MAX_ICLOGS;
	else
		log->l_iclog_bufs = mp->m_logbufs;

	/*
	 * Buffer size passed in from mount system call.
	 */
	if (mp->m_logbsize > 0) {
		size = log->l_iclog_size = mp->m_logbsize;
		log->l_iclog_size_log = 0;
		while (size != 1) {
			log->l_iclog_size_log++;
			size >>= 1;
		}

		if (xfs_sb_version_haslogv2(&mp->m_sb)) {
			/* # headers = size / 32k
			 * one header holds cycles from 32k of data
			 */

			xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
			if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
				xhdrs++;
			log->l_iclog_hsize = xhdrs << BBSHIFT;
			log->l_iclog_heads = xhdrs;
		} else {
			ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
			log->l_iclog_hsize = BBSIZE;
			log->l_iclog_heads = 1;
		}
		goto done;
	}

	/* All machines use 32kB buffers by default. */
	log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
	log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;

	/* the default log size is 16k or 32k which is one header sector */
	log->l_iclog_hsize = BBSIZE;
	log->l_iclog_heads = 1;

done:
	/* are we being asked to make the sizes selected above visible? */
	if (mp->m_logbufs == 0)
		mp->m_logbufs = log->l_iclog_bufs;
	if (mp->m_logbsize == 0)
		mp->m_logbsize = log->l_iclog_size;
}	/* xlog_get_iclog_buffer_size */
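/*
 * Example of the sizing above: the defaults give 8 x 32 kB iclog buffers,
 * each with a single 512 byte header sector.  A v2 log mounted with
 * logbsize=256k needs 256k / 32k = 8 header sectors, so l_iclog_hsize
 * becomes 4096 bytes and l_iclog_heads becomes 8.
 */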
void
xfs_log_work_queue(
	struct xfs_mount	*mp)
{
	queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
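/*
 * Note: xfs_syncd_centisecs is in centiseconds (default 3000, i.e. 30
 * seconds); the multiplication by 10 above converts it to milliseconds for
 * msecs_to_jiffies().
 */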
/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	/* dgc: errors ignored - not fatal and nowhere to report them */
	if (xfs_log_need_covered(mp)) {
		/*
		 * Dump a transaction into the log that contains no real change.
		 * This is needed to stamp the current tail LSN into the log
		 * during the covering operation.
		 *
		 * We cannot use an inode here for this - that will push dirty
		 * state back up into the VFS and then periodic inode flushing
		 * will prevent log covering from making progress. Hence we
		 * synchronously log the superblock instead to ensure the
		 * superblock is immediately unpinned and can be written back.
		 */
		xfs_sync_sb(mp, true);
	} else
		xfs_log_force(mp, 0);

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);

	/* queue us up again */
	xfs_log_work_queue(mp);
}

/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur.  However,
 * some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{
	struct xlog		*log;
	xlog_rec_header_t	*head;
	xlog_in_core_t		**iclogp;
	xlog_in_core_t		*iclog, *prev_iclog = NULL;
	xfs_buf_t		*bp;
	int			i;
	int			error = -ENOMEM;
	uint			log2_size = 0;

	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp	   = mp;
	log->l_targ	   = log_target;
	log->l_logsize     = BBTOB(num_bblks);
	log->l_logBBstart  = blk_offset;
	log->l_logBBsize   = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);

	log->l_prev_block  = -1;
	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */

	xlog_grant_head_init(&log->l_reserve_head);
	xlog_grant_head_init(&log->l_write_head);

	error = -EFSCORRUPTED;
	if (xfs_sb_version_hassector(&mp->m_sb)) {
		log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				log2_size, BBSHIFT);
			goto out_free_log;
		}

		log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* for larger sector sizes, must have v2 or external log */
		if (log2_size && log->l_logBBstart > 0 &&
			    !xfs_sb_version_haslogv2(&mp->m_sb)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	xlog_get_iclog_buffer_size(mp, log);

	/*
	 * Use a NULL block for the extra log buffer used during splits so that
	 * it will trigger errors if we ever try to do IO on it without first
	 * having set it up properly.
	 */
	error = -ENOMEM;
	bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL,
			   BTOBB(log->l_iclog_size), 0);
	if (!bp)
		goto out_free_log;

	/*
	 * The iclogbuf buffer locks are held over IO but we are not going to
	 * do IO yet.  Hence unlock the buffer so that the log IO path can grab
	 * it when appropriate.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_unlock(bp);

	/* use high priority wq for log I/O completion */
	bp->b_ioend_wq = mp->m_log_workqueue;
	bp->b_iodone = xlog_iodone;
	log->l_xbuf = bp;

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	iclogp = &log->l_iclog;
	/*
	 * The amount of memory to allocate for the iclog structure is
	 * rather funky due to the way the structure is defined.  It is
	 * done this way so that we can use different sizes for machines
	 * with different amounts of memory.  See the definition of
	 * xlog_in_core_t in xfs_log_priv.h for details.
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i = 0; i < log->l_iclog_bufs; i++) {
		*iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
		if (!*iclogp)
			goto out_free_iclog;

		iclog = *iclogp;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		bp = xfs_buf_get_uncached(mp->m_logdev_targp,
					  BTOBB(log->l_iclog_size), 0);
		if (!bp)
			goto out_free_iclog;

		ASSERT(xfs_buf_islocked(bp));
		xfs_buf_unlock(bp);

		/* use high priority wq for log I/O completion */
		bp->b_ioend_wq = mp->m_log_workqueue;
		bp->b_iodone = xlog_iodone;
		iclog->ic_bp = bp;
		iclog->ic_data = bp->b_addr;
#ifdef DEBUG
		log->l_iclog_bak[i] = &iclog->ic_header;
#endif
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		head->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

		iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		spin_lock_init(&iclog->ic_callback_lock);
		iclog->ic_callback_tail = &(iclog->ic_callback);
		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	error = xlog_cil_init(log);
	if (error)
		goto out_free_iclog;
	return log;

out_free_iclog:
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		if (iclog->ic_bp)
			xfs_buf_free(iclog->ic_bp);
		kmem_free(iclog);
	}
	spinlock_destroy(&log->l_icloglock);
	xfs_buf_free(log->l_xbuf);
out_free_log:
	kmem_free(log);
out:
	return ERR_PTR(error);
}	/* xlog_alloc_log */
/*
 * Write out the commit record of a transaction associated with the given
 * ticket.  Return the lsn of the commit record.
 */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp)
{
	struct xfs_mount *mp = log->l_mp;
	int	error;
	struct xfs_log_iovec reg = {
		.i_addr = NULL,
		.i_len = 0,
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};

	ASSERT_ALWAYS(iclog);
	error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
					XLOG_COMMIT_TRANS);
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

/*
 * Push on the buffer cache code if we ever use more than 75% of the on-disk
 * log space.  This code pushes on the lsn which would supposedly free up
 * the 25% which we want to leave free.  We may need to adopt a policy which
 * pushes on an lsn which is further along in the log once we reach the high
 * water mark.  In this manner, we would be creating a low water mark.
 */
STATIC void
xlog_grant_push_ail(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn = 0;
	xfs_lsn_t	last_sync_lsn;
	int		free_blocks;
	int		free_bytes;
	int		threshold_block;
	int		threshold_cycle;
	int		free_threshold;

	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);

	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
	free_blocks = BTOBBT(free_bytes);

	/*
	 * Set the threshold for the minimum number of free blocks in the
	 * log to the maximum of what the caller needs, one quarter of the
	 * log, and 256 blocks.
	 */
	free_threshold = BTOBB(need_bytes);
	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
	free_threshold = MAX(free_threshold, 256);
	if (free_blocks >= free_threshold)
		return;

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
						&threshold_block);
	threshold_block += free_threshold;
	if (threshold_block >= log->l_logBBsize) {
		threshold_block -= log->l_logBBsize;
		threshold_cycle += 1;
	}
	threshold_lsn = xlog_assign_lsn(threshold_cycle,
					threshold_block);
	/*
	 * Don't pass in an lsn greater than the lsn of the last
	 * log record known to be on disk. Use a snapshot of the last sync lsn
	 * so that it doesn't change between the compare and the set.
	 */
	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
		threshold_lsn = last_sync_lsn;

	/*
	 * Get the transaction layer to kick the dirty buffers out to
	 * disk asynchronously. No point in trying to do this if
	 * the filesystem is shutting down.
	 */
	if (!XLOG_FORCED_SHUTDOWN(log))
		xfs_ail_push(log->l_ailp, threshold_lsn);
}
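/*
 * Example for the threshold above: on a 128 MB log (262144 basic blocks),
 * free_threshold is max(BTOBB(need_bytes), 65536, 256) blocks, so the AIL
 * is pushed whenever less than a quarter of the log (32 MB) remains free.
 */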
/*
 * Stamp cycle number in every block
 */
STATIC void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	__be32			cycle_lsn;
	char			*dp;

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size); i++) {
		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
			break;
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = iclog->ic_data;

		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++)
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
	}
}

/*
 * Calculate the checksum for a log buffer.
 *
 * This is a little more complicated than it should be because the various
 * headers and the actual data are non-contiguous.
 */
__le32
xlog_cksum(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{
	__uint32_t		crc;

	/* first generate the crc for the record header ... */
	crc = xfs_start_cksum((char *)rhead,
			      sizeof(struct xlog_rec_header),
			      offsetof(struct xlog_rec_header, h_crc));

	/* ... then for additional cycle data for v2 logs ... */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
		int		i;
		int		xheads;

		xheads = size / XLOG_HEADER_CYCLE_SIZE;
		if (size % XLOG_HEADER_CYCLE_SIZE)
			xheads++;

		for (i = 1; i < xheads; i++) {
			crc = crc32c(crc, &xhdr[i].hic_xheader,
				     sizeof(struct xlog_rec_ext_header));
		}
	}

	/* ... and finally for the payload */
	crc = crc32c(crc, dp, size);

	return xfs_end_cksum(crc);
}
/*
 * The bdstrat callback function for log bufs. This gives us a central
 * place to trap bufs in case we get hit by a log I/O error and need to
 * shutdown. Actually, in practice, even when we didn't get a log error,
 * we transition the iclogs to IOERROR state *after* flushing all existing
 * iclogs to disk. This is because we don't want any more new transactions
 * to be started or completed afterwards.
 *
 * We lock the iclogbufs here so that we can serialise against IO completion
 * during unmount. We might be processing a shutdown triggered during unmount,
 * and that can occur asynchronously to the unmount thread, and hence we need
 * to ensure that completes before tearing down the iclogbufs. Hence we need
 * to hold the buffer lock across the log IO to achieve that.
 */
STATIC int
xlog_bdstrat(
	struct xfs_buf		*bp)
{
	struct xlog_in_core	*iclog = bp->b_fspriv;

	xfs_buf_lock(bp);
	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		xfs_buf_ioerror(bp, -EIO);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
		/*
		 * It would seem logical to return EIO here, but we rely on
		 * the log state machine to propagate I/O errors instead of
		 * doing it here. Similarly, IO completion will unlock the
		 * buffer, so we don't do it here.
		 */
		return 0;
	}

	xfs_buf_submit(bp);
	return 0;
}

/*
 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
 * fashion.  Previously, we should have moved the current iclog
 * ptr in the log to point to the next available iclog.  This allows further
 * write to continue while this code syncs out an iclog ready to go.
 * Before an in-core log can be written out, the data section must be scanned
 * to save away the 1st word of each BBSIZE block into the header.  We replace
 * it with the current cycle count.  Each BBSIZE block is tagged with the
 * cycle count because there is an implicit assumption that drives will
 * guarantee that entire 512 byte blocks get written at once.  In other words,
 * we can't have part of a 512 byte block written and part not written.  By
 * tagging each block, we will know which blocks are valid when recovering
 * after an unclean shutdown.
 *
 * This routine is single threaded on the iclog.  No other thread can be in
 * this routine with the same iclog.  Changing contents of iclog can therefore
 * be done without grabbing the state machine lock.  Updating the global log
 * will require grabbing the lock though.
 *
 * The entire log manager uses a logical block numbering scheme.  Only
 * log_sync (and then only bwrite()) know about the fact that the log may
 * not start with block zero on a given device.  The log block start offset
 * is added immediately before calling bwrite().
 */
STATIC int
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	xfs_buf_t	*bp;
	int		i;
	uint		count;		/* byte count of bwrite */
	uint		count_init;	/* initial count before roundup */
	int		roundoff;	/* roundoff to BB or stripe */
	int		split = 0;	/* split write into two regions */
	int		error;
	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
	int		size;

	XFS_STATS_INC(log->l_mp, xs_log_writes);
	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);

	/* Add for LR header */
	count_init = log->l_iclog_hsize + iclog->ic_offset;

	/* Round out the log write size */
	if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
		/* we have a v2 stripe unit to use */
		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
	} else {
		count = BBTOB(BTOBB(count_init));
	}
	roundoff = count - count_init;
	ASSERT(roundoff >= 0);
	ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
		roundoff < log->l_mp->m_sb.sb_logsunit)
		||
		(log->l_mp->m_sb.sb_logsunit <= 1 &&
		 roundoff < BBTOB(1)));

	/* move grant heads by roundoff in sync */
	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);

	/* put cycle number in every block */
	xlog_pack_data(log, iclog, roundoff);

	/* real byte length */
	size = iclog->ic_offset;
	if (v2)
		size += roundoff;
	iclog->ic_header.h_len = cpu_to_be32(size);

	bp = iclog->ic_bp;
	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));

	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));

	/* Do we need to split this write into 2 parts? */
	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
		char		*dptr;

		split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
		count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
		iclog->ic_bwritecnt = 2;

		/*
		 * Bump the cycle numbers at the start of each block in the
		 * part of the iclog that ends up in the buffer that gets
		 * written to the start of the log.
		 *
		 * Watch out for the header magic number case, though.
		 */
		dptr = (char *)&iclog->ic_header + count;
		for (i = 0; i < split; i += BBSIZE) {
			__uint32_t cycle = be32_to_cpu(*(__be32 *)dptr);
			if (++cycle == XLOG_HEADER_MAGIC_NUM)
				cycle++;
			*(__be32 *)dptr = cpu_to_be32(cycle);

			dptr += BBSIZE;
		}
	} else {
		iclog->ic_bwritecnt = 1;
	}

	/* calculate the checksum */
	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
					    iclog->ic_datap, size);
#ifdef DEBUG
	/*
	 * Intentionally corrupt the log record CRC based on the error
	 * injection frequency, if defined. This facilitates testing log
	 * recovery in the event of torn writes. Hence, set the IOABORT state
	 * to abort the log write on I/O completion and shutdown the fs. The
	 * subsequent mount detects the bad CRC and attempts to recover.
	 */
	if (log->l_badcrc_factor &&
	    (prandom_u32() % log->l_badcrc_factor == 0)) {
		iclog->ic_header.h_crc &= 0xAAAAAAAA;
		iclog->ic_state |= XLOG_STATE_IOABORT;
		xfs_warn(log->l_mp,
	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
			 be64_to_cpu(iclog->ic_header.h_lsn));
	}
#endif
	bp->b_io_length = BTOBB(count);
	bp->b_fspriv = iclog;
	XFS_BUF_ZEROFLAGS(bp);
	XFS_BUF_ASYNC(bp);
	bp->b_flags |= XBF_SYNCIO;

	if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
		bp->b_flags |= XBF_FUA;

		/*
		 * Flush the data device before flushing the log to make
		 * sure all meta data written back from the AIL actually made
		 * it to disk before stamping the new log tail LSN into the
		 * log buffer.  For an external log we need to issue the
		 * flush explicitly, and unfortunately synchronously here;
		 * for an internal log we can simply use the block layer
		 * state machine for preflushes.
		 */
		if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
			xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
		else
			bp->b_flags |= XBF_FLUSH;
	}

	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

	xlog_verify_iclog(log, iclog, count, true);

	/* account for log which doesn't start at block #0 */
	XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
	/*
	 * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
	 * is shutting down.
	 */
	XFS_BUF_WRITE(bp);

	error = xlog_bdstrat(bp);
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_sync");
		return error;
	}
	if (split) {
		bp = iclog->ic_log->l_xbuf;
		XFS_BUF_SET_ADDR(bp, 0);	     /* logical 0 */
		xfs_buf_associate_memory(bp,
				(char *)&iclog->ic_header + count, split);
		bp->b_fspriv = iclog;
		XFS_BUF_ZEROFLAGS(bp);
		XFS_BUF_ASYNC(bp);
		bp->b_flags |= XBF_SYNCIO;
		if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
			bp->b_flags |= XBF_FUA;

		ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
		ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

		/* account for internal log which doesn't start at block #0 */
		XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
		XFS_BUF_WRITE(bp);
		error = xlog_bdstrat(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp, "xlog_sync (split)");
			return error;
		}
	}
	return 0;
}	/* xlog_sync */
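/*
 * Note on the split case handled above: when an iclog write would run past
 * the physical end of the log, it is issued as two buffer writes; the second
 * part, which lands at the start of the log, goes through the spare l_xbuf
 * buffer set up in xlog_alloc_log().
 */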
1961 */ 1962 xfs_buf_lock(log->l_xbuf); 1963 xfs_buf_unlock(log->l_xbuf); 1964 xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); 1965 xfs_buf_free(log->l_xbuf); 1966 1967 iclog = log->l_iclog; 1968 for (i = 0; i < log->l_iclog_bufs; i++) { 1969 xfs_buf_free(iclog->ic_bp); 1970 next_iclog = iclog->ic_next; 1971 kmem_free(iclog); 1972 iclog = next_iclog; 1973 } 1974 spinlock_destroy(&log->l_icloglock); 1975 1976 log->l_mp->m_log = NULL; 1977 kmem_free(log); 1978 } /* xlog_dealloc_log */ 1979 1980 /* 1981 * Update counters atomically now that memcpy is done. 1982 */ 1983 /* ARGSUSED */ 1984 static inline void 1985 xlog_state_finish_copy( 1986 struct xlog *log, 1987 struct xlog_in_core *iclog, 1988 int record_cnt, 1989 int copy_bytes) 1990 { 1991 spin_lock(&log->l_icloglock); 1992 1993 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); 1994 iclog->ic_offset += copy_bytes; 1995 1996 spin_unlock(&log->l_icloglock); 1997 } /* xlog_state_finish_copy */ 1998 1999 2000 2001 2002 /* 2003 * print out info relating to regions written which consume 2004 * the reservation 2005 */ 2006 void 2007 xlog_print_tic_res( 2008 struct xfs_mount *mp, 2009 struct xlog_ticket *ticket) 2010 { 2011 uint i; 2012 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); 2013 2014 /* match with XLOG_REG_TYPE_* in xfs_log.h */ 2015 static char *res_type_str[XLOG_REG_TYPE_MAX] = { 2016 "bformat", 2017 "bchunk", 2018 "efi_format", 2019 "efd_format", 2020 "iformat", 2021 "icore", 2022 "iext", 2023 "ibroot", 2024 "ilocal", 2025 "iattr_ext", 2026 "iattr_broot", 2027 "iattr_local", 2028 "qformat", 2029 "dquot", 2030 "quotaoff", 2031 "LR header", 2032 "unmount", 2033 "commit", 2034 "trans header" 2035 }; 2036 static char *trans_type_str[XFS_TRANS_TYPE_MAX] = { 2037 "SETATTR_NOT_SIZE", 2038 "SETATTR_SIZE", 2039 "INACTIVE", 2040 "CREATE", 2041 "CREATE_TRUNC", 2042 "TRUNCATE_FILE", 2043 "REMOVE", 2044 "LINK", 2045 "RENAME", 2046 "MKDIR", 2047 "RMDIR", 2048 "SYMLINK", 2049 "SET_DMATTRS", 2050 "GROWFS", 2051 "STRAT_WRITE", 2052 "DIOSTRAT", 2053 "WRITE_SYNC", 2054 "WRITEID", 2055 "ADDAFORK", 2056 "ATTRINVAL", 2057 "ATRUNCATE", 2058 "ATTR_SET", 2059 "ATTR_RM", 2060 "ATTR_FLAG", 2061 "CLEAR_AGI_BUCKET", 2062 "QM_SBCHANGE", 2063 "DUMMY1", 2064 "DUMMY2", 2065 "QM_QUOTAOFF", 2066 "QM_DQALLOC", 2067 "QM_SETQLIM", 2068 "QM_DQCLUSTER", 2069 "QM_QINOCREATE", 2070 "QM_QUOTAOFF_END", 2071 "FSYNC_TS", 2072 "GROWFSRT_ALLOC", 2073 "GROWFSRT_ZERO", 2074 "GROWFSRT_FREE", 2075 "SWAPEXT", 2076 "CHECKPOINT", 2077 "ICREATE", 2078 "CREATE_TMPFILE" 2079 }; 2080 2081 xfs_warn(mp, "xlog_write: reservation summary:"); 2082 xfs_warn(mp, " trans type = %s (%u)", 2083 ((ticket->t_trans_type <= 0 || 2084 ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ? 
2085 "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]), 2086 ticket->t_trans_type); 2087 xfs_warn(mp, " unit res = %d bytes", 2088 ticket->t_unit_res); 2089 xfs_warn(mp, " current res = %d bytes", 2090 ticket->t_curr_res); 2091 xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)", 2092 ticket->t_res_arr_sum, ticket->t_res_o_flow); 2093 xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)", 2094 ticket->t_res_num_ophdrs, ophdr_spc); 2095 xfs_warn(mp, " ophdr + reg = %u bytes", 2096 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc); 2097 xfs_warn(mp, " num regions = %u", 2098 ticket->t_res_num); 2099 2100 for (i = 0; i < ticket->t_res_num; i++) { 2101 uint r_type = ticket->t_res_arr[i].r_type; 2102 xfs_warn(mp, "region[%u]: %s - %u bytes", i, 2103 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 2104 "bad-rtype" : res_type_str[r_type-1]), 2105 ticket->t_res_arr[i].r_len); 2106 } 2107 2108 xfs_alert_tag(mp, XFS_PTAG_LOGRES, 2109 "xlog_write: reservation ran out. Need to up reservation"); 2110 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); 2111 } 2112 2113 /* 2114 * Calculate the potential space needed by the log vector. Each region gets 2115 * its own xlog_op_header_t and may need to be double word aligned. 2116 */ 2117 static int 2118 xlog_write_calc_vec_length( 2119 struct xlog_ticket *ticket, 2120 struct xfs_log_vec *log_vector) 2121 { 2122 struct xfs_log_vec *lv; 2123 int headers = 0; 2124 int len = 0; 2125 int i; 2126 2127 /* acct for start rec of xact */ 2128 if (ticket->t_flags & XLOG_TIC_INITED) 2129 headers++; 2130 2131 for (lv = log_vector; lv; lv = lv->lv_next) { 2132 /* we don't write ordered log vectors */ 2133 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) 2134 continue; 2135 2136 headers += lv->lv_niovecs; 2137 2138 for (i = 0; i < lv->lv_niovecs; i++) { 2139 struct xfs_log_iovec *vecp = &lv->lv_iovecp[i]; 2140 2141 len += vecp->i_len; 2142 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type); 2143 } 2144 } 2145 2146 ticket->t_res_num_ophdrs += headers; 2147 len += headers * sizeof(struct xlog_op_header); 2148 2149 return len; 2150 } 2151 2152 /* 2153 * If first write for transaction, insert start record We can't be trying to 2154 * commit if we are inited. We can't have any "partial_copy" if we are inited. 2155 */ 2156 static int 2157 xlog_write_start_rec( 2158 struct xlog_op_header *ophdr, 2159 struct xlog_ticket *ticket) 2160 { 2161 if (!(ticket->t_flags & XLOG_TIC_INITED)) 2162 return 0; 2163 2164 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2165 ophdr->oh_clientid = ticket->t_clientid; 2166 ophdr->oh_len = 0; 2167 ophdr->oh_flags = XLOG_START_TRANS; 2168 ophdr->oh_res2 = 0; 2169 2170 ticket->t_flags &= ~XLOG_TIC_INITED; 2171 2172 return sizeof(struct xlog_op_header); 2173 } 2174 2175 static xlog_op_header_t * 2176 xlog_write_setup_ophdr( 2177 struct xlog *log, 2178 struct xlog_op_header *ophdr, 2179 struct xlog_ticket *ticket, 2180 uint flags) 2181 { 2182 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2183 ophdr->oh_clientid = ticket->t_clientid; 2184 ophdr->oh_res2 = 0; 2185 2186 /* are we copying a commit or unmount record? */ 2187 ophdr->oh_flags = flags; 2188 2189 /* 2190 * We've seen logs corrupted with bad transaction client ids. This 2191 * makes sure that XFS doesn't generate them on. Turn this into an EIO 2192 * and shut down the filesystem. 
2193 */ 2194 switch (ophdr->oh_clientid) { 2195 case XFS_TRANSACTION: 2196 case XFS_VOLUME: 2197 case XFS_LOG: 2198 break; 2199 default: 2200 xfs_warn(log->l_mp, 2201 "Bad XFS transaction clientid 0x%x in ticket 0x%p", 2202 ophdr->oh_clientid, ticket); 2203 return NULL; 2204 } 2205 2206 return ophdr; 2207 } 2208 2209 /* 2210 * Set up the parameters of the region copy into the log. This has 2211 * to handle region write split across multiple log buffers - this 2212 * state is kept external to this function so that this code can 2213 * be written in an obvious, self documenting manner. 2214 */ 2215 static int 2216 xlog_write_setup_copy( 2217 struct xlog_ticket *ticket, 2218 struct xlog_op_header *ophdr, 2219 int space_available, 2220 int space_required, 2221 int *copy_off, 2222 int *copy_len, 2223 int *last_was_partial_copy, 2224 int *bytes_consumed) 2225 { 2226 int still_to_copy; 2227 2228 still_to_copy = space_required - *bytes_consumed; 2229 *copy_off = *bytes_consumed; 2230 2231 if (still_to_copy <= space_available) { 2232 /* write of region completes here */ 2233 *copy_len = still_to_copy; 2234 ophdr->oh_len = cpu_to_be32(*copy_len); 2235 if (*last_was_partial_copy) 2236 ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); 2237 *last_was_partial_copy = 0; 2238 *bytes_consumed = 0; 2239 return 0; 2240 } 2241 2242 /* partial write of region, needs extra log op header reservation */ 2243 *copy_len = space_available; 2244 ophdr->oh_len = cpu_to_be32(*copy_len); 2245 ophdr->oh_flags |= XLOG_CONTINUE_TRANS; 2246 if (*last_was_partial_copy) 2247 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS; 2248 *bytes_consumed += *copy_len; 2249 (*last_was_partial_copy)++; 2250 2251 /* account for new log op header */ 2252 ticket->t_curr_res -= sizeof(struct xlog_op_header); 2253 ticket->t_res_num_ophdrs++; 2254 2255 return sizeof(struct xlog_op_header); 2256 } 2257 2258 static int 2259 xlog_write_copy_finish( 2260 struct xlog *log, 2261 struct xlog_in_core *iclog, 2262 uint flags, 2263 int *record_cnt, 2264 int *data_cnt, 2265 int *partial_copy, 2266 int *partial_copy_len, 2267 int log_offset, 2268 struct xlog_in_core **commit_iclog) 2269 { 2270 if (*partial_copy) { 2271 /* 2272 * This iclog has already been marked WANT_SYNC by 2273 * xlog_state_get_iclog_space. 2274 */ 2275 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2276 *record_cnt = 0; 2277 *data_cnt = 0; 2278 return xlog_state_release_iclog(log, iclog); 2279 } 2280 2281 *partial_copy = 0; 2282 *partial_copy_len = 0; 2283 2284 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { 2285 /* no more space in this iclog - push it. */ 2286 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2287 *record_cnt = 0; 2288 *data_cnt = 0; 2289 2290 spin_lock(&log->l_icloglock); 2291 xlog_state_want_sync(log, iclog); 2292 spin_unlock(&log->l_icloglock); 2293 2294 if (!commit_iclog) 2295 return xlog_state_release_iclog(log, iclog); 2296 ASSERT(flags & XLOG_COMMIT_TRANS); 2297 *commit_iclog = iclog; 2298 } 2299 2300 return 0; 2301 } 2302 2303 /* 2304 * Write some region out to in-core log 2305 * 2306 * This will be called when writing externally provided regions or when 2307 * writing out a commit record for a given transaction. 2308 * 2309 * General algorithm: 2310 * 1. Find total length of this write. This may include adding to the 2311 * lengths passed in. 2312 * 2. Check whether we violate the tickets reservation. 2313 * 3. While writing to this iclog 2314 * A. Reserve as much space in this iclog as can get 2315 * B. 
If this is first write, save away start lsn 2316 * C. While writing this region: 2317 * 1. If first write of transaction, write start record 2318 * 2. Write log operation header (header per region) 2319 * 3. Find out if we can fit entire region into this iclog 2320 * 4. Potentially, verify destination memcpy ptr 2321 * 5. Memcpy (partial) region 2322 * 6. If partial copy, release iclog; otherwise, continue 2323 * copying more regions into current iclog 2324 * 4. Mark want sync bit (in simulation mode) 2325 * 5. Release iclog for potential flush to on-disk log. 2326 * 2327 * ERRORS: 2328 * 1. Panic if reservation is overrun. This should never happen since 2329 * reservation amounts are generated internal to the filesystem. 2330 * NOTES: 2331 * 1. Tickets are single threaded data structures. 2332 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the 2333 * syncing routine. When a single log_write region needs to span 2334 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set 2335 * on all log operation writes which don't contain the end of the 2336 * region. The XLOG_END_TRANS bit is used for the in-core log 2337 * operation which contains the end of the continued log_write region. 2338 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, 2339 * we don't really know exactly how much space will be used. As a result, 2340 * we don't update ic_offset until the end when we know exactly how many 2341 * bytes have been written out. 2342 */ 2343 int 2344 xlog_write( 2345 struct xlog *log, 2346 struct xfs_log_vec *log_vector, 2347 struct xlog_ticket *ticket, 2348 xfs_lsn_t *start_lsn, 2349 struct xlog_in_core **commit_iclog, 2350 uint flags) 2351 { 2352 struct xlog_in_core *iclog = NULL; 2353 struct xfs_log_iovec *vecp; 2354 struct xfs_log_vec *lv; 2355 int len; 2356 int index; 2357 int partial_copy = 0; 2358 int partial_copy_len = 0; 2359 int contwr = 0; 2360 int record_cnt = 0; 2361 int data_cnt = 0; 2362 int error; 2363 2364 *start_lsn = 0; 2365 2366 len = xlog_write_calc_vec_length(ticket, log_vector); 2367 2368 /* 2369 * Region headers and bytes are already accounted for. 2370 * We only need to take into account start records and 2371 * split regions in this function. 2372 */ 2373 if (ticket->t_flags & XLOG_TIC_INITED) 2374 ticket->t_curr_res -= sizeof(xlog_op_header_t); 2375 2376 /* 2377 * Commit record headers need to be accounted for. These 2378 * come in as separate writes so are easy to detect. 2379 */ 2380 if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS)) 2381 ticket->t_curr_res -= sizeof(xlog_op_header_t); 2382 2383 if (ticket->t_curr_res < 0) 2384 xlog_print_tic_res(log->l_mp, ticket); 2385 2386 index = 0; 2387 lv = log_vector; 2388 vecp = lv->lv_iovecp; 2389 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2390 void *ptr; 2391 int log_offset; 2392 2393 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, 2394 &contwr, &log_offset); 2395 if (error) 2396 return error; 2397 2398 ASSERT(log_offset <= iclog->ic_size - 1); 2399 ptr = iclog->ic_datap + log_offset; 2400 2401 /* start_lsn is the first lsn written to. That's all we need. */ 2402 if (!*start_lsn) 2403 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2404 2405 /* 2406 * This loop writes out as many regions as can fit in the amount 2407 * of space which was allocated by xlog_state_get_iclog_space(). 
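 *
 * Illustrative example of the split-region case (the numbers are made up,
 * not taken from any particular workload): suppose a 12k region is being
 * written and only 8k of payload space remains in the current iclog.
 * xlog_write_setup_copy() then emits an 8k op flagged XLOG_CONTINUE_TRANS,
 * xlog_write_copy_finish() releases the iclog for syncing, and the
 * remaining 4k goes into the next iclog under a new op header flagged
 * XLOG_WAS_CONT_TRANS|XLOG_END_TRANS. Each such split also charges one
 * extra xlog_op_header_t to the ticket.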
2408 */ 2409 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2410 struct xfs_log_iovec *reg; 2411 struct xlog_op_header *ophdr; 2412 int start_rec_copy; 2413 int copy_len; 2414 int copy_off; 2415 bool ordered = false; 2416 2417 /* ordered log vectors have no regions to write */ 2418 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { 2419 ASSERT(lv->lv_niovecs == 0); 2420 ordered = true; 2421 goto next_lv; 2422 } 2423 2424 reg = &vecp[index]; 2425 ASSERT(reg->i_len % sizeof(__int32_t) == 0); 2426 ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0); 2427 2428 start_rec_copy = xlog_write_start_rec(ptr, ticket); 2429 if (start_rec_copy) { 2430 record_cnt++; 2431 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2432 start_rec_copy); 2433 } 2434 2435 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); 2436 if (!ophdr) 2437 return -EIO; 2438 2439 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2440 sizeof(struct xlog_op_header)); 2441 2442 len += xlog_write_setup_copy(ticket, ophdr, 2443 iclog->ic_size-log_offset, 2444 reg->i_len, 2445 ©_off, ©_len, 2446 &partial_copy, 2447 &partial_copy_len); 2448 xlog_verify_dest_ptr(log, ptr); 2449 2450 /* 2451 * Copy region. 2452 * 2453 * Unmount records just log an opheader, so can have 2454 * empty payloads with no data region to copy. Hence we 2455 * only copy the payload if the vector says it has data 2456 * to copy. 2457 */ 2458 ASSERT(copy_len >= 0); 2459 if (copy_len > 0) { 2460 memcpy(ptr, reg->i_addr + copy_off, copy_len); 2461 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2462 copy_len); 2463 } 2464 copy_len += start_rec_copy + sizeof(xlog_op_header_t); 2465 record_cnt++; 2466 data_cnt += contwr ? copy_len : 0; 2467 2468 error = xlog_write_copy_finish(log, iclog, flags, 2469 &record_cnt, &data_cnt, 2470 &partial_copy, 2471 &partial_copy_len, 2472 log_offset, 2473 commit_iclog); 2474 if (error) 2475 return error; 2476 2477 /* 2478 * if we had a partial copy, we need to get more iclog 2479 * space but we don't want to increment the region 2480 * index because there is still more is this region to 2481 * write. 2482 * 2483 * If we completed writing this region, and we flushed 2484 * the iclog (indicated by resetting of the record 2485 * count), then we also need to get more log space. If 2486 * this was the last record, though, we are done and 2487 * can just return. 2488 */ 2489 if (partial_copy) 2490 break; 2491 2492 if (++index == lv->lv_niovecs) { 2493 next_lv: 2494 lv = lv->lv_next; 2495 index = 0; 2496 if (lv) 2497 vecp = lv->lv_iovecp; 2498 } 2499 if (record_cnt == 0 && ordered == false) { 2500 if (!lv) 2501 return 0; 2502 break; 2503 } 2504 } 2505 } 2506 2507 ASSERT(len == 0); 2508 2509 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); 2510 if (!commit_iclog) 2511 return xlog_state_release_iclog(log, iclog); 2512 2513 ASSERT(flags & XLOG_COMMIT_TRANS); 2514 *commit_iclog = iclog; 2515 return 0; 2516 } 2517 2518 2519 /***************************************************************************** 2520 * 2521 * State Machine functions 2522 * 2523 ***************************************************************************** 2524 */ 2525 2526 /* Clean iclogs starting from the head. This ordering must be 2527 * maintained, so an iclog doesn't become ACTIVE beyond one that 2528 * is SYNCING. This is also required to maintain the notion that we use 2529 * a ordered wait queue to hold off would be writers to the log when every 2530 * iclog is trying to sync to disk. 
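 *
 * A minimal sketch of that ordering (hypothetical four-buffer ring,
 * scanned from the head): given states [DIRTY, SYNCING, DIRTY, ACTIVE],
 * only the first iclog is flipped back to ACTIVE; the walk stops at the
 * SYNCING buffer, so the later DIRTY iclog is not reactivated ahead of it.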
2531 * 2532 * State Change: DIRTY -> ACTIVE 2533 */ 2534 STATIC void 2535 xlog_state_clean_log( 2536 struct xlog *log) 2537 { 2538 xlog_in_core_t *iclog; 2539 int changed = 0; 2540 2541 iclog = log->l_iclog; 2542 do { 2543 if (iclog->ic_state == XLOG_STATE_DIRTY) { 2544 iclog->ic_state = XLOG_STATE_ACTIVE; 2545 iclog->ic_offset = 0; 2546 ASSERT(iclog->ic_callback == NULL); 2547 /* 2548 * If the number of ops in this iclog indicate it just 2549 * contains the dummy transaction, we can 2550 * change state into IDLE (the second time around). 2551 * Otherwise we should change the state into 2552 * NEED a dummy. 2553 * We don't need to cover the dummy. 2554 */ 2555 if (!changed && 2556 (be32_to_cpu(iclog->ic_header.h_num_logops) == 2557 XLOG_COVER_OPS)) { 2558 changed = 1; 2559 } else { 2560 /* 2561 * We have two dirty iclogs so start over 2562 * This could also be num of ops indicates 2563 * this is not the dummy going out. 2564 */ 2565 changed = 2; 2566 } 2567 iclog->ic_header.h_num_logops = 0; 2568 memset(iclog->ic_header.h_cycle_data, 0, 2569 sizeof(iclog->ic_header.h_cycle_data)); 2570 iclog->ic_header.h_lsn = 0; 2571 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) 2572 /* do nothing */; 2573 else 2574 break; /* stop cleaning */ 2575 iclog = iclog->ic_next; 2576 } while (iclog != log->l_iclog); 2577 2578 /* log is locked when we are called */ 2579 /* 2580 * Change state for the dummy log recording. 2581 * We usually go to NEED. But we go to NEED2 if the changed indicates 2582 * we are done writing the dummy record. 2583 * If we are done with the second dummy recored (DONE2), then 2584 * we go to IDLE. 2585 */ 2586 if (changed) { 2587 switch (log->l_covered_state) { 2588 case XLOG_STATE_COVER_IDLE: 2589 case XLOG_STATE_COVER_NEED: 2590 case XLOG_STATE_COVER_NEED2: 2591 log->l_covered_state = XLOG_STATE_COVER_NEED; 2592 break; 2593 2594 case XLOG_STATE_COVER_DONE: 2595 if (changed == 1) 2596 log->l_covered_state = XLOG_STATE_COVER_NEED2; 2597 else 2598 log->l_covered_state = XLOG_STATE_COVER_NEED; 2599 break; 2600 2601 case XLOG_STATE_COVER_DONE2: 2602 if (changed == 1) 2603 log->l_covered_state = XLOG_STATE_COVER_IDLE; 2604 else 2605 log->l_covered_state = XLOG_STATE_COVER_NEED; 2606 break; 2607 2608 default: 2609 ASSERT(0); 2610 } 2611 } 2612 } /* xlog_state_clean_log */ 2613 2614 STATIC xfs_lsn_t 2615 xlog_get_lowest_lsn( 2616 struct xlog *log) 2617 { 2618 xlog_in_core_t *lsn_log; 2619 xfs_lsn_t lowest_lsn, lsn; 2620 2621 lsn_log = log->l_iclog; 2622 lowest_lsn = 0; 2623 do { 2624 if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { 2625 lsn = be64_to_cpu(lsn_log->ic_header.h_lsn); 2626 if ((lsn && !lowest_lsn) || 2627 (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { 2628 lowest_lsn = lsn; 2629 } 2630 } 2631 lsn_log = lsn_log->ic_next; 2632 } while (lsn_log != log->l_iclog); 2633 return lowest_lsn; 2634 } 2635 2636 2637 STATIC void 2638 xlog_state_do_callback( 2639 struct xlog *log, 2640 int aborted, 2641 struct xlog_in_core *ciclog) 2642 { 2643 xlog_in_core_t *iclog; 2644 xlog_in_core_t *first_iclog; /* used to know when we've 2645 * processed all iclogs once */ 2646 xfs_log_callback_t *cb, *cb_next; 2647 int flushcnt = 0; 2648 xfs_lsn_t lowest_lsn; 2649 int ioerrors; /* counter: iclogs with errors */ 2650 int loopdidcallbacks; /* flag: inner loop did callbacks*/ 2651 int funcdidcallbacks; /* flag: function did callbacks */ 2652 int repeats; /* for issuing console warnings if 2653 * looping too many times */ 2654 int wake = 0; 2655 2656 spin_lock(&log->l_icloglock); 2657 first_iclog 
= iclog = log->l_iclog; 2658 ioerrors = 0; 2659 funcdidcallbacks = 0; 2660 repeats = 0; 2661 2662 do { 2663 /* 2664 * Scan all iclogs starting with the one pointed to by the 2665 * log. Reset this starting point each time the log is 2666 * unlocked (during callbacks). 2667 * 2668 * Keep looping through iclogs until one full pass is made 2669 * without running any callbacks. 2670 */ 2671 first_iclog = log->l_iclog; 2672 iclog = log->l_iclog; 2673 loopdidcallbacks = 0; 2674 repeats++; 2675 2676 do { 2677 2678 /* skip all iclogs in the ACTIVE & DIRTY states */ 2679 if (iclog->ic_state & 2680 (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) { 2681 iclog = iclog->ic_next; 2682 continue; 2683 } 2684 2685 /* 2686 * Between marking a filesystem SHUTDOWN and stopping 2687 * the log, we do flush all iclogs to disk (if there 2688 * wasn't a log I/O error). So, we do want things to 2689 * go smoothly in case of just a SHUTDOWN w/o a 2690 * LOG_IO_ERROR. 2691 */ 2692 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { 2693 /* 2694 * Can only perform callbacks in order. Since 2695 * this iclog is not in the DONE_SYNC/ 2696 * DO_CALLBACK state, we skip the rest and 2697 * just try to clean up. If we set our iclog 2698 * to DO_CALLBACK, we will not process it when 2699 * we retry since a previous iclog is in the 2700 * CALLBACK and the state cannot change since 2701 * we are holding the l_icloglock. 2702 */ 2703 if (!(iclog->ic_state & 2704 (XLOG_STATE_DONE_SYNC | 2705 XLOG_STATE_DO_CALLBACK))) { 2706 if (ciclog && (ciclog->ic_state == 2707 XLOG_STATE_DONE_SYNC)) { 2708 ciclog->ic_state = XLOG_STATE_DO_CALLBACK; 2709 } 2710 break; 2711 } 2712 /* 2713 * We now have an iclog that is in either the 2714 * DO_CALLBACK or DONE_SYNC states. The other 2715 * states (WANT_SYNC, SYNCING, or CALLBACK were 2716 * caught by the above if and are going to 2717 * clean (i.e. we aren't doing their callbacks) 2718 * see the above if. 2719 */ 2720 2721 /* 2722 * We will do one more check here to see if we 2723 * have chased our tail around. 2724 */ 2725 2726 lowest_lsn = xlog_get_lowest_lsn(log); 2727 if (lowest_lsn && 2728 XFS_LSN_CMP(lowest_lsn, 2729 be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { 2730 iclog = iclog->ic_next; 2731 continue; /* Leave this iclog for 2732 * another thread */ 2733 } 2734 2735 iclog->ic_state = XLOG_STATE_CALLBACK; 2736 2737 2738 /* 2739 * Completion of a iclog IO does not imply that 2740 * a transaction has completed, as transactions 2741 * can be large enough to span many iclogs. We 2742 * cannot change the tail of the log half way 2743 * through a transaction as this may be the only 2744 * transaction in the log and moving th etail to 2745 * point to the middle of it will prevent 2746 * recovery from finding the start of the 2747 * transaction. Hence we should only update the 2748 * last_sync_lsn if this iclog contains 2749 * transaction completion callbacks on it. 2750 * 2751 * We have to do this before we drop the 2752 * icloglock to ensure we are the only one that 2753 * can update it. 2754 */ 2755 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), 2756 be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); 2757 if (iclog->ic_callback) 2758 atomic64_set(&log->l_last_sync_lsn, 2759 be64_to_cpu(iclog->ic_header.h_lsn)); 2760 2761 } else 2762 ioerrors++; 2763 2764 spin_unlock(&log->l_icloglock); 2765 2766 /* 2767 * Keep processing entries in the callback list until 2768 * we come around and it is empty. 
We need to 2769 * atomically see that the list is empty and change the 2770 * state to DIRTY so that we don't miss any more 2771 * callbacks being added. 2772 */ 2773 spin_lock(&iclog->ic_callback_lock); 2774 cb = iclog->ic_callback; 2775 while (cb) { 2776 iclog->ic_callback_tail = &(iclog->ic_callback); 2777 iclog->ic_callback = NULL; 2778 spin_unlock(&iclog->ic_callback_lock); 2779 2780 /* perform callbacks in the order given */ 2781 for (; cb; cb = cb_next) { 2782 cb_next = cb->cb_next; 2783 cb->cb_func(cb->cb_arg, aborted); 2784 } 2785 spin_lock(&iclog->ic_callback_lock); 2786 cb = iclog->ic_callback; 2787 } 2788 2789 loopdidcallbacks++; 2790 funcdidcallbacks++; 2791 2792 spin_lock(&log->l_icloglock); 2793 ASSERT(iclog->ic_callback == NULL); 2794 spin_unlock(&iclog->ic_callback_lock); 2795 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) 2796 iclog->ic_state = XLOG_STATE_DIRTY; 2797 2798 /* 2799 * Transition from DIRTY to ACTIVE if applicable. 2800 * NOP if STATE_IOERROR. 2801 */ 2802 xlog_state_clean_log(log); 2803 2804 /* wake up threads waiting in xfs_log_force() */ 2805 wake_up_all(&iclog->ic_force_wait); 2806 2807 iclog = iclog->ic_next; 2808 } while (first_iclog != iclog); 2809 2810 if (repeats > 5000) { 2811 flushcnt += repeats; 2812 repeats = 0; 2813 xfs_warn(log->l_mp, 2814 "%s: possible infinite loop (%d iterations)", 2815 __func__, flushcnt); 2816 } 2817 } while (!ioerrors && loopdidcallbacks); 2818 2819 #ifdef DEBUG 2820 /* 2821 * Make one last gasp attempt to see if iclogs are being left in limbo. 2822 * If the above loop finds an iclog earlier than the current iclog and 2823 * in one of the syncing states, the current iclog is put into 2824 * DO_CALLBACK and the callbacks are deferred to the completion of the 2825 * earlier iclog. Walk the iclogs in order and make sure that no iclog 2826 * is in DO_CALLBACK unless an earlier iclog is in one of the syncing 2827 * states. 2828 * 2829 * Note that SYNCING|IOABORT is a valid state so we cannot just check 2830 * for ic_state == SYNCING. 2831 */ 2832 if (funcdidcallbacks) { 2833 first_iclog = iclog = log->l_iclog; 2834 do { 2835 ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK); 2836 /* 2837 * Terminate the loop if iclogs are found in states 2838 * which will cause other threads to clean up iclogs. 2839 * 2840 * SYNCING - i/o completion will go through logs 2841 * DONE_SYNC - interrupt thread should be waiting for 2842 * l_icloglock 2843 * IOERROR - give up hope all ye who enter here 2844 */ 2845 if (iclog->ic_state == XLOG_STATE_WANT_SYNC || 2846 iclog->ic_state & XLOG_STATE_SYNCING || 2847 iclog->ic_state == XLOG_STATE_DONE_SYNC || 2848 iclog->ic_state == XLOG_STATE_IOERROR ) 2849 break; 2850 iclog = iclog->ic_next; 2851 } while (first_iclog != iclog); 2852 } 2853 #endif 2854 2855 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) 2856 wake = 1; 2857 spin_unlock(&log->l_icloglock); 2858 2859 if (wake) 2860 wake_up_all(&log->l_flush_wait); 2861 } 2862 2863 2864 /* 2865 * Finish transitioning this iclog to the dirty state. 2866 * 2867 * Make sure that we completely execute this routine only when this is 2868 * the last call to the iclog. There is a good chance that iclog flushes, 2869 * when we reach the end of the physical log, get turned into 2 separate 2870 * calls to bwrite. Hence, one iclog flush could generate two calls to this 2871 * routine. By using the reference count bwritecnt, we guarantee that only 2872 * the second completion goes through. 
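 *
 * Concretely (an illustrative trace, not from a real run): an iclog that
 * wraps the end of the physical log is issued by xlog_sync() as two
 * bwrites with ic_bwritecnt == 2; the first I/O completion only
 * decrements the count and returns, and the second moves the iclog to
 * DONE_SYNC and kicks off xlog_state_do_callback().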
2873 * 2874 * Callbacks could take time, so they are done outside the scope of the 2875 * global state machine log lock. 2876 */ 2877 STATIC void 2878 xlog_state_done_syncing( 2879 xlog_in_core_t *iclog, 2880 int aborted) 2881 { 2882 struct xlog *log = iclog->ic_log; 2883 2884 spin_lock(&log->l_icloglock); 2885 2886 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || 2887 iclog->ic_state == XLOG_STATE_IOERROR); 2888 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 2889 ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2); 2890 2891 2892 /* 2893 * If we got an error, either on the first buffer, or in the case of 2894 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR, 2895 * and none should ever be attempted to be written to disk 2896 * again. 2897 */ 2898 if (iclog->ic_state != XLOG_STATE_IOERROR) { 2899 if (--iclog->ic_bwritecnt == 1) { 2900 spin_unlock(&log->l_icloglock); 2901 return; 2902 } 2903 iclog->ic_state = XLOG_STATE_DONE_SYNC; 2904 } 2905 2906 /* 2907 * Someone could be sleeping prior to writing out the next 2908 * iclog buffer, we wake them all, one will get to do the 2909 * I/O, the others get to wait for the result. 2910 */ 2911 wake_up_all(&iclog->ic_write_wait); 2912 spin_unlock(&log->l_icloglock); 2913 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ 2914 } /* xlog_state_done_syncing */ 2915 2916 2917 /* 2918 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must 2919 * sleep. We wait on the flush queue on the head iclog as that should be 2920 * the first iclog to complete flushing. Hence if all iclogs are syncing, 2921 * we will wait here and all new writes will sleep until a sync completes. 2922 * 2923 * The in-core logs are used in a circular fashion. They are not used 2924 * out-of-order even when an iclog past the head is free. 2925 * 2926 * return: 2927 * * log_offset where xlog_write() can start writing into the in-core 2928 * log's data space. 2929 * * in-core log pointer to which xlog_write() should write. 2930 * * boolean indicating this is a continued write to an in-core log. 2931 * If this is the last write, then the in-core log's offset field 2932 * needs to be incremented, depending on the amount of data which 2933 * is copied. 2934 */ 2935 STATIC int 2936 xlog_state_get_iclog_space( 2937 struct xlog *log, 2938 int len, 2939 struct xlog_in_core **iclogp, 2940 struct xlog_ticket *ticket, 2941 int *continued_write, 2942 int *logoffsetp) 2943 { 2944 int log_offset; 2945 xlog_rec_header_t *head; 2946 xlog_in_core_t *iclog; 2947 int error; 2948 2949 restart: 2950 spin_lock(&log->l_icloglock); 2951 if (XLOG_FORCED_SHUTDOWN(log)) { 2952 spin_unlock(&log->l_icloglock); 2953 return -EIO; 2954 } 2955 2956 iclog = log->l_iclog; 2957 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 2958 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); 2959 2960 /* Wait for log writes to have flushed */ 2961 xlog_wait(&log->l_flush_wait, &log->l_icloglock); 2962 goto restart; 2963 } 2964 2965 head = &iclog->ic_header; 2966 2967 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ 2968 log_offset = iclog->ic_offset; 2969 2970 /* On the 1st write to an iclog, figure out lsn. This works 2971 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are 2972 * committing to. If the offset is set, that's how many blocks 2973 * must be written. 
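 *
 * For illustration (hypothetical values): the first writer into an empty
 * iclog is charged l_iclog_hsize out of its ticket for the record header,
 * and the header LSN is stamped from the current cycle/block, e.g.
 * h_lsn = xlog_assign_lsn(7, 1024) for cycle 7, block 1024.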
2974 */ 2975 if (log_offset == 0) { 2976 ticket->t_curr_res -= log->l_iclog_hsize; 2977 xlog_tic_add_region(ticket, 2978 log->l_iclog_hsize, 2979 XLOG_REG_TYPE_LRHEADER); 2980 head->h_cycle = cpu_to_be32(log->l_curr_cycle); 2981 head->h_lsn = cpu_to_be64( 2982 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); 2983 ASSERT(log->l_curr_block >= 0); 2984 } 2985 2986 /* If there is enough room to write everything, then do it. Otherwise, 2987 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC 2988 * bit is on, so this will get flushed out. Don't update ic_offset 2989 * until you know exactly how many bytes get copied. Therefore, wait 2990 * until later to update ic_offset. 2991 * 2992 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's 2993 * can fit into remaining data section. 2994 */ 2995 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { 2996 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 2997 2998 /* 2999 * If I'm the only one writing to this iclog, sync it to disk. 3000 * We need to do an atomic compare and decrement here to avoid 3001 * racing with concurrent atomic_dec_and_lock() calls in 3002 * xlog_state_release_iclog() when there is more than one 3003 * reference to the iclog. 3004 */ 3005 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) { 3006 /* we are the only one */ 3007 spin_unlock(&log->l_icloglock); 3008 error = xlog_state_release_iclog(log, iclog); 3009 if (error) 3010 return error; 3011 } else { 3012 spin_unlock(&log->l_icloglock); 3013 } 3014 goto restart; 3015 } 3016 3017 /* Do we have enough room to write the full amount in the remainder 3018 * of this iclog? Or must we continue a write on the next iclog and 3019 * mark this iclog as completely taken? In the case where we switch 3020 * iclogs (to mark it taken), this particular iclog will release/sync 3021 * to disk in xlog_write(). 3022 */ 3023 if (len <= iclog->ic_size - iclog->ic_offset) { 3024 *continued_write = 0; 3025 iclog->ic_offset += len; 3026 } else { 3027 *continued_write = 1; 3028 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 3029 } 3030 *iclogp = iclog; 3031 3032 ASSERT(iclog->ic_offset <= iclog->ic_size); 3033 spin_unlock(&log->l_icloglock); 3034 3035 *logoffsetp = log_offset; 3036 return 0; 3037 } /* xlog_state_get_iclog_space */ 3038 3039 /* The first cnt-1 times through here we don't need to 3040 * move the grant write head because the permanent 3041 * reservation has reserved cnt times the unit amount. 3042 * Release part of current permanent unit reservation and 3043 * reset current reservation to be one units worth. Also 3044 * move grant reservation head forward. 
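 *
 * Worked example (illustrative counts): a permanent ticket taken with
 * cnt == 2 pays for two transactions up front. On the first commit we
 * decrement t_cnt to 1 and only hand the unused part of t_curr_res back
 * to both grant heads; on the second (and any later) commit t_cnt has
 * reached zero, so a full t_unit_res is added back to the reserve head
 * to cover the next transaction in the series.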
3045 */ 3046 STATIC void 3047 xlog_regrant_reserve_log_space( 3048 struct xlog *log, 3049 struct xlog_ticket *ticket) 3050 { 3051 trace_xfs_log_regrant_reserve_enter(log, ticket); 3052 3053 if (ticket->t_cnt > 0) 3054 ticket->t_cnt--; 3055 3056 xlog_grant_sub_space(log, &log->l_reserve_head.grant, 3057 ticket->t_curr_res); 3058 xlog_grant_sub_space(log, &log->l_write_head.grant, 3059 ticket->t_curr_res); 3060 ticket->t_curr_res = ticket->t_unit_res; 3061 xlog_tic_reset_res(ticket); 3062 3063 trace_xfs_log_regrant_reserve_sub(log, ticket); 3064 3065 /* just return if we still have some of the pre-reserved space */ 3066 if (ticket->t_cnt > 0) 3067 return; 3068 3069 xlog_grant_add_space(log, &log->l_reserve_head.grant, 3070 ticket->t_unit_res); 3071 3072 trace_xfs_log_regrant_reserve_exit(log, ticket); 3073 3074 ticket->t_curr_res = ticket->t_unit_res; 3075 xlog_tic_reset_res(ticket); 3076 } /* xlog_regrant_reserve_log_space */ 3077 3078 3079 /* 3080 * Give back the space left from a reservation. 3081 * 3082 * All the information we need to make a correct determination of space left 3083 * is present. For non-permanent reservations, things are quite easy. The 3084 * count should have been decremented to zero. We only need to deal with the 3085 * space remaining in the current reservation part of the ticket. If the 3086 * ticket contains a permanent reservation, there may be left over space which 3087 * needs to be released. A count of N means that N-1 refills of the current 3088 * reservation can be done before we need to ask for more space. The first 3089 * one goes to fill up the first current reservation. Once we run out of 3090 * space, the count will stay at zero and the only space remaining will be 3091 * in the current reservation field. 3092 */ 3093 STATIC void 3094 xlog_ungrant_log_space( 3095 struct xlog *log, 3096 struct xlog_ticket *ticket) 3097 { 3098 int bytes; 3099 3100 if (ticket->t_cnt > 0) 3101 ticket->t_cnt--; 3102 3103 trace_xfs_log_ungrant_enter(log, ticket); 3104 trace_xfs_log_ungrant_sub(log, ticket); 3105 3106 /* 3107 * If this is a permanent reservation ticket, we may be able to free 3108 * up more space based on the remaining count. 3109 */ 3110 bytes = ticket->t_curr_res; 3111 if (ticket->t_cnt > 0) { 3112 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); 3113 bytes += ticket->t_unit_res*ticket->t_cnt; 3114 } 3115 3116 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); 3117 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); 3118 3119 trace_xfs_log_ungrant_exit(log, ticket); 3120 3121 xfs_log_space_wake(log->l_mp); 3122 } 3123 3124 /* 3125 * Flush iclog to disk if this is the last reference to the given iclog and 3126 * the WANT_SYNC bit is set. 3127 * 3128 * When this function is entered, the iclog is not necessarily in the 3129 * WANT_SYNC state. It may be sitting around waiting to get filled. 3130 * 3131 * 3132 */ 3133 STATIC int 3134 xlog_state_release_iclog( 3135 struct xlog *log, 3136 struct xlog_in_core *iclog) 3137 { 3138 int sync = 0; /* do we sync? 
*/ 3139 3140 if (iclog->ic_state & XLOG_STATE_IOERROR) 3141 return -EIO; 3142 3143 ASSERT(atomic_read(&iclog->ic_refcnt) > 0); 3144 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) 3145 return 0; 3146 3147 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3148 spin_unlock(&log->l_icloglock); 3149 return -EIO; 3150 } 3151 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || 3152 iclog->ic_state == XLOG_STATE_WANT_SYNC); 3153 3154 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) { 3155 /* update tail before writing to iclog */ 3156 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); 3157 sync++; 3158 iclog->ic_state = XLOG_STATE_SYNCING; 3159 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); 3160 xlog_verify_tail_lsn(log, iclog, tail_lsn); 3161 /* cycle incremented when incrementing curr_block */ 3162 } 3163 spin_unlock(&log->l_icloglock); 3164 3165 /* 3166 * We let the log lock go, so it's possible that we hit a log I/O 3167 * error or some other SHUTDOWN condition that marks the iclog 3168 * as XLOG_STATE_IOERROR before the bwrite. However, we know that 3169 * this iclog has consistent data, so we ignore IOERROR 3170 * flags after this point. 3171 */ 3172 if (sync) 3173 return xlog_sync(log, iclog); 3174 return 0; 3175 } /* xlog_state_release_iclog */ 3176 3177 3178 /* 3179 * This routine will mark the current iclog in the ring as WANT_SYNC 3180 * and move the current iclog pointer to the next iclog in the ring. 3181 * When this routine is called from xlog_state_get_iclog_space(), the 3182 * exact size of the iclog has not yet been determined. All we know is 3183 * that every data block. We have run out of space in this log record. 3184 */ 3185 STATIC void 3186 xlog_state_switch_iclogs( 3187 struct xlog *log, 3188 struct xlog_in_core *iclog, 3189 int eventual_size) 3190 { 3191 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 3192 if (!eventual_size) 3193 eventual_size = iclog->ic_offset; 3194 iclog->ic_state = XLOG_STATE_WANT_SYNC; 3195 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); 3196 log->l_prev_block = log->l_curr_block; 3197 log->l_prev_cycle = log->l_curr_cycle; 3198 3199 /* roll log?: ic_offset changed later */ 3200 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); 3201 3202 /* Round up to next log-sunit */ 3203 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && 3204 log->l_mp->m_sb.sb_logsunit > 1) { 3205 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); 3206 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); 3207 } 3208 3209 if (log->l_curr_block >= log->l_logBBsize) { 3210 /* 3211 * Rewind the current block before the cycle is bumped to make 3212 * sure that the combined LSN never transiently moves forward 3213 * when the log wraps to the next cycle. This is to support the 3214 * unlocked sample of these fields from xlog_valid_lsn(). Most 3215 * other cases should acquire l_icloglock. 3216 */ 3217 log->l_curr_block -= log->l_logBBsize; 3218 ASSERT(log->l_curr_block >= 0); 3219 smp_wmb(); 3220 log->l_curr_cycle++; 3221 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) 3222 log->l_curr_cycle++; 3223 } 3224 ASSERT(iclog == log->l_iclog); 3225 log->l_iclog = iclog->ic_next; 3226 } /* xlog_state_switch_iclogs */ 3227 3228 /* 3229 * Write out all data in the in-core log as of this exact moment in time. 3230 * 3231 * Data may be written to the in-core log during this call. However, 3232 * we don't guarantee this data will be written out. A change from past 3233 * implementation means this routine will *not* write out zero length LRs. 
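 *
 * Typical caller sketch (illustrative only):
 * error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL) forces everything
 * committed so far to stable storage and waits for the iclog I/O, while
 * a zero-flags call just starts the push without sleeping.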
3234 * 3235 * Basically, we try and perform an intelligent scan of the in-core logs. 3236 * If we determine there is no flushable data, we just return. There is no 3237 * flushable data if: 3238 * 3239 * 1. the current iclog is active and has no data; the previous iclog 3240 * is in the active or dirty state. 3241 * 2. the current iclog is drity, and the previous iclog is in the 3242 * active or dirty state. 3243 * 3244 * We may sleep if: 3245 * 3246 * 1. the current iclog is not in the active nor dirty state. 3247 * 2. the current iclog dirty, and the previous iclog is not in the 3248 * active nor dirty state. 3249 * 3. the current iclog is active, and there is another thread writing 3250 * to this particular iclog. 3251 * 4. a) the current iclog is active and has no other writers 3252 * b) when we return from flushing out this iclog, it is still 3253 * not in the active nor dirty state. 3254 */ 3255 int 3256 _xfs_log_force( 3257 struct xfs_mount *mp, 3258 uint flags, 3259 int *log_flushed) 3260 { 3261 struct xlog *log = mp->m_log; 3262 struct xlog_in_core *iclog; 3263 xfs_lsn_t lsn; 3264 3265 XFS_STATS_INC(mp, xs_log_force); 3266 3267 xlog_cil_force(log); 3268 3269 spin_lock(&log->l_icloglock); 3270 3271 iclog = log->l_iclog; 3272 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3273 spin_unlock(&log->l_icloglock); 3274 return -EIO; 3275 } 3276 3277 /* If the head iclog is not active nor dirty, we just attach 3278 * ourselves to the head and go to sleep. 3279 */ 3280 if (iclog->ic_state == XLOG_STATE_ACTIVE || 3281 iclog->ic_state == XLOG_STATE_DIRTY) { 3282 /* 3283 * If the head is dirty or (active and empty), then 3284 * we need to look at the previous iclog. If the previous 3285 * iclog is active or dirty we are done. There is nothing 3286 * to sync out. Otherwise, we attach ourselves to the 3287 * previous iclog and go to sleep. 3288 */ 3289 if (iclog->ic_state == XLOG_STATE_DIRTY || 3290 (atomic_read(&iclog->ic_refcnt) == 0 3291 && iclog->ic_offset == 0)) { 3292 iclog = iclog->ic_prev; 3293 if (iclog->ic_state == XLOG_STATE_ACTIVE || 3294 iclog->ic_state == XLOG_STATE_DIRTY) 3295 goto no_sleep; 3296 else 3297 goto maybe_sleep; 3298 } else { 3299 if (atomic_read(&iclog->ic_refcnt) == 0) { 3300 /* We are the only one with access to this 3301 * iclog. Flush it out now. There should 3302 * be a roundoff of zero to show that someone 3303 * has already taken care of the roundoff from 3304 * the previous sync. 3305 */ 3306 atomic_inc(&iclog->ic_refcnt); 3307 lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3308 xlog_state_switch_iclogs(log, iclog, 0); 3309 spin_unlock(&log->l_icloglock); 3310 3311 if (xlog_state_release_iclog(log, iclog)) 3312 return -EIO; 3313 3314 if (log_flushed) 3315 *log_flushed = 1; 3316 spin_lock(&log->l_icloglock); 3317 if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn && 3318 iclog->ic_state != XLOG_STATE_DIRTY) 3319 goto maybe_sleep; 3320 else 3321 goto no_sleep; 3322 } else { 3323 /* Someone else is writing to this iclog. 3324 * Use its call to flush out the data. However, 3325 * the other thread may not force out this LR, 3326 * so we mark it WANT_SYNC. 3327 */ 3328 xlog_state_switch_iclogs(log, iclog, 0); 3329 goto maybe_sleep; 3330 } 3331 } 3332 } 3333 3334 /* By the time we come around again, the iclog could've been filled 3335 * which would give it another lsn. If we have a new lsn, just 3336 * return because the relevant data has been flushed. 
3337 */ 3338 maybe_sleep: 3339 if (flags & XFS_LOG_SYNC) { 3340 /* 3341 * We must check if we're shutting down here, before 3342 * we wait, while we're holding the l_icloglock. 3343 * Then we check again after waking up, in case our 3344 * sleep was disturbed by a bad news. 3345 */ 3346 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3347 spin_unlock(&log->l_icloglock); 3348 return -EIO; 3349 } 3350 XFS_STATS_INC(mp, xs_log_force_sleep); 3351 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3352 /* 3353 * No need to grab the log lock here since we're 3354 * only deciding whether or not to return EIO 3355 * and the memory read should be atomic. 3356 */ 3357 if (iclog->ic_state & XLOG_STATE_IOERROR) 3358 return -EIO; 3359 if (log_flushed) 3360 *log_flushed = 1; 3361 } else { 3362 3363 no_sleep: 3364 spin_unlock(&log->l_icloglock); 3365 } 3366 return 0; 3367 } 3368 3369 /* 3370 * Wrapper for _xfs_log_force(), to be used when caller doesn't care 3371 * about errors or whether the log was flushed or not. This is the normal 3372 * interface to use when trying to unpin items or move the log forward. 3373 */ 3374 void 3375 xfs_log_force( 3376 xfs_mount_t *mp, 3377 uint flags) 3378 { 3379 int error; 3380 3381 trace_xfs_log_force(mp, 0); 3382 error = _xfs_log_force(mp, flags, NULL); 3383 if (error) 3384 xfs_warn(mp, "%s: error %d returned.", __func__, error); 3385 } 3386 3387 /* 3388 * Force the in-core log to disk for a specific LSN. 3389 * 3390 * Find in-core log with lsn. 3391 * If it is in the DIRTY state, just return. 3392 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC 3393 * state and go to sleep or return. 3394 * If it is in any other state, go to sleep or return. 3395 * 3396 * Synchronous forces are implemented with a signal variable. All callers 3397 * to force a given lsn to disk will wait on a the sv attached to the 3398 * specific in-core log. When given in-core log finally completes its 3399 * write to disk, that thread will wake up all threads waiting on the 3400 * sv. 3401 */ 3402 int 3403 _xfs_log_force_lsn( 3404 struct xfs_mount *mp, 3405 xfs_lsn_t lsn, 3406 uint flags, 3407 int *log_flushed) 3408 { 3409 struct xlog *log = mp->m_log; 3410 struct xlog_in_core *iclog; 3411 int already_slept = 0; 3412 3413 ASSERT(lsn != 0); 3414 3415 XFS_STATS_INC(mp, xs_log_force); 3416 3417 lsn = xlog_cil_force_lsn(log, lsn); 3418 if (lsn == NULLCOMMITLSN) 3419 return 0; 3420 3421 try_again: 3422 spin_lock(&log->l_icloglock); 3423 iclog = log->l_iclog; 3424 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3425 spin_unlock(&log->l_icloglock); 3426 return -EIO; 3427 } 3428 3429 do { 3430 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { 3431 iclog = iclog->ic_next; 3432 continue; 3433 } 3434 3435 if (iclog->ic_state == XLOG_STATE_DIRTY) { 3436 spin_unlock(&log->l_icloglock); 3437 return 0; 3438 } 3439 3440 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3441 /* 3442 * We sleep here if we haven't already slept (e.g. 3443 * this is the first time we've looked at the correct 3444 * iclog buf) and the buffer before us is going to 3445 * be sync'ed. The reason for this is that if we 3446 * are doing sync transactions here, by waiting for 3447 * the previous I/O to complete, we can allow a few 3448 * more transactions into this iclog before we close 3449 * it down. 3450 * 3451 * Otherwise, we mark the buffer WANT_SYNC, and bump 3452 * up the refcnt so we can release the log (which 3453 * drops the ref count). The state switch keeps new 3454 * transaction commits from using this buffer. 
When 3455 * the current commits finish writing into the buffer, 3456 * the refcount will drop to zero and the buffer will 3457 * go out then. 3458 */ 3459 if (!already_slept && 3460 (iclog->ic_prev->ic_state & 3461 (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) { 3462 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); 3463 3464 XFS_STATS_INC(mp, xs_log_force_sleep); 3465 3466 xlog_wait(&iclog->ic_prev->ic_write_wait, 3467 &log->l_icloglock); 3468 if (log_flushed) 3469 *log_flushed = 1; 3470 already_slept = 1; 3471 goto try_again; 3472 } 3473 atomic_inc(&iclog->ic_refcnt); 3474 xlog_state_switch_iclogs(log, iclog, 0); 3475 spin_unlock(&log->l_icloglock); 3476 if (xlog_state_release_iclog(log, iclog)) 3477 return -EIO; 3478 if (log_flushed) 3479 *log_flushed = 1; 3480 spin_lock(&log->l_icloglock); 3481 } 3482 3483 if ((flags & XFS_LOG_SYNC) && /* sleep */ 3484 !(iclog->ic_state & 3485 (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { 3486 /* 3487 * Don't wait on completion if we know that we've 3488 * gotten a log write error. 3489 */ 3490 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3491 spin_unlock(&log->l_icloglock); 3492 return -EIO; 3493 } 3494 XFS_STATS_INC(mp, xs_log_force_sleep); 3495 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3496 /* 3497 * No need to grab the log lock here since we're 3498 * only deciding whether or not to return EIO 3499 * and the memory read should be atomic. 3500 */ 3501 if (iclog->ic_state & XLOG_STATE_IOERROR) 3502 return -EIO; 3503 3504 if (log_flushed) 3505 *log_flushed = 1; 3506 } else { /* just return */ 3507 spin_unlock(&log->l_icloglock); 3508 } 3509 3510 return 0; 3511 } while (iclog != log->l_iclog); 3512 3513 spin_unlock(&log->l_icloglock); 3514 return 0; 3515 } 3516 3517 /* 3518 * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care 3519 * about errors or whether the log was flushed or not. This is the normal 3520 * interface to use when trying to unpin items or move the log forward. 3521 */ 3522 void 3523 xfs_log_force_lsn( 3524 xfs_mount_t *mp, 3525 xfs_lsn_t lsn, 3526 uint flags) 3527 { 3528 int error; 3529 3530 trace_xfs_log_force(mp, lsn); 3531 error = _xfs_log_force_lsn(mp, lsn, flags, NULL); 3532 if (error) 3533 xfs_warn(mp, "%s: error %d returned.", __func__, error); 3534 } 3535 3536 /* 3537 * Called when we want to mark the current iclog as being ready to sync to 3538 * disk. 3539 */ 3540 STATIC void 3541 xlog_state_want_sync( 3542 struct xlog *log, 3543 struct xlog_in_core *iclog) 3544 { 3545 assert_spin_locked(&log->l_icloglock); 3546 3547 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3548 xlog_state_switch_iclogs(log, iclog, 0); 3549 } else { 3550 ASSERT(iclog->ic_state & 3551 (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); 3552 } 3553 } 3554 3555 3556 /***************************************************************************** 3557 * 3558 * TICKET functions 3559 * 3560 ***************************************************************************** 3561 */ 3562 3563 /* 3564 * Free a used ticket when its refcount falls to zero. 
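 *
 * Usage sketch (illustrative): a caller that must keep the ticket alive
 * across a commit takes its own reference with xfs_log_ticket_get(ticket)
 * and drops it with xfs_log_ticket_put(ticket); the zone memory is only
 * returned on the final put.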
3565 */ 3566 void 3567 xfs_log_ticket_put( 3568 xlog_ticket_t *ticket) 3569 { 3570 ASSERT(atomic_read(&ticket->t_ref) > 0); 3571 if (atomic_dec_and_test(&ticket->t_ref)) 3572 kmem_zone_free(xfs_log_ticket_zone, ticket); 3573 } 3574 3575 xlog_ticket_t * 3576 xfs_log_ticket_get( 3577 xlog_ticket_t *ticket) 3578 { 3579 ASSERT(atomic_read(&ticket->t_ref) > 0); 3580 atomic_inc(&ticket->t_ref); 3581 return ticket; 3582 } 3583 3584 /* 3585 * Figure out the total log space unit (in bytes) that would be 3586 * required for a log ticket. 3587 */ 3588 int 3589 xfs_log_calc_unit_res( 3590 struct xfs_mount *mp, 3591 int unit_bytes) 3592 { 3593 struct xlog *log = mp->m_log; 3594 int iclog_space; 3595 uint num_headers; 3596 3597 /* 3598 * Permanent reservations have up to 'cnt'-1 active log operations 3599 * in the log. A unit in this case is the amount of space for one 3600 * of these log operations. Normal reservations have a cnt of 1 3601 * and their unit amount is the total amount of space required. 3602 * 3603 * The following lines of code account for non-transaction data 3604 * which occupy space in the on-disk log. 3605 * 3606 * Normal form of a transaction is: 3607 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> 3608 * and then there are LR hdrs, split-recs and roundoff at end of syncs. 3609 * 3610 * We need to account for all the leadup data and trailer data 3611 * around the transaction data. 3612 * And then we need to account for the worst case in terms of using 3613 * more space. 3614 * The worst case will happen if: 3615 * - the placement of the transaction happens to be such that the 3616 * roundoff is at its maximum 3617 * - the transaction data is synced before the commit record is synced 3618 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> 3619 * Therefore the commit record is in its own Log Record. 3620 * This can happen as the commit record is called with its 3621 * own region to xlog_write(). 3622 * This then means that in the worst case, roundoff can happen for 3623 * the commit-rec as well. 3624 * The commit-rec is smaller than padding in this scenario and so it is 3625 * not added separately. 3626 */ 3627 3628 /* for trans header */ 3629 unit_bytes += sizeof(xlog_op_header_t); 3630 unit_bytes += sizeof(xfs_trans_header_t); 3631 3632 /* for start-rec */ 3633 unit_bytes += sizeof(xlog_op_header_t); 3634 3635 /* 3636 * for LR headers - the space for data in an iclog is the size minus 3637 * the space used for the headers. If we use the iclog size, then we 3638 * undercalculate the number of headers required. 3639 * 3640 * Furthermore - the addition of op headers for split-recs might 3641 * increase the space required enough to require more log and op 3642 * headers, so take that into account too. 3643 * 3644 * IMPORTANT: This reservation makes the assumption that if this 3645 * transaction is the first in an iclog and hence has the LR headers 3646 * accounted to it, then the remaining space in the iclog is 3647 * exclusively for this transaction. i.e. if the transaction is larger 3648 * than the iclog, it will be the only thing in that iclog. 3649 * Fundamentally, this means we must pass the entire log vector to 3650 * xlog_write to guarantee this. 
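 *
 * Rough worked example (illustrative numbers, ignoring the second-pass
 * overrun check below): with 32k iclogs and a few hundred bytes of
 * header, iclog_space is roughly 32k. A transaction needing ~100k of
 * regions gets num_headers = howmany(100k, 32k) = 4, so it is charged
 * four extra op headers for split records, four LR headers, one more LR
 * header for the commit record, and finally two roundoffs (2 * stripe
 * unit on a v2 log, otherwise 2 * BBSIZE).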
3651 */ 3652 iclog_space = log->l_iclog_size - log->l_iclog_hsize; 3653 num_headers = howmany(unit_bytes, iclog_space); 3654 3655 /* for split-recs - ophdrs added when data split over LRs */ 3656 unit_bytes += sizeof(xlog_op_header_t) * num_headers; 3657 3658 /* add extra header reservations if we overrun */ 3659 while (!num_headers || 3660 howmany(unit_bytes, iclog_space) > num_headers) { 3661 unit_bytes += sizeof(xlog_op_header_t); 3662 num_headers++; 3663 } 3664 unit_bytes += log->l_iclog_hsize * num_headers; 3665 3666 /* for commit-rec LR header - note: padding will subsume the ophdr */ 3667 unit_bytes += log->l_iclog_hsize; 3668 3669 /* for roundoff padding for transaction data and one for commit record */ 3670 if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) { 3671 /* log su roundoff */ 3672 unit_bytes += 2 * mp->m_sb.sb_logsunit; 3673 } else { 3674 /* BB roundoff */ 3675 unit_bytes += 2 * BBSIZE; 3676 } 3677 3678 return unit_bytes; 3679 } 3680 3681 /* 3682 * Allocate and initialise a new log ticket. 3683 */ 3684 struct xlog_ticket * 3685 xlog_ticket_alloc( 3686 struct xlog *log, 3687 int unit_bytes, 3688 int cnt, 3689 char client, 3690 bool permanent, 3691 xfs_km_flags_t alloc_flags) 3692 { 3693 struct xlog_ticket *tic; 3694 int unit_res; 3695 3696 tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags); 3697 if (!tic) 3698 return NULL; 3699 3700 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); 3701 3702 atomic_set(&tic->t_ref, 1); 3703 tic->t_task = current; 3704 INIT_LIST_HEAD(&tic->t_queue); 3705 tic->t_unit_res = unit_res; 3706 tic->t_curr_res = unit_res; 3707 tic->t_cnt = cnt; 3708 tic->t_ocnt = cnt; 3709 tic->t_tid = prandom_u32(); 3710 tic->t_clientid = client; 3711 tic->t_flags = XLOG_TIC_INITED; 3712 tic->t_trans_type = 0; 3713 if (permanent) 3714 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3715 3716 xlog_tic_reset_res(tic); 3717 3718 return tic; 3719 } 3720 3721 3722 /****************************************************************************** 3723 * 3724 * Log debug routines 3725 * 3726 ****************************************************************************** 3727 */ 3728 #if defined(DEBUG) 3729 /* 3730 * Make sure that the destination ptr is within the valid data region of 3731 * one of the iclogs. This uses backup pointers stored in a different 3732 * part of the log in case we trash the log structure. 3733 */ 3734 void 3735 xlog_verify_dest_ptr( 3736 struct xlog *log, 3737 void *ptr) 3738 { 3739 int i; 3740 int good_ptr = 0; 3741 3742 for (i = 0; i < log->l_iclog_bufs; i++) { 3743 if (ptr >= log->l_iclog_bak[i] && 3744 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) 3745 good_ptr++; 3746 } 3747 3748 if (!good_ptr) 3749 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); 3750 } 3751 3752 /* 3753 * Check to make sure the grant write head didn't just over lap the tail. If 3754 * the cycles are the same, we can't be overlapping. Otherwise, make sure that 3755 * the cycles differ by exactly one and check the byte count. 3756 * 3757 * This check is run unlocked, so can give false positives. Rather than assert 3758 * on failures, use a warn-once flag and a panic tag to allow the admin to 3759 * determine if they want to panic the machine when such an error occurs. For 3760 * debug kernels this will have the same effect as using an assert but, unlinke 3761 * an assert, it can be turned off at runtime. 
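 *
 * Illustrative check (made-up values): with the write grant head at
 * cycle 12 and the tail at cycle 11, the cycles legitimately differ by
 * one; we then only complain if the grant head's byte offset has grown
 * past BBTOB(tail_blocks), which would mean the head has physically
 * overtaken the tail.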
3762 */ 3763 STATIC void 3764 xlog_verify_grant_tail( 3765 struct xlog *log) 3766 { 3767 int tail_cycle, tail_blocks; 3768 int cycle, space; 3769 3770 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); 3771 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); 3772 if (tail_cycle != cycle) { 3773 if (cycle - 1 != tail_cycle && 3774 !(log->l_flags & XLOG_TAIL_WARN)) { 3775 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3776 "%s: cycle - 1 != tail_cycle", __func__); 3777 log->l_flags |= XLOG_TAIL_WARN; 3778 } 3779 3780 if (space > BBTOB(tail_blocks) && 3781 !(log->l_flags & XLOG_TAIL_WARN)) { 3782 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3783 "%s: space > BBTOB(tail_blocks)", __func__); 3784 log->l_flags |= XLOG_TAIL_WARN; 3785 } 3786 } 3787 } 3788 3789 /* check if it will fit */ 3790 STATIC void 3791 xlog_verify_tail_lsn( 3792 struct xlog *log, 3793 struct xlog_in_core *iclog, 3794 xfs_lsn_t tail_lsn) 3795 { 3796 int blocks; 3797 3798 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { 3799 blocks = 3800 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); 3801 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) 3802 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3803 } else { 3804 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); 3805 3806 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) 3807 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); 3808 3809 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; 3810 if (blocks < BTOBB(iclog->ic_offset) + 1) 3811 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3812 } 3813 } /* xlog_verify_tail_lsn */ 3814 3815 /* 3816 * Perform a number of checks on the iclog before writing to disk. 3817 * 3818 * 1. Make sure the iclogs are still circular 3819 * 2. Make sure we have a good magic number 3820 * 3. Make sure we don't have magic numbers in the data 3821 * 4. Check fields of each log operation header for: 3822 * A. Valid client identifier 3823 * B. tid ptr value falls in valid ptr space (user space code) 3824 * C. Length in log record header is correct according to the 3825 * individual operation headers within record. 3826 * 5. When a bwrite will occur within 5 blocks of the front of the physical 3827 * log, check the preceding blocks of the physical log to make sure all 3828 * the cycle numbers agree with the current cycle number. 
/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *		individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *	log, check the preceding blocks of the physical log to make sure all
 *	the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count,
	bool			syncing)
{
	xlog_op_header_t	*ophead;
	xlog_in_core_t		*icptr;
	xlog_in_core_2_t	*xhdr;
	void			*base_ptr, *ptr, *p;
	ptrdiff_t		field_offset;
	__uint8_t		clientid;
	int			len, i, j, k, op_len;
	int			idx;

	/* check validity of iclog pointers */
	spin_lock(&log->l_icloglock);
	icptr = log->l_iclog;
	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
		ASSERT(icptr);

	if (icptr != log->l_iclog)
		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
	spin_unlock(&log->l_icloglock);

	/* check log magic numbers */
	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

	base_ptr = ptr = &iclog->ic_header;
	p = &iclog->ic_header;
	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			xfs_emerg(log->l_mp, "%s: unexpected magic num",
				__func__);
	}

	/* check fields */
	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	base_ptr = ptr = iclog->ic_datap;
	ophead = ptr;
	xhdr = iclog->ic_data;
	for (i = 0; i < len; i++) {
		ophead = ptr;

		/*
		 * clientid is only 1 byte.  When the iclog has already been
		 * packed for sync, the first four bytes of every 512 byte
		 * block have been stashed in the (extended) header cycle
		 * data, so a field sitting on a 512 byte boundary must be
		 * read back from there instead of from the data region.
		 */
		p = &ophead->oh_clientid;
		field_offset = p - base_ptr;
		if (!syncing || (field_offset & 0x1ff)) {
			clientid = ophead->oh_clientid;
		} else {
			idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				clientid = xlog_get_client_id(
					xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				clientid = xlog_get_client_id(
					iclog->ic_header.h_cycle_data[idx]);
			}
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
			xfs_warn(log->l_mp,
				"%s: invalid clientid %d op 0x%p offset 0x%lx",
				__func__, clientid, ophead,
				(unsigned long)field_offset);

		/* check length, using the same boundary rule as above */
		p = &ophead->oh_len;
		field_offset = p - base_ptr;
		if (!syncing || (field_offset & 0x1ff)) {
			op_len = be32_to_cpu(ophead->oh_len);
		} else {
			idx = BTOBBT((uintptr_t)&ophead->oh_len -
				    (uintptr_t)iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
	}
}	/* xlog_verify_iclog */
#endif

/*
 * Mark all iclogs IOERROR. l_icloglock is held by the caller.
 */
STATIC int
xlog_state_ioerror(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog, *ic;

	iclog = log->l_iclog;
	if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
		/*
		 * Mark all the incore logs IOERROR.
		 * From now on, no log flushes will result.
		 */
		ic = iclog;
		do {
			ic->ic_state = XLOG_STATE_IOERROR;
			ic = ic->ic_next;
		} while (ic != iclog);
		return 0;
	}
	/*
	 * Return non-zero if the state transition has already happened.
	 */
	return 1;
}

/*
 * This is called from xfs_force_shutdown, when we're forcibly
 * shutting down the filesystem, typically because of an IO error.
 * Our main objectives here are to make sure that:
 *	a. if !logerror, flush the logs to disk. Anything modified
 *	   after this is ignored.
 *	b. the filesystem gets marked 'SHUTDOWN' for all interested
 *	   parties to find out, 'atomically'.
 *	c. those who're sleeping on log reservations, pinned objects and
 *	   other resources get woken up, and are told the bad news.
 *	d. nothing new gets queued up after (b) and (c) are done.
 *
 * Note: for the !logerror case we need to flush the regions held in memory out
 * to disk first. This needs to be done before the log is marked as shutdown,
 * otherwise the iclog writes will fail.
 */
int
xfs_log_force_umount(
	struct xfs_mount	*mp,
	int			logerror)
{
	struct xlog	*log;
	int		retval;

	log = mp->m_log;

	/*
	 * If this happens during log recovery, don't worry about
	 * locking; the log isn't open for business yet.
	 */
	if (!log ||
	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
		if (mp->m_sb_bp)
			XFS_BUF_DONE(mp->m_sb_bp);
		return 0;
	}

	/*
	 * Somebody could've already done the hard work for us.
	 * No need to get locks for this.
	 */
	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
		ASSERT(XLOG_FORCED_SHUTDOWN(log));
		return 1;
	}

	/*
	 * Flush all the completed transactions to disk before marking the log
	 * as being shut down. We need to do it in this order to ensure that
	 * completed operations are safely on disk before we shut down, and
	 * that we don't have to issue any buffer IO after the shutdown flags
	 * are set to guarantee this.
	 */
	if (!logerror)
		_xfs_log_force(mp, XFS_LOG_SYNC, NULL);

	/*
	 * Mark the filesystem and the log as being in a shutdown state and
	 * wake everybody up to tell them the bad news.
	 */
	spin_lock(&log->l_icloglock);
	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
	if (mp->m_sb_bp)
		XFS_BUF_DONE(mp->m_sb_bp);

	/*
	 * Mark the log and the iclogs with IO error flags to prevent any
	 * further log IO from being issued or completed.
	 */
	log->l_flags |= XLOG_IO_ERROR;
	retval = xlog_state_ioerror(log);
	spin_unlock(&log->l_icloglock);

	/*
	 * We don't want anybody waiting for log reservations after this. That
	 * means we have to wake up everybody queued up on reserveq as well as
	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
	 * action is protected by the grant locks.
	 */
	xlog_grant_head_wake_all(&log->l_reserve_head);
	xlog_grant_head_wake_all(&log->l_write_head);

	/*
	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
	 * as if the log writes were completed. The abort handling in the log
	 * item committed callback functions will do this again under lock to
	 * avoid races.
	 */
	wake_up_all(&log->l_cilp->xc_commit_wait);
	xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);

#ifdef XFSERRORDEBUG
	{
		xlog_in_core_t	*iclog;

		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		do {
			ASSERT(iclog->ic_callback == 0);
			iclog = iclog->ic_next;
		} while (iclog != log->l_iclog);
		spin_unlock(&log->l_icloglock);
	}
#endif
	/* return non-zero if log IOERROR transition had already happened */
	return retval;
}

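/*
 * Illustrative sketch only: xfs_log_force_umount() is normally reached
 * through the shutdown wrapper rather than called directly. A caller that
 * hits a fatal log I/O error would typically do something like the following
 * (the function below is a made-up example, not code used elsewhere in XFS):
 */
#if 0
static void
xlog_shutdown_example(
	struct xfs_mount	*mp)
{
	/* shut the filesystem down, flagging the cause as a log I/O error */
	xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
}
#endif
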
STATIC int
xlog_iclogs_empty(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/*
		 * Endianness does not matter here, zero is zero in
		 * any language.
		 */
		if (iclog->ic_header.h_num_logops)
			return 0;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return 1;
}

/*
 * Verify that an LSN stamped into a piece of metadata is valid. This is
 * intended for use in read verifiers on v5 superblocks.
 */
bool
xfs_log_check_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn)
{
	struct xlog		*log = mp->m_log;
	bool			valid;

	/*
	 * norecovery mode skips mount-time log processing and unconditionally
	 * resets the in-core LSN. We can't validate in this mode, but
	 * modifications are not allowed anyway so just return true.
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
		return true;

	/*
	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
	 * handled by recovery and thus safe to ignore here.
	 */
	if (lsn == NULLCOMMITLSN)
		return true;

	valid = xlog_valid_lsn(mp->m_log, lsn);

	/* warn the user about what's gone wrong before verifier failure */
	if (!valid) {
		spin_lock(&log->l_icloglock);
		xfs_warn(mp,
"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
"Please unmount and run xfs_repair (>= v4.3) to resolve.",
			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
			 log->l_curr_cycle, log->l_curr_block);
		spin_unlock(&log->l_icloglock);
	}

	return valid;
}
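
/*
 * Illustrative sketch only: how a v5 buffer read verifier might use
 * xfs_log_check_lsn(). The function and parameter names are assumptions for
 * the example and do not refer to a specific verifier in the tree.
 */
#if 0
static bool
xlog_check_lsn_example(
	struct xfs_mount	*mp,
	xfs_lsn_t		metadata_lsn)
{
	/* LSN stamping only exists on v5 (CRC enabled) filesystems */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * Reject metadata stamped with an LSN ahead of the current head of
	 * the log; xfs_log_check_lsn() has already warned about it.
	 */
	return xfs_log_check_lsn(mp, metadata_lsn);
}
#endif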