1 /* 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_types.h" 21 #include "xfs_log.h" 22 #include "xfs_trans.h" 23 #include "xfs_sb.h" 24 #include "xfs_ag.h" 25 #include "xfs_mount.h" 26 #include "xfs_error.h" 27 #include "xfs_log_priv.h" 28 #include "xfs_buf_item.h" 29 #include "xfs_bmap_btree.h" 30 #include "xfs_alloc_btree.h" 31 #include "xfs_ialloc_btree.h" 32 #include "xfs_log_recover.h" 33 #include "xfs_trans_priv.h" 34 #include "xfs_dinode.h" 35 #include "xfs_inode.h" 36 #include "xfs_trace.h" 37 #include "xfs_fsops.h" 38 #include "xfs_cksum.h" 39 40 kmem_zone_t *xfs_log_ticket_zone; 41 42 /* Local miscellaneous function prototypes */ 43 STATIC int 44 xlog_commit_record( 45 struct xlog *log, 46 struct xlog_ticket *ticket, 47 struct xlog_in_core **iclog, 48 xfs_lsn_t *commitlsnp); 49 50 STATIC struct xlog * 51 xlog_alloc_log( 52 struct xfs_mount *mp, 53 struct xfs_buftarg *log_target, 54 xfs_daddr_t blk_offset, 55 int num_bblks); 56 STATIC int 57 xlog_space_left( 58 struct xlog *log, 59 atomic64_t *head); 60 STATIC int 61 xlog_sync( 62 struct xlog *log, 63 struct xlog_in_core *iclog); 64 STATIC void 65 xlog_dealloc_log( 66 struct xlog *log); 67 68 /* local state machine functions */ 69 STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int); 70 STATIC void 71 xlog_state_do_callback( 72 struct xlog *log, 73 int aborted, 74 struct xlog_in_core *iclog); 75 STATIC int 76 xlog_state_get_iclog_space( 77 struct xlog *log, 78 int len, 79 struct xlog_in_core **iclog, 80 struct xlog_ticket *ticket, 81 int *continued_write, 82 int *logoffsetp); 83 STATIC int 84 xlog_state_release_iclog( 85 struct xlog *log, 86 struct xlog_in_core *iclog); 87 STATIC void 88 xlog_state_switch_iclogs( 89 struct xlog *log, 90 struct xlog_in_core *iclog, 91 int eventual_size); 92 STATIC void 93 xlog_state_want_sync( 94 struct xlog *log, 95 struct xlog_in_core *iclog); 96 97 STATIC void 98 xlog_grant_push_ail( 99 struct xlog *log, 100 int need_bytes); 101 STATIC void 102 xlog_regrant_reserve_log_space( 103 struct xlog *log, 104 struct xlog_ticket *ticket); 105 STATIC void 106 xlog_ungrant_log_space( 107 struct xlog *log, 108 struct xlog_ticket *ticket); 109 110 #if defined(DEBUG) 111 STATIC void 112 xlog_verify_dest_ptr( 113 struct xlog *log, 114 char *ptr); 115 STATIC void 116 xlog_verify_grant_tail( 117 struct xlog *log); 118 STATIC void 119 xlog_verify_iclog( 120 struct xlog *log, 121 struct xlog_in_core *iclog, 122 int count, 123 bool syncing); 124 STATIC void 125 xlog_verify_tail_lsn( 126 struct xlog *log, 127 struct xlog_in_core *iclog, 128 xfs_lsn_t tail_lsn); 129 #else 130 #define xlog_verify_dest_ptr(a,b) 131 #define xlog_verify_grant_tail(a) 132 #define xlog_verify_iclog(a,b,c,d) 133 #define xlog_verify_tail_lsn(a,b,c) 134 #endif 135 136 STATIC int 137 
xlog_iclogs_empty( 138 struct xlog *log); 139 140 static void 141 xlog_grant_sub_space( 142 struct xlog *log, 143 atomic64_t *head, 144 int bytes) 145 { 146 int64_t head_val = atomic64_read(head); 147 int64_t new, old; 148 149 do { 150 int cycle, space; 151 152 xlog_crack_grant_head_val(head_val, &cycle, &space); 153 154 space -= bytes; 155 if (space < 0) { 156 space += log->l_logsize; 157 cycle--; 158 } 159 160 old = head_val; 161 new = xlog_assign_grant_head_val(cycle, space); 162 head_val = atomic64_cmpxchg(head, old, new); 163 } while (head_val != old); 164 } 165 166 static void 167 xlog_grant_add_space( 168 struct xlog *log, 169 atomic64_t *head, 170 int bytes) 171 { 172 int64_t head_val = atomic64_read(head); 173 int64_t new, old; 174 175 do { 176 int tmp; 177 int cycle, space; 178 179 xlog_crack_grant_head_val(head_val, &cycle, &space); 180 181 tmp = log->l_logsize - space; 182 if (tmp > bytes) 183 space += bytes; 184 else { 185 space = bytes - tmp; 186 cycle++; 187 } 188 189 old = head_val; 190 new = xlog_assign_grant_head_val(cycle, space); 191 head_val = atomic64_cmpxchg(head, old, new); 192 } while (head_val != old); 193 } 194 195 STATIC void 196 xlog_grant_head_init( 197 struct xlog_grant_head *head) 198 { 199 xlog_assign_grant_head(&head->grant, 1, 0); 200 INIT_LIST_HEAD(&head->waiters); 201 spin_lock_init(&head->lock); 202 } 203 204 STATIC void 205 xlog_grant_head_wake_all( 206 struct xlog_grant_head *head) 207 { 208 struct xlog_ticket *tic; 209 210 spin_lock(&head->lock); 211 list_for_each_entry(tic, &head->waiters, t_queue) 212 wake_up_process(tic->t_task); 213 spin_unlock(&head->lock); 214 } 215 216 static inline int 217 xlog_ticket_reservation( 218 struct xlog *log, 219 struct xlog_grant_head *head, 220 struct xlog_ticket *tic) 221 { 222 if (head == &log->l_write_head) { 223 ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); 224 return tic->t_unit_res; 225 } else { 226 if (tic->t_flags & XLOG_TIC_PERM_RESERV) 227 return tic->t_unit_res * tic->t_cnt; 228 else 229 return tic->t_unit_res; 230 } 231 } 232 233 STATIC bool 234 xlog_grant_head_wake( 235 struct xlog *log, 236 struct xlog_grant_head *head, 237 int *free_bytes) 238 { 239 struct xlog_ticket *tic; 240 int need_bytes; 241 242 list_for_each_entry(tic, &head->waiters, t_queue) { 243 need_bytes = xlog_ticket_reservation(log, head, tic); 244 if (*free_bytes < need_bytes) 245 return false; 246 247 *free_bytes -= need_bytes; 248 trace_xfs_log_grant_wake_up(log, tic); 249 wake_up_process(tic->t_task); 250 } 251 252 return true; 253 } 254 255 STATIC int 256 xlog_grant_head_wait( 257 struct xlog *log, 258 struct xlog_grant_head *head, 259 struct xlog_ticket *tic, 260 int need_bytes) 261 { 262 list_add_tail(&tic->t_queue, &head->waiters); 263 264 do { 265 if (XLOG_FORCED_SHUTDOWN(log)) 266 goto shutdown; 267 xlog_grant_push_ail(log, need_bytes); 268 269 __set_current_state(TASK_UNINTERRUPTIBLE); 270 spin_unlock(&head->lock); 271 272 XFS_STATS_INC(xs_sleep_logspace); 273 274 trace_xfs_log_grant_sleep(log, tic); 275 schedule(); 276 trace_xfs_log_grant_wake(log, tic); 277 278 spin_lock(&head->lock); 279 if (XLOG_FORCED_SHUTDOWN(log)) 280 goto shutdown; 281 } while (xlog_space_left(log, &head->grant) < need_bytes); 282 283 list_del_init(&tic->t_queue); 284 return 0; 285 shutdown: 286 list_del_init(&tic->t_queue); 287 return XFS_ERROR(EIO); 288 } 289 290 /* 291 * Atomically get the log space required for a log ticket. 
292 * 293 * Once a ticket gets put onto head->waiters, it will only return after the 294 * needed reservation is satisfied. 295 * 296 * This function is structured so that it has a lock free fast path. This is 297 * necessary because every new transaction reservation will come through this 298 * path. Hence any lock will be globally hot if we take it unconditionally on 299 * every pass. 300 * 301 * As tickets are only ever moved on and off head->waiters under head->lock, we 302 * only need to take that lock if we are going to add the ticket to the queue 303 * and sleep. We can avoid taking the lock if the ticket was never added to 304 * head->waiters because the t_queue list head will be empty and we hold the 305 * only reference to it so it can safely be checked unlocked. 306 */ 307 STATIC int 308 xlog_grant_head_check( 309 struct xlog *log, 310 struct xlog_grant_head *head, 311 struct xlog_ticket *tic, 312 int *need_bytes) 313 { 314 int free_bytes; 315 int error = 0; 316 317 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); 318 319 /* 320 * If there are other waiters on the queue then give them a chance at 321 * logspace before us. Wake up the first waiters, if we do not wake 322 * up all the waiters then go to sleep waiting for more free space, 323 * otherwise try to get some space for this transaction. 324 */ 325 *need_bytes = xlog_ticket_reservation(log, head, tic); 326 free_bytes = xlog_space_left(log, &head->grant); 327 if (!list_empty_careful(&head->waiters)) { 328 spin_lock(&head->lock); 329 if (!xlog_grant_head_wake(log, head, &free_bytes) || 330 free_bytes < *need_bytes) { 331 error = xlog_grant_head_wait(log, head, tic, 332 *need_bytes); 333 } 334 spin_unlock(&head->lock); 335 } else if (free_bytes < *need_bytes) { 336 spin_lock(&head->lock); 337 error = xlog_grant_head_wait(log, head, tic, *need_bytes); 338 spin_unlock(&head->lock); 339 } 340 341 return error; 342 } 343 344 static void 345 xlog_tic_reset_res(xlog_ticket_t *tic) 346 { 347 tic->t_res_num = 0; 348 tic->t_res_arr_sum = 0; 349 tic->t_res_num_ophdrs = 0; 350 } 351 352 static void 353 xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type) 354 { 355 if (tic->t_res_num == XLOG_TIC_LEN_MAX) { 356 /* add to overflow and start again */ 357 tic->t_res_o_flow += tic->t_res_arr_sum; 358 tic->t_res_num = 0; 359 tic->t_res_arr_sum = 0; 360 } 361 362 tic->t_res_arr[tic->t_res_num].r_len = len; 363 tic->t_res_arr[tic->t_res_num].r_type = type; 364 tic->t_res_arr_sum += len; 365 tic->t_res_num++; 366 } 367 368 /* 369 * Replenish the byte reservation required by moving the grant write head. 370 */ 371 int 372 xfs_log_regrant( 373 struct xfs_mount *mp, 374 struct xlog_ticket *tic) 375 { 376 struct xlog *log = mp->m_log; 377 int need_bytes; 378 int error = 0; 379 380 if (XLOG_FORCED_SHUTDOWN(log)) 381 return XFS_ERROR(EIO); 382 383 XFS_STATS_INC(xs_try_logspace); 384 385 /* 386 * This is a new transaction on the ticket, so we need to change the 387 * transaction ID so that the next transaction has a different TID in 388 * the log. Just add one to the existing tid so that we can see chains 389 * of rolling transactions in the log easily. 
390 */ 391 tic->t_tid++; 392 393 xlog_grant_push_ail(log, tic->t_unit_res); 394 395 tic->t_curr_res = tic->t_unit_res; 396 xlog_tic_reset_res(tic); 397 398 if (tic->t_cnt > 0) 399 return 0; 400 401 trace_xfs_log_regrant(log, tic); 402 403 error = xlog_grant_head_check(log, &log->l_write_head, tic, 404 &need_bytes); 405 if (error) 406 goto out_error; 407 408 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); 409 trace_xfs_log_regrant_exit(log, tic); 410 xlog_verify_grant_tail(log); 411 return 0; 412 413 out_error: 414 /* 415 * If we are failing, make sure the ticket doesn't have any current 416 * reservations. We don't want to add this back when the ticket/ 417 * transaction gets cancelled. 418 */ 419 tic->t_curr_res = 0; 420 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ 421 return error; 422 } 423 424 /* 425 * Reserve log space and return a ticket corresponding the reservation. 426 * 427 * Each reservation is going to reserve extra space for a log record header. 428 * When writes happen to the on-disk log, we don't subtract the length of the 429 * log record header from any reservation. By wasting space in each 430 * reservation, we prevent over allocation problems. 431 */ 432 int 433 xfs_log_reserve( 434 struct xfs_mount *mp, 435 int unit_bytes, 436 int cnt, 437 struct xlog_ticket **ticp, 438 __uint8_t client, 439 bool permanent, 440 uint t_type) 441 { 442 struct xlog *log = mp->m_log; 443 struct xlog_ticket *tic; 444 int need_bytes; 445 int error = 0; 446 447 ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); 448 449 if (XLOG_FORCED_SHUTDOWN(log)) 450 return XFS_ERROR(EIO); 451 452 XFS_STATS_INC(xs_try_logspace); 453 454 ASSERT(*ticp == NULL); 455 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 456 KM_SLEEP | KM_MAYFAIL); 457 if (!tic) 458 return XFS_ERROR(ENOMEM); 459 460 tic->t_trans_type = t_type; 461 *ticp = tic; 462 463 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt 464 : tic->t_unit_res); 465 466 trace_xfs_log_reserve(log, tic); 467 468 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, 469 &need_bytes); 470 if (error) 471 goto out_error; 472 473 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); 474 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); 475 trace_xfs_log_reserve_exit(log, tic); 476 xlog_verify_grant_tail(log); 477 return 0; 478 479 out_error: 480 /* 481 * If we are failing, make sure the ticket doesn't have any current 482 * reservations. We don't want to add this back when the ticket/ 483 * transaction gets cancelled. 484 */ 485 tic->t_curr_res = 0; 486 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ 487 return error; 488 } 489 490 491 /* 492 * NOTES: 493 * 494 * 1. currblock field gets updated at startup and after in-core logs 495 * marked as with WANT_SYNC. 496 */ 497 498 /* 499 * This routine is called when a user of a log manager ticket is done with 500 * the reservation. If the ticket was ever used, then a commit record for 501 * the associated transaction is written out as a log operation header with 502 * no data. The flag XLOG_TIC_INITED is set when the first write occurs with 503 * a given ticket. If the ticket was one with a permanent reservation, then 504 * a few operations are done differently. Permanent reservation tickets by 505 * default don't release the reservation. They just commit the current 506 * transaction with the belief that the reservation is still needed. 
A flag 507 * must be passed in before permanent reservations are actually released. 508 * When these type of tickets are not released, they need to be set into 509 * the inited state again. By doing this, a start record will be written 510 * out when the next write occurs. 511 */ 512 xfs_lsn_t 513 xfs_log_done( 514 struct xfs_mount *mp, 515 struct xlog_ticket *ticket, 516 struct xlog_in_core **iclog, 517 uint flags) 518 { 519 struct xlog *log = mp->m_log; 520 xfs_lsn_t lsn = 0; 521 522 if (XLOG_FORCED_SHUTDOWN(log) || 523 /* 524 * If nothing was ever written, don't write out commit record. 525 * If we get an error, just continue and give back the log ticket. 526 */ 527 (((ticket->t_flags & XLOG_TIC_INITED) == 0) && 528 (xlog_commit_record(log, ticket, iclog, &lsn)))) { 529 lsn = (xfs_lsn_t) -1; 530 if (ticket->t_flags & XLOG_TIC_PERM_RESERV) { 531 flags |= XFS_LOG_REL_PERM_RESERV; 532 } 533 } 534 535 536 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 || 537 (flags & XFS_LOG_REL_PERM_RESERV)) { 538 trace_xfs_log_done_nonperm(log, ticket); 539 540 /* 541 * Release ticket if not permanent reservation or a specific 542 * request has been made to release a permanent reservation. 543 */ 544 xlog_ungrant_log_space(log, ticket); 545 xfs_log_ticket_put(ticket); 546 } else { 547 trace_xfs_log_done_perm(log, ticket); 548 549 xlog_regrant_reserve_log_space(log, ticket); 550 /* If this ticket was a permanent reservation and we aren't 551 * trying to release it, reset the inited flags; so next time 552 * we write, a start record will be written out. 553 */ 554 ticket->t_flags |= XLOG_TIC_INITED; 555 } 556 557 return lsn; 558 } 559 560 /* 561 * Attaches a new iclog I/O completion callback routine during 562 * transaction commit. If the log is in error state, a non-zero 563 * return code is handed back and the caller is responsible for 564 * executing the callback at an appropriate time. 565 */ 566 int 567 xfs_log_notify( 568 struct xfs_mount *mp, 569 struct xlog_in_core *iclog, 570 xfs_log_callback_t *cb) 571 { 572 int abortflg; 573 574 spin_lock(&iclog->ic_callback_lock); 575 abortflg = (iclog->ic_state & XLOG_STATE_IOERROR); 576 if (!abortflg) { 577 ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) || 578 (iclog->ic_state == XLOG_STATE_WANT_SYNC)); 579 cb->cb_next = NULL; 580 *(iclog->ic_callback_tail) = cb; 581 iclog->ic_callback_tail = &(cb->cb_next); 582 } 583 spin_unlock(&iclog->ic_callback_lock); 584 return abortflg; 585 } 586 587 int 588 xfs_log_release_iclog( 589 struct xfs_mount *mp, 590 struct xlog_in_core *iclog) 591 { 592 if (xlog_state_release_iclog(mp->m_log, iclog)) { 593 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); 594 return EIO; 595 } 596 597 return 0; 598 } 599 600 /* 601 * Mount a log filesystem 602 * 603 * mp - ubiquitous xfs mount point structure 604 * log_target - buftarg of on-disk log device 605 * blk_offset - Start block # where block size is 512 bytes (BBSIZE) 606 * num_bblocks - Number of BBSIZE blocks in on-disk log 607 * 608 * Return error or zero. 609 */ 610 int 611 xfs_log_mount( 612 xfs_mount_t *mp, 613 xfs_buftarg_t *log_target, 614 xfs_daddr_t blk_offset, 615 int num_bblks) 616 { 617 int error; 618 619 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) 620 xfs_notice(mp, "Mounting Filesystem"); 621 else { 622 xfs_notice(mp, 623 "Mounting filesystem in no-recovery mode. 
Filesystem will be inconsistent."); 624 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); 625 } 626 627 mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); 628 if (IS_ERR(mp->m_log)) { 629 error = -PTR_ERR(mp->m_log); 630 goto out; 631 } 632 633 /* 634 * Initialize the AIL now we have a log. 635 */ 636 error = xfs_trans_ail_init(mp); 637 if (error) { 638 xfs_warn(mp, "AIL initialisation failed: error %d", error); 639 goto out_free_log; 640 } 641 mp->m_log->l_ailp = mp->m_ail; 642 643 /* 644 * skip log recovery on a norecovery mount. pretend it all 645 * just worked. 646 */ 647 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) { 648 int readonly = (mp->m_flags & XFS_MOUNT_RDONLY); 649 650 if (readonly) 651 mp->m_flags &= ~XFS_MOUNT_RDONLY; 652 653 error = xlog_recover(mp->m_log); 654 655 if (readonly) 656 mp->m_flags |= XFS_MOUNT_RDONLY; 657 if (error) { 658 xfs_warn(mp, "log mount/recovery failed: error %d", 659 error); 660 goto out_destroy_ail; 661 } 662 } 663 664 /* Normal transactions can now occur */ 665 mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY; 666 667 /* 668 * Now the log has been fully initialised and we know were our 669 * space grant counters are, we can initialise the permanent ticket 670 * needed for delayed logging to work. 671 */ 672 xlog_cil_init_post_recovery(mp->m_log); 673 674 return 0; 675 676 out_destroy_ail: 677 xfs_trans_ail_destroy(mp); 678 out_free_log: 679 xlog_dealloc_log(mp->m_log); 680 out: 681 return error; 682 } 683 684 /* 685 * Finish the recovery of the file system. This is separate from the 686 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read 687 * in the root and real-time bitmap inodes between calling xfs_log_mount() and 688 * here. 689 * 690 * If we finish recovery successfully, start the background log work. If we are 691 * not doing recovery, then we have a RO filesystem and we don't need to start 692 * it. 693 */ 694 int 695 xfs_log_mount_finish(xfs_mount_t *mp) 696 { 697 int error = 0; 698 699 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) { 700 error = xlog_recover_finish(mp->m_log); 701 if (!error) 702 xfs_log_work_queue(mp); 703 } else { 704 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); 705 } 706 707 708 return error; 709 } 710 711 /* 712 * Final log writes as part of unmount. 713 * 714 * Mark the filesystem clean as unmount happens. Note that during relocation 715 * this routine needs to be executed as part of source-bag while the 716 * deallocation must not be done until source-end. 717 */ 718 719 /* 720 * Unmount record used to have a string "Unmount filesystem--" in the 721 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE). 722 * We just write the magic number now since that particular field isn't 723 * currently architecture converted and "nUmount" is a bit foo. 724 * As far as I know, there weren't any dependencies on the old behaviour. 725 */ 726 727 int 728 xfs_log_unmount_write(xfs_mount_t *mp) 729 { 730 struct xlog *log = mp->m_log; 731 xlog_in_core_t *iclog; 732 #ifdef DEBUG 733 xlog_in_core_t *first_iclog; 734 #endif 735 xlog_ticket_t *tic = NULL; 736 xfs_lsn_t lsn; 737 int error; 738 739 /* 740 * Don't write out unmount record on read-only mounts. 741 * Or, if we are doing a forced umount (typically because of IO errors). 
742 */ 743 if (mp->m_flags & XFS_MOUNT_RDONLY) 744 return 0; 745 746 error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL); 747 ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log))); 748 749 #ifdef DEBUG 750 first_iclog = iclog = log->l_iclog; 751 do { 752 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { 753 ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE); 754 ASSERT(iclog->ic_offset == 0); 755 } 756 iclog = iclog->ic_next; 757 } while (iclog != first_iclog); 758 #endif 759 if (! (XLOG_FORCED_SHUTDOWN(log))) { 760 error = xfs_log_reserve(mp, 600, 1, &tic, 761 XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE); 762 if (!error) { 763 /* the data section must be 32 bit size aligned */ 764 struct { 765 __uint16_t magic; 766 __uint16_t pad1; 767 __uint32_t pad2; /* may as well make it 64 bits */ 768 } magic = { 769 .magic = XLOG_UNMOUNT_TYPE, 770 }; 771 struct xfs_log_iovec reg = { 772 .i_addr = &magic, 773 .i_len = sizeof(magic), 774 .i_type = XLOG_REG_TYPE_UNMOUNT, 775 }; 776 struct xfs_log_vec vec = { 777 .lv_niovecs = 1, 778 .lv_iovecp = ®, 779 }; 780 781 /* remove inited flag, and account for space used */ 782 tic->t_flags = 0; 783 tic->t_curr_res -= sizeof(magic); 784 error = xlog_write(log, &vec, tic, &lsn, 785 NULL, XLOG_UNMOUNT_TRANS); 786 /* 787 * At this point, we're umounting anyway, 788 * so there's no point in transitioning log state 789 * to IOERROR. Just continue... 790 */ 791 } 792 793 if (error) 794 xfs_alert(mp, "%s: unmount record failed", __func__); 795 796 797 spin_lock(&log->l_icloglock); 798 iclog = log->l_iclog; 799 atomic_inc(&iclog->ic_refcnt); 800 xlog_state_want_sync(log, iclog); 801 spin_unlock(&log->l_icloglock); 802 error = xlog_state_release_iclog(log, iclog); 803 804 spin_lock(&log->l_icloglock); 805 if (!(iclog->ic_state == XLOG_STATE_ACTIVE || 806 iclog->ic_state == XLOG_STATE_DIRTY)) { 807 if (!XLOG_FORCED_SHUTDOWN(log)) { 808 xlog_wait(&iclog->ic_force_wait, 809 &log->l_icloglock); 810 } else { 811 spin_unlock(&log->l_icloglock); 812 } 813 } else { 814 spin_unlock(&log->l_icloglock); 815 } 816 if (tic) { 817 trace_xfs_log_umount_write(log, tic); 818 xlog_ungrant_log_space(log, tic); 819 xfs_log_ticket_put(tic); 820 } 821 } else { 822 /* 823 * We're already in forced_shutdown mode, couldn't 824 * even attempt to write out the unmount transaction. 825 * 826 * Go through the motions of sync'ing and releasing 827 * the iclog, even though no I/O will actually happen, 828 * we need to wait for other log I/Os that may already 829 * be in progress. Do this as a separate section of 830 * code so we'll know if we ever get stuck here that 831 * we're in this odd situation of trying to unmount 832 * a file system that went into forced_shutdown as 833 * the result of an unmount.. 834 */ 835 spin_lock(&log->l_icloglock); 836 iclog = log->l_iclog; 837 atomic_inc(&iclog->ic_refcnt); 838 839 xlog_state_want_sync(log, iclog); 840 spin_unlock(&log->l_icloglock); 841 error = xlog_state_release_iclog(log, iclog); 842 843 spin_lock(&log->l_icloglock); 844 845 if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE 846 || iclog->ic_state == XLOG_STATE_DIRTY 847 || iclog->ic_state == XLOG_STATE_IOERROR) ) { 848 849 xlog_wait(&iclog->ic_force_wait, 850 &log->l_icloglock); 851 } else { 852 spin_unlock(&log->l_icloglock); 853 } 854 } 855 856 return error; 857 } /* xfs_log_unmount_write */ 858 859 /* 860 * Empty the log for unmount/freeze. 861 * 862 * To do this, we first need to shut down the background log work so it is not 863 * trying to cover the log as we clean up. 
We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can write the unmount
 * record.
 */
void
xfs_log_quiesce(
        struct xfs_mount *mp)
{
        cancel_delayed_work_sync(&mp->m_log->l_work);
        xfs_log_force(mp, XFS_LOG_SYNC);

        /*
         * The superblock buffer is uncached and while xfs_ail_push_all_sync()
         * will push it, xfs_wait_buftarg() will not wait for it. Further,
         * xfs_buf_iowait() cannot be used because it was pushed with the
         * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
         * the IO to complete.
         */
        xfs_ail_push_all_sync(mp->m_ail);
        xfs_wait_buftarg(mp->m_ddev_targp);
        xfs_buf_lock(mp->m_sb_bp);
        xfs_buf_unlock(mp->m_sb_bp);

        xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
        struct xfs_mount *mp)
{
        xfs_log_quiesce(mp);

        xfs_trans_ail_destroy(mp);
        xlog_dealloc_log(mp->m_log);
}

void
xfs_log_item_init(
        struct xfs_mount *mp,
        struct xfs_log_item *item,
        int type,
        const struct xfs_item_ops *ops)
{
        item->li_mountp = mp;
        item->li_ailp = mp->m_ail;
        item->li_type = type;
        item->li_ops = ops;
        item->li_lv = NULL;

        INIT_LIST_HEAD(&item->li_ail);
        INIT_LIST_HEAD(&item->li_cil);
}

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
        struct xfs_mount *mp)
{
        struct xlog *log = mp->m_log;
        int free_bytes;

        if (XLOG_FORCED_SHUTDOWN(log))
                return;

        if (!list_empty_careful(&log->l_write_head.waiters)) {
                ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

                spin_lock(&log->l_write_head.lock);
                free_bytes = xlog_space_left(log, &log->l_write_head.grant);
                xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
                spin_unlock(&log->l_write_head.lock);
        }

        if (!list_empty_careful(&log->l_reserve_head.waiters)) {
                ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

                spin_lock(&log->l_reserve_head.lock);
                free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
                xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
                spin_unlock(&log->l_reserve_head.lock);
        }
}

/*
 * Determine if we have a transaction that has gone to disk
 * that needs to be covered. To begin the transition to the idle state
 * firstly the log needs to be idle (no AIL and nothing in the iclogs).
 * If we are then in a state where covering is needed, the caller is informed
 * that dummy transactions are required to move the log into the idle state.
 *
 * Because this is called as part of the sync process, we should also indicate
 * that dummy transactions should be issued in anything but the covered or
 * idle states. This ensures that the log tail is accurately reflected in
 * the log at the end of the sync, hence if a crash occurs it avoids replay
 * of transactions where the metadata is already on disk.
968 */ 969 int 970 xfs_log_need_covered(xfs_mount_t *mp) 971 { 972 int needed = 0; 973 struct xlog *log = mp->m_log; 974 975 if (!xfs_fs_writable(mp)) 976 return 0; 977 978 spin_lock(&log->l_icloglock); 979 switch (log->l_covered_state) { 980 case XLOG_STATE_COVER_DONE: 981 case XLOG_STATE_COVER_DONE2: 982 case XLOG_STATE_COVER_IDLE: 983 break; 984 case XLOG_STATE_COVER_NEED: 985 case XLOG_STATE_COVER_NEED2: 986 if (!xfs_ail_min_lsn(log->l_ailp) && 987 xlog_iclogs_empty(log)) { 988 if (log->l_covered_state == XLOG_STATE_COVER_NEED) 989 log->l_covered_state = XLOG_STATE_COVER_DONE; 990 else 991 log->l_covered_state = XLOG_STATE_COVER_DONE2; 992 } 993 /* FALLTHRU */ 994 default: 995 needed = 1; 996 break; 997 } 998 spin_unlock(&log->l_icloglock); 999 return needed; 1000 } 1001 1002 /* 1003 * We may be holding the log iclog lock upon entering this routine. 1004 */ 1005 xfs_lsn_t 1006 xlog_assign_tail_lsn_locked( 1007 struct xfs_mount *mp) 1008 { 1009 struct xlog *log = mp->m_log; 1010 struct xfs_log_item *lip; 1011 xfs_lsn_t tail_lsn; 1012 1013 assert_spin_locked(&mp->m_ail->xa_lock); 1014 1015 /* 1016 * To make sure we always have a valid LSN for the log tail we keep 1017 * track of the last LSN which was committed in log->l_last_sync_lsn, 1018 * and use that when the AIL was empty. 1019 */ 1020 lip = xfs_ail_min(mp->m_ail); 1021 if (lip) 1022 tail_lsn = lip->li_lsn; 1023 else 1024 tail_lsn = atomic64_read(&log->l_last_sync_lsn); 1025 atomic64_set(&log->l_tail_lsn, tail_lsn); 1026 return tail_lsn; 1027 } 1028 1029 xfs_lsn_t 1030 xlog_assign_tail_lsn( 1031 struct xfs_mount *mp) 1032 { 1033 xfs_lsn_t tail_lsn; 1034 1035 spin_lock(&mp->m_ail->xa_lock); 1036 tail_lsn = xlog_assign_tail_lsn_locked(mp); 1037 spin_unlock(&mp->m_ail->xa_lock); 1038 1039 return tail_lsn; 1040 } 1041 1042 /* 1043 * Return the space in the log between the tail and the head. The head 1044 * is passed in the cycle/bytes formal parms. In the special case where 1045 * the reserve head has wrapped passed the tail, this calculation is no 1046 * longer valid. In this case, just return 0 which means there is no space 1047 * in the log. This works for all places where this function is called 1048 * with the reserve head. Of course, if the write head were to ever 1049 * wrap the tail, we should blow up. Rather than catch this case here, 1050 * we depend on other ASSERTions in other parts of the code. XXXmiken 1051 * 1052 * This code also handles the case where the reservation head is behind 1053 * the tail. The details of this case are described below, but the end 1054 * result is that we return the size of the log as the amount of space left. 1055 */ 1056 STATIC int 1057 xlog_space_left( 1058 struct xlog *log, 1059 atomic64_t *head) 1060 { 1061 int free_bytes; 1062 int tail_bytes; 1063 int tail_cycle; 1064 int head_cycle; 1065 int head_bytes; 1066 1067 xlog_crack_grant_head(head, &head_cycle, &head_bytes); 1068 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); 1069 tail_bytes = BBTOB(tail_bytes); 1070 if (tail_cycle == head_cycle && head_bytes >= tail_bytes) 1071 free_bytes = log->l_logsize - (head_bytes - tail_bytes); 1072 else if (tail_cycle + 1 < head_cycle) 1073 return 0; 1074 else if (tail_cycle < head_cycle) { 1075 ASSERT(tail_cycle == (head_cycle - 1)); 1076 free_bytes = tail_bytes - head_bytes; 1077 } else { 1078 /* 1079 * The reservation head is behind the tail. 1080 * In this case we just want to return the size of the 1081 * log as the amount of space left. 
1082 */ 1083 xfs_alert(log->l_mp, 1084 "xlog_space_left: head behind tail\n" 1085 " tail_cycle = %d, tail_bytes = %d\n" 1086 " GH cycle = %d, GH bytes = %d", 1087 tail_cycle, tail_bytes, head_cycle, head_bytes); 1088 ASSERT(0); 1089 free_bytes = log->l_logsize; 1090 } 1091 return free_bytes; 1092 } 1093 1094 1095 /* 1096 * Log function which is called when an io completes. 1097 * 1098 * The log manager needs its own routine, in order to control what 1099 * happens with the buffer after the write completes. 1100 */ 1101 void 1102 xlog_iodone(xfs_buf_t *bp) 1103 { 1104 struct xlog_in_core *iclog = bp->b_fspriv; 1105 struct xlog *l = iclog->ic_log; 1106 int aborted = 0; 1107 1108 /* 1109 * Race to shutdown the filesystem if we see an error. 1110 */ 1111 if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp, 1112 XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) { 1113 xfs_buf_ioerror_alert(bp, __func__); 1114 xfs_buf_stale(bp); 1115 xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR); 1116 /* 1117 * This flag will be propagated to the trans-committed 1118 * callback routines to let them know that the log-commit 1119 * didn't succeed. 1120 */ 1121 aborted = XFS_LI_ABORTED; 1122 } else if (iclog->ic_state & XLOG_STATE_IOERROR) { 1123 aborted = XFS_LI_ABORTED; 1124 } 1125 1126 /* log I/O is always issued ASYNC */ 1127 ASSERT(XFS_BUF_ISASYNC(bp)); 1128 xlog_state_done_syncing(iclog, aborted); 1129 /* 1130 * do not reference the buffer (bp) here as we could race 1131 * with it being freed after writing the unmount record to the 1132 * log. 1133 */ 1134 } 1135 1136 /* 1137 * Return size of each in-core log record buffer. 1138 * 1139 * All machines get 8 x 32kB buffers by default, unless tuned otherwise. 1140 * 1141 * If the filesystem blocksize is too large, we may need to choose a 1142 * larger size since the directory code currently logs entire blocks. 1143 */ 1144 1145 STATIC void 1146 xlog_get_iclog_buffer_size( 1147 struct xfs_mount *mp, 1148 struct xlog *log) 1149 { 1150 int size; 1151 int xhdrs; 1152 1153 if (mp->m_logbufs <= 0) 1154 log->l_iclog_bufs = XLOG_MAX_ICLOGS; 1155 else 1156 log->l_iclog_bufs = mp->m_logbufs; 1157 1158 /* 1159 * Buffer size passed in from mount system call. 1160 */ 1161 if (mp->m_logbsize > 0) { 1162 size = log->l_iclog_size = mp->m_logbsize; 1163 log->l_iclog_size_log = 0; 1164 while (size != 1) { 1165 log->l_iclog_size_log++; 1166 size >>= 1; 1167 } 1168 1169 if (xfs_sb_version_haslogv2(&mp->m_sb)) { 1170 /* # headers = size / 32k 1171 * one header holds cycles from 32k of data 1172 */ 1173 1174 xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE; 1175 if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE) 1176 xhdrs++; 1177 log->l_iclog_hsize = xhdrs << BBSHIFT; 1178 log->l_iclog_heads = xhdrs; 1179 } else { 1180 ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE); 1181 log->l_iclog_hsize = BBSIZE; 1182 log->l_iclog_heads = 1; 1183 } 1184 goto done; 1185 } 1186 1187 /* All machines use 32kB buffers by default. */ 1188 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; 1189 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; 1190 1191 /* the default log size is 16k or 32k which is one header sector */ 1192 log->l_iclog_hsize = BBSIZE; 1193 log->l_iclog_heads = 1; 1194 1195 done: 1196 /* are we being asked to make the sizes selected above visible? 
*/ 1197 if (mp->m_logbufs == 0) 1198 mp->m_logbufs = log->l_iclog_bufs; 1199 if (mp->m_logbsize == 0) 1200 mp->m_logbsize = log->l_iclog_size; 1201 } /* xlog_get_iclog_buffer_size */ 1202 1203 1204 void 1205 xfs_log_work_queue( 1206 struct xfs_mount *mp) 1207 { 1208 queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work, 1209 msecs_to_jiffies(xfs_syncd_centisecs * 10)); 1210 } 1211 1212 /* 1213 * Every sync period we need to unpin all items in the AIL and push them to 1214 * disk. If there is nothing dirty, then we might need to cover the log to 1215 * indicate that the filesystem is idle. 1216 */ 1217 void 1218 xfs_log_worker( 1219 struct work_struct *work) 1220 { 1221 struct xlog *log = container_of(to_delayed_work(work), 1222 struct xlog, l_work); 1223 struct xfs_mount *mp = log->l_mp; 1224 1225 /* dgc: errors ignored - not fatal and nowhere to report them */ 1226 if (xfs_log_need_covered(mp)) 1227 xfs_fs_log_dummy(mp); 1228 else 1229 xfs_log_force(mp, 0); 1230 1231 /* start pushing all the metadata that is currently dirty */ 1232 xfs_ail_push_all(mp->m_ail); 1233 1234 /* queue us up again */ 1235 xfs_log_work_queue(mp); 1236 } 1237 1238 /* 1239 * This routine initializes some of the log structure for a given mount point. 1240 * Its primary purpose is to fill in enough, so recovery can occur. However, 1241 * some other stuff may be filled in too. 1242 */ 1243 STATIC struct xlog * 1244 xlog_alloc_log( 1245 struct xfs_mount *mp, 1246 struct xfs_buftarg *log_target, 1247 xfs_daddr_t blk_offset, 1248 int num_bblks) 1249 { 1250 struct xlog *log; 1251 xlog_rec_header_t *head; 1252 xlog_in_core_t **iclogp; 1253 xlog_in_core_t *iclog, *prev_iclog=NULL; 1254 xfs_buf_t *bp; 1255 int i; 1256 int error = ENOMEM; 1257 uint log2_size = 0; 1258 1259 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); 1260 if (!log) { 1261 xfs_warn(mp, "Log allocation failed: No memory!"); 1262 goto out; 1263 } 1264 1265 log->l_mp = mp; 1266 log->l_targ = log_target; 1267 log->l_logsize = BBTOB(num_bblks); 1268 log->l_logBBstart = blk_offset; 1269 log->l_logBBsize = num_bblks; 1270 log->l_covered_state = XLOG_STATE_COVER_IDLE; 1271 log->l_flags |= XLOG_ACTIVE_RECOVERY; 1272 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); 1273 1274 log->l_prev_block = -1; 1275 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ 1276 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); 1277 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); 1278 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ 1279 1280 xlog_grant_head_init(&log->l_reserve_head); 1281 xlog_grant_head_init(&log->l_write_head); 1282 1283 error = EFSCORRUPTED; 1284 if (xfs_sb_version_hassector(&mp->m_sb)) { 1285 log2_size = mp->m_sb.sb_logsectlog; 1286 if (log2_size < BBSHIFT) { 1287 xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)", 1288 log2_size, BBSHIFT); 1289 goto out_free_log; 1290 } 1291 1292 log2_size -= BBSHIFT; 1293 if (log2_size > mp->m_sectbb_log) { 1294 xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)", 1295 log2_size, mp->m_sectbb_log); 1296 goto out_free_log; 1297 } 1298 1299 /* for larger sector sizes, must have v2 or external log */ 1300 if (log2_size && log->l_logBBstart > 0 && 1301 !xfs_sb_version_haslogv2(&mp->m_sb)) { 1302 xfs_warn(mp, 1303 "log sector size (0x%x) invalid for configuration.", 1304 log2_size); 1305 goto out_free_log; 1306 } 1307 } 1308 log->l_sectBBsize = 1 << log2_size; 1309 1310 xlog_get_iclog_buffer_size(mp, log); 1311 1312 error = ENOMEM; 1313 bp = xfs_buf_alloc(mp->m_logdev_targp, 0, 
BTOBB(log->l_iclog_size), 0); 1314 if (!bp) 1315 goto out_free_log; 1316 bp->b_iodone = xlog_iodone; 1317 ASSERT(xfs_buf_islocked(bp)); 1318 log->l_xbuf = bp; 1319 1320 spin_lock_init(&log->l_icloglock); 1321 init_waitqueue_head(&log->l_flush_wait); 1322 1323 iclogp = &log->l_iclog; 1324 /* 1325 * The amount of memory to allocate for the iclog structure is 1326 * rather funky due to the way the structure is defined. It is 1327 * done this way so that we can use different sizes for machines 1328 * with different amounts of memory. See the definition of 1329 * xlog_in_core_t in xfs_log_priv.h for details. 1330 */ 1331 ASSERT(log->l_iclog_size >= 4096); 1332 for (i=0; i < log->l_iclog_bufs; i++) { 1333 *iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL); 1334 if (!*iclogp) 1335 goto out_free_iclog; 1336 1337 iclog = *iclogp; 1338 iclog->ic_prev = prev_iclog; 1339 prev_iclog = iclog; 1340 1341 bp = xfs_buf_get_uncached(mp->m_logdev_targp, 1342 BTOBB(log->l_iclog_size), 0); 1343 if (!bp) 1344 goto out_free_iclog; 1345 1346 bp->b_iodone = xlog_iodone; 1347 iclog->ic_bp = bp; 1348 iclog->ic_data = bp->b_addr; 1349 #ifdef DEBUG 1350 log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header); 1351 #endif 1352 head = &iclog->ic_header; 1353 memset(head, 0, sizeof(xlog_rec_header_t)); 1354 head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); 1355 head->h_version = cpu_to_be32( 1356 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); 1357 head->h_size = cpu_to_be32(log->l_iclog_size); 1358 /* new fields */ 1359 head->h_fmt = cpu_to_be32(XLOG_FMT); 1360 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); 1361 1362 iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize; 1363 iclog->ic_state = XLOG_STATE_ACTIVE; 1364 iclog->ic_log = log; 1365 atomic_set(&iclog->ic_refcnt, 0); 1366 spin_lock_init(&iclog->ic_callback_lock); 1367 iclog->ic_callback_tail = &(iclog->ic_callback); 1368 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; 1369 1370 ASSERT(xfs_buf_islocked(iclog->ic_bp)); 1371 init_waitqueue_head(&iclog->ic_force_wait); 1372 init_waitqueue_head(&iclog->ic_write_wait); 1373 1374 iclogp = &iclog->ic_next; 1375 } 1376 *iclogp = log->l_iclog; /* complete ring */ 1377 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ 1378 1379 error = xlog_cil_init(log); 1380 if (error) 1381 goto out_free_iclog; 1382 return log; 1383 1384 out_free_iclog: 1385 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { 1386 prev_iclog = iclog->ic_next; 1387 if (iclog->ic_bp) 1388 xfs_buf_free(iclog->ic_bp); 1389 kmem_free(iclog); 1390 } 1391 spinlock_destroy(&log->l_icloglock); 1392 xfs_buf_free(log->l_xbuf); 1393 out_free_log: 1394 kmem_free(log); 1395 out: 1396 return ERR_PTR(-error); 1397 } /* xlog_alloc_log */ 1398 1399 1400 /* 1401 * Write out the commit record of a transaction associated with the given 1402 * ticket. Return the lsn of the commit record. 
1403 */ 1404 STATIC int 1405 xlog_commit_record( 1406 struct xlog *log, 1407 struct xlog_ticket *ticket, 1408 struct xlog_in_core **iclog, 1409 xfs_lsn_t *commitlsnp) 1410 { 1411 struct xfs_mount *mp = log->l_mp; 1412 int error; 1413 struct xfs_log_iovec reg = { 1414 .i_addr = NULL, 1415 .i_len = 0, 1416 .i_type = XLOG_REG_TYPE_COMMIT, 1417 }; 1418 struct xfs_log_vec vec = { 1419 .lv_niovecs = 1, 1420 .lv_iovecp = ®, 1421 }; 1422 1423 ASSERT_ALWAYS(iclog); 1424 error = xlog_write(log, &vec, ticket, commitlsnp, iclog, 1425 XLOG_COMMIT_TRANS); 1426 if (error) 1427 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); 1428 return error; 1429 } 1430 1431 /* 1432 * Push on the buffer cache code if we ever use more than 75% of the on-disk 1433 * log space. This code pushes on the lsn which would supposedly free up 1434 * the 25% which we want to leave free. We may need to adopt a policy which 1435 * pushes on an lsn which is further along in the log once we reach the high 1436 * water mark. In this manner, we would be creating a low water mark. 1437 */ 1438 STATIC void 1439 xlog_grant_push_ail( 1440 struct xlog *log, 1441 int need_bytes) 1442 { 1443 xfs_lsn_t threshold_lsn = 0; 1444 xfs_lsn_t last_sync_lsn; 1445 int free_blocks; 1446 int free_bytes; 1447 int threshold_block; 1448 int threshold_cycle; 1449 int free_threshold; 1450 1451 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); 1452 1453 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); 1454 free_blocks = BTOBBT(free_bytes); 1455 1456 /* 1457 * Set the threshold for the minimum number of free blocks in the 1458 * log to the maximum of what the caller needs, one quarter of the 1459 * log, and 256 blocks. 1460 */ 1461 free_threshold = BTOBB(need_bytes); 1462 free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2)); 1463 free_threshold = MAX(free_threshold, 256); 1464 if (free_blocks >= free_threshold) 1465 return; 1466 1467 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, 1468 &threshold_block); 1469 threshold_block += free_threshold; 1470 if (threshold_block >= log->l_logBBsize) { 1471 threshold_block -= log->l_logBBsize; 1472 threshold_cycle += 1; 1473 } 1474 threshold_lsn = xlog_assign_lsn(threshold_cycle, 1475 threshold_block); 1476 /* 1477 * Don't pass in an lsn greater than the lsn of the last 1478 * log record known to be on disk. Use a snapshot of the last sync lsn 1479 * so that it doesn't change between the compare and the set. 1480 */ 1481 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); 1482 if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0) 1483 threshold_lsn = last_sync_lsn; 1484 1485 /* 1486 * Get the transaction layer to kick the dirty buffers out to 1487 * disk asynchronously. No point in trying to do this if 1488 * the filesystem is shutting down. 
1489 */ 1490 if (!XLOG_FORCED_SHUTDOWN(log)) 1491 xfs_ail_push(log->l_ailp, threshold_lsn); 1492 } 1493 1494 /* 1495 * Stamp cycle number in every block 1496 */ 1497 STATIC void 1498 xlog_pack_data( 1499 struct xlog *log, 1500 struct xlog_in_core *iclog, 1501 int roundoff) 1502 { 1503 int i, j, k; 1504 int size = iclog->ic_offset + roundoff; 1505 __be32 cycle_lsn; 1506 xfs_caddr_t dp; 1507 1508 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn); 1509 1510 dp = iclog->ic_datap; 1511 for (i = 0; i < BTOBB(size); i++) { 1512 if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) 1513 break; 1514 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; 1515 *(__be32 *)dp = cycle_lsn; 1516 dp += BBSIZE; 1517 } 1518 1519 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 1520 xlog_in_core_2_t *xhdr = iclog->ic_data; 1521 1522 for ( ; i < BTOBB(size); i++) { 1523 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 1524 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 1525 xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp; 1526 *(__be32 *)dp = cycle_lsn; 1527 dp += BBSIZE; 1528 } 1529 1530 for (i = 1; i < log->l_iclog_heads; i++) 1531 xhdr[i].hic_xheader.xh_cycle = cycle_lsn; 1532 } 1533 } 1534 1535 /* 1536 * Calculate the checksum for a log buffer. 1537 * 1538 * This is a little more complicated than it should be because the various 1539 * headers and the actual data are non-contiguous. 1540 */ 1541 __le32 1542 xlog_cksum( 1543 struct xlog *log, 1544 struct xlog_rec_header *rhead, 1545 char *dp, 1546 int size) 1547 { 1548 __uint32_t crc; 1549 1550 /* first generate the crc for the record header ... */ 1551 crc = xfs_start_cksum((char *)rhead, 1552 sizeof(struct xlog_rec_header), 1553 offsetof(struct xlog_rec_header, h_crc)); 1554 1555 /* ... then for additional cycle data for v2 logs ... */ 1556 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 1557 union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead; 1558 int i; 1559 1560 for (i = 1; i < log->l_iclog_heads; i++) { 1561 crc = crc32c(crc, &xhdr[i].hic_xheader, 1562 sizeof(struct xlog_rec_ext_header)); 1563 } 1564 } 1565 1566 /* ... and finally for the payload */ 1567 crc = crc32c(crc, dp, size); 1568 1569 return xfs_end_cksum(crc); 1570 } 1571 1572 /* 1573 * The bdstrat callback function for log bufs. This gives us a central 1574 * place to trap bufs in case we get hit by a log I/O error and need to 1575 * shutdown. Actually, in practice, even when we didn't get a log error, 1576 * we transition the iclogs to IOERROR state *after* flushing all existing 1577 * iclogs to disk. This is because we don't want anymore new transactions to be 1578 * started or completed afterwards. 1579 */ 1580 STATIC int 1581 xlog_bdstrat( 1582 struct xfs_buf *bp) 1583 { 1584 struct xlog_in_core *iclog = bp->b_fspriv; 1585 1586 if (iclog->ic_state & XLOG_STATE_IOERROR) { 1587 xfs_buf_ioerror(bp, EIO); 1588 xfs_buf_stale(bp); 1589 xfs_buf_ioend(bp, 0); 1590 /* 1591 * It would seem logical to return EIO here, but we rely on 1592 * the log state machine to propagate I/O errors instead of 1593 * doing it here. 1594 */ 1595 return 0; 1596 } 1597 1598 xfs_buf_iorequest(bp); 1599 return 0; 1600 } 1601 1602 /* 1603 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous 1604 * fashion. Previously, we should have moved the current iclog 1605 * ptr in the log to point to the next available iclog. This allows further 1606 * write to continue while this code syncs out an iclog ready to go. 
 * Before an in-core log can be written out, the data section must be scanned
 * to save away the 1st word of each BBSIZE block into the header. We replace
 * it with the current cycle count. Each BBSIZE block is tagged with the
 * cycle count because there is an implicit assumption that drives will
 * guarantee that entire 512 byte blocks get written at once. In other words,
 * we can't have part of a 512 byte block written and part not written. By
 * tagging each block, we will know which blocks are valid when recovering
 * after an unclean shutdown.
 *
 * This routine is single threaded on the iclog. No other thread can be in
 * this routine with the same iclog. Changing contents of iclog can there-
 * fore be done without grabbing the state machine lock. Updating the global
 * log will require grabbing the lock though.
 *
 * The entire log manager uses a logical block numbering scheme. Only
 * log_sync (and then only bwrite()) know about the fact that the log may
 * not start with block zero on a given device. The log block start offset
 * is added immediately before calling bwrite().
 */

STATIC int
xlog_sync(
        struct xlog *log,
        struct xlog_in_core *iclog)
{
        xfs_buf_t *bp;
        int i;
        uint count;             /* byte count of bwrite */
        uint count_init;        /* initial count before roundup */
        int roundoff;           /* roundoff to BB or stripe */
        int split = 0;          /* split write into two regions */
        int error;
        int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
        int size;

        XFS_STATS_INC(xs_log_writes);
        ASSERT(atomic_read(&iclog->ic_refcnt) == 0);

        /* Add for LR header */
        count_init = log->l_iclog_hsize + iclog->ic_offset;

        /* Round out the log write size */
        if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
                /* we have a v2 stripe unit to use */
                count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
        } else {
                count = BBTOB(BTOBB(count_init));
        }
        roundoff = count - count_init;
        ASSERT(roundoff >= 0);
        ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
                roundoff < log->l_mp->m_sb.sb_logsunit)
                ||
                (log->l_mp->m_sb.sb_logsunit <= 1 &&
                 roundoff < BBTOB(1)));

        /* move grant heads by roundoff in sync */
        xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
        xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);

        /* put cycle number in every block */
        xlog_pack_data(log, iclog, roundoff);

        /* real byte length */
        size = iclog->ic_offset;
        if (v2)
                size += roundoff;
        iclog->ic_header.h_len = cpu_to_be32(size);

        bp = iclog->ic_bp;
        XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));

        XFS_STATS_ADD(xs_log_blocks, BTOBB(count));

        /* Do we need to split this write into 2 parts? */
        if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
                char *dptr;

                split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
                count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
                iclog->ic_bwritecnt = 2;

                /*
                 * Bump the cycle numbers at the start of each block in the
                 * part of the iclog that ends up in the buffer that gets
                 * written to the start of the log.
                 *
                 * Watch out for the header magic number case, though.
                 */
                dptr = (char *)&iclog->ic_header + count;
                for (i = 0; i < split; i += BBSIZE) {
                        __uint32_t cycle = be32_to_cpu(*(__be32 *)dptr);
                        if (++cycle == XLOG_HEADER_MAGIC_NUM)
                                cycle++;
                        *(__be32 *)dptr = cpu_to_be32(cycle);

                        dptr += BBSIZE;
                }
        } else {
                iclog->ic_bwritecnt = 1;
        }

        /* calculate the checksum */
        iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
                                            iclog->ic_datap, size);

        bp->b_io_length = BTOBB(count);
        bp->b_fspriv = iclog;
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_ASYNC(bp);
        bp->b_flags |= XBF_SYNCIO;

        if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
                bp->b_flags |= XBF_FUA;

                /*
                 * Flush the data device before flushing the log to make
                 * sure all meta data written back from the AIL actually made
                 * it to disk before stamping the new log tail LSN into the
                 * log buffer. For an external log we need to issue the
                 * flush explicitly, and unfortunately synchronously here;
                 * for an internal log we can simply use the block layer
                 * state machine for preflushes.
                 */
                if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
                        xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
                else
                        bp->b_flags |= XBF_FLUSH;
        }

        ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
        ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

        xlog_verify_iclog(log, iclog, count, true);

        /* account for log which doesn't start at block #0 */
        XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
        /*
         * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
         * is shutting down.
         */
        XFS_BUF_WRITE(bp);

        error = xlog_bdstrat(bp);
        if (error) {
                xfs_buf_ioerror_alert(bp, "xlog_sync");
                return error;
        }
        if (split) {
                bp = iclog->ic_log->l_xbuf;
                XFS_BUF_SET_ADDR(bp, 0);        /* logical 0 */
                xfs_buf_associate_memory(bp,
                                (char *)&iclog->ic_header + count, split);
                bp->b_fspriv = iclog;
                XFS_BUF_ZEROFLAGS(bp);
                XFS_BUF_ASYNC(bp);
                bp->b_flags |= XBF_SYNCIO;
                if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
                        bp->b_flags |= XBF_FUA;

                ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
                ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

                /* account for internal log which doesn't start at block #0 */
                XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
                XFS_BUF_WRITE(bp);
                error = xlog_bdstrat(bp);
                if (error) {
                        xfs_buf_ioerror_alert(bp, "xlog_sync (split)");
                        return error;
                }
        }
        return 0;
}       /* xlog_sync */

/*
 * Deallocate a log structure
 */
STATIC void
xlog_dealloc_log(
        struct xlog *log)
{
        xlog_in_core_t *iclog, *next_iclog;
        int i;

        xlog_cil_destroy(log);

        /*
         * always need to ensure that the extra buffer does not point to memory
         * owned by another log buffer before we free it.
1797 */ 1798 xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); 1799 xfs_buf_free(log->l_xbuf); 1800 1801 iclog = log->l_iclog; 1802 for (i=0; i<log->l_iclog_bufs; i++) { 1803 xfs_buf_free(iclog->ic_bp); 1804 next_iclog = iclog->ic_next; 1805 kmem_free(iclog); 1806 iclog = next_iclog; 1807 } 1808 spinlock_destroy(&log->l_icloglock); 1809 1810 log->l_mp->m_log = NULL; 1811 kmem_free(log); 1812 } /* xlog_dealloc_log */ 1813 1814 /* 1815 * Update counters atomically now that memcpy is done. 1816 */ 1817 /* ARGSUSED */ 1818 static inline void 1819 xlog_state_finish_copy( 1820 struct xlog *log, 1821 struct xlog_in_core *iclog, 1822 int record_cnt, 1823 int copy_bytes) 1824 { 1825 spin_lock(&log->l_icloglock); 1826 1827 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); 1828 iclog->ic_offset += copy_bytes; 1829 1830 spin_unlock(&log->l_icloglock); 1831 } /* xlog_state_finish_copy */ 1832 1833 1834 1835 1836 /* 1837 * print out info relating to regions written which consume 1838 * the reservation 1839 */ 1840 void 1841 xlog_print_tic_res( 1842 struct xfs_mount *mp, 1843 struct xlog_ticket *ticket) 1844 { 1845 uint i; 1846 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); 1847 1848 /* match with XLOG_REG_TYPE_* in xfs_log.h */ 1849 static char *res_type_str[XLOG_REG_TYPE_MAX] = { 1850 "bformat", 1851 "bchunk", 1852 "efi_format", 1853 "efd_format", 1854 "iformat", 1855 "icore", 1856 "iext", 1857 "ibroot", 1858 "ilocal", 1859 "iattr_ext", 1860 "iattr_broot", 1861 "iattr_local", 1862 "qformat", 1863 "dquot", 1864 "quotaoff", 1865 "LR header", 1866 "unmount", 1867 "commit", 1868 "trans header" 1869 }; 1870 static char *trans_type_str[XFS_TRANS_TYPE_MAX] = { 1871 "SETATTR_NOT_SIZE", 1872 "SETATTR_SIZE", 1873 "INACTIVE", 1874 "CREATE", 1875 "CREATE_TRUNC", 1876 "TRUNCATE_FILE", 1877 "REMOVE", 1878 "LINK", 1879 "RENAME", 1880 "MKDIR", 1881 "RMDIR", 1882 "SYMLINK", 1883 "SET_DMATTRS", 1884 "GROWFS", 1885 "STRAT_WRITE", 1886 "DIOSTRAT", 1887 "WRITE_SYNC", 1888 "WRITEID", 1889 "ADDAFORK", 1890 "ATTRINVAL", 1891 "ATRUNCATE", 1892 "ATTR_SET", 1893 "ATTR_RM", 1894 "ATTR_FLAG", 1895 "CLEAR_AGI_BUCKET", 1896 "QM_SBCHANGE", 1897 "DUMMY1", 1898 "DUMMY2", 1899 "QM_QUOTAOFF", 1900 "QM_DQALLOC", 1901 "QM_SETQLIM", 1902 "QM_DQCLUSTER", 1903 "QM_QINOCREATE", 1904 "QM_QUOTAOFF_END", 1905 "SB_UNIT", 1906 "FSYNC_TS", 1907 "GROWFSRT_ALLOC", 1908 "GROWFSRT_ZERO", 1909 "GROWFSRT_FREE", 1910 "SWAPEXT" 1911 }; 1912 1913 xfs_warn(mp, 1914 "xlog_write: reservation summary:\n" 1915 " trans type = %s (%u)\n" 1916 " unit res = %d bytes\n" 1917 " current res = %d bytes\n" 1918 " total reg = %u bytes (o/flow = %u bytes)\n" 1919 " ophdrs = %u (ophdr space = %u bytes)\n" 1920 " ophdr + reg = %u bytes\n" 1921 " num regions = %u\n", 1922 ((ticket->t_trans_type <= 0 || 1923 ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ? 1924 "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]), 1925 ticket->t_trans_type, 1926 ticket->t_unit_res, 1927 ticket->t_curr_res, 1928 ticket->t_res_arr_sum, ticket->t_res_o_flow, 1929 ticket->t_res_num_ophdrs, ophdr_spc, 1930 ticket->t_res_arr_sum + 1931 ticket->t_res_o_flow + ophdr_spc, 1932 ticket->t_res_num); 1933 1934 for (i = 0; i < ticket->t_res_num; i++) { 1935 uint r_type = ticket->t_res_arr[i].r_type; 1936 xfs_warn(mp, "region[%u]: %s - %u bytes\n", i, 1937 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 
1938 "bad-rtype" : res_type_str[r_type-1]), 1939 ticket->t_res_arr[i].r_len); 1940 } 1941 1942 xfs_alert_tag(mp, XFS_PTAG_LOGRES, 1943 "xlog_write: reservation ran out. Need to up reservation"); 1944 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1945 } 1946 1947 /* 1948 * Calculate the potential space needed by the log vector. Each region gets 1949 * its own xlog_op_header_t and may need to be double word aligned. 1950 */ 1951 static int 1952 xlog_write_calc_vec_length( 1953 struct xlog_ticket *ticket, 1954 struct xfs_log_vec *log_vector) 1955 { 1956 struct xfs_log_vec *lv; 1957 int headers = 0; 1958 int len = 0; 1959 int i; 1960 1961 /* acct for start rec of xact */ 1962 if (ticket->t_flags & XLOG_TIC_INITED) 1963 headers++; 1964 1965 for (lv = log_vector; lv; lv = lv->lv_next) { 1966 /* we don't write ordered log vectors */ 1967 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) 1968 continue; 1969 1970 headers += lv->lv_niovecs; 1971 1972 for (i = 0; i < lv->lv_niovecs; i++) { 1973 struct xfs_log_iovec *vecp = &lv->lv_iovecp[i]; 1974 1975 len += vecp->i_len; 1976 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type); 1977 } 1978 } 1979 1980 ticket->t_res_num_ophdrs += headers; 1981 len += headers * sizeof(struct xlog_op_header); 1982 1983 return len; 1984 } 1985 1986 /* 1987 * If first write for transaction, insert start record We can't be trying to 1988 * commit if we are inited. We can't have any "partial_copy" if we are inited. 1989 */ 1990 static int 1991 xlog_write_start_rec( 1992 struct xlog_op_header *ophdr, 1993 struct xlog_ticket *ticket) 1994 { 1995 if (!(ticket->t_flags & XLOG_TIC_INITED)) 1996 return 0; 1997 1998 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 1999 ophdr->oh_clientid = ticket->t_clientid; 2000 ophdr->oh_len = 0; 2001 ophdr->oh_flags = XLOG_START_TRANS; 2002 ophdr->oh_res2 = 0; 2003 2004 ticket->t_flags &= ~XLOG_TIC_INITED; 2005 2006 return sizeof(struct xlog_op_header); 2007 } 2008 2009 static xlog_op_header_t * 2010 xlog_write_setup_ophdr( 2011 struct xlog *log, 2012 struct xlog_op_header *ophdr, 2013 struct xlog_ticket *ticket, 2014 uint flags) 2015 { 2016 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2017 ophdr->oh_clientid = ticket->t_clientid; 2018 ophdr->oh_res2 = 0; 2019 2020 /* are we copying a commit or unmount record? */ 2021 ophdr->oh_flags = flags; 2022 2023 /* 2024 * We've seen logs corrupted with bad transaction client ids. This 2025 * makes sure that XFS doesn't generate them on. Turn this into an EIO 2026 * and shut down the filesystem. 2027 */ 2028 switch (ophdr->oh_clientid) { 2029 case XFS_TRANSACTION: 2030 case XFS_VOLUME: 2031 case XFS_LOG: 2032 break; 2033 default: 2034 xfs_warn(log->l_mp, 2035 "Bad XFS transaction clientid 0x%x in ticket 0x%p", 2036 ophdr->oh_clientid, ticket); 2037 return NULL; 2038 } 2039 2040 return ophdr; 2041 } 2042 2043 /* 2044 * Set up the parameters of the region copy into the log. This has 2045 * to handle region write split across multiple log buffers - this 2046 * state is kept external to this function so that this code can 2047 * can be written in an obvious, self documenting manner. 
2048 */ 2049 static int 2050 xlog_write_setup_copy( 2051 struct xlog_ticket *ticket, 2052 struct xlog_op_header *ophdr, 2053 int space_available, 2054 int space_required, 2055 int *copy_off, 2056 int *copy_len, 2057 int *last_was_partial_copy, 2058 int *bytes_consumed) 2059 { 2060 int still_to_copy; 2061 2062 still_to_copy = space_required - *bytes_consumed; 2063 *copy_off = *bytes_consumed; 2064 2065 if (still_to_copy <= space_available) { 2066 /* write of region completes here */ 2067 *copy_len = still_to_copy; 2068 ophdr->oh_len = cpu_to_be32(*copy_len); 2069 if (*last_was_partial_copy) 2070 ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); 2071 *last_was_partial_copy = 0; 2072 *bytes_consumed = 0; 2073 return 0; 2074 } 2075 2076 /* partial write of region, needs extra log op header reservation */ 2077 *copy_len = space_available; 2078 ophdr->oh_len = cpu_to_be32(*copy_len); 2079 ophdr->oh_flags |= XLOG_CONTINUE_TRANS; 2080 if (*last_was_partial_copy) 2081 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS; 2082 *bytes_consumed += *copy_len; 2083 (*last_was_partial_copy)++; 2084 2085 /* account for new log op header */ 2086 ticket->t_curr_res -= sizeof(struct xlog_op_header); 2087 ticket->t_res_num_ophdrs++; 2088 2089 return sizeof(struct xlog_op_header); 2090 } 2091 2092 static int 2093 xlog_write_copy_finish( 2094 struct xlog *log, 2095 struct xlog_in_core *iclog, 2096 uint flags, 2097 int *record_cnt, 2098 int *data_cnt, 2099 int *partial_copy, 2100 int *partial_copy_len, 2101 int log_offset, 2102 struct xlog_in_core **commit_iclog) 2103 { 2104 if (*partial_copy) { 2105 /* 2106 * This iclog has already been marked WANT_SYNC by 2107 * xlog_state_get_iclog_space. 2108 */ 2109 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2110 *record_cnt = 0; 2111 *data_cnt = 0; 2112 return xlog_state_release_iclog(log, iclog); 2113 } 2114 2115 *partial_copy = 0; 2116 *partial_copy_len = 0; 2117 2118 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { 2119 /* no more space in this iclog - push it. */ 2120 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2121 *record_cnt = 0; 2122 *data_cnt = 0; 2123 2124 spin_lock(&log->l_icloglock); 2125 xlog_state_want_sync(log, iclog); 2126 spin_unlock(&log->l_icloglock); 2127 2128 if (!commit_iclog) 2129 return xlog_state_release_iclog(log, iclog); 2130 ASSERT(flags & XLOG_COMMIT_TRANS); 2131 *commit_iclog = iclog; 2132 } 2133 2134 return 0; 2135 } 2136 2137 /* 2138 * Write some region out to in-core log 2139 * 2140 * This will be called when writing externally provided regions or when 2141 * writing out a commit record for a given transaction. 2142 * 2143 * General algorithm: 2144 * 1. Find total length of this write. This may include adding to the 2145 * lengths passed in. 2146 * 2. Check whether we violate the tickets reservation. 2147 * 3. While writing to this iclog 2148 * A. Reserve as much space in this iclog as can get 2149 * B. If this is first write, save away start lsn 2150 * C. While writing this region: 2151 * 1. If first write of transaction, write start record 2152 * 2. Write log operation header (header per region) 2153 * 3. Find out if we can fit entire region into this iclog 2154 * 4. Potentially, verify destination memcpy ptr 2155 * 5. Memcpy (partial) region 2156 * 6. If partial copy, release iclog; otherwise, continue 2157 * copying more regions into current iclog 2158 * 4. Mark want sync bit (in simulation mode) 2159 * 5. Release iclog for potential flush to on-disk log. 2160 * 2161 * ERRORS: 2162 * 1. 
Panic if reservation is overrun. This should never happen since 2163 * reservation amounts are generated internal to the filesystem. 2164 * NOTES: 2165 * 1. Tickets are single threaded data structures. 2166 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the 2167 * syncing routine. When a single log_write region needs to span 2168 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set 2169 * on all log operation writes which don't contain the end of the 2170 * region. The XLOG_END_TRANS bit is used for the in-core log 2171 * operation which contains the end of the continued log_write region. 2172 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, 2173 * we don't really know exactly how much space will be used. As a result, 2174 * we don't update ic_offset until the end when we know exactly how many 2175 * bytes have been written out. 2176 */ 2177 int 2178 xlog_write( 2179 struct xlog *log, 2180 struct xfs_log_vec *log_vector, 2181 struct xlog_ticket *ticket, 2182 xfs_lsn_t *start_lsn, 2183 struct xlog_in_core **commit_iclog, 2184 uint flags) 2185 { 2186 struct xlog_in_core *iclog = NULL; 2187 struct xfs_log_iovec *vecp; 2188 struct xfs_log_vec *lv; 2189 int len; 2190 int index; 2191 int partial_copy = 0; 2192 int partial_copy_len = 0; 2193 int contwr = 0; 2194 int record_cnt = 0; 2195 int data_cnt = 0; 2196 int error; 2197 2198 *start_lsn = 0; 2199 2200 len = xlog_write_calc_vec_length(ticket, log_vector); 2201 2202 /* 2203 * Region headers and bytes are already accounted for. 2204 * We only need to take into account start records and 2205 * split regions in this function. 2206 */ 2207 if (ticket->t_flags & XLOG_TIC_INITED) 2208 ticket->t_curr_res -= sizeof(xlog_op_header_t); 2209 2210 /* 2211 * Commit record headers need to be accounted for. These 2212 * come in as separate writes so are easy to detect. 2213 */ 2214 if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS)) 2215 ticket->t_curr_res -= sizeof(xlog_op_header_t); 2216 2217 if (ticket->t_curr_res < 0) 2218 xlog_print_tic_res(log->l_mp, ticket); 2219 2220 index = 0; 2221 lv = log_vector; 2222 vecp = lv->lv_iovecp; 2223 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2224 void *ptr; 2225 int log_offset; 2226 2227 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, 2228 &contwr, &log_offset); 2229 if (error) 2230 return error; 2231 2232 ASSERT(log_offset <= iclog->ic_size - 1); 2233 ptr = iclog->ic_datap + log_offset; 2234 2235 /* start_lsn is the first lsn written to. That's all we need. */ 2236 if (!*start_lsn) 2237 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2238 2239 /* 2240 * This loop writes out as many regions as can fit in the amount 2241 * of space which was allocated by xlog_state_get_iclog_space(). 
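 * For example, several small regions from the same log vector are copied
 * back to back here against a single space grant; only when a region does
 * not fit (a partial copy) or the iclog fills up do we return to the outer
 * loop to grab more iclog space.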
2242 */ 2243 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2244 struct xfs_log_iovec *reg; 2245 struct xlog_op_header *ophdr; 2246 int start_rec_copy; 2247 int copy_len; 2248 int copy_off; 2249 bool ordered = false; 2250 2251 /* ordered log vectors have no regions to write */ 2252 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { 2253 ASSERT(lv->lv_niovecs == 0); 2254 ordered = true; 2255 goto next_lv; 2256 } 2257 2258 reg = &vecp[index]; 2259 ASSERT(reg->i_len % sizeof(__int32_t) == 0); 2260 ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0); 2261 2262 start_rec_copy = xlog_write_start_rec(ptr, ticket); 2263 if (start_rec_copy) { 2264 record_cnt++; 2265 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2266 start_rec_copy); 2267 } 2268 2269 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); 2270 if (!ophdr) 2271 return XFS_ERROR(EIO); 2272 2273 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2274 sizeof(struct xlog_op_header)); 2275 2276 len += xlog_write_setup_copy(ticket, ophdr, 2277 iclog->ic_size-log_offset, 2278 reg->i_len, 2279 &copy_off, &copy_len, 2280 &partial_copy, 2281 &partial_copy_len); 2282 xlog_verify_dest_ptr(log, ptr); 2283 2284 /* copy region */ 2285 ASSERT(copy_len >= 0); 2286 memcpy(ptr, reg->i_addr + copy_off, copy_len); 2287 xlog_write_adv_cnt(&ptr, &len, &log_offset, copy_len); 2288 2289 copy_len += start_rec_copy + sizeof(xlog_op_header_t); 2290 record_cnt++; 2291 data_cnt += contwr ? copy_len : 0; 2292 2293 error = xlog_write_copy_finish(log, iclog, flags, 2294 &record_cnt, &data_cnt, 2295 &partial_copy, 2296 &partial_copy_len, 2297 log_offset, 2298 commit_iclog); 2299 if (error) 2300 return error; 2301 2302 /* 2303 * if we had a partial copy, we need to get more iclog 2304 * space but we don't want to increment the region 2305 * index because there is still more in this region to 2306 * write. 2307 * 2308 * If we completed writing this region, and we flushed 2309 * the iclog (indicated by resetting of the record 2310 * count), then we also need to get more log space. If 2311 * this was the last record, though, we are done and 2312 * can just return. 2313 */ 2314 if (partial_copy) 2315 break; 2316 2317 if (++index == lv->lv_niovecs) { 2318 next_lv: 2319 lv = lv->lv_next; 2320 index = 0; 2321 if (lv) 2322 vecp = lv->lv_iovecp; 2323 } 2324 if (record_cnt == 0 && ordered == false) { 2325 if (!lv) 2326 return 0; 2327 break; 2328 } 2329 } 2330 } 2331 2332 ASSERT(len == 0); 2333 2334 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); 2335 if (!commit_iclog) 2336 return xlog_state_release_iclog(log, iclog); 2337 2338 ASSERT(flags & XLOG_COMMIT_TRANS); 2339 *commit_iclog = iclog; 2340 return 0; 2341 } 2342 2343 2344 /***************************************************************************** 2345 * 2346 * State Machine functions 2347 * 2348 ***************************************************************************** 2349 */ 2350 2351 /* Clean iclogs starting from the head. This ordering must be 2352 * maintained, so an iclog doesn't become ACTIVE beyond one that 2353 * is SYNCING. This is also required to maintain the notion that we use 2354 * an ordered wait queue to hold off would-be writers to the log when every 2355 * iclog is trying to sync to disk.
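 * (For reference: an iclog normally cycles through ACTIVE -> WANT_SYNC ->
 * SYNCING -> DONE_SYNC -> CALLBACK -> DIRTY and back to ACTIVE; the
 * DIRTY -> ACTIVE transition below is the final step of that cycle,
 * performed here.)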
2356 * 2357 * State Change: DIRTY -> ACTIVE 2358 */ 2359 STATIC void 2360 xlog_state_clean_log( 2361 struct xlog *log) 2362 { 2363 xlog_in_core_t *iclog; 2364 int changed = 0; 2365 2366 iclog = log->l_iclog; 2367 do { 2368 if (iclog->ic_state == XLOG_STATE_DIRTY) { 2369 iclog->ic_state = XLOG_STATE_ACTIVE; 2370 iclog->ic_offset = 0; 2371 ASSERT(iclog->ic_callback == NULL); 2372 /* 2373 * If the number of ops in this iclog indicates it just 2374 * contains the dummy transaction, we can 2375 * change state into IDLE (the second time around). 2376 * Otherwise we should change the state into 2377 * NEED a dummy. 2378 * We don't need to cover the dummy. 2379 */ 2380 if (!changed && 2381 (be32_to_cpu(iclog->ic_header.h_num_logops) == 2382 XLOG_COVER_OPS)) { 2383 changed = 1; 2384 } else { 2385 /* 2386 * We have two dirty iclogs so start over. 2387 * This could also mean the num of ops indicates 2388 * this is not the dummy going out. 2389 */ 2390 changed = 2; 2391 } 2392 iclog->ic_header.h_num_logops = 0; 2393 memset(iclog->ic_header.h_cycle_data, 0, 2394 sizeof(iclog->ic_header.h_cycle_data)); 2395 iclog->ic_header.h_lsn = 0; 2396 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) 2397 /* do nothing */; 2398 else 2399 break; /* stop cleaning */ 2400 iclog = iclog->ic_next; 2401 } while (iclog != log->l_iclog); 2402 2403 /* log is locked when we are called */ 2404 /* 2405 * Change state for the dummy log recording. 2406 * We usually go to NEED. But we go to NEED2 if the changed value indicates 2407 * we are done writing the dummy record. 2408 * If we are done with the second dummy record (DONE2), then 2409 * we go to IDLE. 2410 */ 2411 if (changed) { 2412 switch (log->l_covered_state) { 2413 case XLOG_STATE_COVER_IDLE: 2414 case XLOG_STATE_COVER_NEED: 2415 case XLOG_STATE_COVER_NEED2: 2416 log->l_covered_state = XLOG_STATE_COVER_NEED; 2417 break; 2418 2419 case XLOG_STATE_COVER_DONE: 2420 if (changed == 1) 2421 log->l_covered_state = XLOG_STATE_COVER_NEED2; 2422 else 2423 log->l_covered_state = XLOG_STATE_COVER_NEED; 2424 break; 2425 2426 case XLOG_STATE_COVER_DONE2: 2427 if (changed == 1) 2428 log->l_covered_state = XLOG_STATE_COVER_IDLE; 2429 else 2430 log->l_covered_state = XLOG_STATE_COVER_NEED; 2431 break; 2432 2433 default: 2434 ASSERT(0); 2435 } 2436 } 2437 } /* xlog_state_clean_log */ 2438 2439 STATIC xfs_lsn_t 2440 xlog_get_lowest_lsn( 2441 struct xlog *log) 2442 { 2443 xlog_in_core_t *lsn_log; 2444 xfs_lsn_t lowest_lsn, lsn; 2445 2446 lsn_log = log->l_iclog; 2447 lowest_lsn = 0; 2448 do { 2449 if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { 2450 lsn = be64_to_cpu(lsn_log->ic_header.h_lsn); 2451 if ((lsn && !lowest_lsn) || 2452 (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { 2453 lowest_lsn = lsn; 2454 } 2455 } 2456 lsn_log = lsn_log->ic_next; 2457 } while (lsn_log != log->l_iclog); 2458 return lowest_lsn; 2459 } 2460 2461 2462 STATIC void 2463 xlog_state_do_callback( 2464 struct xlog *log, 2465 int aborted, 2466 struct xlog_in_core *ciclog) 2467 { 2468 xlog_in_core_t *iclog; 2469 xlog_in_core_t *first_iclog; /* used to know when we've 2470 * processed all iclogs once */ 2471 xfs_log_callback_t *cb, *cb_next; 2472 int flushcnt = 0; 2473 xfs_lsn_t lowest_lsn; 2474 int ioerrors; /* counter: iclogs with errors */ 2475 int loopdidcallbacks; /* flag: inner loop did callbacks*/ 2476 int funcdidcallbacks; /* flag: function did callbacks */ 2477 int repeats; /* for issuing console warnings if 2478 * looping too many times */ 2479 int wake = 0; 2480 2481 spin_lock(&log->l_icloglock); 2482 first_iclog
= iclog = log->l_iclog; 2483 ioerrors = 0; 2484 funcdidcallbacks = 0; 2485 repeats = 0; 2486 2487 do { 2488 /* 2489 * Scan all iclogs starting with the one pointed to by the 2490 * log. Reset this starting point each time the log is 2491 * unlocked (during callbacks). 2492 * 2493 * Keep looping through iclogs until one full pass is made 2494 * without running any callbacks. 2495 */ 2496 first_iclog = log->l_iclog; 2497 iclog = log->l_iclog; 2498 loopdidcallbacks = 0; 2499 repeats++; 2500 2501 do { 2502 2503 /* skip all iclogs in the ACTIVE & DIRTY states */ 2504 if (iclog->ic_state & 2505 (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) { 2506 iclog = iclog->ic_next; 2507 continue; 2508 } 2509 2510 /* 2511 * Between marking a filesystem SHUTDOWN and stopping 2512 * the log, we do flush all iclogs to disk (if there 2513 * wasn't a log I/O error). So, we do want things to 2514 * go smoothly in case of just a SHUTDOWN w/o a 2515 * LOG_IO_ERROR. 2516 */ 2517 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { 2518 /* 2519 * Can only perform callbacks in order. Since 2520 * this iclog is not in the DONE_SYNC/ 2521 * DO_CALLBACK state, we skip the rest and 2522 * just try to clean up. If we set our iclog 2523 * to DO_CALLBACK, we will not process it when 2524 * we retry since a previous iclog is in the 2525 * CALLBACK and the state cannot change since 2526 * we are holding the l_icloglock. 2527 */ 2528 if (!(iclog->ic_state & 2529 (XLOG_STATE_DONE_SYNC | 2530 XLOG_STATE_DO_CALLBACK))) { 2531 if (ciclog && (ciclog->ic_state == 2532 XLOG_STATE_DONE_SYNC)) { 2533 ciclog->ic_state = XLOG_STATE_DO_CALLBACK; 2534 } 2535 break; 2536 } 2537 /* 2538 * We now have an iclog that is in either the 2539 * DO_CALLBACK or DONE_SYNC states. The other 2540 * states (WANT_SYNC, SYNCING, or CALLBACK) were 2541 * caught by the if check above and are going to 2542 * be cleaned up elsewhere (i.e. we aren't doing 2543 * their callbacks here). 2544 */ 2545 2546 /* 2547 * We will do one more check here to see if we 2548 * have chased our tail around. 2549 */ 2550 2551 lowest_lsn = xlog_get_lowest_lsn(log); 2552 if (lowest_lsn && 2553 XFS_LSN_CMP(lowest_lsn, 2554 be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { 2555 iclog = iclog->ic_next; 2556 continue; /* Leave this iclog for 2557 * another thread */ 2558 } 2559 2560 iclog->ic_state = XLOG_STATE_CALLBACK; 2561 2562 2563 /* 2564 * Completion of an iclog IO does not imply that 2565 * a transaction has completed, as transactions 2566 * can be large enough to span many iclogs. We 2567 * cannot change the tail of the log half way 2568 * through a transaction as this may be the only 2569 * transaction in the log and moving the tail to 2570 * point to the middle of it will prevent 2571 * recovery from finding the start of the 2572 * transaction. Hence we should only update the 2573 * last_sync_lsn if this iclog contains 2574 * transaction completion callbacks on it. 2575 * 2576 * We have to do this before we drop the 2577 * icloglock to ensure we are the only one that 2578 * can update it. 2579 */ 2580 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), 2581 be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); 2582 if (iclog->ic_callback) 2583 atomic64_set(&log->l_last_sync_lsn, 2584 be64_to_cpu(iclog->ic_header.h_lsn)); 2585 2586 } else 2587 ioerrors++; 2588 2589 spin_unlock(&log->l_icloglock); 2590 2591 /* 2592 * Keep processing entries in the callback list until 2593 * we come around and it is empty.
We need to 2594 * atomically see that the list is empty and change the 2595 * state to DIRTY so that we don't miss any more 2596 * callbacks being added. 2597 */ 2598 spin_lock(&iclog->ic_callback_lock); 2599 cb = iclog->ic_callback; 2600 while (cb) { 2601 iclog->ic_callback_tail = &(iclog->ic_callback); 2602 iclog->ic_callback = NULL; 2603 spin_unlock(&iclog->ic_callback_lock); 2604 2605 /* perform callbacks in the order given */ 2606 for (; cb; cb = cb_next) { 2607 cb_next = cb->cb_next; 2608 cb->cb_func(cb->cb_arg, aborted); 2609 } 2610 spin_lock(&iclog->ic_callback_lock); 2611 cb = iclog->ic_callback; 2612 } 2613 2614 loopdidcallbacks++; 2615 funcdidcallbacks++; 2616 2617 spin_lock(&log->l_icloglock); 2618 ASSERT(iclog->ic_callback == NULL); 2619 spin_unlock(&iclog->ic_callback_lock); 2620 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) 2621 iclog->ic_state = XLOG_STATE_DIRTY; 2622 2623 /* 2624 * Transition from DIRTY to ACTIVE if applicable. 2625 * NOP if STATE_IOERROR. 2626 */ 2627 xlog_state_clean_log(log); 2628 2629 /* wake up threads waiting in xfs_log_force() */ 2630 wake_up_all(&iclog->ic_force_wait); 2631 2632 iclog = iclog->ic_next; 2633 } while (first_iclog != iclog); 2634 2635 if (repeats > 5000) { 2636 flushcnt += repeats; 2637 repeats = 0; 2638 xfs_warn(log->l_mp, 2639 "%s: possible infinite loop (%d iterations)", 2640 __func__, flushcnt); 2641 } 2642 } while (!ioerrors && loopdidcallbacks); 2643 2644 /* 2645 * make one last gasp attempt to see if iclogs are being left in 2646 * limbo.. 2647 */ 2648 #ifdef DEBUG 2649 if (funcdidcallbacks) { 2650 first_iclog = iclog = log->l_iclog; 2651 do { 2652 ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK); 2653 /* 2654 * Terminate the loop if iclogs are found in states 2655 * which will cause other threads to clean up iclogs. 2656 * 2657 * SYNCING - i/o completion will go through logs 2658 * DONE_SYNC - interrupt thread should be waiting for 2659 * l_icloglock 2660 * IOERROR - give up hope all ye who enter here 2661 */ 2662 if (iclog->ic_state == XLOG_STATE_WANT_SYNC || 2663 iclog->ic_state == XLOG_STATE_SYNCING || 2664 iclog->ic_state == XLOG_STATE_DONE_SYNC || 2665 iclog->ic_state == XLOG_STATE_IOERROR ) 2666 break; 2667 iclog = iclog->ic_next; 2668 } while (first_iclog != iclog); 2669 } 2670 #endif 2671 2672 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) 2673 wake = 1; 2674 spin_unlock(&log->l_icloglock); 2675 2676 if (wake) 2677 wake_up_all(&log->l_flush_wait); 2678 } 2679 2680 2681 /* 2682 * Finish transitioning this iclog to the dirty state. 2683 * 2684 * Make sure that we completely execute this routine only when this is 2685 * the last call to the iclog. There is a good chance that iclog flushes, 2686 * when we reach the end of the physical log, get turned into 2 separate 2687 * calls to bwrite. Hence, one iclog flush could generate two calls to this 2688 * routine. By using the reference count bwritecnt, we guarantee that only 2689 * the second completion goes through. 2690 * 2691 * Callbacks could take time, so they are done outside the scope of the 2692 * global state machine log lock. 
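 * For example, an iclog that wraps the physical end of the log is issued as
 * two buffer writes with ic_bwritecnt == 2; the completion handler below
 * simply drops the count and returns for the first write, and only the
 * second completion moves the iclog to DONE_SYNC and runs the callbacks.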
2693 */ 2694 STATIC void 2695 xlog_state_done_syncing( 2696 xlog_in_core_t *iclog, 2697 int aborted) 2698 { 2699 struct xlog *log = iclog->ic_log; 2700 2701 spin_lock(&log->l_icloglock); 2702 2703 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || 2704 iclog->ic_state == XLOG_STATE_IOERROR); 2705 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 2706 ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2); 2707 2708 2709 /* 2710 * If we got an error, either on the first buffer, or in the case of 2711 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR, 2712 * and none should ever be attempted to be written to disk 2713 * again. 2714 */ 2715 if (iclog->ic_state != XLOG_STATE_IOERROR) { 2716 if (--iclog->ic_bwritecnt == 1) { 2717 spin_unlock(&log->l_icloglock); 2718 return; 2719 } 2720 iclog->ic_state = XLOG_STATE_DONE_SYNC; 2721 } 2722 2723 /* 2724 * Someone could be sleeping prior to writing out the next 2725 * iclog buffer, we wake them all, one will get to do the 2726 * I/O, the others get to wait for the result. 2727 */ 2728 wake_up_all(&iclog->ic_write_wait); 2729 spin_unlock(&log->l_icloglock); 2730 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ 2731 } /* xlog_state_done_syncing */ 2732 2733 2734 /* 2735 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must 2736 * sleep. We wait on the flush queue on the head iclog as that should be 2737 * the first iclog to complete flushing. Hence if all iclogs are syncing, 2738 * we will wait here and all new writes will sleep until a sync completes. 2739 * 2740 * The in-core logs are used in a circular fashion. They are not used 2741 * out-of-order even when an iclog past the head is free. 2742 * 2743 * return: 2744 * * log_offset where xlog_write() can start writing into the in-core 2745 * log's data space. 2746 * * in-core log pointer to which xlog_write() should write. 2747 * * boolean indicating this is a continued write to an in-core log. 2748 * If this is the last write, then the in-core log's offset field 2749 * needs to be incremented, depending on the amount of data which 2750 * is copied. 2751 */ 2752 STATIC int 2753 xlog_state_get_iclog_space( 2754 struct xlog *log, 2755 int len, 2756 struct xlog_in_core **iclogp, 2757 struct xlog_ticket *ticket, 2758 int *continued_write, 2759 int *logoffsetp) 2760 { 2761 int log_offset; 2762 xlog_rec_header_t *head; 2763 xlog_in_core_t *iclog; 2764 int error; 2765 2766 restart: 2767 spin_lock(&log->l_icloglock); 2768 if (XLOG_FORCED_SHUTDOWN(log)) { 2769 spin_unlock(&log->l_icloglock); 2770 return XFS_ERROR(EIO); 2771 } 2772 2773 iclog = log->l_iclog; 2774 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 2775 XFS_STATS_INC(xs_log_noiclogs); 2776 2777 /* Wait for log writes to have flushed */ 2778 xlog_wait(&log->l_flush_wait, &log->l_icloglock); 2779 goto restart; 2780 } 2781 2782 head = &iclog->ic_header; 2783 2784 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ 2785 log_offset = iclog->ic_offset; 2786 2787 /* On the 1st write to an iclog, figure out lsn. This works 2788 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are 2789 * committing to. If the offset is set, that's how many blocks 2790 * must be written. 
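 * For example, the first writer into a fresh iclog (log_offset == 0) pays
 * for the log record header out of its ticket and stamps h_cycle/h_lsn from
 * the current cycle and block below; later writers into the same iclog see
 * a non-zero offset and skip that step.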
2791 */ 2792 if (log_offset == 0) { 2793 ticket->t_curr_res -= log->l_iclog_hsize; 2794 xlog_tic_add_region(ticket, 2795 log->l_iclog_hsize, 2796 XLOG_REG_TYPE_LRHEADER); 2797 head->h_cycle = cpu_to_be32(log->l_curr_cycle); 2798 head->h_lsn = cpu_to_be64( 2799 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); 2800 ASSERT(log->l_curr_block >= 0); 2801 } 2802 2803 /* If there is enough room to write everything, then do it. Otherwise, 2804 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC 2805 * bit is on, so this will get flushed out. Don't update ic_offset 2806 * until you know exactly how many bytes get copied. Therefore, wait 2807 * until later to update ic_offset. 2808 * 2809 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's 2810 * can fit into remaining data section. 2811 */ 2812 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { 2813 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 2814 2815 /* 2816 * If I'm the only one writing to this iclog, sync it to disk. 2817 * We need to do an atomic compare and decrement here to avoid 2818 * racing with concurrent atomic_dec_and_lock() calls in 2819 * xlog_state_release_iclog() when there is more than one 2820 * reference to the iclog. 2821 */ 2822 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) { 2823 /* we are the only one */ 2824 spin_unlock(&log->l_icloglock); 2825 error = xlog_state_release_iclog(log, iclog); 2826 if (error) 2827 return error; 2828 } else { 2829 spin_unlock(&log->l_icloglock); 2830 } 2831 goto restart; 2832 } 2833 2834 /* Do we have enough room to write the full amount in the remainder 2835 * of this iclog? Or must we continue a write on the next iclog and 2836 * mark this iclog as completely taken? In the case where we switch 2837 * iclogs (to mark it taken), this particular iclog will release/sync 2838 * to disk in xlog_write(). 2839 */ 2840 if (len <= iclog->ic_size - iclog->ic_offset) { 2841 *continued_write = 0; 2842 iclog->ic_offset += len; 2843 } else { 2844 *continued_write = 1; 2845 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 2846 } 2847 *iclogp = iclog; 2848 2849 ASSERT(iclog->ic_offset <= iclog->ic_size); 2850 spin_unlock(&log->l_icloglock); 2851 2852 *logoffsetp = log_offset; 2853 return 0; 2854 } /* xlog_state_get_iclog_space */ 2855 2856 /* The first cnt-1 times through here we don't need to 2857 * move the grant write head because the permanent 2858 * reservation has reserved cnt times the unit amount. 2859 * Release part of current permanent unit reservation and 2860 * reset current reservation to be one units worth. Also 2861 * move grant reservation head forward. 
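 * For example (illustrative numbers): a permanent ticket with t_cnt == 3 and
 * a 4096 byte unit that finishes a transaction with 3096 bytes of its current
 * reservation unused gives those 3096 bytes back to both grant heads, drops
 * t_cnt to 2 and resets t_curr_res to a full unit - no new space is taken
 * because the remaining counts already cover it. Only once t_cnt has dropped
 * to zero is a full unit re-added to the reserve grant head.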
2862 */ 2863 STATIC void 2864 xlog_regrant_reserve_log_space( 2865 struct xlog *log, 2866 struct xlog_ticket *ticket) 2867 { 2868 trace_xfs_log_regrant_reserve_enter(log, ticket); 2869 2870 if (ticket->t_cnt > 0) 2871 ticket->t_cnt--; 2872 2873 xlog_grant_sub_space(log, &log->l_reserve_head.grant, 2874 ticket->t_curr_res); 2875 xlog_grant_sub_space(log, &log->l_write_head.grant, 2876 ticket->t_curr_res); 2877 ticket->t_curr_res = ticket->t_unit_res; 2878 xlog_tic_reset_res(ticket); 2879 2880 trace_xfs_log_regrant_reserve_sub(log, ticket); 2881 2882 /* just return if we still have some of the pre-reserved space */ 2883 if (ticket->t_cnt > 0) 2884 return; 2885 2886 xlog_grant_add_space(log, &log->l_reserve_head.grant, 2887 ticket->t_unit_res); 2888 2889 trace_xfs_log_regrant_reserve_exit(log, ticket); 2890 2891 ticket->t_curr_res = ticket->t_unit_res; 2892 xlog_tic_reset_res(ticket); 2893 } /* xlog_regrant_reserve_log_space */ 2894 2895 2896 /* 2897 * Give back the space left from a reservation. 2898 * 2899 * All the information we need to make a correct determination of space left 2900 * is present. For non-permanent reservations, things are quite easy. The 2901 * count should have been decremented to zero. We only need to deal with the 2902 * space remaining in the current reservation part of the ticket. If the 2903 * ticket contains a permanent reservation, there may be left over space which 2904 * needs to be released. A count of N means that N-1 refills of the current 2905 * reservation can be done before we need to ask for more space. The first 2906 * one goes to fill up the first current reservation. Once we run out of 2907 * space, the count will stay at zero and the only space remaining will be 2908 * in the current reservation field. 2909 */ 2910 STATIC void 2911 xlog_ungrant_log_space( 2912 struct xlog *log, 2913 struct xlog_ticket *ticket) 2914 { 2915 int bytes; 2916 2917 if (ticket->t_cnt > 0) 2918 ticket->t_cnt--; 2919 2920 trace_xfs_log_ungrant_enter(log, ticket); 2921 trace_xfs_log_ungrant_sub(log, ticket); 2922 2923 /* 2924 * If this is a permanent reservation ticket, we may be able to free 2925 * up more space based on the remaining count. 2926 */ 2927 bytes = ticket->t_curr_res; 2928 if (ticket->t_cnt > 0) { 2929 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); 2930 bytes += ticket->t_unit_res*ticket->t_cnt; 2931 } 2932 2933 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); 2934 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); 2935 2936 trace_xfs_log_ungrant_exit(log, ticket); 2937 2938 xfs_log_space_wake(log->l_mp); 2939 } 2940 2941 /* 2942 * Flush iclog to disk if this is the last reference to the given iclog and 2943 * the WANT_SYNC bit is set. 2944 * 2945 * When this function is entered, the iclog is not necessarily in the 2946 * WANT_SYNC state. It may be sitting around waiting to get filled. 2947 * 2948 * 2949 */ 2950 STATIC int 2951 xlog_state_release_iclog( 2952 struct xlog *log, 2953 struct xlog_in_core *iclog) 2954 { 2955 int sync = 0; /* do we sync? 
*/ 2956 2957 if (iclog->ic_state & XLOG_STATE_IOERROR) 2958 return XFS_ERROR(EIO); 2959 2960 ASSERT(atomic_read(&iclog->ic_refcnt) > 0); 2961 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) 2962 return 0; 2963 2964 if (iclog->ic_state & XLOG_STATE_IOERROR) { 2965 spin_unlock(&log->l_icloglock); 2966 return XFS_ERROR(EIO); 2967 } 2968 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || 2969 iclog->ic_state == XLOG_STATE_WANT_SYNC); 2970 2971 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) { 2972 /* update tail before writing to iclog */ 2973 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); 2974 sync++; 2975 iclog->ic_state = XLOG_STATE_SYNCING; 2976 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); 2977 xlog_verify_tail_lsn(log, iclog, tail_lsn); 2978 /* cycle incremented when incrementing curr_block */ 2979 } 2980 spin_unlock(&log->l_icloglock); 2981 2982 /* 2983 * We let the log lock go, so it's possible that we hit a log I/O 2984 * error or some other SHUTDOWN condition that marks the iclog 2985 * as XLOG_STATE_IOERROR before the bwrite. However, we know that 2986 * this iclog has consistent data, so we ignore IOERROR 2987 * flags after this point. 2988 */ 2989 if (sync) 2990 return xlog_sync(log, iclog); 2991 return 0; 2992 } /* xlog_state_release_iclog */ 2993 2994 2995 /* 2996 * This routine will mark the current iclog in the ring as WANT_SYNC 2997 * and move the current iclog pointer to the next iclog in the ring. 2998 * When this routine is called from xlog_state_get_iclog_space(), the 2999 * exact size of the iclog has not yet been determined. All we know is 3000 * that we have run out of space in this log record. 3001 */ 3002 STATIC void 3003 xlog_state_switch_iclogs( 3004 struct xlog *log, 3005 struct xlog_in_core *iclog, 3006 int eventual_size) 3007 { 3008 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 3009 if (!eventual_size) 3010 eventual_size = iclog->ic_offset; 3011 iclog->ic_state = XLOG_STATE_WANT_SYNC; 3012 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); 3013 log->l_prev_block = log->l_curr_block; 3014 log->l_prev_cycle = log->l_curr_cycle; 3015 3016 /* roll log?: ic_offset changed later */ 3017 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); 3018 3019 /* Round up to next log-sunit */ 3020 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && 3021 log->l_mp->m_sb.sb_logsunit > 1) { 3022 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); 3023 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); 3024 } 3025 3026 if (log->l_curr_block >= log->l_logBBsize) { 3027 log->l_curr_cycle++; 3028 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) 3029 log->l_curr_cycle++; 3030 log->l_curr_block -= log->l_logBBsize; 3031 ASSERT(log->l_curr_block >= 0); 3032 } 3033 ASSERT(iclog == log->l_iclog); 3034 log->l_iclog = iclog->ic_next; 3035 } /* xlog_state_switch_iclogs */ 3036 3037 /* 3038 * Write out all data in the in-core log as of this exact moment in time. 3039 * 3040 * Data may be written to the in-core log during this call. However, 3041 * we don't guarantee this data will be written out. A change from past 3042 * implementation means this routine will *not* write out zero length LRs. 3043 * 3044 * Basically, we try and perform an intelligent scan of the in-core logs. 3045 * If we determine there is no flushable data, we just return. There is no 3046 * flushable data if: 3047 * 3048 * 1. the current iclog is active and has no data; the previous iclog 3049 * is in the active or dirty state. 3050 * 2.
the current iclog is dirty, and the previous iclog is in the 3051 * active or dirty state. 3052 * 3053 * We may sleep if: 3054 * 3055 * 1. the current iclog is not in the active nor dirty state. 3056 * 2. the current iclog is dirty, and the previous iclog is not in the 3057 * active nor dirty state. 3058 * 3. the current iclog is active, and there is another thread writing 3059 * to this particular iclog. 3060 * 4. a) the current iclog is active and has no other writers 3061 * b) when we return from flushing out this iclog, it is still 3062 * not in the active nor dirty state. 3063 */ 3064 int 3065 _xfs_log_force( 3066 struct xfs_mount *mp, 3067 uint flags, 3068 int *log_flushed) 3069 { 3070 struct xlog *log = mp->m_log; 3071 struct xlog_in_core *iclog; 3072 xfs_lsn_t lsn; 3073 3074 XFS_STATS_INC(xs_log_force); 3075 3076 xlog_cil_force(log); 3077 3078 spin_lock(&log->l_icloglock); 3079 3080 iclog = log->l_iclog; 3081 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3082 spin_unlock(&log->l_icloglock); 3083 return XFS_ERROR(EIO); 3084 } 3085 3086 /* If the head iclog is not active nor dirty, we just attach 3087 * ourselves to the head and go to sleep. 3088 */ 3089 if (iclog->ic_state == XLOG_STATE_ACTIVE || 3090 iclog->ic_state == XLOG_STATE_DIRTY) { 3091 /* 3092 * If the head is dirty or (active and empty), then 3093 * we need to look at the previous iclog. If the previous 3094 * iclog is active or dirty we are done. There is nothing 3095 * to sync out. Otherwise, we attach ourselves to the 3096 * previous iclog and go to sleep. 3097 */ 3098 if (iclog->ic_state == XLOG_STATE_DIRTY || 3099 (atomic_read(&iclog->ic_refcnt) == 0 3100 && iclog->ic_offset == 0)) { 3101 iclog = iclog->ic_prev; 3102 if (iclog->ic_state == XLOG_STATE_ACTIVE || 3103 iclog->ic_state == XLOG_STATE_DIRTY) 3104 goto no_sleep; 3105 else 3106 goto maybe_sleep; 3107 } else { 3108 if (atomic_read(&iclog->ic_refcnt) == 0) { 3109 /* We are the only one with access to this 3110 * iclog. Flush it out now. There should 3111 * be a roundoff of zero to show that someone 3112 * has already taken care of the roundoff from 3113 * the previous sync. 3114 */ 3115 atomic_inc(&iclog->ic_refcnt); 3116 lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3117 xlog_state_switch_iclogs(log, iclog, 0); 3118 spin_unlock(&log->l_icloglock); 3119 3120 if (xlog_state_release_iclog(log, iclog)) 3121 return XFS_ERROR(EIO); 3122 3123 if (log_flushed) 3124 *log_flushed = 1; 3125 spin_lock(&log->l_icloglock); 3126 if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn && 3127 iclog->ic_state != XLOG_STATE_DIRTY) 3128 goto maybe_sleep; 3129 else 3130 goto no_sleep; 3131 } else { 3132 /* Someone else is writing to this iclog. 3133 * Use its call to flush out the data. However, 3134 * the other thread may not force out this LR, 3135 * so we mark it WANT_SYNC. 3136 */ 3137 xlog_state_switch_iclogs(log, iclog, 0); 3138 goto maybe_sleep; 3139 } 3140 } 3141 } 3142 3143 /* By the time we come around again, the iclog could've been filled 3144 * which would give it another lsn. If we have a new lsn, just 3145 * return because the relevant data has been flushed. 3146 */ 3147 maybe_sleep: 3148 if (flags & XFS_LOG_SYNC) { 3149 /* 3150 * We must check if we're shutting down here, before 3151 * we wait, while we're holding the l_icloglock. 3152 * Then we check again after waking up, in case our 3153 * sleep was disturbed by bad news.
3154 */ 3155 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3156 spin_unlock(&log->l_icloglock); 3157 return XFS_ERROR(EIO); 3158 } 3159 XFS_STATS_INC(xs_log_force_sleep); 3160 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3161 /* 3162 * No need to grab the log lock here since we're 3163 * only deciding whether or not to return EIO 3164 * and the memory read should be atomic. 3165 */ 3166 if (iclog->ic_state & XLOG_STATE_IOERROR) 3167 return XFS_ERROR(EIO); 3168 if (log_flushed) 3169 *log_flushed = 1; 3170 } else { 3171 3172 no_sleep: 3173 spin_unlock(&log->l_icloglock); 3174 } 3175 return 0; 3176 } 3177 3178 /* 3179 * Wrapper for _xfs_log_force(), to be used when caller doesn't care 3180 * about errors or whether the log was flushed or not. This is the normal 3181 * interface to use when trying to unpin items or move the log forward. 3182 */ 3183 void 3184 xfs_log_force( 3185 xfs_mount_t *mp, 3186 uint flags) 3187 { 3188 int error; 3189 3190 trace_xfs_log_force(mp, 0); 3191 error = _xfs_log_force(mp, flags, NULL); 3192 if (error) 3193 xfs_warn(mp, "%s: error %d returned.", __func__, error); 3194 } 3195 3196 /* 3197 * Force the in-core log to disk for a specific LSN. 3198 * 3199 * Find in-core log with lsn. 3200 * If it is in the DIRTY state, just return. 3201 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC 3202 * state and go to sleep or return. 3203 * If it is in any other state, go to sleep or return. 3204 * 3205 * Synchronous forces are implemented with a signal variable. All callers 3206 * to force a given lsn to disk will wait on a the sv attached to the 3207 * specific in-core log. When given in-core log finally completes its 3208 * write to disk, that thread will wake up all threads waiting on the 3209 * sv. 3210 */ 3211 int 3212 _xfs_log_force_lsn( 3213 struct xfs_mount *mp, 3214 xfs_lsn_t lsn, 3215 uint flags, 3216 int *log_flushed) 3217 { 3218 struct xlog *log = mp->m_log; 3219 struct xlog_in_core *iclog; 3220 int already_slept = 0; 3221 3222 ASSERT(lsn != 0); 3223 3224 XFS_STATS_INC(xs_log_force); 3225 3226 lsn = xlog_cil_force_lsn(log, lsn); 3227 if (lsn == NULLCOMMITLSN) 3228 return 0; 3229 3230 try_again: 3231 spin_lock(&log->l_icloglock); 3232 iclog = log->l_iclog; 3233 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3234 spin_unlock(&log->l_icloglock); 3235 return XFS_ERROR(EIO); 3236 } 3237 3238 do { 3239 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { 3240 iclog = iclog->ic_next; 3241 continue; 3242 } 3243 3244 if (iclog->ic_state == XLOG_STATE_DIRTY) { 3245 spin_unlock(&log->l_icloglock); 3246 return 0; 3247 } 3248 3249 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3250 /* 3251 * We sleep here if we haven't already slept (e.g. 3252 * this is the first time we've looked at the correct 3253 * iclog buf) and the buffer before us is going to 3254 * be sync'ed. The reason for this is that if we 3255 * are doing sync transactions here, by waiting for 3256 * the previous I/O to complete, we can allow a few 3257 * more transactions into this iclog before we close 3258 * it down. 3259 * 3260 * Otherwise, we mark the buffer WANT_SYNC, and bump 3261 * up the refcnt so we can release the log (which 3262 * drops the ref count). The state switch keeps new 3263 * transaction commits from using this buffer. When 3264 * the current commits finish writing into the buffer, 3265 * the refcount will drop to zero and the buffer will 3266 * go out then. 
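 * For example: on the first pass we may find the target iclog still ACTIVE
 * while its predecessor is WANT_SYNC or SYNCING; in that case we sleep on
 * the predecessor's ic_write_wait, which lets a few more commits into the
 * target iclog, and only on the second pass (already_slept) do we switch it
 * out immediately.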
3267 */ 3268 if (!already_slept && 3269 (iclog->ic_prev->ic_state & 3270 (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) { 3271 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); 3272 3273 XFS_STATS_INC(xs_log_force_sleep); 3274 3275 xlog_wait(&iclog->ic_prev->ic_write_wait, 3276 &log->l_icloglock); 3277 if (log_flushed) 3278 *log_flushed = 1; 3279 already_slept = 1; 3280 goto try_again; 3281 } 3282 atomic_inc(&iclog->ic_refcnt); 3283 xlog_state_switch_iclogs(log, iclog, 0); 3284 spin_unlock(&log->l_icloglock); 3285 if (xlog_state_release_iclog(log, iclog)) 3286 return XFS_ERROR(EIO); 3287 if (log_flushed) 3288 *log_flushed = 1; 3289 spin_lock(&log->l_icloglock); 3290 } 3291 3292 if ((flags & XFS_LOG_SYNC) && /* sleep */ 3293 !(iclog->ic_state & 3294 (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { 3295 /* 3296 * Don't wait on completion if we know that we've 3297 * gotten a log write error. 3298 */ 3299 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3300 spin_unlock(&log->l_icloglock); 3301 return XFS_ERROR(EIO); 3302 } 3303 XFS_STATS_INC(xs_log_force_sleep); 3304 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3305 /* 3306 * No need to grab the log lock here since we're 3307 * only deciding whether or not to return EIO 3308 * and the memory read should be atomic. 3309 */ 3310 if (iclog->ic_state & XLOG_STATE_IOERROR) 3311 return XFS_ERROR(EIO); 3312 3313 if (log_flushed) 3314 *log_flushed = 1; 3315 } else { /* just return */ 3316 spin_unlock(&log->l_icloglock); 3317 } 3318 3319 return 0; 3320 } while (iclog != log->l_iclog); 3321 3322 spin_unlock(&log->l_icloglock); 3323 return 0; 3324 } 3325 3326 /* 3327 * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care 3328 * about errors or whether the log was flushed or not. This is the normal 3329 * interface to use when trying to unpin items or move the log forward. 3330 */ 3331 void 3332 xfs_log_force_lsn( 3333 xfs_mount_t *mp, 3334 xfs_lsn_t lsn, 3335 uint flags) 3336 { 3337 int error; 3338 3339 trace_xfs_log_force(mp, lsn); 3340 error = _xfs_log_force_lsn(mp, lsn, flags, NULL); 3341 if (error) 3342 xfs_warn(mp, "%s: error %d returned.", __func__, error); 3343 } 3344 3345 /* 3346 * Called when we want to mark the current iclog as being ready to sync to 3347 * disk. 3348 */ 3349 STATIC void 3350 xlog_state_want_sync( 3351 struct xlog *log, 3352 struct xlog_in_core *iclog) 3353 { 3354 assert_spin_locked(&log->l_icloglock); 3355 3356 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3357 xlog_state_switch_iclogs(log, iclog, 0); 3358 } else { 3359 ASSERT(iclog->ic_state & 3360 (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); 3361 } 3362 } 3363 3364 3365 /***************************************************************************** 3366 * 3367 * TICKET functions 3368 * 3369 ***************************************************************************** 3370 */ 3371 3372 /* 3373 * Free a used ticket when its refcount falls to zero. 3374 */ 3375 void 3376 xfs_log_ticket_put( 3377 xlog_ticket_t *ticket) 3378 { 3379 ASSERT(atomic_read(&ticket->t_ref) > 0); 3380 if (atomic_dec_and_test(&ticket->t_ref)) 3381 kmem_zone_free(xfs_log_ticket_zone, ticket); 3382 } 3383 3384 xlog_ticket_t * 3385 xfs_log_ticket_get( 3386 xlog_ticket_t *ticket) 3387 { 3388 ASSERT(atomic_read(&ticket->t_ref) > 0); 3389 atomic_inc(&ticket->t_ref); 3390 return ticket; 3391 } 3392 3393 /* 3394 * Allocate and initialise a new log ticket. 
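 * (A ticket is normally obtained by the reservation code - e.g.
 * xfs_log_reserve() - with the caller's unit_bytes, cnt, client and
 * permanent arguments, and is released through xfs_log_ticket_put() when
 * its reference count drops to zero.)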
3395 */ 3396 struct xlog_ticket * 3397 xlog_ticket_alloc( 3398 struct xlog *log, 3399 int unit_bytes, 3400 int cnt, 3401 char client, 3402 bool permanent, 3403 xfs_km_flags_t alloc_flags) 3404 { 3405 struct xlog_ticket *tic; 3406 uint num_headers; 3407 int iclog_space; 3408 3409 tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags); 3410 if (!tic) 3411 return NULL; 3412 3413 /* 3414 * Permanent reservations have up to 'cnt'-1 active log operations 3415 * in the log. A unit in this case is the amount of space for one 3416 * of these log operations. Normal reservations have a cnt of 1 3417 * and their unit amount is the total amount of space required. 3418 * 3419 * The following lines of code account for non-transaction data 3420 * which occupy space in the on-disk log. 3421 * 3422 * Normal form of a transaction is: 3423 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> 3424 * and then there are LR hdrs, split-recs and roundoff at end of syncs. 3425 * 3426 * We need to account for all the leadup data and trailer data 3427 * around the transaction data. 3428 * And then we need to account for the worst case in terms of using 3429 * more space. 3430 * The worst case will happen if: 3431 * - the placement of the transaction happens to be such that the 3432 * roundoff is at its maximum 3433 * - the transaction data is synced before the commit record is synced 3434 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> 3435 * Therefore the commit record is in its own Log Record. 3436 * This can happen as the commit record is called with its 3437 * own region to xlog_write(). 3438 * This then means that in the worst case, roundoff can happen for 3439 * the commit-rec as well. 3440 * The commit-rec is smaller than padding in this scenario and so it is 3441 * not added separately. 3442 */ 3443 3444 /* for trans header */ 3445 unit_bytes += sizeof(xlog_op_header_t); 3446 unit_bytes += sizeof(xfs_trans_header_t); 3447 3448 /* for start-rec */ 3449 unit_bytes += sizeof(xlog_op_header_t); 3450 3451 /* 3452 * for LR headers - the space for data in an iclog is the size minus 3453 * the space used for the headers. If we use the iclog size, then we 3454 * undercalculate the number of headers required. 3455 * 3456 * Furthermore - the addition of op headers for split-recs might 3457 * increase the space required enough to require more log and op 3458 * headers, so take that into account too. 3459 * 3460 * IMPORTANT: This reservation makes the assumption that if this 3461 * transaction is the first in an iclog and hence has the LR headers 3462 * accounted to it, then the remaining space in the iclog is 3463 * exclusively for this transaction. i.e. if the transaction is larger 3464 * than the iclog, it will be the only thing in that iclog. 3465 * Fundamentally, this means we must pass the entire log vector to 3466 * xlog_write to guarantee this. 
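 * For example (illustrative numbers): with 32k iclogs and a 512 byte header,
 * iclog_space below is 32256 bytes, so a 100000 byte reservation starts with
 * howmany(100000, 32256) = 4 LR headers; the while loop then only bumps that
 * count if adding the split-record op headers itself pushes unit_bytes over
 * another iclog boundary.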
3467 */ 3468 iclog_space = log->l_iclog_size - log->l_iclog_hsize; 3469 num_headers = howmany(unit_bytes, iclog_space); 3470 3471 /* for split-recs - ophdrs added when data split over LRs */ 3472 unit_bytes += sizeof(xlog_op_header_t) * num_headers; 3473 3474 /* add extra header reservations if we overrun */ 3475 while (!num_headers || 3476 howmany(unit_bytes, iclog_space) > num_headers) { 3477 unit_bytes += sizeof(xlog_op_header_t); 3478 num_headers++; 3479 } 3480 unit_bytes += log->l_iclog_hsize * num_headers; 3481 3482 /* for commit-rec LR header - note: padding will subsume the ophdr */ 3483 unit_bytes += log->l_iclog_hsize; 3484 3485 /* for roundoff padding for transaction data and one for commit record */ 3486 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && 3487 log->l_mp->m_sb.sb_logsunit > 1) { 3488 /* log su roundoff */ 3489 unit_bytes += 2*log->l_mp->m_sb.sb_logsunit; 3490 } else { 3491 /* BB roundoff */ 3492 unit_bytes += 2*BBSIZE; 3493 } 3494 3495 atomic_set(&tic->t_ref, 1); 3496 tic->t_task = current; 3497 INIT_LIST_HEAD(&tic->t_queue); 3498 tic->t_unit_res = unit_bytes; 3499 tic->t_curr_res = unit_bytes; 3500 tic->t_cnt = cnt; 3501 tic->t_ocnt = cnt; 3502 tic->t_tid = prandom_u32(); 3503 tic->t_clientid = client; 3504 tic->t_flags = XLOG_TIC_INITED; 3505 tic->t_trans_type = 0; 3506 if (permanent) 3507 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3508 3509 xlog_tic_reset_res(tic); 3510 3511 return tic; 3512 } 3513 3514 3515 /****************************************************************************** 3516 * 3517 * Log debug routines 3518 * 3519 ****************************************************************************** 3520 */ 3521 #if defined(DEBUG) 3522 /* 3523 * Make sure that the destination ptr is within the valid data region of 3524 * one of the iclogs. This uses backup pointers stored in a different 3525 * part of the log in case we trash the log structure. 3526 */ 3527 void 3528 xlog_verify_dest_ptr( 3529 struct xlog *log, 3530 char *ptr) 3531 { 3532 int i; 3533 int good_ptr = 0; 3534 3535 for (i = 0; i < log->l_iclog_bufs; i++) { 3536 if (ptr >= log->l_iclog_bak[i] && 3537 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) 3538 good_ptr++; 3539 } 3540 3541 if (!good_ptr) 3542 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); 3543 } 3544 3545 /* 3546 * Check to make sure the grant write head didn't just over lap the tail. If 3547 * the cycles are the same, we can't be overlapping. Otherwise, make sure that 3548 * the cycles differ by exactly one and check the byte count. 3549 * 3550 * This check is run unlocked, so can give false positives. Rather than assert 3551 * on failures, use a warn-once flag and a panic tag to allow the admin to 3552 * determine if they want to panic the machine when such an error occurs. For 3553 * debug kernels this will have the same effect as using an assert but, unlinke 3554 * an assert, it can be turned off at runtime. 
3555 */ 3556 STATIC void 3557 xlog_verify_grant_tail( 3558 struct xlog *log) 3559 { 3560 int tail_cycle, tail_blocks; 3561 int cycle, space; 3562 3563 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); 3564 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); 3565 if (tail_cycle != cycle) { 3566 if (cycle - 1 != tail_cycle && 3567 !(log->l_flags & XLOG_TAIL_WARN)) { 3568 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3569 "%s: cycle - 1 != tail_cycle", __func__); 3570 log->l_flags |= XLOG_TAIL_WARN; 3571 } 3572 3573 if (space > BBTOB(tail_blocks) && 3574 !(log->l_flags & XLOG_TAIL_WARN)) { 3575 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3576 "%s: space > BBTOB(tail_blocks)", __func__); 3577 log->l_flags |= XLOG_TAIL_WARN; 3578 } 3579 } 3580 } 3581 3582 /* check if it will fit */ 3583 STATIC void 3584 xlog_verify_tail_lsn( 3585 struct xlog *log, 3586 struct xlog_in_core *iclog, 3587 xfs_lsn_t tail_lsn) 3588 { 3589 int blocks; 3590 3591 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { 3592 blocks = 3593 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); 3594 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) 3595 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3596 } else { 3597 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); 3598 3599 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) 3600 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); 3601 3602 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; 3603 if (blocks < BTOBB(iclog->ic_offset) + 1) 3604 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3605 } 3606 } /* xlog_verify_tail_lsn */ 3607 3608 /* 3609 * Perform a number of checks on the iclog before writing to disk. 3610 * 3611 * 1. Make sure the iclogs are still circular 3612 * 2. Make sure we have a good magic number 3613 * 3. Make sure we don't have magic numbers in the data 3614 * 4. Check fields of each log operation header for: 3615 * A. Valid client identifier 3616 * B. tid ptr value falls in valid ptr space (user space code) 3617 * C. Length in log record header is correct according to the 3618 * individual operation headers within record. 3619 * 5. When a bwrite will occur within 5 blocks of the front of the physical 3620 * log, check the preceding blocks of the physical log to make sure all 3621 * the cycle numbers agree with the current cycle number. 
3622 */ 3623 STATIC void 3624 xlog_verify_iclog( 3625 struct xlog *log, 3626 struct xlog_in_core *iclog, 3627 int count, 3628 bool syncing) 3629 { 3630 xlog_op_header_t *ophead; 3631 xlog_in_core_t *icptr; 3632 xlog_in_core_2_t *xhdr; 3633 xfs_caddr_t ptr; 3634 xfs_caddr_t base_ptr; 3635 __psint_t field_offset; 3636 __uint8_t clientid; 3637 int len, i, j, k, op_len; 3638 int idx; 3639 3640 /* check validity of iclog pointers */ 3641 spin_lock(&log->l_icloglock); 3642 icptr = log->l_iclog; 3643 for (i=0; i < log->l_iclog_bufs; i++) { 3644 if (icptr == NULL) 3645 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); 3646 icptr = icptr->ic_next; 3647 } 3648 if (icptr != log->l_iclog) 3649 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); 3650 spin_unlock(&log->l_icloglock); 3651 3652 /* check log magic numbers */ 3653 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) 3654 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); 3655 3656 ptr = (xfs_caddr_t) &iclog->ic_header; 3657 for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count; 3658 ptr += BBSIZE) { 3659 if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) 3660 xfs_emerg(log->l_mp, "%s: unexpected magic num", 3661 __func__); 3662 } 3663 3664 /* check fields */ 3665 len = be32_to_cpu(iclog->ic_header.h_num_logops); 3666 ptr = iclog->ic_datap; 3667 base_ptr = ptr; 3668 ophead = (xlog_op_header_t *)ptr; 3669 xhdr = iclog->ic_data; 3670 for (i = 0; i < len; i++) { 3671 ophead = (xlog_op_header_t *)ptr; 3672 3673 /* clientid is only 1 byte */ 3674 field_offset = (__psint_t) 3675 ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr); 3676 if (!syncing || (field_offset & 0x1ff)) { 3677 clientid = ophead->oh_clientid; 3678 } else { 3679 idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap); 3680 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { 3681 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3682 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3683 clientid = xlog_get_client_id( 3684 xhdr[j].hic_xheader.xh_cycle_data[k]); 3685 } else { 3686 clientid = xlog_get_client_id( 3687 iclog->ic_header.h_cycle_data[idx]); 3688 } 3689 } 3690 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) 3691 xfs_warn(log->l_mp, 3692 "%s: invalid clientid %d op 0x%p offset 0x%lx", 3693 __func__, clientid, ophead, 3694 (unsigned long)field_offset); 3695 3696 /* check length */ 3697 field_offset = (__psint_t) 3698 ((xfs_caddr_t)&(ophead->oh_len) - base_ptr); 3699 if (!syncing || (field_offset & 0x1ff)) { 3700 op_len = be32_to_cpu(ophead->oh_len); 3701 } else { 3702 idx = BTOBBT((__psint_t)&ophead->oh_len - 3703 (__psint_t)iclog->ic_datap); 3704 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { 3705 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3706 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3707 op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]); 3708 } else { 3709 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); 3710 } 3711 } 3712 ptr += sizeof(xlog_op_header_t) + op_len; 3713 } 3714 } /* xlog_verify_iclog */ 3715 #endif 3716 3717 /* 3718 * Mark all iclogs IOERROR. l_icloglock is held by the caller. 3719 */ 3720 STATIC int 3721 xlog_state_ioerror( 3722 struct xlog *log) 3723 { 3724 xlog_in_core_t *iclog, *ic; 3725 3726 iclog = log->l_iclog; 3727 if (! (iclog->ic_state & XLOG_STATE_IOERROR)) { 3728 /* 3729 * Mark all the incore logs IOERROR. 3730 * From now on, no log flushes will result. 
3731 */ 3732 ic = iclog; 3733 do { 3734 ic->ic_state = XLOG_STATE_IOERROR; 3735 ic = ic->ic_next; 3736 } while (ic != iclog); 3737 return 0; 3738 } 3739 /* 3740 * Return non-zero if the state transition has already happened. 3741 */ 3742 return 1; 3743 } 3744 3745 /* 3746 * This is called from xfs_force_shutdown, when we're forcibly 3747 * shutting down the filesystem, typically because of an IO error. 3748 * Our main objectives here are to make sure that: 3749 * a. the filesystem gets marked 'SHUTDOWN' for all interested 3750 * parties to find out, 'atomically'. 3751 * b. those who're sleeping on log reservations, pinned objects and 3752 * other resources get woken up and are told the bad news. 3753 * c. nothing new gets queued up after (a) and (b) are done. 3754 * d. if !logerror, flush the iclogs to disk, then seal them off 3755 * for business. 3756 * 3757 * Note: for delayed logging the !logerror case needs to flush the regions 3758 * held in memory out to the iclogs before flushing them to disk. This needs 3759 * to be done before the log is marked as shutdown, otherwise the flush to the 3760 * iclogs will fail. 3761 */ 3762 int 3763 xfs_log_force_umount( 3764 struct xfs_mount *mp, 3765 int logerror) 3766 { 3767 struct xlog *log; 3768 int retval; 3769 3770 log = mp->m_log; 3771 3772 /* 3773 * If this happens during log recovery, don't worry about 3774 * locking; the log isn't open for business yet. 3775 */ 3776 if (!log || 3777 log->l_flags & XLOG_ACTIVE_RECOVERY) { 3778 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; 3779 if (mp->m_sb_bp) 3780 XFS_BUF_DONE(mp->m_sb_bp); 3781 return 0; 3782 } 3783 3784 /* 3785 * Somebody could've already done the hard work for us. 3786 * No need to get locks for this. 3787 */ 3788 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { 3789 ASSERT(XLOG_FORCED_SHUTDOWN(log)); 3790 return 1; 3791 } 3792 retval = 0; 3793 3794 /* 3795 * Flush the in memory commit item list before marking the log as 3796 * being shut down. We need to do it in this order to ensure all the 3797 * completed transactions are flushed to disk with the xfs_log_force() 3798 * call below. 3799 */ 3800 if (!logerror) 3801 xlog_cil_force(log); 3802 3803 /* 3804 * mark the filesystem and the log as in a shutdown state and wake 3805 * everybody up to tell them the bad news. 3806 */ 3807 spin_lock(&log->l_icloglock); 3808 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; 3809 if (mp->m_sb_bp) 3810 XFS_BUF_DONE(mp->m_sb_bp); 3811 3812 /* 3813 * This flag is sort of redundant because of the mount flag, but 3814 * it's good to maintain the separation between the log and the rest 3815 * of XFS. 3816 */ 3817 log->l_flags |= XLOG_IO_ERROR; 3818 3819 /* 3820 * If we hit a log error, we want to mark all the iclogs IOERROR 3821 * while we're still holding the loglock. 3822 */ 3823 if (logerror) 3824 retval = xlog_state_ioerror(log); 3825 spin_unlock(&log->l_icloglock); 3826 3827 /* 3828 * We don't want anybody waiting for log reservations after this. That 3829 * means we have to wake up everybody queued up on reserveq as well as 3830 * writeq. In addition, we make sure in xlog_{re}grant_log_space that 3831 * we don't enqueue anything once the SHUTDOWN flag is set, and this 3832 * action is protected by the grant locks. 3833 */ 3834 xlog_grant_head_wake_all(&log->l_reserve_head); 3835 xlog_grant_head_wake_all(&log->l_write_head); 3836 3837 if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { 3838 ASSERT(!logerror); 3839 /* 3840 * Force the incore logs to disk before shutting the 3841 * log down completely.
3842 */ 3843 _xfs_log_force(mp, XFS_LOG_SYNC, NULL); 3844 3845 spin_lock(&log->l_icloglock); 3846 retval = xlog_state_ioerror(log); 3847 spin_unlock(&log->l_icloglock); 3848 } 3849 /* 3850 * Wake up everybody waiting on xfs_log_force. 3851 * Callback all log item committed functions as if the 3852 * log writes were completed. 3853 */ 3854 xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); 3855 3856 #ifdef XFSERRORDEBUG 3857 { 3858 xlog_in_core_t *iclog; 3859 3860 spin_lock(&log->l_icloglock); 3861 iclog = log->l_iclog; 3862 do { 3863 ASSERT(iclog->ic_callback == 0); 3864 iclog = iclog->ic_next; 3865 } while (iclog != log->l_iclog); 3866 spin_unlock(&log->l_icloglock); 3867 } 3868 #endif 3869 /* return non-zero if log IOERROR transition had already happened */ 3870 return retval; 3871 } 3872 3873 STATIC int 3874 xlog_iclogs_empty( 3875 struct xlog *log) 3876 { 3877 xlog_in_core_t *iclog; 3878 3879 iclog = log->l_iclog; 3880 do { 3881 /* endianness does not matter here, zero is zero in 3882 * any language. 3883 */ 3884 if (iclog->ic_header.h_num_logops) 3885 return 0; 3886 iclog = iclog->ic_next; 3887 } while (iclog != log->l_iclog); 3888 return 1; 3889 } 3890 3891
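/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a minimal DEBUG-style helper in the spirit of xlog_iclogs_empty() above
 * that dumps the state of every iclog in the ring. The function name
 * xlog_print_iclog_states is hypothetical; it only touches fields and
 * helpers already used elsewhere in this file.
 */
#ifdef DEBUG
static void
xlog_print_iclog_states(
	struct xlog		*log)
{
	xlog_in_core_t		*iclog = log->l_iclog;

	/* walk the circular iclog list exactly as xlog_iclogs_empty() does */
	do {
		xfs_warn(log->l_mp, "iclog 0x%p: state 0x%x, offset %d, refcnt %d",
			 iclog, iclog->ic_state, iclog->ic_offset,
			 atomic_read(&iclog->ic_refcnt));
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
}
#endif /* DEBUG */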