// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/checkpoint.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1999 Red Hat Software --- All Rights Reserved
 *
 * Checkpoint routines for the generic filesystem journaling code.
 * Part of the ext2fs journaling system.
 *
 * Checkpointing is the process of ensuring that a section of the log is
 * committed fully to disk, so that that portion of the log can be
 * reused.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <trace/events/jbd2.h>

/*
 * Unlink a buffer from a transaction checkpoint list.
 *
 * Called with j_list_lock held.
 */
static inline void __buffer_unlink(struct journal_head *jh)
{
	transaction_t *transaction = jh->b_cp_transaction;

	jh->b_cpnext->b_cpprev = jh->b_cpprev;
	jh->b_cpprev->b_cpnext = jh->b_cpnext;
	if (transaction->t_checkpoint_list == jh) {
		transaction->t_checkpoint_list = jh->b_cpnext;
		if (transaction->t_checkpoint_list == jh)
			transaction->t_checkpoint_list = NULL;
	}
}

/*
 * __jbd2_log_wait_for_space: wait until there is space in the journal.
 *
 * Called under j_state_lock *only*. It will be unlocked if we have to wait
 * for a checkpoint to free up some space in the log.
 */
void __jbd2_log_wait_for_space(journal_t *journal)
__acquires(&journal->j_state_lock)
__releases(&journal->j_state_lock)
{
	int nblocks, space_left;
	/* assert_spin_locked(&journal->j_state_lock); */

	nblocks = journal->j_max_transaction_buffers;
	while (jbd2_log_space_left(journal) < nblocks) {
		write_unlock(&journal->j_state_lock);
		mutex_lock_io(&journal->j_checkpoint_mutex);

		/*
		 * Test again, another process may have checkpointed while we
		 * were waiting for the checkpoint lock. If there are no
		 * transactions ready to be checkpointed, try to recover
		 * journal space by calling cleanup_journal_tail(), and if
		 * that doesn't work, by waiting for the currently committing
		 * transaction to complete. If there is absolutely no way
		 * to make progress, this is either a BUG or corrupted
		 * filesystem, so abort the journal and leave a stack
		 * trace for forensic evidence.
		 */
		write_lock(&journal->j_state_lock);
		if (journal->j_flags & JBD2_ABORT) {
			mutex_unlock(&journal->j_checkpoint_mutex);
			return;
		}
		spin_lock(&journal->j_list_lock);
		space_left = jbd2_log_space_left(journal);
		if (space_left < nblocks) {
			int chkpt = journal->j_checkpoint_transactions != NULL;
			tid_t tid = 0;

			if (journal->j_committing_transaction)
				tid = journal->j_committing_transaction->t_tid;
			spin_unlock(&journal->j_list_lock);
			write_unlock(&journal->j_state_lock);
			if (chkpt) {
				jbd2_log_do_checkpoint(journal);
			} else if (jbd2_cleanup_journal_tail(journal) == 0) {
				/* We were able to recover space; yay! */
				;
			} else if (tid) {
				/*
				 * jbd2_journal_commit_transaction() may want
				 * to take the checkpoint_mutex if JBD2_FLUSHED
				 * is set. So we need to temporarily drop it.
				 */
				mutex_unlock(&journal->j_checkpoint_mutex);
				jbd2_log_wait_commit(journal, tid);
				write_lock(&journal->j_state_lock);
				continue;
			} else {
				printk(KERN_ERR "%s: needed %d blocks and "
				       "only had %d space available\n",
				       __func__, nblocks, space_left);
				printk(KERN_ERR "%s: no way to get more "
				       "journal space in %s\n", __func__,
				       journal->j_devname);
				WARN_ON(1);
				jbd2_journal_abort(journal, -EIO);
			}
			write_lock(&journal->j_state_lock);
		} else {
			spin_unlock(&journal->j_list_lock);
		}
		mutex_unlock(&journal->j_checkpoint_mutex);
	}
}
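
/*
 * Illustrative sketch (not from the jbd2 sources): a typical caller of
 * __jbd2_log_wait_for_space() is the transaction start path, which re-checks
 * the free log space under j_state_lock before waiting, roughly as below.
 * The helper name is hypothetical; the real logic lives in
 * fs/jbd2/transaction.c.
 */
static void __maybe_unused example_reserve_log_space(journal_t *journal)
{
	if (jbd2_log_space_left(journal) <
	    journal->j_max_transaction_buffers) {
		write_lock(&journal->j_state_lock);
		/*
		 * Re-test under the lock; __jbd2_log_wait_for_space() expects
		 * to be entered with j_state_lock held and may drop and
		 * re-take it while checkpointing.
		 */
		if (jbd2_log_space_left(journal) <
		    journal->j_max_transaction_buffers)
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
	}
}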

static void
__flush_batch(journal_t *journal, int *batch_count)
{
	int i;
	struct blk_plug plug;

	blk_start_plug(&plug);
	for (i = 0; i < *batch_count; i++)
		write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
	blk_finish_plug(&plug);

	for (i = 0; i < *batch_count; i++) {
		struct buffer_head *bh = journal->j_chkpt_bhs[i];
		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
		journal->j_chkpt_bhs[i] = NULL;
	}
	*batch_count = 0;
}

/*
 * Perform an actual checkpoint. We take the first transaction on the
 * list of transactions to be checkpointed and send all its buffers
 * to disk. We submit larger chunks of data at once.
 *
 * The journal should be locked before calling this function.
 * Called with j_checkpoint_mutex held.
 */
int jbd2_log_do_checkpoint(journal_t *journal)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	transaction_t *transaction;
	tid_t this_tid;
	int result, batch_count = 0;

	jbd2_debug(1, "Start checkpoint\n");

	/*
	 * First thing: if there are any transactions in the log which
	 * don't need checkpointing, just eliminate them from the
	 * journal straight away.
	 */
	result = jbd2_cleanup_journal_tail(journal);
	trace_jbd2_checkpoint(journal, result);
	jbd2_debug(1, "cleanup_journal_tail returned %d\n", result);
	if (result <= 0)
		return result;

	/*
	 * OK, we need to start writing disk blocks. Take one transaction
	 * and write it.
	 */
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions)
		goto out;
	transaction = journal->j_checkpoint_transactions;
	if (transaction->t_chp_stats.cs_chp_time == 0)
		transaction->t_chp_stats.cs_chp_time = jiffies;
	this_tid = transaction->t_tid;
restart:
	/*
	 * If someone cleaned up this transaction while we slept, we're
	 * done (maybe it's a new transaction, but it fell at the same
	 * address).
	 */
	if (journal->j_checkpoint_transactions != transaction ||
	    transaction->t_tid != this_tid)
		goto out;

	/* checkpoint all of the transaction's buffers */
	while (transaction->t_checkpoint_list) {
		jh = transaction->t_checkpoint_list;
		bh = jh2bh(jh);

		if (jh->b_transaction != NULL) {
			transaction_t *t = jh->b_transaction;
			tid_t tid = t->t_tid;

			transaction->t_chp_stats.cs_forced_to_close++;
			spin_unlock(&journal->j_list_lock);
			if (unlikely(journal->j_flags & JBD2_UNMOUNT))
				/*
				 * The journal thread is dead; so
				 * starting and waiting for a commit
				 * to finish will cause us to wait for
				 * a _very_ long time.
				 */
				printk(KERN_ERR
				       "JBD2: %s: Waiting for Godot: block %llu\n",
				       journal->j_devname, (unsigned long long) bh->b_blocknr);

			if (batch_count)
				__flush_batch(journal, &batch_count);
			jbd2_log_start_commit(journal, tid);
			/*
			 * jbd2_journal_commit_transaction() may want
			 * to take the checkpoint_mutex if JBD2_FLUSHED
			 * is set, and jbd2_update_log_tail() called by
			 * jbd2_journal_commit_transaction() may also take
			 * checkpoint_mutex. So we need to temporarily
			 * drop it.
			 */
			mutex_unlock(&journal->j_checkpoint_mutex);
			jbd2_log_wait_commit(journal, tid);
			mutex_lock_io(&journal->j_checkpoint_mutex);
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		if (!trylock_buffer(bh)) {
			/*
			 * The buffer is locked: it may be under writeback,
			 * still flushing out from an earlier cycle, or being
			 * re-added to a new transaction; we need to check it
			 * again once it is unlocked.
			 */
			get_bh(bh);
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			/* the journal_head may have gone by now */
			BUFFER_TRACE(bh, "brelse");
			__brelse(bh);
			goto retry;
		} else if (!buffer_dirty(bh)) {
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "remove from checkpoint");
			/*
			 * If the transaction was released or the checkpoint
			 * list was empty, we're done.
			 */
			if (__jbd2_journal_remove_checkpoint(jh) ||
			    !transaction->t_checkpoint_list)
				goto out;
		} else {
			unlock_buffer(bh);
			/*
			 * We are about to write the buffer, but it could be
			 * raced by some other transaction shrink or buffer
			 * re-log logic once we release the j_list_lock, so
			 * leave it on the checkpoint list and check its
			 * status again to make sure it's clean.
			 */
			BUFFER_TRACE(bh, "queue");
			get_bh(bh);
			J_ASSERT_BH(bh, !buffer_jwrite(bh));
			journal->j_chkpt_bhs[batch_count++] = bh;
			transaction->t_chp_stats.cs_written++;
			transaction->t_checkpoint_list = jh->b_cpnext;
		}

		if ((batch_count == JBD2_NR_BATCH) ||
		    need_resched() || spin_needbreak(&journal->j_list_lock) ||
		    jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0])
			goto unlock_and_flush;
	}

	if (batch_count) {
		unlock_and_flush:
			spin_unlock(&journal->j_list_lock);
		retry:
			if (batch_count)
				__flush_batch(journal, &batch_count);
			spin_lock(&journal->j_list_lock);
			goto restart;
	}

out:
	spin_unlock(&journal->j_list_lock);
	result = jbd2_cleanup_journal_tail(journal);

	return (result < 0) ? result : 0;
}
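
/*
 * Illustrative sketch (not from the jbd2 sources): draining the whole
 * checkpoint list is done by calling jbd2_log_do_checkpoint() in a loop,
 * taking j_checkpoint_mutex around each pass, until no checkpoint
 * transactions remain. This mirrors the pattern used by
 * jbd2_journal_flush(); the helper name below is hypothetical.
 */
static int __maybe_unused example_checkpoint_everything(journal_t *journal)
{
	int err = 0;

	spin_lock(&journal->j_list_lock);
	while (!err && journal->j_checkpoint_transactions != NULL) {
		spin_unlock(&journal->j_list_lock);
		mutex_lock_io(&journal->j_checkpoint_mutex);
		/* Returns 0 when progress was made, or a negative error. */
		err = jbd2_log_do_checkpoint(journal);
		mutex_unlock(&journal->j_checkpoint_mutex);
		spin_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	if (is_journal_aborted(journal))
		return -EIO;
	return err;
}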

/*
 * Check the list of checkpoint transactions for the journal to see if
 * we have already got rid of any since the last update of the log tail
 * in the journal superblock. If so, we can instantly roll the
 * superblock forward to remove those transactions from the log.
 *
 * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
 *
 * Called with the journal lock held.
 *
 * This is the only part of the journaling code which really needs to be
 * aware of transaction aborts. Checkpointing involves writing to the
 * main filesystem area rather than to the journal, so it can proceed
 * even in abort state, but we must not update the super block if
 * checkpointing may have failed. Otherwise, we would lose some metadata
 * buffers which should be written-back to the filesystem.
 */

int jbd2_cleanup_journal_tail(journal_t *journal)
{
	tid_t first_tid;
	unsigned long blocknr;

	if (is_journal_aborted(journal))
		return -EIO;

	if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
		return 1;
	J_ASSERT(blocknr != 0);

	/*
	 * We need to make sure that any blocks that were recently written out
	 * --- perhaps by jbd2_log_do_checkpoint() --- are flushed out before
	 * we drop the transactions from the journal. It's unlikely this will
	 * be necessary, especially with an appropriately sized journal, but we
	 * need this to guarantee correctness. Fortunately
	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
	 */
	if (journal->j_flags & JBD2_BARRIER)
		blkdev_issue_flush(journal->j_fs_dev);

	return __jbd2_update_log_tail(journal, first_tid, blocknr);
}


/* Checkpoint list management */

/*
 * journal_shrink_one_cp_list
 *
 * Find all the written-back checkpoint buffers in the given list
 * and try to release them. If the whole transaction is released, set
 * the 'released' parameter. Return the number of released checkpointed
 * buffers.
 *
 * Called with j_list_lock held.
 */
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
						enum jbd2_shrink_type type,
						bool *released)
{
	struct journal_head *last_jh;
	struct journal_head *next_jh = jh;
	unsigned long nr_freed = 0;
	int ret;

	*released = false;
	if (!jh)
		return 0;

	last_jh = jh->b_cpprev;
	do {
		jh = next_jh;
		next_jh = jh->b_cpnext;

		if (type == JBD2_SHRINK_DESTROY) {
			ret = __jbd2_journal_remove_checkpoint(jh);
		} else {
			ret = jbd2_journal_try_remove_checkpoint(jh);
			if (ret < 0) {
				if (type == JBD2_SHRINK_BUSY_SKIP)
					continue;
				break;
			}
		}

		nr_freed++;
		if (ret) {
			*released = true;
			break;
		}

		if (need_resched())
			break;
	} while (jh != last_jh);

	return nr_freed;
}

/*
 * jbd2_journal_shrink_checkpoint_list
 *
 * Find 'nr_to_scan' written-back checkpoint buffers in the journal
 * and try to release them. Return the number of released checkpointed
 * buffers.
 *
 * Takes and drops j_list_lock internally; the caller must not hold it.
 */
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
						  unsigned long *nr_to_scan)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	bool __maybe_unused released;
	tid_t first_tid = 0, last_tid = 0, next_tid = 0;
	tid_t tid = 0;
	unsigned long nr_freed = 0;
	unsigned long freed;

again:
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions) {
		spin_unlock(&journal->j_list_lock);
		goto out;
	}

	/*
	 * Get the next transaction to shrink: resume the previous scan, or
	 * start over again. If other tasks checkpoint and drop transactions
	 * from the checkpoint list, the saved j_shrink_transaction is cleared
	 * and we start over unconditionally.
	 */
	if (journal->j_shrink_transaction)
		transaction = journal->j_shrink_transaction;
	else
		transaction = journal->j_checkpoint_transactions;

	if (!first_tid)
		first_tid = transaction->t_tid;
	last_transaction = journal->j_checkpoint_transactions->t_cpprev;
	next_transaction = transaction;
	last_tid = last_transaction->t_tid;
	do {
		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		tid = transaction->t_tid;

		freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
						   JBD2_SHRINK_BUSY_SKIP, &released);
		nr_freed += freed;
		(*nr_to_scan) -= min(*nr_to_scan, freed);
		if (*nr_to_scan == 0)
			break;
		if (need_resched() || spin_needbreak(&journal->j_list_lock))
			break;
	} while (transaction != last_transaction);

	if (transaction != last_transaction) {
		journal->j_shrink_transaction = next_transaction;
		next_tid = next_transaction->t_tid;
	} else {
		journal->j_shrink_transaction = NULL;
		next_tid = 0;
	}

	spin_unlock(&journal->j_list_lock);
	cond_resched();

	if (*nr_to_scan && next_tid)
		goto again;
out:
	trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
					  nr_freed, next_tid);

	return nr_freed;
}
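
/*
 * Illustrative sketch (not from the jbd2 sources): a memory shrinker built
 * on top of the code above would call it roughly as below. The helper name
 * is hypothetical; the real shrinker callbacks live in fs/jbd2/journal.c.
 */
static unsigned long __maybe_unused
example_reclaim_checkpoint_buffers(journal_t *journal, unsigned long nr_to_scan)
{
	unsigned long nr_shrunk;

	/*
	 * The callee takes j_list_lock itself and consumes the scan budget
	 * in place; on return, nr_to_scan holds whatever budget is left.
	 */
	nr_shrunk = jbd2_journal_shrink_checkpoint_list(journal, &nr_to_scan);

	return nr_shrunk;
}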

/*
 * journal_clean_checkpoint_list
 *
 * Find all the written-back checkpoint buffers in the journal and release
 * them. If 'type' is JBD2_SHRINK_DESTROY, release all buffers
 * unconditionally. If 'type' is JBD2_SHRINK_BUSY_STOP, stop releasing
 * buffers as soon as a busy buffer is encountered. To avoid wasting CPU
 * cycles scanning the buffer list in some cases, don't pass
 * JBD2_SHRINK_BUSY_SKIP as the 'type' for this function.
 *
 * Called with j_list_lock held.
 */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal,
					  enum jbd2_shrink_type type)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	bool released;

	WARN_ON_ONCE(type == JBD2_SHRINK_BUSY_SKIP);

	transaction = journal->j_checkpoint_transactions;
	if (!transaction)
		return;

	last_transaction = transaction->t_cpprev;
	next_transaction = transaction;
	do {
		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		journal_shrink_one_cp_list(transaction->t_checkpoint_list,
					   type, &released);
		/*
		 * This function only frees up some memory if possible, so we
		 * don't have an obligation to finish processing. Bail out if
		 * preemption is requested:
		 */
		if (need_resched())
			return;
		/*
		 * Stop scanning if we couldn't free the transaction. This
		 * avoids pointless scanning of transactions which still
		 * weren't checkpointed.
		 */
		if (!released)
			return;
	} while (transaction != last_transaction);
}

/*
 * Remove buffers from all checkpoint lists as the journal is aborted and we
 * just need to free memory
 */
void jbd2_journal_destroy_checkpoint(journal_t *journal)
{
	/*
	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
	 * early if it needs to reschedule.
	 */
	while (1) {
		spin_lock(&journal->j_list_lock);
		if (!journal->j_checkpoint_transactions) {
			spin_unlock(&journal->j_list_lock);
			break;
		}
		__jbd2_journal_clean_checkpoint_list(journal, JBD2_SHRINK_DESTROY);
		spin_unlock(&journal->j_list_lock);
		cond_resched();
	}
}

/*
 * journal_remove_checkpoint: called after a buffer has been committed
 * to disk (either by being write-back flushed to disk, or being
 * committed to the log).
 *
 * We cannot safely clean a transaction out of the log until all of the
 * buffer updates committed in that transaction have safely been stored
 * elsewhere on disk. To achieve this, all of the buffers in a
 * transaction need to be maintained on the transaction's checkpoint
 * lists until they have been rewritten, at which point this function is
 * called to remove the buffer from the existing transaction's
 * checkpoint lists.
 *
 * The function returns 1 if it frees the transaction, 0 otherwise.
 * The function can free jh and bh.
 *
 * This function is called with j_list_lock held.
 */
int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
{
	struct transaction_chp_stats_s *stats;
	transaction_t *transaction;
	journal_t *journal;

	JBUFFER_TRACE(jh, "entry");

	transaction = jh->b_cp_transaction;
	if (!transaction) {
		JBUFFER_TRACE(jh, "not on transaction");
		return 0;
	}
	journal = transaction->t_journal;

	JBUFFER_TRACE(jh, "removing from transaction");

	__buffer_unlink(jh);
	jh->b_cp_transaction = NULL;
	percpu_counter_dec(&journal->j_checkpoint_jh_count);
	jbd2_journal_put_journal_head(jh);

	/* Is this transaction empty? */
	if (transaction->t_checkpoint_list)
		return 0;

	/*
	 * There is one special case to worry about: if we have just pulled the
	 * buffer off a running or committing transaction's checkpoint list,
	 * then even if the checkpoint list is empty, the transaction obviously
	 * cannot be dropped!
	 *
	 * The locking here around t_state is a bit sleazy.
	 * See the comment at the end of jbd2_journal_commit_transaction().
	 */
	if (transaction->t_state != T_FINISHED)
		return 0;

	/*
	 * OK, that was the last buffer for the transaction, we can now
	 * safely remove this transaction from the log.
	 */
	stats = &transaction->t_chp_stats;
	if (stats->cs_chp_time)
		stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time,
						    jiffies);
	trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev,
				    transaction->t_tid, stats);

	__jbd2_journal_drop_transaction(journal, transaction);
	jbd2_journal_free_transaction(transaction);
	return 1;
}

/*
 * Check the checkpoint buffer and try to remove it from the checkpoint
 * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if
 * it frees the transaction, 0 otherwise.
 *
 * This function is called with j_list_lock held.
 */
int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_transaction)
		return -EBUSY;
	if (!trylock_buffer(bh))
		return -EBUSY;
	if (buffer_dirty(bh)) {
		unlock_buffer(bh);
		return -EBUSY;
	}
	unlock_buffer(bh);

	/*
	 * Buffer is clean and the IO has finished (we held the buffer
	 * lock) so the checkpoint is done. We can safely remove the
	 * buffer from this transaction.
	 */
	JBUFFER_TRACE(jh, "remove from checkpoint list");
	return __jbd2_journal_remove_checkpoint(jh);
}
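
/*
 * Illustrative sketch (not from the jbd2 sources): callers that want to drop
 * a clean buffer from its checkpoint do so opportunistically under
 * j_list_lock and simply ignore a -EBUSY result, roughly as below. The
 * helper name is hypothetical; a comparable pattern is used when jbd2 tries
 * to free a buffer in fs/jbd2/transaction.c.
 */
static void __maybe_unused example_try_drop_checkpoint(journal_t *journal,
							struct journal_head *jh)
{
	spin_lock(&journal->j_list_lock);
	if (jh->b_cp_transaction) {
		/*
		 * Returns 1 if the whole transaction was freed, 0 if only
		 * this buffer was removed, -EBUSY if the buffer is still in
		 * use; an opportunistic caller can ignore the result.
		 */
		jbd2_journal_try_remove_checkpoint(jh);
	}
	spin_unlock(&journal->j_list_lock);
}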

/*
 * journal_insert_checkpoint: put a committed buffer onto a checkpoint
 * list so that we know when it is safe to clean the transaction out of
 * the log.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */
void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
				      transaction_t *transaction)
{
	JBUFFER_TRACE(jh, "entry");
	J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);

	/* Get reference for checkpointing transaction */
	jbd2_journal_grab_journal_head(jh2bh(jh));
	jh->b_cp_transaction = transaction;

	if (!transaction->t_checkpoint_list) {
		jh->b_cpnext = jh->b_cpprev = jh;
	} else {
		jh->b_cpnext = transaction->t_checkpoint_list;
		jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
		jh->b_cpprev->b_cpnext = jh;
		jh->b_cpnext->b_cpprev = jh;
	}
	transaction->t_checkpoint_list = jh;
	percpu_counter_inc(&transaction->t_journal->j_checkpoint_jh_count);
}

/*
 * We've finished with this transaction structure: adios...
 *
 * The transaction must have no links except for the checkpoint by this
 * point.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */

void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
{
	assert_spin_locked(&journal->j_list_lock);

	journal->j_shrink_transaction = NULL;
	if (transaction->t_cpnext) {
		transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
		transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions =
				transaction->t_cpnext;
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions = NULL;
	}

	J_ASSERT(transaction->t_state == T_FINISHED);
	J_ASSERT(transaction->t_buffers == NULL);
	J_ASSERT(transaction->t_forget == NULL);
	J_ASSERT(transaction->t_shadow_list == NULL);
	J_ASSERT(transaction->t_checkpoint_list == NULL);
	J_ASSERT(atomic_read(&transaction->t_updates) == 0);
	J_ASSERT(journal->j_committing_transaction != transaction);
	J_ASSERT(journal->j_running_transaction != transaction);

	trace_jbd2_drop_transaction(journal, transaction);

	jbd2_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
}
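
/*
 * Illustrative sketch (not from the jbd2 sources): during commit, each
 * still-dirty metadata buffer is parked on the committing transaction's
 * checkpoint list, roughly as below, so that jbd2_log_do_checkpoint() and
 * the checkpoint shrinker can find it later. The helper name is
 * hypothetical; the real code lives in jbd2_journal_commit_transaction().
 */
static void __maybe_unused
example_park_buffer_for_checkpoint(journal_t *journal, struct journal_head *jh,
				   transaction_t *commit_transaction)
{
	struct buffer_head *bh = jh2bh(jh);

	spin_lock(&journal->j_list_lock);
	if (buffer_jbddirty(bh))
		/* Still needs writeback: keep tracking it for checkpointing. */
		__jbd2_journal_insert_checkpoint(jh, commit_transaction);
	spin_unlock(&journal->j_list_lock);
}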