/*
 * linux/fs/jbd2/revoke.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
 *
 * Copyright 2000 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal revoke routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 *
 * Revoke is the mechanism used to prevent old log records for deleted
 * metadata from being replayed on top of newer data using the same
 * blocks.  The revoke mechanism is used in two separate places:
 *
 * + Commit: during commit we write the entire list of the current
 *   transaction's revoked blocks to the journal
 *
 * + Recovery: during recovery we record the transaction ID of all
 *   revoked blocks.  If there are multiple revoke records in the log
 *   for a single block, only the last one counts, and if there is a log
 *   entry for a block beyond the last revoke, then that log entry still
 *   gets replayed.
 *
 * We can get interactions between revokes and new log data within a
 * single transaction:
 *
 * Block is revoked and then journaled:
 *   The desired end result is the journaling of the new block, so we
 *   cancel the revoke before the transaction commits.
 *
 * Block is journaled and then revoked:
 *   The revoke must take precedence over the write of the block, so we
 *   need either to cancel the journal entry or to write the revoke
 *   later in the log than the log block.  In this case, we choose the
 *   latter: journaling a block cancels any revoke record for that block
 *   in the current transaction, so any revoke for that block in the
 *   transaction must have happened after the block was journaled and so
 *   the revoke must take precedence.
 *
 * Block is revoked and then written as data:
 *   The data write is allowed to succeed, but the revoke is _not_
 *   cancelled.  We still need to prevent old log records from
 *   overwriting the new data.  We don't even need to clear the revoke
 *   bit here.
 *
 * We cache revoke status of a buffer in the current transaction in b_states
 * bits.  As the name says, revokevalid flag indicates that the cached revoke
 * status of a buffer is valid and we can rely on the cached status.
 *
 * Revoke information on buffers is a tri-state value:
 *
 * RevokeValid clear:	no cached revoke status, need to look it up
 * RevokeValid set, Revoked clear:
 *			buffer has not been revoked, and cancel_revoke
 *			need do nothing.
 * RevokeValid set, Revoked set:
 *			buffer has been revoked.
 *
 * (A short illustrative decode of these bits follows insert_revoke_hash()
 * below.)
 *
 * Locking rules:
 * We keep two hash tables of revoke records. One hashtable belongs to the
 * running transaction (is pointed to by journal->j_revoke), the other one
 * belongs to the committing transaction. Accesses to the second hash table
 * happen only from the kjournald and no other thread touches this table.  Also
 * journal_switch_revoke_table() which switches which hashtable belongs to the
 * running and which to the committing transaction is called only from
 * kjournald. Therefore we need no locks when accessing the hashtable belonging
 * to the committing transaction.
 *
 * All users operating on the hash table belonging to the running transaction
 * have a handle to the transaction. Therefore they are safe from kjournald
 * switching hash tables under them.
 * For operations on the lists of entries in the hash table j_revoke_lock is
 * used.
 *
 * Finally, also replay code uses the hash tables but at this moment no one else
 * can touch them (filesystem isn't mounted yet) and hence no locking is
 * needed.
 */

#ifndef __KERNEL__
#include "jfs_user.h"
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/log2.h>
#include <linux/hash.h>
#endif

static struct kmem_cache *jbd2_revoke_record_cache;
static struct kmem_cache *jbd2_revoke_table_cache;

/* Each revoke record represents one single revoked block.  During
   journal replay, this involves recording the transaction ID of the
   last transaction to revoke this block. */

struct jbd2_revoke_record_s
{
	struct list_head	hash;
	tid_t			sequence;	/* Used for recovery only */
	unsigned long long	blocknr;
};


/* The revoke table is just a simple hash table of revoke records. */
struct jbd2_revoke_table_s
{
	/* It is conceivable that we might want a larger hash table
	 * for recovery.  Must be a power of two. */
	int			hash_size;
	int			hash_shift;
	struct list_head	*hash_table;
};


#ifdef __KERNEL__
static void write_one_revoke_record(journal_t *, transaction_t *,
				    struct list_head *,
				    struct buffer_head **, int *,
				    struct jbd2_revoke_record_s *, int);
static void flush_descriptor(journal_t *, struct buffer_head *, int, int);
#endif

/* Utility functions to maintain the revoke table */

static inline int hash(journal_t *journal, unsigned long long block)
{
	return hash_64(block, journal->j_revoke->hash_shift);
}

static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
			      tid_t seq)
{
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;

repeat:
	record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
	if (!record)
		goto oom;

	record->sequence = seq;
	record->blocknr = blocknr;
	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
	spin_lock(&journal->j_revoke_lock);
	list_add(&record->hash, hash_list);
	spin_unlock(&journal->j_revoke_lock);
	return 0;

oom:
	if (!journal_oom_retry)
		return -ENOMEM;
	jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
	yield();
	goto repeat;
}
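/*
 * Illustrative sketch (hypothetical helper, not used anywhere in this file):
 * how a reader would decode the tri-state revoke status described in the
 * header comment above.  The buffer_revokevalid()/buffer_revoked() tests are
 * the same ones the real code below relies on; a return of -1 means the
 * cached state cannot be trusted and the hash table must be consulted.
 */
#ifdef __KERNEL__
static inline int example_cached_revoke_status(struct buffer_head *bh)
{
	if (!buffer_revokevalid(bh))
		return -1;			/* unknown: look it up */
	return buffer_revoked(bh) ? 1 : 0;	/* 1: revoked, 0: not revoked */
}
#endif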
/* Find a revoke record in the journal's hash table. */

static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
						unsigned long long blocknr)
{
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;

	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];

	spin_lock(&journal->j_revoke_lock);
	record = (struct jbd2_revoke_record_s *) hash_list->next;
	while (&(record->hash) != hash_list) {
		if (record->blocknr == blocknr) {
			spin_unlock(&journal->j_revoke_lock);
			return record;
		}
		record = (struct jbd2_revoke_record_s *) record->hash.next;
	}
	spin_unlock(&journal->j_revoke_lock);
	return NULL;
}

void jbd2_journal_destroy_revoke_caches(void)
{
	if (jbd2_revoke_record_cache) {
		kmem_cache_destroy(jbd2_revoke_record_cache);
		jbd2_revoke_record_cache = NULL;
	}
	if (jbd2_revoke_table_cache) {
		kmem_cache_destroy(jbd2_revoke_table_cache);
		jbd2_revoke_table_cache = NULL;
	}
}

int __init jbd2_journal_init_revoke_caches(void)
{
	J_ASSERT(!jbd2_revoke_record_cache);
	J_ASSERT(!jbd2_revoke_table_cache);

	jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
	if (!jbd2_revoke_record_cache)
		goto record_cache_failure;

	jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
					     SLAB_TEMPORARY);
	if (!jbd2_revoke_table_cache)
		goto table_cache_failure;
	return 0;
table_cache_failure:
	jbd2_journal_destroy_revoke_caches();
record_cache_failure:
	return -ENOMEM;
}

static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
{
	int shift = 0;
	int tmp = hash_size;
	struct jbd2_revoke_table_s *table;

	table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
	if (!table)
		goto out;

	while((tmp >>= 1UL) != 0UL)
		shift++;

	table->hash_size = hash_size;
	table->hash_shift = shift;
	table->hash_table =
		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
	if (!table->hash_table) {
		kmem_cache_free(jbd2_revoke_table_cache, table);
		table = NULL;
		goto out;
	}

	for (tmp = 0; tmp < hash_size; tmp++)
		INIT_LIST_HEAD(&table->hash_table[tmp]);

out:
	return table;
}

static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
{
	int i;
	struct list_head *hash_list;

	for (i = 0; i < table->hash_size; i++) {
		hash_list = &table->hash_table[i];
		J_ASSERT(list_empty(hash_list));
	}

	kfree(table->hash_table);
	kmem_cache_free(jbd2_revoke_table_cache, table);
}

/* Initialise the revoke table for a given journal to a given size. */
int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
{
	J_ASSERT(journal->j_revoke_table[0] == NULL);
	J_ASSERT(is_power_of_2(hash_size));

	journal->j_revoke_table[0] = jbd2_journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[0])
		goto fail0;

	journal->j_revoke_table[1] = jbd2_journal_init_revoke_table(hash_size);
	if (!journal->j_revoke_table[1])
		goto fail1;

	journal->j_revoke = journal->j_revoke_table[1];

	spin_lock_init(&journal->j_revoke_lock);

	return 0;

fail1:
	jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
fail0:
	return -ENOMEM;
}
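/*
 * Illustrative sketch (hypothetical helper, not called from this file): the
 * expected pairing of jbd2_journal_init_revoke() and
 * jbd2_journal_destroy_revoke().  The journal core does the equivalent when
 * a journal is created and torn down; the hash size of 256 is only an
 * example, the one hard requirement being that it is a power of two.
 */
static inline int example_revoke_table_lifetime(journal_t *journal)
{
	int err;

	err = jbd2_journal_init_revoke(journal, 256);
	if (err)
		return err;

	/* ... transactions run, revoke tables fill and are flushed ... */

	/* Both tables must be empty again by the time we get here. */
	jbd2_journal_destroy_revoke(journal);
	return 0;
}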
/* Destroy a journal's revoke table.  The table must already be empty! */
void jbd2_journal_destroy_revoke(journal_t *journal)
{
	journal->j_revoke = NULL;
	if (journal->j_revoke_table[0])
		jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
	if (journal->j_revoke_table[1])
		jbd2_journal_destroy_revoke_table(journal->j_revoke_table[1]);
}


#ifdef __KERNEL__

/*
 * jbd2_journal_revoke: revoke a given buffer_head from the journal.  This
 * prevents the block from being replayed during recovery if we take a
 * crash after this current transaction commits.  Any subsequent
 * metadata writes of the buffer in this transaction cancel the
 * revoke.
 *
 * Note that this call may block --- it is up to the caller to make
 * sure that there are no further calls to journal_write_metadata
 * before the revoke is complete.  In ext3, this implies calling the
 * revoke before clearing the block bitmap when we are deleting
 * metadata.
 *
 * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a
 * parameter, but does _not_ forget the buffer_head if the bh was only
 * found implicitly.
 *
 * bh_in may not be a journalled buffer - it may have come off
 * the hash tables without an attached journal_head.
 *
 * If bh_in is non-zero, jbd2_journal_revoke() will decrement its b_count
 * by one.
 */

int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
			struct buffer_head *bh_in)
{
	struct buffer_head *bh = NULL;
	journal_t *journal;
	struct block_device *bdev;
	int err;

	might_sleep();
	if (bh_in)
		BUFFER_TRACE(bh_in, "enter");

	journal = handle->h_transaction->t_journal;
	if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){
		J_ASSERT (!"Cannot set revoke feature!");
		return -EINVAL;
	}

	bdev = journal->j_fs_dev;
	bh = bh_in;

	if (!bh) {
		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh)
			BUFFER_TRACE(bh, "found on hash");
	}
#ifdef JBD2_EXPENSIVE_CHECKING
	else {
		struct buffer_head *bh2;

		/* If there is a different buffer_head lying around in
		 * memory anywhere... */
		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
		if (bh2) {
			/* ... and it has RevokeValid status... */
			if (bh2 != bh && buffer_revokevalid(bh2))
				/* ...then it better be revoked too,
				 * since it's illegal to create a revoke
				 * record against a buffer_head which is
				 * not marked revoked --- that would
				 * risk missing a subsequent revoke
				 * cancel. */
				J_ASSERT_BH(bh2, buffer_revoked(bh2));
			put_bh(bh2);
		}
	}
#endif

	/* We really ought not ever to revoke twice in a row without
	   first having the revoke cancelled: it's illegal to free a
	   block twice without allocating it in between! */
	if (bh) {
		if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
				 "inconsistent data on disk")) {
			if (!bh_in)
				brelse(bh);
			return -EIO;
		}
		set_buffer_revoked(bh);
		set_buffer_revokevalid(bh);
		if (bh_in) {
			BUFFER_TRACE(bh_in, "call jbd2_journal_forget");
			jbd2_journal_forget(handle, bh_in);
		} else {
			BUFFER_TRACE(bh, "call brelse");
			__brelse(bh);
		}
	}

	jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in);
	err = insert_revoke_hash(journal, blocknr,
				handle->h_transaction->t_tid);
	BUFFER_TRACE(bh_in, "exit");
	return err;
}
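/*
 * Illustrative sketch of the caller side (hypothetical helper, not part of
 * this file): a filesystem freeing a block inside a transaction revokes it
 * if it is metadata, so that stale journal copies cannot be replayed over
 * whatever reuses the block, and otherwise just forgets the buffer.  This is
 * a simplified version of the pattern ext4 follows when deleting metadata.
 */
static inline int example_free_block_in_handle(handle_t *handle,
					       int is_metadata,
					       struct buffer_head *bh,
					       unsigned long long blocknr)
{
	if (is_metadata)
		/* Revokes the block and forgets bh (if one was passed in). */
		return jbd2_journal_revoke(handle, blocknr, bh);
	if (bh)
		return jbd2_journal_forget(handle, bh);
	return 0;
}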
/*
 * Cancel an outstanding revoke.  For use only internally by the
 * journaling code (called from jbd2_journal_get_write_access).
 *
 * We trust buffer_revoked() on the buffer if the buffer is already
 * being journaled: if there is no revoke pending on the buffer, then we
 * don't do anything here.
 *
 * This would break if it were possible for a buffer to be revoked and
 * discarded, and then reallocated within the same transaction.  In such
 * a case we would have lost the revoked bit, but when we arrived here
 * the second time we would still have a pending revoke to cancel.  So,
 * do not trust the Revoked bit on buffers unless RevokeValid is also
 * set.
 */
int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
	struct jbd2_revoke_record_s *record;
	journal_t *journal = handle->h_transaction->t_journal;
	int need_cancel;
	int did_revoke = 0;	/* akpm: debug */
	struct buffer_head *bh = jh2bh(jh);

	jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);

	/* Is the existing Revoke bit valid?  If so, we trust it, and
	 * only perform the full cancel if the revoke bit is set.  If
	 * not, we can't trust the revoke bit, and we need to do the
	 * full search for a revoke record. */
	if (test_set_buffer_revokevalid(bh)) {
		need_cancel = test_clear_buffer_revoked(bh);
	} else {
		need_cancel = 1;
		clear_buffer_revoked(bh);
	}

	if (need_cancel) {
		record = find_revoke_record(journal, bh->b_blocknr);
		if (record) {
			jbd_debug(4, "cancelled existing revoke on "
				  "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
			spin_lock(&journal->j_revoke_lock);
			list_del(&record->hash);
			spin_unlock(&journal->j_revoke_lock);
			kmem_cache_free(jbd2_revoke_record_cache, record);
			did_revoke = 1;
		}
	}

#ifdef JBD2_EXPENSIVE_CHECKING
	/* There better not be one left behind by now! */
	record = find_revoke_record(journal, bh->b_blocknr);
	J_ASSERT_JH(jh, record == NULL);
#endif

	/* Finally, have we just cleared revoke on an unhashed
	 * buffer_head?  If so, we'd better make sure we clear the
	 * revoked status on any hashed alias too, otherwise the revoke
	 * state machine will get very upset later on. */
	if (need_cancel) {
		struct buffer_head *bh2;
		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
		if (bh2) {
			if (bh2 != bh)
				clear_buffer_revoked(bh2);
			__brelse(bh2);
		}
	}
	return did_revoke;
}
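/*
 * Illustrative sketch (hypothetical helper, not part of this file) of the
 * "block is revoked and then journaled" case from the header comment: when
 * a filesystem re-uses a previously revoked block as metadata within the
 * same transaction, re-journaling it through jbd2_journal_get_write_access()
 * is what ends up calling jbd2_journal_cancel_revoke() above, so no revoke
 * record for the block survives to the commit.
 */
static inline int example_reuse_revoked_block(handle_t *handle,
					      struct buffer_head *bh)
{
	int err;

	/* Cancels any pending revoke for bh as a side effect. */
	err = jbd2_journal_get_write_access(handle, bh);
	if (err)
		return err;

	/* ... fill in the new metadata contents of bh ... */

	return jbd2_journal_dirty_metadata(handle, bh);
}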
/*
 * jbd2_clear_buffer_revoked_flags() clears the revoked flag of buffers in
 * the revoke table to reflect that there are no revoked buffers in the next
 * transaction which is going to be started.
 */
void jbd2_clear_buffer_revoked_flags(journal_t *journal)
{
	struct jbd2_revoke_table_s *revoke = journal->j_revoke;
	int i = 0;

	for (i = 0; i < revoke->hash_size; i++) {
		struct list_head *hash_list;
		struct list_head *list_entry;
		hash_list = &revoke->hash_table[i];

		list_for_each(list_entry, hash_list) {
			struct jbd2_revoke_record_s *record;
			struct buffer_head *bh;
			record = (struct jbd2_revoke_record_s *)list_entry;
			bh = __find_get_block(journal->j_fs_dev,
					      record->blocknr,
					      journal->j_blocksize);
			if (bh) {
				clear_buffer_revoked(bh);
				__brelse(bh);
			}
		}
	}
}

/*
 * jbd2_journal_switch_revoke_table() selects j_revoke for the next
 * transaction; we do not want to suspend any processing until all revokes
 * are written -bzzz
 */
void jbd2_journal_switch_revoke_table(journal_t *journal)
{
	int i;

	if (journal->j_revoke == journal->j_revoke_table[0])
		journal->j_revoke = journal->j_revoke_table[1];
	else
		journal->j_revoke = journal->j_revoke_table[0];

	for (i = 0; i < journal->j_revoke->hash_size; i++)
		INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
}

/*
 * Write revoke records to the journal for all entries in the current
 * revoke hash, deleting the entries as we go.
 */
void jbd2_journal_write_revoke_records(journal_t *journal,
				       transaction_t *transaction,
				       struct list_head *log_bufs,
				       int write_op)
{
	struct buffer_head *descriptor;
	struct jbd2_revoke_record_s *record;
	struct jbd2_revoke_table_s *revoke;
	struct list_head *hash_list;
	int i, offset, count;

	descriptor = NULL;
	offset = 0;
	count = 0;

	/* select revoke table for committing transaction */
	revoke = journal->j_revoke == journal->j_revoke_table[0] ?
		journal->j_revoke_table[1] : journal->j_revoke_table[0];

	for (i = 0; i < revoke->hash_size; i++) {
		hash_list = &revoke->hash_table[i];

		while (!list_empty(hash_list)) {
			record = (struct jbd2_revoke_record_s *)
				hash_list->next;
			write_one_revoke_record(journal, transaction, log_bufs,
						&descriptor, &offset,
						record, write_op);
			count++;
			list_del(&record->hash);
			kmem_cache_free(jbd2_revoke_record_cache, record);
		}
	}
	if (descriptor)
		flush_descriptor(journal, descriptor, offset, write_op);
	jbd_debug(1, "Wrote %d revoke records\n", count);
}
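/*
 * Illustrative sketch (hypothetical helper, not used by this code): roughly
 * how many revoke records fit into one descriptor block.  Each record is a
 * 4- or 8-byte block number, written after the revoke block header and, on
 * journals with v2/v3 checksums, kept clear of the checksum tail.  This is
 * the same arithmetic that write_one_revoke_record() and flush_descriptor()
 * below apply incrementally.
 */
static inline int example_revoke_records_per_block(journal_t *journal)
{
	int space = journal->j_blocksize - sizeof(jbd2_journal_revoke_header_t);

	if (jbd2_journal_has_csum_v2or3(journal))
		space -= sizeof(struct jbd2_journal_revoke_tail);

	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
		return space / 8;
	return space / 4;
}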
/*
 * Write out one revoke record.  We need to create a new descriptor
 * block if the old one is full or if we have not already created one.
 */

static void write_one_revoke_record(journal_t *journal,
				    transaction_t *transaction,
				    struct list_head *log_bufs,
				    struct buffer_head **descriptorp,
				    int *offsetp,
				    struct jbd2_revoke_record_s *record,
				    int write_op)
{
	int csum_size = 0;
	struct buffer_head *descriptor;
	int offset;
	journal_header_t *header;

	/* If we are already aborting, this all becomes a noop.  We
	   still need to go round the loop in
	   jbd2_journal_write_revoke_records in order to free all of the
	   revoke records: only the IO to the journal is omitted. */
	if (is_journal_aborted(journal))
		return;

	descriptor = *descriptorp;
	offset = *offsetp;

	/* Do we need to leave space at the end for a checksum? */
	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_revoke_tail);

	/* Make sure we have a descriptor with space left for the record */
	if (descriptor) {
		if (offset >= journal->j_blocksize - csum_size) {
			flush_descriptor(journal, descriptor, offset, write_op);
			descriptor = NULL;
		}
	}

	if (!descriptor) {
		descriptor = jbd2_journal_get_descriptor_buffer(journal);
		if (!descriptor)
			return;
		header = (journal_header_t *)descriptor->b_data;
		header->h_magic	    = cpu_to_be32(JBD2_MAGIC_NUMBER);
		header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
		header->h_sequence  = cpu_to_be32(transaction->t_tid);

		/* Record it so that we can wait for IO completion later */
		BUFFER_TRACE(descriptor, "file in log_bufs");
		jbd2_file_log_bh(log_bufs, descriptor);

		offset = sizeof(jbd2_journal_revoke_header_t);
		*descriptorp = descriptor;
	}

	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
		* ((__be64 *)(&descriptor->b_data[offset])) =
			cpu_to_be64(record->blocknr);
		offset += 8;

	} else {
		* ((__be32 *)(&descriptor->b_data[offset])) =
			cpu_to_be32(record->blocknr);
		offset += 4;
	}

	*offsetp = offset;
}

static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct jbd2_journal_revoke_tail *tail;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
			sizeof(struct jbd2_journal_revoke_tail));
	tail->r_checksum = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	tail->r_checksum = cpu_to_be32(csum);
}

/*
 * Flush a revoke descriptor out to the journal.  If we are aborting,
 * this is a noop; otherwise we are generating a buffer which needs to
 * be waited for during commit, so it has to go onto the appropriate
 * journal buffer list.
 */

static void flush_descriptor(journal_t *journal,
			     struct buffer_head *descriptor,
			     int offset, int write_op)
{
	jbd2_journal_revoke_header_t *header;

	if (is_journal_aborted(journal)) {
		put_bh(descriptor);
		return;
	}

	header = (jbd2_journal_revoke_header_t *)descriptor->b_data;
	header->r_count = cpu_to_be32(offset);
	jbd2_revoke_csum_set(journal, descriptor);

	set_buffer_jwrite(descriptor);
	BUFFER_TRACE(descriptor, "write");
	set_buffer_dirty(descriptor);
	write_dirty_buffer(descriptor, write_op);
}
#endif

/*
 * Revoke support for recovery.
 *
 * Recovery needs to be able to:
 *
 *  record all revoke records, including the tid of the latest instance
 *  of each revoke in the journal
 *
 *  check whether a given block in a given transaction should be replayed
 *  (ie. has not been revoked by a revoke record in that or a subsequent
 *  transaction)
 *
 *  empty the revoke table after recovery.
 *
 * (A sketch of this calling sequence follows immediately below.)
 */
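/*
 * Illustrative sketch (hypothetical helper; the real callers live in
 * fs/jbd2/recovery.c): the order in which the three operations above are
 * used.  The scan pass records revokes with jbd2_journal_set_revoke(), the
 * replay pass asks jbd2_journal_test_revoke() before replaying a logged
 * block, and jbd2_journal_clear_revoke() empties the table once recovery
 * is done.
 */
static inline int example_should_replay(journal_t *journal,
					unsigned long long blocknr,
					tid_t sequence)
{
	/* Non-zero when the logged copy should be replayed, i.e. no revoke
	 * from this or a later transaction suppresses it. */
	return !jbd2_journal_test_revoke(journal, blocknr, sequence);
}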
/*
 * First, setting revoke records.  We create a new revoke record for
 * every block ever revoked in the log as we scan it for recovery, and
 * we update the existing records if we find multiple revokes for a
 * single block.
 */

int jbd2_journal_set_revoke(journal_t *journal,
		       unsigned long long blocknr,
		       tid_t sequence)
{
	struct jbd2_revoke_record_s *record;

	record = find_revoke_record(journal, blocknr);
	if (record) {
		/* If we have multiple occurrences, only record the
		 * latest sequence number in the hashed record */
		if (tid_gt(sequence, record->sequence))
			record->sequence = sequence;
		return 0;
	}
	return insert_revoke_hash(journal, blocknr, sequence);
}

/*
 * Test revoke records.  For a given block referenced in the log, has
 * that block been revoked?  A revoke record with a given transaction
 * sequence number revokes all blocks in that transaction and earlier
 * ones, but later transactions still need to be replayed.
 */

int jbd2_journal_test_revoke(journal_t *journal,
			unsigned long long blocknr,
			tid_t sequence)
{
	struct jbd2_revoke_record_s *record;

	record = find_revoke_record(journal, blocknr);
	if (!record)
		return 0;
	if (tid_gt(sequence, record->sequence))
		return 0;
	return 1;
}

/*
 * Finally, once recovery is over, we need to clear the revoke table so
 * that it can be reused by the running filesystem.
 */

void jbd2_journal_clear_revoke(journal_t *journal)
{
	int i;
	struct list_head *hash_list;
	struct jbd2_revoke_record_s *record;
	struct jbd2_revoke_table_s *revoke;

	revoke = journal->j_revoke;

	for (i = 0; i < revoke->hash_size; i++) {
		hash_list = &revoke->hash_table[i];
		while (!list_empty(hash_list)) {
			record = (struct jbd2_revoke_record_s*) hash_list->next;
			list_del(&record->hash);
			kmem_cache_free(jbd2_revoke_record_cache, record);
		}
	}
}