// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static const char * const bch2_journal_errors[] = {
#define x(n)	#n,
	JOURNAL_ERRORS()
#undef x
	NULL
};

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}
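
/*
 * Editorial note (not from the original source): j->buf is a small
 * power-of-two ring of journal buffers indexed by sequence number, so
 * consecutive sequence numbers cycle through the buffers. Assuming
 * ARRAY_SIZE(j->buf) == 4 (i.e. JOURNAL_BUF_MASK == 3):
 *
 *	seq 8  -> j->buf[0]
 *	seq 9  -> j->buf[1]
 *	seq 10 -> j->buf[2]
 *	seq 11 -> j->buf[3]
 *	seq 12 -> j->buf[0] again
 *
 * which is why at most ARRAY_SIZE(j->buf) journal entries can be
 * unwritten at any given time (see journal_entry_open() below).
 */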

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(p->list); i++)
		INIT_LIST_HEAD(&p->list[i]);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, discard journal buckets, or acquire a priority (reserved
 * watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool stuck = false;
	struct printbuf buf = PRINTBUF;

	if (!(error == JOURNAL_ERR_journal_full ||
	      error == JOURNAL_ERR_journal_pin_full) ||
	    nr_unwritten_journal_entries(j) ||
	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
		return stuck;

	spin_lock(&j->lock);

	if (j->can_discard) {
		spin_unlock(&j->lock);
		return stuck;
	}

	stuck = true;

	/*
	 * The journal shutdown path will set ->err_seq, but do it here first to
	 * serialize against concurrent failures and avoid duplicate error
	 * reports.
	 */
	if (j->err_seq) {
		spin_unlock(&j->lock);
		return stuck;
	}
	j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);

	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
		bch2_journal_errors[error]);
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "%s", buf.buf);

	printbuf_reset(&buf);
	bch2_journal_pins_to_text(&buf, j);
	bch_err(c, "Journal pins:\n%s", buf.buf);
	printbuf_exit(&buf);

	bch2_fatal_error(c);
	dump_stack();

	return stuck;
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq, bool write)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	lockdep_assert_held(&j->lock);

	if (__bch2_journal_pin_put(j, seq))
		bch2_journal_reclaim_fast(j);
	if (write)
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

/*
 * Close the currently open journal entry, if there is one:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (!__journal_entry_is_open(old))
		return;

	/* Close out old buffer: */
	buf->data->u64s = cpu_to_le32(old.cur_entry_offset);

	trace_journal_entry_close(c, vstruct_bytes(buf->data));

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->last_seq		= journal_last_seq(j);
	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}
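
/*
 * Editorial illustration of the last_seq rule above (hypothetical numbers,
 * not from the original source): suppose entry 10 is currently open, and a
 * thread with a reservation on entry 10 re-pins data currently pinned at
 * seq 5, then drops the old pin. journal_last_seq() may now advance past 5,
 * but entries 6-9 may already be on disk claiming last_seq 5 or less - so
 * the advanced last_seq may only be written out with entry 10 (or later),
 * the first entry guaranteed to contain what the new pin protects.
 */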

void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time = local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

bool bch2_journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return JOURNAL_ERR_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return JOURNAL_ERR_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
		return JOURNAL_ERR_max_in_flight;

	BUG_ON(!j->cur_entry_sectors);

	buf->expires =
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= (ssize_t) j->early_journal_entries.nr)
		return JOURNAL_ERR_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush		= false;
	buf->must_flush		= false;
	buf->separate_flush	= false;
	buf->flush_time		= 0;
	buf->need_flush_to_write_buffer = true;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;

	if (j->early_journal_entries.nr) {
		memcpy(buf->data->_data, j->early_journal_entries.data,
		       j->early_journal_entries.nr * sizeof(u64));
		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
	}

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	mod_delayed_work(c->io_complete_wq,
			 &j->write_work,
			 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);

	if (j->early_journal_entries.nr)
		darray_exit(&j->early_journal_entries);
	return 0;
}
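
/*
 * Editorial sketch (simplified; the real fast path is
 * journal_res_get_fast() in journal.h): once an entry is open,
 * reservations are taken locklessly by advancing cur_entry_offset with a
 * cmpxchg loop against the state journal_entry_open() just initialized,
 * roughly:
 *
 *	old.v = atomic64_read(&j->reservations.counter);
 *	do {
 *		if (!__journal_entry_is_open(old) ||
 *		    old.cur_entry_offset + u64s > j->cur_entry_u64s)
 *			return false;	(fall back to the slowpath)
 *		new.v = old.v;
 *		new.cur_entry_offset += u64s;
 *		journal_state_inc(&new);
 *	} while (the cmpxchg fails, reloading old);
 */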

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		bch2_journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	long delta;

	spin_lock(&j->lock);
	if (!__journal_entry_is_open(j->reservations))
		goto unlock;

	delta = journal_cur_buf(j)->expires - jiffies;

	if (delta > 0)
		mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
	else
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
unlock:
	spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -BCH_ERR_erofs_journal_err;

	spin_lock(&j->lock);

	/* check once more in case somebody else shut things down... */
	if (bch2_journal_error(j)) {
		spin_unlock(&j->lock);
		return -BCH_ERR_erofs_journal_err;
	}

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open(), and call bch2_journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = JOURNAL_ERR_journal_full;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	ret = journal_entry_open(j);

	if (ret == JOURNAL_ERR_max_in_flight) {
		track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight],
				   &j->max_in_flight_start, true);
		trace_and_count(c, journal_entry_full, c);
	}
unlock:
	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;
	if (journal_error_check_stuck(j, ret, flags))
		ret = -BCH_ERR_journal_res_get_blocked;

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == JOURNAL_ERR_journal_full ||
	     ret == JOURNAL_ERR_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == JOURNAL_ERR_insufficient_devices
		? -BCH_ERR_erofs_journal_err
		: -BCH_ERR_journal_res_get_blocked;
}

/*
 * Essentially the entry function to the journalling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. The journal write is the structure used to set up journal writes. The
 * calling function will then add its keys to the structure, queuing them for
 * the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
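
/*
 * Editorial usage sketch (hypothetical caller, for illustration only):
 * the typical reservation pattern, assuming a key @k destined for
 * @btree_id at @level:
 *
 *	struct journal_res res = {};
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(k->k.u64s), 0);
 *	if (ret)
 *		return ret;
 *
 *	bch2_journal_add_entry(j, &res, BCH_JSET_ENTRY_btree_keys,
 *			       btree_id, level, k, k->k.u64s);
 *	bch2_journal_res_put(j, &res);
 *
 * bch2_journal_res_get() (in journal.h) takes the lockless fast path when
 * the current entry has room, and falls back to the slowpath above
 * otherwise.
 */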

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j:		journal object
 * @seq:	seq to flush
 * @parent:	closure object to wait with
 * Returns:	1 if @seq has already been flushed, 0 if @seq is being flushed,
 *		-EIO if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time = local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}
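
/*
 * Editorial usage sketch (hypothetical caller, for illustration only):
 * waiting on a flush via a stack closure:
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch2_journal_flush_seq_async(j, seq, &cl);
 *	closure_sync(&cl);
 *
 * When the return value is 1, @seq was already flushed and @parent is
 * never waited on, so closure_sync() returns immediately.
 */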

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (seq <= c->journal.flushed_seq_ondisk)
		return false;

	spin_lock(&j->lock);
	if (seq <= c->journal.flushed_seq_ondisk)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < seq;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal write is already in flight, and was a flush write: */
		if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_buf *buf;
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time = local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}

static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	struct journal_buf *ret = NULL;

	mutex_lock(&j->buf_lock);
	spin_lock(&j->lock);
	max_seq = min(max_seq, journal_cur_seq(j));

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= max_seq;
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *buf = j->buf + idx;

		if (buf->need_flush_to_write_buffer) {
			if (seq == journal_cur_seq(j))
				__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);

			union journal_res_state s;
			s.v = atomic64_read_acquire(&j->reservations.counter);

			ret = journal_state_count(s, idx)
				? ERR_PTR(-EAGAIN)
				: buf;
			break;
		}
	}

	spin_unlock(&j->lock);
	if (IS_ERR_OR_NULL(ret))
		mutex_unlock(&j->buf_lock);
	return ret;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	struct journal_buf *ret;

	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));
	return ret;
}
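
/*
 * Editorial note (not from the original source): on a non-NULL, non-error
 * return, the buffer is handed back with j->buf_lock still held - the
 * helper above only drops it on the NULL and ERR_PTR() paths - so the
 * caller is responsible for releasing j->buf_lock when done with the
 * buffer. ERR_PTR(-EAGAIN) means the candidate buffer still has
 * outstanding reservations; the wait_event() retries when woken.
 */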

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	struct open_bucket **ob = NULL;
	long *bu = NULL;
	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
	int ret = 0;

	BUG_ON(nr <= ja->nr);

	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err_free;
	}

	for (nr_got = 0; nr_got < nr_want; nr_got++) {
		if (new_fs) {
			bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
			if (bu[nr_got] < 0) {
				ret = -BCH_ERR_ENOSPC_bucket_alloc;
				break;
			}
		} else {
			ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
			if (ret)
				break;

			ret = bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						ob[nr_got]->bucket, BCH_DATA_journal,
						ca->mi.bucket_size));
			if (ret) {
				bch2_open_bucket_put(c, ob[nr_got]);
				bch_err_msg(c, ret, "marking new journal buckets");
				break;
			}

			bu[nr_got] = ob[nr_got]->bucket;
		}
	}

	if (!nr_got)
		goto err_free;

	/* Don't return an error if we successfully allocated some buckets: */
	ret = 0;

	if (c) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_block(&c->journal);
		mutex_lock(&c->sb_lock);
	}

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));

	BUG_ON(ja->discard_idx > ja->nr);

	pos = ja->discard_idx ?: ja->nr;

	memmove(new_buckets + pos + nr_got,
		new_buckets + pos,
		sizeof(new_buckets[0]) * (ja->nr - pos));
	memmove(new_bucket_seq + pos + nr_got,
		new_bucket_seq + pos,
		sizeof(new_bucket_seq[0]) * (ja->nr - pos));

	for (i = 0; i < nr_got; i++) {
		new_buckets[pos + i] = bu[i];
		new_bucket_seq[pos + i] = 0;
	}

	nr = ja->nr + nr_got;

	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
	if (ret)
		goto err_unblock;

	if (!new_fs)
		bch2_write_super(c);

	/* Commit: */
	if (c)
		spin_lock(&c->journal.lock);

	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);
	ja->nr = nr;

	if (pos <= ja->discard_idx)
		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx_ondisk)
		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx)
		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
	if (pos <= ja->cur_idx)
		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

	if (c)
		spin_unlock(&c->journal.lock);
err_unblock:
	if (c) {
		bch2_journal_unblock(&c->journal);
		mutex_unlock(&c->sb_lock);
	}

	if (ret && !new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						bu[i], BCH_DATA_free, 0));
err_free:
	if (!new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_open_bucket_put(c, ob[i]);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	kfree(ob);
	kfree(bu);
	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	int ret = 0;

	closure_init_stack(&cl);

	down_write(&c->state_lock);

	/* don't handle reducing nr of buckets yet: */
	if (nr < ja->nr)
		goto unlock;

	while (ja->nr < nr) {
		struct disk_reservation disk_res = { 0, 0, 0 };

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 *
		 * XXX: that's not right, disk reservations only ensure a
		 * filesystem-wide allocation will succeed, this is a device
		 * specific allocation - we can hang here:
		 */

		ret = bch2_disk_reservation_get(c, &disk_res,
						bucket_to_sector(ca, nr - ja->nr), 1, 0);
		if (ret)
			break;

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		closure_sync(&cl);

		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
			break;
	}

	bch_err_fn(c, ret);
unlock:
	up_write(&c->state_lock);
	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;
	int ret;

	if (dynamic_fault("bcachefs:add:journal_alloc")) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err;
	}

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));

	ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
err:
	bch_err_fn(ca, ret);
	return ret;
}
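
/*
 * Editorial worked example of the sizing above (hypothetical device, for
 * illustration only): for a 1TB device with 1MB (2048-sector) buckets,
 * nbuckets >> 7 gives 8192 buckets, and the clamp caps that at
 * min(1 << 13, (1 << 24) / 2048) = 8192, i.e. an 8GB journal. Smaller
 * devices get 1/128th of their capacity, but never fewer than
 * BCH_JOURNAL_BUCKETS_MIN buckets.
 */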

int bch2_fs_journal_alloc(struct bch_fs *c)
{
	for_each_online_member(c, ca) {
		if (ca->journal.nr)
			continue;

		int ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, bch2_journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       j->last_empty_seq != journal_cur_seq(j));

	cancel_delayed_work_sync(&j->write_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i, **_i;
	struct genradix_iter iter;
	bool had_entries = false;
	unsigned ptr;
	u64 last_seq = cur_seq, nr, seq;

	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		last_seq = le64_to_cpu(i->j.last_seq);
		break;
	}

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -BCH_ERR_ENOMEM_journal_pin_fifo;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->flushed_seq_ondisk	= cur_seq - 1;
	j->seq_ondisk		= cur_seq - 1;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);

		had_entries = true;
	}

	if (!had_entries)
		j->last_empty_seq = cur_seq;

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_field_get(sb, journal);
	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
		bch2_sb_field_get(sb, journal_v2);
	unsigned i, nr_bvecs;

	ja->nr = 0;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

		for (i = 0; i < nr; i++)
			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
	} else if (journal_buckets) {
		ja->nr = bch2_nr_journal_buckets(journal_buckets);
	}

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
	if (!ca->journal.bio)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
		unsigned j, dst = 0;

		for (i = 0; i < nr; i++)
			for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
				ja->buckets[dst++] =
					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
	} else if (journal_buckets) {
		for (i = 0; i < ja->nr; i++)
			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
	}

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	unsigned i;

	darray_exit(&j->early_journal_entries);

	for (i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvpfree(j->buf[i].data, j->buf[i].buf_size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	static struct lock_class_key res_key;
	unsigned i;

	mutex_init(&j->buf_lock);
	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
		return -BCH_ERR_ENOMEM_journal_pin_fifo;

	for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data)
			return -BCH_ERR_ENOMEM_journal_buf;
	}

	j->pin.front = j->pin.back = 1;
	return 0;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	unsigned long now = jiffies;
	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);
	out->atomic++;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
	prt_printf(out, "seq:\t\t\t%llu\n", journal_cur_seq(j));
	prt_printf(out, "seq_ondisk:\t\t%llu\n", j->seq_ondisk);
	prt_printf(out, "last_seq:\t\t%llu\n", journal_last_seq(j));
	prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
	prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
	prt_printf(out, "watermark:\t\t%s\n", bch2_watermarks[j->watermark]);
	prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
	prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
	prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
	prt_printf(out, "average write size:\t");
	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
	prt_newline(out);
	prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
	prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
	prt_printf(out, "reclaim kicked:\t\t%u\n", j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
	prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
	prt_printf(out, "current entry:\t\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		prt_printf(out, "error");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		prt_printf(out, "closed");
		break;
	default:
		prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	prt_newline(out);

	for (u64 seq = journal_cur_seq(j);
	     seq >= journal_last_unwritten_seq(j);
	     --seq) {
		unsigned i = seq & JOURNAL_BUF_MASK;

		prt_printf(out, "unwritten entry:");
		prt_tab(out);
		prt_printf(out, "%llu", seq);
		prt_newline(out);
		printbuf_indent_add(out, 2);

		prt_printf(out, "refcount:");
		prt_tab(out);
		prt_printf(out, "%u", journal_state_count(s, i));
		prt_newline(out);

		prt_printf(out, "sectors:");
		prt_tab(out);
		prt_printf(out, "%u", j->buf[i].sectors);
		prt_newline(out);

		prt_printf(out, "expires:");
		prt_tab(out);
		prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
		prt_newline(out);

		printbuf_indent_sub(out, 2);
	}

	prt_printf(out,
		   "replay done:\t\t%i\n",
		   test_bit(JOURNAL_REPLAY_DONE, &j->flags));

	prt_printf(out, "space:\n");
	prt_printf(out, "\tdiscarded\t%u:%u\n",
		   j->space[journal_space_discarded].next_entry,
		   j->space[journal_space_discarded].total);
	prt_printf(out, "\tclean ondisk\t%u:%u\n",
		   j->space[journal_space_clean_ondisk].next_entry,
		   j->space[journal_space_clean_ondisk].total);
	prt_printf(out, "\tclean\t\t%u:%u\n",
		   j->space[journal_space_clean].next_entry,
		   j->space[journal_space_clean].total);
	prt_printf(out, "\ttotal\t\t%u:%u\n",
		   j->space[journal_space_total].next_entry,
		   j->space[journal_space_total].total);

	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		prt_printf(out, "dev %u:\n",		ca->dev_idx);
		prt_printf(out, "\tnr\t\t%u\n",		ja->nr);
		prt_printf(out, "\tbucket size\t%u\n",	ca->mi.bucket_size);
		prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		prt_printf(out, "\tdiscard_idx\t%u\n",	ja->discard_idx);
		prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
		prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
		prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	unsigned i;

	spin_lock(&j->lock);
	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
	prt_newline(out);
	printbuf_indent_add(out, 2);

	for (i = 0; i < ARRAY_SIZE(pin_list->list); i++)
		list_for_each_entry(pin, &pin_list->list[i], list) {
			prt_printf(out, "\t%px %ps", pin, pin->flush);
			prt_newline(out);
		}

	if (!list_empty(&pin_list->flushed)) {
		prt_printf(out, "flushed:");
		prt_newline(out);
	}

	list_for_each_entry(pin, &pin_list->flushed, list) {
		prt_printf(out, "\t%px %ps", pin, pin->flush);
		prt_newline(out);
	}

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}