// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static const char * const bch2_journal_errors[] = {
#define x(n)	#n,
	JOURNAL_ERRORS()
#undef x
	NULL
};

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	unsigned i = seq & JOURNAL_BUF_MASK;
	struct journal_buf *buf = j->buf + i;

	prt_printf(out, "seq:\t%llu\n", seq);
	printbuf_indent_add(out, 2);

	prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));

	prt_printf(out, "size:\t");
	prt_human_readable_u64(out, vstruct_bytes(buf->data));
	prt_newline(out);

	prt_printf(out, "expires:\t");
	prt_printf(out, "%li jiffies\n", buf->expires - jiffies);

	prt_printf(out, "flags:\t");
	if (buf->noflush)
		prt_str(out, "noflush ");
	if (buf->must_flush)
		prt_str(out, "must_flush ");
	if (buf->separate_flush)
		prt_str(out, "separate_flush ");
	if (buf->need_flush_to_write_buffer)
		prt_str(out, "need_flush_to_write_buffer ");
	if (buf->write_started)
		prt_str(out, "write_started ");
	if (buf->write_allocated)
		prt_str(out, "write_allocated ");
	if (buf->write_done)
		prt_str(out, "write_done");
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
{
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++)
		bch2_journal_buf_to_text(out, j, seq);
	prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}
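/*
 * Example of the mapping above: journal bufs form a small ring indexed by the
 * low bits of the sequence number. Assuming JOURNAL_BUF_NR == 4 (so
 * JOURNAL_BUF_MASK == 3), seqs 8, 9, 10, 11 map to bufs 0, 1, 2, 3 and seq 12
 * wraps back to buf 0 - which is why a buf can only be reused once its write
 * has completed and j->seq_ondisk has advanced past it.
 */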
static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(p->list); i++)
		INIT_LIST_HEAD(&p->list[i]);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, to discard journal buckets, nor acquire priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool stuck = false;
	struct printbuf buf = PRINTBUF;

	if (!(error == JOURNAL_ERR_journal_full ||
	      error == JOURNAL_ERR_journal_pin_full) ||
	    nr_unwritten_journal_entries(j) ||
	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
		return stuck;

	spin_lock(&j->lock);

	if (j->can_discard) {
		spin_unlock(&j->lock);
		return stuck;
	}

	stuck = true;

	/*
	 * The journal shutdown path will set ->err_seq, but do it here first to
	 * serialize against concurrent failures and avoid duplicate error
	 * reports.
	 */
	if (j->err_seq) {
		spin_unlock(&j->lock);
		return stuck;
	}
	j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);

	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
		bch2_journal_errors[error]);
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "%s", buf.buf);

	printbuf_reset(&buf);
	bch2_journal_pins_to_text(&buf, j);
	bch_err(c, "Journal pins:\n%s", buf.buf);
	printbuf_exit(&buf);

	bch2_fatal_error(c);
	dump_stack();

	return stuck;
}

void bch2_journal_do_writes(struct journal *j)
{
	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *w = j->buf + idx;

		if (w->write_started && !w->write_allocated)
			break;
		if (w->write_started)
			continue;

		if (!journal_state_count(j->reservations, idx)) {
			w->write_started = true;
			closure_call(&w->io, bch2_journal_write, j->wq, NULL);
		}

		break;
	}
}
/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq)
{
	lockdep_assert_held(&j->lock);

	if (__bch2_journal_pin_put(j, seq))
		bch2_journal_reclaim_fast(j);
	bch2_journal_do_writes(j);

	/*
	 * for __bch2_next_write_buffer_flush_journal_buf(), when quiescing an
	 * open journal entry
	 */
	wake_up(&j->wait);
}
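/*
 * Buffer refcounting, in brief: each outstanding journal reservation holds a
 * count on its buf (see journal_state_count()), and __bch2_journal_buf_put()
 * invokes bch2_journal_buf_put_final() above once the last count is dropped.
 * A sketch of the lifecycle, assuming a single reservation:
 *
 *	journal_entry_open()	-> count 1 (the "entry open" ref)
 *	journal_res_get()	-> count 2
 *	journal_res_put()	-> count 1
 *	__journal_entry_close()	-> count 0, buf may now be written
 */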
/*
 * Closes the current journal entry:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	old.v = atomic64_read(&j->reservations.counter);
	do {
		new.v = old.v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
				       &old.v, new.v));

	if (!__journal_entry_is_open(old))
		return;

	if (old.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL)
		old.cur_entry_offset = j->cur_entry_offset_if_blocked;

	/* Close out old buffer: */
	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	if (trace_journal_entry_close_enabled() && trace) {
		struct printbuf pbuf = PRINTBUF;
		pbuf.atomic++;

		prt_str(&pbuf, "entry size: ");
		prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
		prt_newline(&pbuf);
		bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
		trace_journal_entry_close(c, pbuf.buf);
		printbuf_exit(&pbuf);
	}

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->last_seq		= journal_last_seq(j);
	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}

void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

bool bch2_journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}
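/*
 * State machine note: j->reservations.cur_entry_offset doubles as both the
 * current entry's fill level (in u64s) and, at the top of its range, a set of
 * sentinels - JOURNAL_ENTRY_BLOCKED_VAL, JOURNAL_ENTRY_CLOSED_VAL and
 * JOURNAL_ENTRY_ERROR_VAL (defined in journal_types.h, in that order). Since
 * __journal_entry_is_open() tests offset < JOURNAL_ENTRY_CLOSED_VAL, a blocked
 * entry still counts as open; its real offset is stashed in
 * j->cur_entry_offset_if_blocked, as __journal_entry_close() above restores.
 */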
/*
 * Should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return JOURNAL_ERR_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return JOURNAL_ERR_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
		return JOURNAL_ERR_max_in_flight;

	if (bch2_fs_fatal_err_on(journal_cur_seq(j) >= JOURNAL_SEQ_MAX,
				 c, "cannot start: journal seq overflow"))
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */

	BUG_ON(!j->cur_entry_sectors);

	buf->expires		=
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= (ssize_t) j->early_journal_entries.nr)
		return JOURNAL_ERR_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush			= false;
	buf->must_flush			= false;
	buf->separate_flush		= false;
	buf->flush_time			= 0;
	buf->need_flush_to_write_buffer	= true;
	buf->write_started		= false;
	buf->write_allocated		= false;
	buf->write_done			= false;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;

	if (j->early_journal_entries.nr) {
		memcpy(buf->data->_data, j->early_journal_entries.data,
		       j->early_journal_entries.nr * sizeof(u64));
		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
	}

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	old.v = atomic64_read(&j->reservations.counter);
	do {
		new.v = old.v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
				       &old.v, new.v));

	if (nr_unwritten_journal_entries(j) == 1)
		mod_delayed_work(j->wq,
				 &j->write_work,
				 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);

	if (j->early_journal_entries.nr)
		darray_exit(&j->early_journal_entries);
	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		bch2_journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	spin_lock(&j->lock);
	if (__journal_entry_is_open(j->reservations)) {
		long delta = journal_cur_buf(j)->expires - jiffies;

		if (delta > 0)
			mod_delayed_work(j->wq, &j->write_work, delta);
		else
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	}
	spin_unlock(&j->lock);
}
static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -BCH_ERR_erofs_journal_err;

	if (j->blocked)
		return -BCH_ERR_journal_res_get_blocked;

	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
		ret = JOURNAL_ERR_journal_full;
		can_discard = j->can_discard;
		goto out;
	}

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
		ret = JOURNAL_ERR_max_in_flight;
		goto out;
	}

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call bch2_journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		ret = 0;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
	ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
unlock:
	can_discard = j->can_discard;
	spin_unlock(&j->lock);
out:
	if (ret == JOURNAL_ERR_retry)
		goto retry;
	if (!ret)
		return 0;

	if (journal_error_check_stuck(j, ret, flags))
		ret = -BCH_ERR_journal_res_get_blocked;

	if (ret == JOURNAL_ERR_max_in_flight &&
	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {
		struct printbuf buf = PRINTBUF;
		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
		bch2_journal_bufs_to_text(&buf, j);
		trace_journal_entry_full(c, buf.buf);
		printbuf_exit(&buf);
		count_event(c, journal_entry_full);
	}

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == JOURNAL_ERR_journal_full ||
	     ret == JOURNAL_ERR_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == JOURNAL_ERR_insufficient_devices
		? -BCH_ERR_erofs_journal_err
		: -BCH_ERR_journal_res_get_blocked;
}
/*
 * Essentially the entry function to the journalling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. The journal write is the structure used to set up journal writes. The
 * calling function will then add its keys to the structure, queueing them for
 * the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	if (closure_wait_event_timeout(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK),
		   HZ * 10))
		return ret;

	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct printbuf buf = PRINTBUF;
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "Journal stuck? Waited for 10 seconds...\n%s",
		buf.buf);
	printbuf_exit(&buf);

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
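/*
 * A minimal usage sketch (illustrative only - real callers live in the btree
 * update path, and sizes/flags will differ):
 *
 *	struct journal_res res = {};
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *	if (ret)
 *		return ret;
 *
 *	... copy keys into the entry at the reservation's offset ...
 *
 *	bch2_journal_res_put(j, &res);
 */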
/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j:		journal object
 * @seq:	seq to flush
 * @parent:	closure object to wait with
 * Returns:	1 if @seq has already been flushed, 0 if @seq is being flushed,
 *		-BCH_ERR_journal_flush_err if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -BCH_ERR_journal_flush_err;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

		spin_unlock(&j->lock);

		/*
		 * We're called from bch2_journal_flush_seq() -> wait_event();
		 * but this might block. We won't usually block, so we won't
		 * livelock:
		 */
		sched_annotate_sleep();
		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = journal_seq_to_buf(j, seq);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, or if we promised it
	 * wouldn't be a flush, flush the next sequence number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq, unsigned task_state)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_state(j->wait,
			       (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)),
			       task_state);

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq), TASK_UNINTERRUPTIBLE);
}

/*
 * bch2_journal_noflush_seq - ask the journal not to issue any flushes in the
 * range [start, end)
 * @j:		journal object
 * @start:	start of range
 * @end:	end of range (exclusive)
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (c->journal.flushed_seq_ondisk >= start)
		return false;

	spin_lock(&j->lock);
	if (c->journal.flushed_seq_ondisk >= start)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < end;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal flush already in flight, or flush requested */
		if (buf->must_flush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

static int __bch2_journal_meta(struct journal *j)
{
	struct journal_res res = {};
	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	struct journal_buf *buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time	= local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq, TASK_UNINTERRUPTIBLE);
}
int bch2_journal_meta(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal))
		return -EROFS;

	int ret = __bch2_journal_meta(j);
	bch2_write_ref_put(c, BCH_WRITE_REF_journal);
	return ret;
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	if (!--j->blocked &&
	    j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL &&
	    j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) {
		union journal_res_state old, new;

		old.v = atomic64_read(&j->reservations.counter);
		do {
			new.v = old.v;
			new.cur_entry_offset = j->cur_entry_offset_if_blocked;
		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
	}
	spin_unlock(&j->lock);

	journal_wake(j);
}

static void __bch2_journal_block(struct journal *j)
{
	if (!j->blocked++) {
		union journal_res_state old, new;

		old.v = atomic64_read(&j->reservations.counter);
		do {
			j->cur_entry_offset_if_blocked = old.cur_entry_offset;

			if (j->cur_entry_offset_if_blocked >= JOURNAL_ENTRY_CLOSED_VAL)
				break;

			new.v = old.v;
			new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL;
		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));

		journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset);
	}
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_block(j);
	spin_unlock(&j->lock);

	journal_quiesce(j);
}
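/*
 * Illustrative pairing (sketch only; real callers differ): blocking nests via
 * the j->blocked counter, so each bch2_journal_block() must be matched by
 * exactly one bch2_journal_unblock():
 *
 *	bch2_journal_block(j);
 *	... journal is quiesced, no new reservations succeed ...
 *	bch2_journal_unblock(j);
 */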
static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j,
								      u64 max_seq, bool *blocked)
{
	struct journal_buf *ret = NULL;

	/* We're inside wait_event(), but using mutex_lock(): */
	sched_annotate_sleep();
	mutex_lock(&j->buf_lock);
	spin_lock(&j->lock);
	max_seq = min(max_seq, journal_cur_seq(j));

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= max_seq;
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *buf = j->buf + idx;

		if (buf->need_flush_to_write_buffer) {
			union journal_res_state s;
			s.v = atomic64_read_acquire(&j->reservations.counter);

			unsigned open = seq == journal_cur_seq(j) && __journal_entry_is_open(s);

			if (open && !*blocked) {
				__bch2_journal_block(j);
				*blocked = true;
			}

			ret = journal_state_count(s, idx) > open
				? ERR_PTR(-EAGAIN)
				: buf;
			break;
		}
	}

	spin_unlock(&j->lock);
	if (IS_ERR_OR_NULL(ret))
		mutex_unlock(&j->buf_lock);
	return ret;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j,
							     u64 max_seq, bool *blocked)
{
	struct journal_buf *ret;
	*blocked = false;

	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j,
						max_seq, blocked)) != ERR_PTR(-EAGAIN));
	if (IS_ERR_OR_NULL(ret) && *blocked)
		bch2_journal_unblock(j);

	return ret;
}
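/*
 * Caller contract, sketched (the real consumer lives in the btree write buffer
 * code): a non-NULL, non-error return hands the caller the buf with
 * j->buf_lock held; NULL means nothing up to max_seq needs flushing. On the
 * NULL/error path the journal is already unblocked above, so the caller only
 * unblocks on success:
 *
 *	bool blocked;
 *	struct journal_buf *buf =
 *		bch2_next_write_buffer_flush_journal_buf(j, max_seq, &blocked);
 *	if (!IS_ERR_OR_NULL(buf)) {
 *		... copy keys out of buf ...
 *		mutex_unlock(&j->buf_lock);
 *		if (blocked)
 *			bch2_journal_unblock(j);
 *	}
 */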
/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	struct open_bucket **ob = NULL;
	long *bu = NULL;
	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
	int ret = 0;

	BUG_ON(nr <= ja->nr);

	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err_free;
	}

	for (nr_got = 0; nr_got < nr_want; nr_got++) {
		enum bch_watermark watermark = new_fs
			? BCH_WATERMARK_btree
			: BCH_WATERMARK_normal;

		ob[nr_got] = bch2_bucket_alloc(c, ca, watermark,
					       BCH_DATA_journal, cl);
		ret = PTR_ERR_OR_ZERO(ob[nr_got]);
		if (ret)
			break;

		if (!new_fs) {
			ret = bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						ob[nr_got]->bucket, BCH_DATA_journal,
						ca->mi.bucket_size, BTREE_TRIGGER_transactional));
			if (ret) {
				bch2_open_bucket_put(c, ob[nr_got]);
				bch_err_msg(c, ret, "marking new journal buckets");
				break;
			}
		}

		bu[nr_got] = ob[nr_got]->bucket;
	}

	if (!nr_got)
		goto err_free;

	/* Don't return an error if we successfully allocated some buckets: */
	ret = 0;

	if (c) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_block(&c->journal);
		mutex_lock(&c->sb_lock);
	}

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));

	BUG_ON(ja->discard_idx > ja->nr);

	pos = ja->discard_idx ?: ja->nr;

	memmove(new_buckets + pos + nr_got,
		new_buckets + pos,
		sizeof(new_buckets[0]) * (ja->nr - pos));
	memmove(new_bucket_seq + pos + nr_got,
		new_bucket_seq + pos,
		sizeof(new_bucket_seq[0]) * (ja->nr - pos));

	for (i = 0; i < nr_got; i++) {
		new_buckets[pos + i]	= bu[i];
		new_bucket_seq[pos + i]	= 0;
	}

	nr = ja->nr + nr_got;

	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
	if (ret)
		goto err_unblock;

	bch2_write_super(c);

	/* Commit: */
	if (c)
		spin_lock(&c->journal.lock);

	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);
	ja->nr = nr;

	if (pos <= ja->discard_idx)
		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx_ondisk)
		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx)
		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
	if (pos <= ja->cur_idx)
		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

	if (c)
		spin_unlock(&c->journal.lock);
err_unblock:
	if (c) {
		bch2_journal_unblock(&c->journal);
		mutex_unlock(&c->sb_lock);
	}

	if (ret && !new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						bu[i], BCH_DATA_free, 0,
						BTREE_TRIGGER_transactional));
err_free:
	for (i = 0; i < nr_got; i++)
		bch2_open_bucket_put(c, ob[i]);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	kfree(ob);
	kfree(bu);
	return ret;
}
/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	int ret = 0;

	closure_init_stack(&cl);

	down_write(&c->state_lock);

	/* don't handle reducing nr of buckets yet: */
	if (nr < ja->nr)
		goto unlock;

	while (ja->nr < nr) {
		struct disk_reservation disk_res = { 0, 0, 0 };

		/*
		 * note: journal buckets aren't really counted as _sectors_
		 * used yet, so we don't need the disk reservation to avoid the
		 * BUG_ON() in buckets.c when space used goes up without a
		 * reservation - but we do need the reservation to ensure we'll
		 * actually be able to allocate:
		 *
		 * XXX: that's not right, disk reservations only ensure a
		 * filesystem-wide allocation will succeed, this is a device
		 * specific allocation - we can hang here:
		 */

		ret = bch2_disk_reservation_get(c, &disk_res,
						bucket_to_sector(ca, nr - ja->nr), 1, 0);
		if (ret)
			break;

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		closure_sync(&cl);

		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
			break;
	}

	bch_err_fn(c, ret);
unlock:
	up_write(&c->state_lock);
	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
{
	unsigned nr;
	int ret;

	if (dynamic_fault("bcachefs:add:journal_alloc")) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err;
	}

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));

	ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
err:
	bch_err_fn(ca, ret);
	return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
	for_each_online_member(c, ca) {
		if (ca->journal.nr)
			continue;

		int ret = bch2_dev_journal_alloc(ca, true);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	if (!test_bit(JOURNAL_running, &j->flags))
		return;

	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, bch2_journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	__bch2_journal_meta(j);

	journal_quiesce(j);
	cancel_delayed_work_sync(&j->write_work);

	WARN(!bch2_journal_error(j) &&
	     test_bit(JOURNAL_replay_done, &j->flags) &&
	     j->last_empty_seq != journal_cur_seq(j),
	     "journal shutdown error: cur seq %llu but last empty seq %llu",
	     journal_cur_seq(j), j->last_empty_seq);

	if (!bch2_journal_error(j))
		clear_bit(JOURNAL_running, &j->flags);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i, **_i;
	struct genradix_iter iter;
	bool had_entries = false;
	u64 last_seq = cur_seq, nr, seq;

	if (cur_seq >= JOURNAL_SEQ_MAX) {
		bch_err(c, "cannot start: journal seq overflow");
		return -EINVAL;
	}

	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		last_seq = le64_to_cpu(i->j.last_seq);
		break;
	}

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -BCH_ERR_ENOMEM_journal_pin_fifo;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->flushed_seq_ondisk	= cur_seq - 1;
	j->seq_ondisk		= cur_seq - 1;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		darray_for_each(i->ptrs, ptr)
			bch2_dev_list_add_dev(&p->devs, ptr->dev);

		had_entries = true;
	}

	if (!had_entries)
		j->last_empty_seq = cur_seq - 1; /* to match j->seq */

	spin_lock(&j->lock);

	set_bit(JOURNAL_running, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}
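/*
 * Worked example of the startup state above (values assumed for illustration):
 * if replay found entries with last_seq 90 and the next seq to write is
 * cur_seq == 100, then pin lists for seqs [90, 100) are initialized,
 * j->seq is set to 99, and the first entry opened by journal_entry_open()
 * will be seq 100.
 */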
/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	struct journal_device *ja = &ca->journal;

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		kfree(ja->bio[i]);
		ja->bio[i] = NULL;
	}

	kfree(ja->buckets);
	kfree(ja->bucket_seq);
	ja->buckets	= NULL;
	ja->bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_field_get(sb, journal);
	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
		bch2_sb_field_get(sb, journal_v2);

	ja->nr = 0;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

		for (unsigned i = 0; i < nr; i++)
			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
	} else if (journal_buckets) {
		ja->nr = bch2_nr_journal_buckets(journal_buckets);
	}

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
				     nr_bvecs), GFP_KERNEL);
		if (!ja->bio[i])
			return -BCH_ERR_ENOMEM_dev_journal_init;

		ja->bio[i]->ca = ca;
		ja->bio[i]->buf_idx = i;
		bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
	}

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
		unsigned dst = 0;

		for (unsigned i = 0; i < nr; i++)
			for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
				ja->buckets[dst++] =
					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
	} else if (journal_buckets) {
		for (unsigned i = 0; i < ja->nr; i++)
			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
	}

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	if (j->wq)
		destroy_workqueue(j->wq);

	darray_exit(&j->early_journal_entries);

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvfree(j->buf[i].data);
	free_fifo(&j->pin);
}
int bch2_fs_journal_init(struct journal *j)
{
	static struct lock_class_key res_key;

	mutex_init(&j->buf_lock);
	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
		return -BCH_ERR_ENOMEM_journal_pin_fifo;

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data)
			return -BCH_ERR_ENOMEM_journal_buf;
		j->buf[i].idx = i;
	}

	j->pin.front = j->pin.back = 1;

	j->wq = alloc_workqueue("bcachefs_journal",
				WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
	if (!j->wq)
		return -BCH_ERR_ENOMEM_fs_other_alloc;
	return 0;
}
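/*
 * Error handling note: bch2_fs_journal_exit() is safe to call after a
 * partially failed init - kvfree()/free_fifo() tolerate NULL, and the
 * workqueue is only destroyed if it was actually allocated.
 */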
/* debug: */

static const char * const bch2_journal_flags_strs[] = {
#define x(n)	#n,
	JOURNAL_FLAGS()
#undef x
	NULL
};

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	unsigned long now = jiffies;
	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 28);
	out->atomic++;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	prt_printf(out, "flags:\t");
	prt_bitflags(out, bch2_journal_flags_strs, j->flags);
	prt_newline(out);
	prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
	prt_printf(out, "seq:\t%llu\n", journal_cur_seq(j));
	prt_printf(out, "seq_ondisk:\t%llu\n", j->seq_ondisk);
	prt_printf(out, "last_seq:\t%llu\n", journal_last_seq(j));
	prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
	prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
	prt_printf(out, "watermark:\t%s\n", bch2_watermarks[j->watermark]);
	prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
	prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
	prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
	prt_printf(out, "average write size:\t");
	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
	prt_newline(out);
	prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
	prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
	prt_printf(out, "reclaim kicked:\t%u\n", j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
		   ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	prt_printf(out, "blocked:\t%u\n", j->blocked);
	prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
	prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
	prt_printf(out, "current entry:\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		prt_printf(out, "error\n");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		prt_printf(out, "closed\n");
		break;
	case JOURNAL_ENTRY_BLOCKED_VAL:
		prt_printf(out, "blocked\n");
		break;
	default:
		prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	prt_printf(out, "unwritten entries:\n");
	bch2_journal_bufs_to_text(out, j);

	prt_printf(out, "space:\n");
	printbuf_indent_add(out, 2);
	prt_printf(out, "discarded\t%u:%u\n",
		   j->space[journal_space_discarded].next_entry,
		   j->space[journal_space_discarded].total);
	prt_printf(out, "clean ondisk\t%u:%u\n",
		   j->space[journal_space_clean_ondisk].next_entry,
		   j->space[journal_space_clean_ondisk].total);
	prt_printf(out, "clean\t%u:%u\n",
		   j->space[journal_space_clean].next_entry,
		   j->space[journal_space_clean].total);
	prt_printf(out, "total\t%u:%u\n",
		   j->space[journal_space_total].next_entry,
		   j->space[journal_space_total].total);
	printbuf_indent_sub(out, 2);

	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->mi.durability)
			continue;

		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		prt_printf(out, "dev %u:\n",		ca->dev_idx);
		prt_printf(out, "durability %u:\n",	ca->mi.durability);
		printbuf_indent_add(out, 2);
		prt_printf(out, "nr\t%u\n",		ja->nr);
		prt_printf(out, "bucket size\t%u\n",	ca->mi.bucket_size);
		prt_printf(out, "available\t%u:%u\n",	bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		prt_printf(out, "discard_idx\t%u\n",	ja->discard_idx);
		prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
		prt_printf(out, "dirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
		prt_printf(out, "cur_idx\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
		printbuf_indent_sub(out, 2);
	}

	prt_printf(out, "replicas want %u need %u\n", c->opts.metadata_replicas, c->opts.metadata_replicas_required);

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}
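/*
 * Iteration protocol, as used by bch2_journal_pins_to_text() below: *seq is an
 * in/out cursor - each call clamps it to the live pin range, prints that one
 * pin list, and returns false; it returns true once the cursor has moved past
 * the newest pinned entry (or the journal isn't running).
 */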
bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;

	spin_lock(&j->lock);
	if (!test_bit(JOURNAL_running, &j->flags)) {
		spin_unlock(&j->lock);
		return true;
	}

	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
	printbuf_indent_add(out, 2);

	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
		list_for_each_entry(pin, &pin_list->list[i], list)
			prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	if (!list_empty(&pin_list->flushed))
		prt_printf(out, "flushed:\n");

	list_for_each_entry(pin, &pin_list->flushed, list)
		prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}