// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "sb-members.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>

/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}

unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}

void bch2_journal_set_watermark(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool low_on_space = j->space[journal_space_clean].total * 4 <=
		j->space[journal_space_total].total;
	bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
	bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
	unsigned watermark = low_on_space || low_on_pin || low_on_wb
		? BCH_WATERMARK_reclaim
		: BCH_WATERMARK_stripe;

	if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
	    track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
	    track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
		trace_and_count(c, journal_full, c);

	mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);

	swap(watermark, j->watermark);
	if (watermark > j->watermark)
		journal_wake(j);
}

static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
			    enum journal_space_from from)
{
	struct journal_device *ja = &ca->journal;
	unsigned sectors, buckets, unwritten;
	u64 seq;

	if (from == journal_space_total)
		return (struct journal_space) {
			.next_entry	= ca->mi.bucket_size,
			.total		= ca->mi.bucket_size * ja->nr,
		};

	buckets = bch2_journal_dev_buckets_available(j, ja, from);
	sectors = ja->sectors_free;

	/*
	 * Note that we don't allocate the space for a journal entry
	 * until we write it out - thus, account for it here:
	 */
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

		if (!unwritten)
			continue;

		/* entry won't fit on this device, skip: */
		if (unwritten > ca->mi.bucket_size)
			continue;

		if (unwritten >= sectors) {
			if (!buckets) {
				sectors = 0;
				break;
			}

			buckets--;
			sectors = ca->mi.bucket_size;
		}

		sectors -= unwritten;
	}

	if (sectors < ca->mi.bucket_size && buckets) {
		buckets--;
		sectors = ca->mi.bucket_size;
	}

	return (struct journal_space) {
		.next_entry	= sectors,
		.total		= sectors + buckets * ca->mi.bucket_size,
	};
}
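/*
 * Illustrative example of the ring-buffer arithmetic above (values are
 * hypothetical): with ja->nr = 8, ja->cur_idx = 2 and ja->discard_idx = 7,
 * journal_space_from() returns 7 and (7 - 2 - 1 + 8) % 8 = 4 buckets are
 * available from journal_space_discarded; if dirty_idx_ondisk == dirty_idx,
 * one of those is held back, leaving 3.
 */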
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
			    enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned pos, nr_devs = 0;
	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->journal.nr)
			continue;

		space = journal_dev_space_available(j, ca, from);
		if (!space.next_entry)
			continue;

		for (pos = 0; pos < nr_devs; pos++)
			if (space.total > dev_space[pos].total)
				break;

		array_insert_item(dev_space, nr_devs, pos, space);
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	/*
	 * We sorted largest to smallest, and we want the smallest out of the
	 * @nr_devs_want largest devices:
	 */
	return dev_space[nr_devs_want - 1];
}

void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned clean, clean_ondisk, total;
	unsigned max_entry_size	= min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		if (ja->discard_idx != ja->dirty_idx_ondisk)
			can_discard = true;

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < metadata_replicas_required(c)) {
		struct printbuf buf = PRINTBUF;
		prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
			   "rw journal devs:", nr_online, metadata_replicas_required(c));

		rcu_read_lock();
		for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
			prt_printf(&buf, " %s", ca->name);
		rcu_read_unlock();

		bch_err(c, "%s", buf.buf);
		printbuf_exit(&buf);
		ret = JOURNAL_ERR_insufficient_devices;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	for (unsigned i = 0; i < journal_space_nr; i++)
		j->space[i] = __journal_space_available(j, nr_devs_want, i);

	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
	clean		= j->space[journal_space_clean].total;
	total		= j->space[journal_space_total].total;

	if (!j->space[journal_space_discarded].next_entry)
		ret = JOURNAL_ERR_journal_full;

	if ((j->space[journal_space_clean_ondisk].next_entry <
	     j->space[journal_space_clean_ondisk].total) &&
	    (clean - clean_ondisk <= total / 8) &&
	    (clean_ondisk * 2 > clean))
		set_bit(JOURNAL_may_skip_flush, &j->flags);
	else
		clear_bit(JOURNAL_may_skip_flush, &j->flags);

	bch2_journal_set_watermark(j);
out:
	j->cur_entry_sectors	= !ret ? j->space[journal_space_discarded].next_entry : 0;
	j->cur_entry_error	= ret;

	if (!ret)
		journal_wake(j);
}
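/*
 * Illustrative example of the JOURNAL_may_skip_flush heuristic above (numbers
 * hypothetical): with total = 4096, clean = 640 and clean_ondisk = 600, we
 * have clean - clean_ondisk = 40 <= 4096 / 8 and clean_ondisk * 2 = 1200 >
 * 640, so the flag is set - roughly, the on-disk last_seq is close enough to
 * the in-memory one that journal writes may skip being flush writes.
 */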
/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->discard_idx != ja->dirty_idx_ondisk;
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	mutex_lock(&j->discard_lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (!c->opts.nochanges &&
			    ca->mi.discard &&
			    bdev_max_discard_sectors(ca->disk_sb.bdev))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOFS);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

void bch2_journal_reclaim_fast(struct journal *j)
{
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       j->pin.front <= j->seq_ondisk &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		j->pin.front++;
		popped = true;
	}

	if (popped)
		bch2_journal_space_available(j);
}

bool __bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	return atomic_dec_and_test(&pin_list->count);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	if (__bch2_journal_pin_put(j, seq)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}

static inline bool __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return false;

	if (j->flush_in_progress == pin)
		j->flush_in_progress_dropped = true;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	return atomic_dec_and_test(&pin_list->count) &&
		pin_list == &fifo_peek_front(&j->pin);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	if (__journal_pin_drop(j, pin))
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);
}

static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
	if (fn == bch2_btree_node_flush0 ||
	    fn == bch2_btree_node_flush1)
		return JOURNAL_PIN_btree;
	else if (fn == bch2_btree_key_cache_journal_flush)
		return JOURNAL_PIN_key_cache;
	else
		return JOURNAL_PIN_other;
}
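/*
 * For example, a dirty btree node arms its journal pin with
 * bch2_btree_node_flush0/1 and is classed as JOURNAL_PIN_btree, while key
 * cache entries use bch2_btree_key_cache_journal_flush and are classed as
 * JOURNAL_PIN_key_cache; journal_flush_done() below relies on this to flush
 * key cache and other pins before btree node pins.
 */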
static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn,
			  enum journal_pin_type type)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	/*
	 * flush_fn is how we identify journal pins in debugfs, so must always
	 * exist, even if it doesn't do anything:
	 */
	BUG_ON(!flush_fn);

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;
	list_add(&pin->list, &pin_list->list[type]);
}

void bch2_journal_pin_copy(struct journal *j,
			   struct journal_entry_pin *dst,
			   struct journal_entry_pin *src,
			   journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	u64 seq = READ_ONCE(src->seq);

	if (seq < journal_last_seq(j)) {
		/*
		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
		 * the src pin - with the pin dropped, the entry to pin might no
		 * longer exist, but that means there's no longer anything to
		 * copy and we can bail out here:
		 */
		spin_unlock(&j->lock);
		return;
	}

	bool reclaim = __journal_pin_drop(j, dst);

	bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);
	spin_unlock(&j->lock);
}

void bch2_journal_pin_set(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	BUG_ON(seq < journal_last_seq(j));

	bool reclaim = __journal_pin_drop(j, pin);

	bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);
	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);

	spin_unlock(&j->lock);
}

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 * @j: journal object
 * @pin: pin to flush
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
		     u64 seq_to_flush,
		     unsigned allowed_below_seq,
		     unsigned allowed_above_seq,
		     u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;
	unsigned i;

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
		if (*seq > seq_to_flush && !allowed_above_seq)
			break;

		for (i = 0; i < JOURNAL_PIN_NR; i++)
			if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
			    ((1U << i) & allowed_above_seq)) {
				ret = list_first_entry_or_null(&pin_list->list[i],
					struct journal_entry_pin, list);
				if (ret)
					return ret;
			}
	}

	return NULL;
}
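/*
 * Example usage (illustrative): passing allowed_below_seq =
 * 1U << JOURNAL_PIN_key_cache and allowed_above_seq = 0 returns only key
 * cache pins at sequence numbers <= seq_to_flush; a nonzero
 * allowed_above_seq additionally lets matching pin types be returned past
 * seq_to_flush.
 */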
/* returns the number of pins flushed */
static size_t journal_flush_pins(struct journal *j,
				 u64 seq_to_flush,
				 unsigned allowed_below_seq,
				 unsigned allowed_above_seq,
				 unsigned min_any,
				 unsigned min_key_cache)
{
	struct journal_entry_pin *pin;
	size_t nr_flushed = 0;
	journal_pin_flush_fn flush_fn;
	u64 seq;
	int err;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		unsigned allowed_above = allowed_above_seq;
		unsigned allowed_below = allowed_below_seq;

		if (min_any) {
			allowed_above |= ~0;
			allowed_below |= ~0;
		}

		if (min_key_cache) {
			allowed_above |= 1U << JOURNAL_PIN_key_cache;
			allowed_below |= 1U << JOURNAL_PIN_key_cache;
		}

		cond_resched();

		j->last_flushed = jiffies;

		spin_lock(&j->lock);
		pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
		if (pin) {
			BUG_ON(j->flush_in_progress);
			j->flush_in_progress = pin;
			j->flush_in_progress_dropped = false;
			flush_fn = pin->flush;
		}
		spin_unlock(&j->lock);

		if (!pin)
			break;

		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
			min_key_cache--;

		if (min_any)
			min_any--;

		err = flush_fn(j, pin, seq);

		spin_lock(&j->lock);
		/* Pin might have been dropped or rearmed: */
		if (likely(!err && !j->flush_in_progress_dropped))
			list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
		j->flush_in_progress = NULL;
		j->flush_in_progress_dropped = false;
		spin_unlock(&j->lock);

		wake_up(&j->pin_flush_wait);

		if (err)
			break;

		nr_flushed++;
	}

	return nr_flushed;
}

static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 seq_to_flush = 0;

	spin_lock(&j->lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}
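/*
 * Worked example for journal_seq_to_flush() (values hypothetical): with
 * ja->nr = 16 and ja->cur_idx = 5, nr_buckets = 8 and bucket_to_flush =
 * (5 + 8) % 16 = 13, so we target flushing everything up to
 * ja->bucket_seq[13] - reclaiming enough buckets to keep the journal at
 * most half full on that device.
 */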
/**
 * __bch2_journal_reclaim - free up journal buckets
 * @j:		journal object
 * @direct:	direct or background reclaim?
 * @kicked:	requested to run since we last ran?
 * Returns:	0 on success, or -EIO if the journal has been shut down
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush;
	size_t min_nr, min_key_cache, nr_flushed;
	unsigned flags;
	int ret = 0;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		if (bch2_journal_error(j)) {
			ret = -EIO;
			break;
		}

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than journal_reclaim_delay since we last
		 * flushed, make sure to flush at least one journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
			min_nr = 1;

		if (j->watermark != BCH_WATERMARK_stripe)
			min_nr = 1;

		if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
			min_nr = 1;

		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

		trace_and_count(c, journal_reclaim_start, c,
				direct, kicked,
				min_nr, min_key_cache,
				atomic_read(&c->btree_cache.dirty),
				c->btree_cache.used,
				atomic_long_read(&c->btree_key_cache.nr_dirty),
				atomic_long_read(&c->btree_key_cache.nr_keys));

		nr_flushed = journal_flush_pins(j, seq_to_flush,
						~0, 0,
						min_nr, min_key_cache);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

		if (nr_flushed)
			wake_up(&j->reclaim_wait);
	} while ((min_nr || min_key_cache) && nr_flushed && !direct);

	memalloc_noreclaim_restore(flags);

	return ret;
}

int bch2_journal_reclaim(struct journal *j)
{
	return __bch2_journal_reclaim(j, true, true);
}

static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned long delay, now;
	bool journal_empty;
	int ret = 0;

	set_freezable();

	j->last_flushed = jiffies;

	while (!ret && !kthread_should_stop()) {
		bool kicked = j->reclaim_kicked;

		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		ret = __bch2_journal_reclaim(j, false, kicked);
		mutex_unlock(&j->reclaim_lock);

		now = jiffies;
		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
		j->next_reclaim = j->last_flushed + delay;

		if (!time_in_range(j->next_reclaim, now, now + delay))
			j->next_reclaim = now + delay;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;

			spin_lock(&j->lock);
			journal_empty = fifo_empty(&j->pin);
			spin_unlock(&j->lock);

			if (journal_empty)
				schedule();
			else if (time_after(j->next_reclaim, jiffies))
				schedule_timeout(j->next_reclaim - jiffies);
			else
				break;
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}
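/*
 * Illustrative timing note for the thread above: if the pin fifo is
 * non-empty, the thread sleeps until j->next_reclaim (last_flushed +
 * journal_reclaim_delay, clamped to at most one delay period from now)
 * unless it is kicked first; if the fifo is empty, it sleeps until kicked
 * or stopped.
 */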
void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;
	int ret;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating journal reclaim thread");
	if (ret)
		return ret;

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}

static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

	if (journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_key_cache)|
			       (1U << JOURNAL_PIN_other), 0, 0, 0) ||
	    journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_btree), 0, 0, 0))
		*did_work = true;

	if (seq_to_flush > journal_cur_seq(j))
		bch2_journal_entry_close(j);

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_replay_done, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		!fifo_used(&j->pin);

	spin_unlock(&j->lock);
	mutex_unlock(&j->reclaim_lock);

	return ret;
}

bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	/* time_stats this */
	bool did_work = false;

	if (!test_bit(JOURNAL_running, &j->flags))
		return false;

	closure_wait_event(&j->async_wait,
		journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}

int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	/*
	 * Now that we've populated replicas_gc, write to the journal to mark
	 * active journal devices. This handles the case where the journal
	 * might be empty. Otherwise we could clear all journal replicas and
	 * temporarily put the fs into an unrecoverable state. Journal recovery
	 * expects to find devices marked for journal data on unclean mount.
	 */
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;
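	/*
	 * Walk every sequence number that is still pinned and mark its device
	 * list as holding journal data, so replicas_gc keeps those entries.
	 * For example (hypothetical), if seqs 100..110 are pinned on devices
	 * {0,1}, a BCH_DATA_journal replicas entry for {0,1} gets marked.
	 */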
	seq = 0;
	spin_lock(&j->lock);
	while (!ret) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		if (seq >= j->pin.back)
			break;
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		if (replicas.e.nr_devs) {
			spin_unlock(&j->lock);
			ret = bch2_mark_replicas(c, &replicas.e);
			spin_lock(&j->lock);
		}
	}
	spin_unlock(&j->lock);
err:
	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}