// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "sb-members.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>

/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}

unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	if (!ja->nr)
		return 0;

	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}

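/*
 * Pick the journal watermark from how much clean space remains: when we're
 * low on space, low on pin fifo entries, or the btree write buffer needs
 * flushing, only BCH_WATERMARK_reclaim operations may take journal
 * reservations.
 */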
void bch2_journal_set_watermark(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool low_on_space = j->space[journal_space_clean].total * 4 <=
		j->space[journal_space_total].total;
	bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
	bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
	unsigned watermark = low_on_space || low_on_pin || low_on_wb
		? BCH_WATERMARK_reclaim
		: BCH_WATERMARK_stripe;

	if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
	    track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
	    track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
		trace_and_count(c, journal_full, c);

	mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);

	swap(watermark, j->watermark);
	if (watermark > j->watermark)
		journal_wake(j);
}

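/*
 * Space available on a single device, as seen from the given
 * journal_space_from viewpoint (discarded/clean_ondisk/clean), accounting
 * for journal entries that have been allocated but not yet written:
 */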
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
			    enum journal_space_from from)
{
	struct journal_device *ja = &ca->journal;
	unsigned sectors, buckets, unwritten;
	u64 seq;

	if (from == journal_space_total)
		return (struct journal_space) {
			.next_entry	= ca->mi.bucket_size,
			.total		= ca->mi.bucket_size * ja->nr,
		};

	buckets = bch2_journal_dev_buckets_available(j, ja, from);
	sectors = ja->sectors_free;

	/*
	 * Note that we don't allocate the space for a journal entry
	 * until we write it out - thus, account for it here:
	 */
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

		if (!unwritten)
			continue;

		/* entry won't fit on this device, skip: */
		if (unwritten > ca->mi.bucket_size)
			continue;

		if (unwritten >= sectors) {
			if (!buckets) {
				sectors = 0;
				break;
			}

			buckets--;
			sectors = ca->mi.bucket_size;
		}

		sectors -= unwritten;
	}

	if (sectors < ca->mi.bucket_size && buckets) {
		buckets--;
		sectors = ca->mi.bucket_size;
	}

	return (struct journal_space) {
		.next_entry	= sectors,
		.total		= sectors + buckets * ca->mi.bucket_size,
	};
}

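/*
 * Space available if we're to write to @nr_devs_want devices: zero if fewer
 * than that many devices currently have room for a journal entry.
 */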
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
						      enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned pos, nr_devs = 0;
	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
	unsigned min_bucket_size = U32_MAX;

	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->journal.nr ||
		    !ca->mi.durability)
			continue;

		min_bucket_size = min(min_bucket_size, ca->mi.bucket_size);

		space = journal_dev_space_available(j, ca, from);
		if (!space.next_entry)
			continue;

		for (pos = 0; pos < nr_devs; pos++)
			if (space.total > dev_space[pos].total)
				break;

		array_insert_item(dev_space, nr_devs, pos, space);
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	/*
	 * We sorted largest to smallest, and we want the smallest out of the
	 * @nr_devs_want largest devices:
	 */
	space = dev_space[nr_devs_want - 1];
	space.next_entry = min(space.next_entry, min_bucket_size);
	return space;
}

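/*
 * Recalculate j->space[] and the size of the next journal entry we can write
 * (j->cur_entry_sectors); called with j->lock held whenever journal space may
 * have changed.
 */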
void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned clean, clean_ondisk, total;
	unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		if (ja->discard_idx != ja->dirty_idx_ondisk)
			can_discard = true;

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < metadata_replicas_required(c)) {
		struct printbuf buf = PRINTBUF;
		buf.atomic++;
		prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
			   "rw journal devs:", nr_online, metadata_replicas_required(c));

		rcu_read_lock();
		for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
			prt_printf(&buf, " %s", ca->name);
		rcu_read_unlock();

		bch_err(c, "%s", buf.buf);
		printbuf_exit(&buf);
		ret = JOURNAL_ERR_insufficient_devices;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	for (unsigned i = 0; i < journal_space_nr; i++)
		j->space[i] = __journal_space_available(j, nr_devs_want, i);

	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
	clean		= j->space[journal_space_clean].total;
	total		= j->space[journal_space_total].total;

	if (!j->space[journal_space_discarded].next_entry)
		ret = JOURNAL_ERR_journal_full;

	if ((j->space[journal_space_clean_ondisk].next_entry <
	     j->space[journal_space_clean_ondisk].total) &&
	    (clean - clean_ondisk <= total / 8) &&
	    (clean_ondisk * 2 > clean))
		set_bit(JOURNAL_may_skip_flush, &j->flags);
	else
		clear_bit(JOURNAL_may_skip_flush, &j->flags);

	bch2_journal_set_watermark(j);
out:
	j->cur_entry_sectors	= !ret ? j->space[journal_space_discarded].next_entry : 0;
	j->cur_entry_error	= ret;

	if (!ret)
		journal_wake(j);
}

/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->discard_idx != ja->dirty_idx_ondisk;
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	mutex_lock(&j->discard_lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (!c->opts.nochanges &&
			    ca->mi.discard &&
			    bdev_max_discard_sectors(ca->disk_sb.bdev))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOFS);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

void bch2_journal_reclaim_fast(struct journal *j)
{
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       j->pin.front <= j->seq_ondisk &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		j->pin.front++;
		popped = true;
	}

	if (popped) {
		bch2_journal_space_available(j);
		__closure_wake_up(&j->reclaim_flush_wait);
	}
}

bool __bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	return atomic_dec_and_test(&pin_list->count);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	if (__bch2_journal_pin_put(j, seq)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}

static inline bool __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return false;

	if (j->flush_in_progress == pin)
		j->flush_in_progress_dropped = true;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	if (j->reclaim_flush_wait.list.first)
		__closure_wake_up(&j->reclaim_flush_wait);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	return atomic_dec_and_test(&pin_list->count) &&
		pin_list == &fifo_peek_front(&j->pin);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	if (__journal_pin_drop(j, pin))
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);
}

static enum journal_pin_type journal_pin_type(struct journal_entry_pin *pin,
					      journal_pin_flush_fn fn)
{
	if (fn == bch2_btree_node_flush0 ||
	    fn == bch2_btree_node_flush1) {
		unsigned idx = fn == bch2_btree_node_flush1;
		struct btree *b = container_of(pin, struct btree, writes[idx].journal);

		return JOURNAL_PIN_TYPE_btree0 - b->c.level;
	} else if (fn == bch2_btree_key_cache_journal_flush)
		return JOURNAL_PIN_TYPE_key_cache;
	else
		return JOURNAL_PIN_TYPE_other;
}

static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
					       struct journal_entry_pin *pin,
					       journal_pin_flush_fn flush_fn,
					       enum journal_pin_type type)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	/*
	 * flush_fn is how we identify journal pins in debugfs, so must always
	 * exist, even if it doesn't do anything:
	 */
	BUG_ON(!flush_fn);

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;

	if (list_empty(&pin_list->unflushed[type]) &&
	    j->reclaim_flush_wait.list.first)
		__closure_wake_up(&j->reclaim_flush_wait);

	list_add(&pin->list, &pin_list->unflushed[type]);
}

void bch2_journal_pin_copy(struct journal *j,
			   struct journal_entry_pin *dst,
			   struct journal_entry_pin *src,
			   journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	u64 seq = READ_ONCE(src->seq);

	if (seq < journal_last_seq(j)) {
		/*
		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
		 * the src pin - with the pin dropped, the entry to pin might no
		 * longer exist, but that means there's no longer anything to
		 * copy and we can bail out here:
		 */
		spin_unlock(&j->lock);
		return;
	}

	bool reclaim = __journal_pin_drop(j, dst);

	bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(dst, flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);
	spin_unlock(&j->lock);
}

void bch2_journal_pin_set(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	BUG_ON(seq < journal_last_seq(j));

	bool reclaim = __journal_pin_drop(j, pin);

	bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(pin, flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);
	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);

	spin_unlock(&j->lock);
}

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 * @j: journal object
 * @pin: pin to flush
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

/*
 * Journal reclaim: flush references to open journal entries to reclaim space in
 * the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

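/*
 * Find the next pin to flush: @allowed_below_seq and @allowed_above_seq are
 * bitmasks of JOURNAL_PIN_TYPEs we may flush below and above @seq_to_flush.
 */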
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
		     u64 seq_to_flush,
		     unsigned allowed_below_seq,
		     unsigned allowed_above_seq,
		     u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
		if (*seq > seq_to_flush && !allowed_above_seq)
			break;

		for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++)
			if (((BIT(i) & allowed_below_seq) && *seq <= seq_to_flush) ||
			    (BIT(i) & allowed_above_seq)) {
				ret = list_first_entry_or_null(&pin_list->unflushed[i],
					struct journal_entry_pin, list);
				if (ret)
					return ret;
			}
	}

	return NULL;
}

/* returns the number of journal pins flushed */
static size_t journal_flush_pins(struct journal *j,
				 u64 seq_to_flush,
				 unsigned allowed_below_seq,
				 unsigned allowed_above_seq,
				 unsigned min_any,
				 unsigned min_key_cache)
{
	struct journal_entry_pin *pin;
	size_t nr_flushed = 0;
	journal_pin_flush_fn flush_fn;
	u64 seq;
	int err;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		unsigned allowed_above = allowed_above_seq;
		unsigned allowed_below = allowed_below_seq;

		if (min_any) {
			allowed_above |= ~0;
			allowed_below |= ~0;
		}

		if (min_key_cache) {
			allowed_above |= BIT(JOURNAL_PIN_TYPE_key_cache);
			allowed_below |= BIT(JOURNAL_PIN_TYPE_key_cache);
		}

		cond_resched();

		j->last_flushed = jiffies;

		spin_lock(&j->lock);
		pin = journal_get_next_pin(j, seq_to_flush,
					   allowed_below,
					   allowed_above, &seq);
		if (pin) {
			BUG_ON(j->flush_in_progress);
			j->flush_in_progress = pin;
			j->flush_in_progress_dropped = false;
			flush_fn = pin->flush;
		}
		spin_unlock(&j->lock);

		if (!pin)
			break;

		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
			min_key_cache--;

		if (min_any)
			min_any--;

		err = flush_fn(j, pin, seq);

		spin_lock(&j->lock);
		/* Pin might have been dropped or rearmed: */
		if (likely(!err && !j->flush_in_progress_dropped))
			list_move(&pin->list,
				  &journal_seq_pin(j, seq)->flushed[journal_pin_type(pin, flush_fn)]);
		j->flush_in_progress = NULL;
		j->flush_in_progress_dropped = false;
		spin_unlock(&j->lock);

		wake_up(&j->pin_flush_wait);

		if (err)
			break;

		nr_flushed++;
	}

	return nr_flushed;
}

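/*
 * Pick a target sequence number for reclaim: aim to keep each device's
 * journal no more than half full, and the pin fifo no more than half full.
 */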
static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 seq_to_flush = 0;

	spin_lock(&j->lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}

/**
 * __bch2_journal_reclaim - free up journal buckets
 * @j: journal object
 * @direct: direct or background reclaim?
 * @kicked: requested to run since we last ran?
 * Returns: 0 on success, or -EIO if the journal has been shut down
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_cache *bc = &c->btree_cache;
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush;
	size_t min_nr, min_key_cache, nr_flushed;
	unsigned flags;
	int ret = 0;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		if (bch2_journal_error(j)) {
			ret = -EIO;
			break;
		}

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than journal_reclaim_delay since we last
		 * flushed, make sure to flush at least one journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
			min_nr = 1;

		if (j->watermark != BCH_WATERMARK_stripe)
			min_nr = 1;

		size_t btree_cache_live = bc->live[0].nr + bc->live[1].nr;
		if (atomic_long_read(&bc->nr_dirty) * 2 > btree_cache_live)
			min_nr = 1;

		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

		trace_and_count(c, journal_reclaim_start, c,
				direct, kicked,
				min_nr, min_key_cache,
				atomic_long_read(&bc->nr_dirty), btree_cache_live,
				atomic_long_read(&c->btree_key_cache.nr_dirty),
				atomic_long_read(&c->btree_key_cache.nr_keys));

		nr_flushed = journal_flush_pins(j, seq_to_flush,
						~0, 0,
						min_nr, min_key_cache);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

		if (nr_flushed)
			wake_up(&j->reclaim_wait);
	} while ((min_nr || min_key_cache) && nr_flushed && !direct);

	memalloc_noreclaim_restore(flags);

	return ret;
}

int bch2_journal_reclaim(struct journal *j)
{
	return __bch2_journal_reclaim(j, true, true);
}

static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned long delay, now;
	bool journal_empty;
	int ret = 0;

	set_freezable();

	j->last_flushed = jiffies;

	while (!ret && !kthread_should_stop()) {
		bool kicked = j->reclaim_kicked;

		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		ret = __bch2_journal_reclaim(j, false, kicked);
		mutex_unlock(&j->reclaim_lock);

		now = jiffies;
		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
		j->next_reclaim = j->last_flushed + delay;

		if (!time_in_range(j->next_reclaim, now, now + delay))
			j->next_reclaim = now + delay;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;

			spin_lock(&j->lock);
			journal_empty = fifo_empty(&j->pin);
			spin_unlock(&j->lock);

			long timeout = j->next_reclaim - jiffies;

			if (journal_empty)
				schedule();
			else if (timeout > 0)
				schedule_timeout(timeout);
			else
				break;
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}

void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;
	int ret;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating journal reclaim thread");
	if (ret)
		return ret;

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}

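/*
 * Returns true if pins of the given types at or before @seq_to_flush are
 * still outstanding - either not yet flushed, or flushed but not yet
 * released:
 */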
static bool journal_pins_still_flushing(struct journal *j, u64 seq_to_flush,
					unsigned types)
{
	struct journal_entry_pin_list *pin_list;
	u64 seq;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, seq) {
		if (seq > seq_to_flush)
			break;

		for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++)
			if ((BIT(i) & types) &&
			    (!list_empty(&pin_list->unflushed[i]) ||
			     !list_empty(&pin_list->flushed[i]))) {
				spin_unlock(&j->lock);
				return true;
			}
	}
	spin_unlock(&j->lock);

	return false;
}

static bool journal_flush_pins_or_still_flushing(struct journal *j, u64 seq_to_flush,
						 unsigned types)
{
	return journal_flush_pins(j, seq_to_flush, types, 0, 0, 0) ||
		journal_pins_still_flushing(j, seq_to_flush, types);
}

static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret = 0;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

	for (int type = JOURNAL_PIN_TYPE_NR - 1;
	     type >= 0;
	     --type)
		if (journal_flush_pins_or_still_flushing(j, seq_to_flush, BIT(type))) {
			*did_work = true;
			goto unlock;
		}

	if (seq_to_flush > journal_cur_seq(j))
		bch2_journal_entry_close(j);

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_replay_done, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		!fifo_used(&j->pin);

	spin_unlock(&j->lock);
unlock:
	mutex_unlock(&j->reclaim_lock);

	return ret;
}

bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	/* time_stats this */
	bool did_work = false;

	if (!test_bit(JOURNAL_running, &j->flags))
		return false;

	closure_wait_event(&j->reclaim_flush_wait,
			   journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}

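/*
 * Flush all journal pins referencing @dev_idx - or, if @dev_idx is negative,
 * all pins whose entries have fewer than metadata_replicas copies - then
 * update the journal replicas entries to match what is still dirty.
 */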
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	/*
	 * Now that we've populated replicas_gc, write to the journal to mark
	 * active journal devices. This handles the case where the journal might
	 * be empty. Otherwise we could clear all journal replicas and
	 * temporarily put the fs into an unrecoverable state. Journal recovery
	 * expects to find devices marked for journal data on unclean mount.
	 */
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	seq = 0;
	spin_lock(&j->lock);
	while (!ret) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		if (seq >= j->pin.back)
			break;
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		if (replicas.e.nr_devs) {
			spin_unlock(&j->lock);
			ret = bch2_mark_replicas(c, &replicas.e);
			spin_lock(&j->lock);
		}
	}
	spin_unlock(&j->lock);
err:
	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;

	spin_lock(&j->lock);
	if (!test_bit(JOURNAL_running, &j->flags)) {
		spin_unlock(&j->lock);
		return true;
	}

	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
	printbuf_indent_add(out, 2);

	prt_printf(out, "unflushed:\n");
	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->unflushed); i++)
		list_for_each_entry(pin, &pin_list->unflushed[i], list)
			prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	prt_printf(out, "flushed:\n");
	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->flushed); i++)
		list_for_each_entry(pin, &pin_list->flushed[i], list)
			prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}