xref: /linux/fs/bcachefs/journal_reclaim.c (revision f96a974170b749e3a56844e25b31d46a7233b6f6)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "btree_key_cache.h"
5 #include "btree_update.h"
6 #include "btree_write_buffer.h"
7 #include "buckets.h"
8 #include "errcode.h"
9 #include "error.h"
10 #include "journal.h"
11 #include "journal_io.h"
12 #include "journal_reclaim.h"
13 #include "replicas.h"
14 #include "sb-members.h"
15 #include "trace.h"
16 
17 #include <linux/kthread.h>
18 #include <linux/sched/mm.h>
19 
20 /* Free space calculations: */
21 
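/*
 * Translate the requested notion of "free" (discarded, clean on disk, or
 * just clean) into the corresponding index into the device's circular
 * array of journal buckets:
 */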
22 static unsigned journal_space_from(struct journal_device *ja,
23 				   enum journal_space_from from)
24 {
25 	switch (from) {
26 	case journal_space_discarded:
27 		return ja->discard_idx;
28 	case journal_space_clean_ondisk:
29 		return ja->dirty_idx_ondisk;
30 	case journal_space_clean:
31 		return ja->dirty_idx;
32 	default:
33 		BUG();
34 	}
35 }
36 
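/*
 * How many journal buckets on this device are still available for new
 * journal writes, under the given notion of "free" (discarded, clean on
 * disk, or just clean):
 */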
37 unsigned bch2_journal_dev_buckets_available(struct journal *j,
38 					    struct journal_device *ja,
39 					    enum journal_space_from from)
40 {
41 	if (!ja->nr)
42 		return 0;
43 
44 	unsigned available = (journal_space_from(ja, from) -
45 			      ja->cur_idx - 1 + ja->nr) % ja->nr;
46 
47 	/*
48 	 * Don't use the last bucket unless writing the new last_seq
49 	 * will make another bucket available:
50 	 */
51 	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
52 		--available;
53 
54 	return available;
55 }
56 
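/*
 * Recalculate the journal watermark: if we're low on space, low on pin
 * fifo entries, or the btree write buffer needs flushing, raise the
 * watermark to BCH_WATERMARK_reclaim so that reclaim-related work takes
 * priority for new journal reservations.
 */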
57 void bch2_journal_set_watermark(struct journal *j)
58 {
59 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
60 	bool low_on_space = j->space[journal_space_clean].total * 4 <=
61 		j->space[journal_space_total].total;
62 	bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
63 	bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
64 	unsigned watermark = low_on_space || low_on_pin || low_on_wb
65 		? BCH_WATERMARK_reclaim
66 		: BCH_WATERMARK_stripe;
67 
68 	if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
69 	    track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
70 	    track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
71 		trace_and_count(c, journal_full, c);
72 
73 	mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);
74 
75 	swap(watermark, j->watermark);
76 	if (watermark > j->watermark)
77 		journal_wake(j);
78 }
79 
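/*
 * Space available on a single device: .next_entry is how many sectors the
 * next journal entry written to this device can use, .total is the overall
 * free space, both after accounting for journal entries that are still
 * unwritten.
 */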
80 static struct journal_space
81 journal_dev_space_available(struct journal *j, struct bch_dev *ca,
82 			    enum journal_space_from from)
83 {
84 	struct journal_device *ja = &ca->journal;
85 	unsigned sectors, buckets, unwritten;
86 	u64 seq;
87 
88 	if (from == journal_space_total)
89 		return (struct journal_space) {
90 			.next_entry	= ca->mi.bucket_size,
91 			.total		= ca->mi.bucket_size * ja->nr,
92 		};
93 
94 	buckets = bch2_journal_dev_buckets_available(j, ja, from);
95 	sectors = ja->sectors_free;
96 
97 	/*
98 	 * Note that we don't allocate the space for a journal entry
99 	 * until we write it out - thus, account for it here:
100 	 */
101 	for (seq = journal_last_unwritten_seq(j);
102 	     seq <= journal_cur_seq(j);
103 	     seq++) {
104 		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;
105 
106 		if (!unwritten)
107 			continue;
108 
109 		/* entry won't fit on this device, skip: */
110 		if (unwritten > ca->mi.bucket_size)
111 			continue;
112 
113 		if (unwritten >= sectors) {
114 			if (!buckets) {
115 				sectors = 0;
116 				break;
117 			}
118 
119 			buckets--;
120 			sectors = ca->mi.bucket_size;
121 		}
122 
123 		sectors -= unwritten;
124 	}
125 
126 	if (sectors < ca->mi.bucket_size && buckets) {
127 		buckets--;
128 		sectors = ca->mi.bucket_size;
129 	}
130 
131 	return (struct journal_space) {
132 		.next_entry	= sectors,
133 		.total		= sectors + buckets * ca->mi.bucket_size,
134 	};
135 }
136 
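/*
 * Space available for a journal entry that must be written to
 * @nr_devs_want devices: returns zero space if fewer than that many
 * devices are usable.
 */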
137 static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
138 			    enum journal_space_from from)
139 {
140 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
141 	unsigned pos, nr_devs = 0;
142 	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
143 	unsigned min_bucket_size = U32_MAX;
144 
145 	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
146 
147 	rcu_read_lock();
148 	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
149 		if (!ca->journal.nr ||
150 		    !ca->mi.durability)
151 			continue;
152 
153 		min_bucket_size = min(min_bucket_size, ca->mi.bucket_size);
154 
155 		space = journal_dev_space_available(j, ca, from);
156 		if (!space.next_entry)
157 			continue;
158 
159 		for (pos = 0; pos < nr_devs; pos++)
160 			if (space.total > dev_space[pos].total)
161 				break;
162 
163 		array_insert_item(dev_space, nr_devs, pos, space);
164 	}
165 	rcu_read_unlock();
166 
167 	if (nr_devs < nr_devs_want)
168 		return (struct journal_space) { 0, 0 };
169 
170 	/*
171 	 * We sorted largest to smallest, and we want the smallest out of the
172 	 * @nr_devs_want largest devices:
173 	 */
174 	space = dev_space[nr_devs_want - 1];
175 	space.next_entry = min(space.next_entry, min_bucket_size);
176 	return space;
177 }
178 
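/*
 * Recalculate j->space[] and the size of the next journal entry we can
 * write (j->cur_entry_sectors), advancing each device's dirty indexes
 * first. Must be called with j->lock held.
 */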
179 void bch2_journal_space_available(struct journal *j)
180 {
181 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
182 	unsigned clean, clean_ondisk, total;
183 	unsigned max_entry_size	 = min(j->buf[0].buf_size >> 9,
184 				       j->buf[1].buf_size >> 9);
185 	unsigned nr_online = 0, nr_devs_want;
186 	bool can_discard = false;
187 	int ret = 0;
188 
189 	lockdep_assert_held(&j->lock);
190 
191 	rcu_read_lock();
192 	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
193 		struct journal_device *ja = &ca->journal;
194 
195 		if (!ja->nr)
196 			continue;
197 
198 		while (ja->dirty_idx != ja->cur_idx &&
199 		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
200 			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
201 
202 		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
203 		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
204 			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
205 
206 		if (ja->discard_idx != ja->dirty_idx_ondisk)
207 			can_discard = true;
208 
209 		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
210 		nr_online++;
211 	}
212 	rcu_read_unlock();
213 
214 	j->can_discard = can_discard;
215 
216 	if (nr_online < metadata_replicas_required(c)) {
217 		struct printbuf buf = PRINTBUF;
218 		buf.atomic++;
219 		prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
220 			   "rw journal devs:", nr_online, metadata_replicas_required(c));
221 
222 		rcu_read_lock();
223 		for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
224 			prt_printf(&buf, " %s", ca->name);
225 		rcu_read_unlock();
226 
227 		bch_err(c, "%s", buf.buf);
228 		printbuf_exit(&buf);
229 		ret = JOURNAL_ERR_insufficient_devices;
230 		goto out;
231 	}
232 
233 	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
234 
235 	for (unsigned i = 0; i < journal_space_nr; i++)
236 		j->space[i] = __journal_space_available(j, nr_devs_want, i);
237 
238 	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
239 	clean		= j->space[journal_space_clean].total;
240 	total		= j->space[journal_space_total].total;
241 
242 	if (!j->space[journal_space_discarded].next_entry)
243 		ret = JOURNAL_ERR_journal_full;
244 
245 	if ((j->space[journal_space_clean_ondisk].next_entry <
246 	     j->space[journal_space_clean_ondisk].total) &&
247 	    (clean - clean_ondisk <= total / 8) &&
248 	    (clean_ondisk * 2 > clean))
249 		set_bit(JOURNAL_may_skip_flush, &j->flags);
250 	else
251 		clear_bit(JOURNAL_may_skip_flush, &j->flags);
252 
253 	bch2_journal_set_watermark(j);
254 out:
255 	j->cur_entry_sectors	= !ret ? j->space[journal_space_discarded].next_entry : 0;
256 	j->cur_entry_error	= ret;
257 
258 	if (!ret)
259 		journal_wake(j);
260 }
261 
262 /* Discards - last part of journal reclaim: */
263 
264 static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
265 {
266 	bool ret;
267 
268 	spin_lock(&j->lock);
269 	ret = ja->discard_idx != ja->dirty_idx_ondisk;
270 	spin_unlock(&j->lock);
271 
272 	return ret;
273 }
274 
275 /*
276  * Advance ja->discard_idx as long as it points to buckets that are no longer
277  * dirty, issuing discards if necessary:
278  */
279 void bch2_journal_do_discards(struct journal *j)
280 {
281 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
282 
283 	mutex_lock(&j->discard_lock);
284 
285 	for_each_rw_member(c, ca) {
286 		struct journal_device *ja = &ca->journal;
287 
288 		while (should_discard_bucket(j, ja)) {
289 			if (!c->opts.nochanges &&
290 			    ca->mi.discard &&
291 			    bdev_max_discard_sectors(ca->disk_sb.bdev))
292 				blkdev_issue_discard(ca->disk_sb.bdev,
293 					bucket_to_sector(ca,
294 						ja->buckets[ja->discard_idx]),
295 					ca->mi.bucket_size, GFP_NOFS);
296 
297 			spin_lock(&j->lock);
298 			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
299 
300 			bch2_journal_space_available(j);
301 			spin_unlock(&j->lock);
302 		}
303 	}
304 
305 	mutex_unlock(&j->discard_lock);
306 }
307 
308 /*
309  * Journal entry pinning - machinery for holding a reference on a given journal
310  * entry, holding it open to ensure it gets replayed during recovery:
311  */
312 
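/*
 * Fast path reclaim: pop journal entries off the front of the pin fifo
 * once their pin counts hit zero and they're on disk, then recalculate the
 * space available. Caller must hold j->lock.
 */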
313 void bch2_journal_reclaim_fast(struct journal *j)
314 {
315 	bool popped = false;
316 
317 	lockdep_assert_held(&j->lock);
318 
319 	/*
320 	 * Unpin journal entries whose reference counts reached zero, meaning
321 	 * all btree nodes got written out
322 	 */
323 	while (!fifo_empty(&j->pin) &&
324 	       j->pin.front <= j->seq_ondisk &&
325 	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
326 		j->pin.front++;
327 		popped = true;
328 	}
329 
330 	if (popped)
331 		bch2_journal_space_available(j);
332 }
333 
334 bool __bch2_journal_pin_put(struct journal *j, u64 seq)
335 {
336 	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
337 
338 	return atomic_dec_and_test(&pin_list->count);
339 }
340 
341 void bch2_journal_pin_put(struct journal *j, u64 seq)
342 {
343 	if (__bch2_journal_pin_put(j, seq)) {
344 		spin_lock(&j->lock);
345 		bch2_journal_reclaim_fast(j);
346 		spin_unlock(&j->lock);
347 	}
348 }
349 
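/*
 * Remove @pin from its pin list: returns true if this dropped the last
 * reference on the entry at the front of the pin fifo, i.e. the caller
 * should follow up with bch2_journal_reclaim_fast().
 */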
350 static inline bool __journal_pin_drop(struct journal *j,
351 				      struct journal_entry_pin *pin)
352 {
353 	struct journal_entry_pin_list *pin_list;
354 
355 	if (!journal_pin_active(pin))
356 		return false;
357 
358 	if (j->flush_in_progress == pin)
359 		j->flush_in_progress_dropped = true;
360 
361 	pin_list = journal_seq_pin(j, pin->seq);
362 	pin->seq = 0;
363 	list_del_init(&pin->list);
364 
365 	/*
366 	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
367 	 * writing a new last_seq will now make another bucket available:
368 	 */
369 	return atomic_dec_and_test(&pin_list->count) &&
370 		pin_list == &fifo_peek_front(&j->pin);
371 }
372 
373 void bch2_journal_pin_drop(struct journal *j,
374 			   struct journal_entry_pin *pin)
375 {
376 	spin_lock(&j->lock);
377 	if (__journal_pin_drop(j, pin))
378 		bch2_journal_reclaim_fast(j);
379 	spin_unlock(&j->lock);
380 }
381 
382 static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
383 {
384 	if (fn == bch2_btree_node_flush0 ||
385 	    fn == bch2_btree_node_flush1)
386 		return JOURNAL_PIN_btree;
387 	else if (fn == bch2_btree_key_cache_journal_flush)
388 		return JOURNAL_PIN_key_cache;
389 	else
390 		return JOURNAL_PIN_other;
391 }
392 
393 static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
394 			  struct journal_entry_pin *pin,
395 			  journal_pin_flush_fn flush_fn,
396 			  enum journal_pin_type type)
397 {
398 	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
399 
400 	/*
401 	 * flush_fn is how we identify journal pins in debugfs, so must always
402 	 * exist, even if it doesn't do anything:
403 	 */
404 	BUG_ON(!flush_fn);
405 
406 	atomic_inc(&pin_list->count);
407 	pin->seq	= seq;
408 	pin->flush	= flush_fn;
409 	list_add(&pin->list, &pin_list->list[type]);
410 }
411 
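/*
 * Pin @dst on the same journal sequence number that @src is pinned on,
 * dropping any pin @dst already holds; a no-op if @src's entry has already
 * been reclaimed.
 */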
412 void bch2_journal_pin_copy(struct journal *j,
413 			   struct journal_entry_pin *dst,
414 			   struct journal_entry_pin *src,
415 			   journal_pin_flush_fn flush_fn)
416 {
417 	spin_lock(&j->lock);
418 
419 	u64 seq = READ_ONCE(src->seq);
420 
421 	if (seq < journal_last_seq(j)) {
422 		/*
423 		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
424 		 * the src pin - with the pin dropped, the entry to pin might no
425 		 * longer exist, but that means there's no longer anything to
426 		 * copy and we can bail out here:
427 		 */
428 		spin_unlock(&j->lock);
429 		return;
430 	}
431 
432 	bool reclaim = __journal_pin_drop(j, dst);
433 
434 	bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));
435 
436 	if (reclaim)
437 		bch2_journal_reclaim_fast(j);
438 
439 	/*
440 	 * If the journal is currently full, we might want to call flush_fn
441 	 * immediately:
442 	 */
443 	if (seq == journal_last_seq(j))
444 		journal_wake(j);
445 	spin_unlock(&j->lock);
446 }
447 
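/*
 * (Re)pin @pin on journal sequence number @seq, dropping any pin it
 * already holds; @seq must not be older than the current last_seq.
 */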
448 void bch2_journal_pin_set(struct journal *j, u64 seq,
449 			  struct journal_entry_pin *pin,
450 			  journal_pin_flush_fn flush_fn)
451 {
452 	spin_lock(&j->lock);
453 
454 	BUG_ON(seq < journal_last_seq(j));
455 
456 	bool reclaim = __journal_pin_drop(j, pin);
457 
458 	bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));
459 
460 	if (reclaim)
461 		bch2_journal_reclaim_fast(j);
462 	/*
463 	 * If the journal is currently full, we might want to call flush_fn
464 	 * immediately:
465 	 */
466 	if (seq == journal_last_seq(j))
467 		journal_wake(j);
468 
469 	spin_unlock(&j->lock);
470 }
471 
472 /**
473  * bch2_journal_pin_flush: ensure journal pin callback is no longer running
474  * @j:		journal object
475  * @pin:	pin to flush
476  */
477 void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
478 {
479 	BUG_ON(journal_pin_active(pin));
480 
481 	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
482 }
483 
484 /*
485  * Journal reclaim: flush references to open journal entries to reclaim space in
486  * the journal
487  *
488  * May be done by the journal code in the background as needed to free up space
489  * for more journal entries, or as part of doing a clean shutdown, or to migrate
490  * data off of a specific device:
491  */
492 
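/*
 * Find the next pin to flush: walk the pin fifo from the oldest entry and
 * return the first pin whose type is permitted by the
 * @allowed_below_seq/@allowed_above_seq bitmasks (of JOURNAL_PIN types)
 * relative to @seq_to_flush; *@seq is set to its sequence number.
 */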
493 static struct journal_entry_pin *
494 journal_get_next_pin(struct journal *j,
495 		     u64 seq_to_flush,
496 		     unsigned allowed_below_seq,
497 		     unsigned allowed_above_seq,
498 		     u64 *seq)
499 {
500 	struct journal_entry_pin_list *pin_list;
501 	struct journal_entry_pin *ret = NULL;
502 	unsigned i;
503 
504 	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
505 		if (*seq > seq_to_flush && !allowed_above_seq)
506 			break;
507 
508 		for (i = 0; i < JOURNAL_PIN_NR; i++)
509 			if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
510 			    ((1U << i) & allowed_above_seq)) {
511 				ret = list_first_entry_or_null(&pin_list->list[i],
512 					struct journal_entry_pin, list);
513 				if (ret)
514 					return ret;
515 			}
516 	}
517 
518 	return NULL;
519 }
520 
521 /* returns the number of journal pins flushed (nonzero if we did work) */
522 static size_t journal_flush_pins(struct journal *j,
523 				 u64 seq_to_flush,
524 				 unsigned allowed_below_seq,
525 				 unsigned allowed_above_seq,
526 				 unsigned min_any,
527 				 unsigned min_key_cache)
528 {
529 	struct journal_entry_pin *pin;
530 	size_t nr_flushed = 0;
531 	journal_pin_flush_fn flush_fn;
532 	u64 seq;
533 	int err;
534 
535 	lockdep_assert_held(&j->reclaim_lock);
536 
537 	while (1) {
538 		unsigned allowed_above = allowed_above_seq;
539 		unsigned allowed_below = allowed_below_seq;
540 
541 		if (min_any) {
542 			allowed_above |= ~0;
543 			allowed_below |= ~0;
544 		}
545 
546 		if (min_key_cache) {
547 			allowed_above |= 1U << JOURNAL_PIN_key_cache;
548 			allowed_below |= 1U << JOURNAL_PIN_key_cache;
549 		}
550 
551 		cond_resched();
552 
553 		j->last_flushed = jiffies;
554 
555 		spin_lock(&j->lock);
556 		pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
557 		if (pin) {
558 			BUG_ON(j->flush_in_progress);
559 			j->flush_in_progress = pin;
560 			j->flush_in_progress_dropped = false;
561 			flush_fn = pin->flush;
562 		}
563 		spin_unlock(&j->lock);
564 
565 		if (!pin)
566 			break;
567 
568 		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
569 			min_key_cache--;
570 
571 		if (min_any)
572 			min_any--;
573 
574 		err = flush_fn(j, pin, seq);
575 
576 		spin_lock(&j->lock);
577 		/* Pin might have been dropped or rearmed: */
578 		if (likely(!err && !j->flush_in_progress_dropped))
579 			list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
580 		j->flush_in_progress = NULL;
581 		j->flush_in_progress_dropped = false;
582 		spin_unlock(&j->lock);
583 
584 		wake_up(&j->pin_flush_wait);
585 
586 		if (err)
587 			break;
588 
589 		nr_flushed++;
590 	}
591 
592 	return nr_flushed;
593 }
594 
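/*
 * Pick a target sequence number for reclaim: flush enough pins that each
 * device's journal is at most half full and the pin fifo is at most half
 * full.
 */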
595 static u64 journal_seq_to_flush(struct journal *j)
596 {
597 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
598 	u64 seq_to_flush = 0;
599 
600 	spin_lock(&j->lock);
601 
602 	for_each_rw_member(c, ca) {
603 		struct journal_device *ja = &ca->journal;
604 		unsigned nr_buckets, bucket_to_flush;
605 
606 		if (!ja->nr)
607 			continue;
608 
609 		/* Try to keep the journal at most half full: */
610 		nr_buckets = ja->nr / 2;
611 
612 		nr_buckets = min(nr_buckets, ja->nr);
613 
614 		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
615 		seq_to_flush = max(seq_to_flush,
616 				   ja->bucket_seq[bucket_to_flush]);
617 	}
618 
619 	/* Also flush if the pin fifo is more than half full */
620 	seq_to_flush = max_t(s64, seq_to_flush,
621 			     (s64) journal_cur_seq(j) -
622 			     (j->pin.size >> 1));
623 	spin_unlock(&j->lock);
624 
625 	return seq_to_flush;
626 }
627 
628 /**
629  * __bch2_journal_reclaim - free up journal buckets
630  * @j:		journal object
631  * @direct:	direct or background reclaim?
632  * @kicked:	requested to run since we last ran?
633  * Returns:	0 on success, or -EIO if the journal has been shut down
634  *
635  * Background journal reclaim writes out btree nodes. It should be run
636  * early enough so that we never completely run out of journal buckets.
637  *
638  * High watermarks for triggering background reclaim:
639  * - FIFO has fewer than 512 entries left
640  * - fewer than 25% journal buckets free
641  *
642  * Background reclaim runs until low watermarks are reached:
643  * - FIFO has more than 1024 entries left
644  * - more than 50% journal buckets free
645  *
646  * As long as a reclaim can complete in the time it takes to fill up
647  * 512 journal entries or 25% of all journal buckets, then
648  * journal_next_bucket() should not stall.
649  */
650 static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
651 {
652 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
653 	struct btree_cache *bc = &c->btree_cache;
654 	bool kthread = (current->flags & PF_KTHREAD) != 0;
655 	u64 seq_to_flush;
656 	size_t min_nr, min_key_cache, nr_flushed;
657 	unsigned flags;
658 	int ret = 0;
659 
660 	/*
661 	 * We can't invoke memory reclaim while holding the reclaim_lock -
662 	 * journal reclaim is required to make progress for memory reclaim
663 	 * (cleaning the caches), so we can't get stuck in memory reclaim while
664 	 * we're holding the reclaim lock:
665 	 */
666 	lockdep_assert_held(&j->reclaim_lock);
667 	flags = memalloc_noreclaim_save();
668 
669 	do {
670 		if (kthread && kthread_should_stop())
671 			break;
672 
673 		if (bch2_journal_error(j)) {
674 			ret = -EIO;
675 			break;
676 		}
677 
678 		bch2_journal_do_discards(j);
679 
680 		seq_to_flush = journal_seq_to_flush(j);
681 		min_nr = 0;
682 
683 		/*
684 		 * If it's been longer than journal_reclaim_delay since we last flushed,
685 		 * make sure to flush at least one journal pin:
686 		 */
687 		if (time_after(jiffies, j->last_flushed +
688 			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
689 			min_nr = 1;
690 
691 		if (j->watermark != BCH_WATERMARK_stripe)
692 			min_nr = 1;
693 
694 		size_t btree_cache_live = bc->live[0].nr + bc->live[1].nr;
695 		if (atomic_long_read(&bc->nr_dirty) * 2 > btree_cache_live)
696 			min_nr = 1;
697 
698 		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
699 
700 		trace_and_count(c, journal_reclaim_start, c,
701 				direct, kicked,
702 				min_nr, min_key_cache,
703 				atomic_long_read(&bc->nr_dirty), btree_cache_live,
704 				atomic_long_read(&c->btree_key_cache.nr_dirty),
705 				atomic_long_read(&c->btree_key_cache.nr_keys));
706 
707 		nr_flushed = journal_flush_pins(j, seq_to_flush,
708 						~0, 0,
709 						min_nr, min_key_cache);
710 
711 		if (direct)
712 			j->nr_direct_reclaim += nr_flushed;
713 		else
714 			j->nr_background_reclaim += nr_flushed;
715 		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);
716 
717 		if (nr_flushed)
718 			wake_up(&j->reclaim_wait);
719 	} while ((min_nr || min_key_cache) && nr_flushed && !direct);
720 
721 	memalloc_noreclaim_restore(flags);
722 
723 	return ret;
724 }
725 
726 int bch2_journal_reclaim(struct journal *j)
727 {
728 	return __bch2_journal_reclaim(j, true, true);
729 }
730 
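/*
 * Background reclaim thread: run reclaim, then sleep until we're kicked,
 * asked to stop, or (if there are pinned entries) journal_reclaim_delay
 * has elapsed since the last flush.
 */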
731 static int bch2_journal_reclaim_thread(void *arg)
732 {
733 	struct journal *j = arg;
734 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
735 	unsigned long delay, now;
736 	bool journal_empty;
737 	int ret = 0;
738 
739 	set_freezable();
740 
741 	j->last_flushed = jiffies;
742 
743 	while (!ret && !kthread_should_stop()) {
744 		bool kicked = j->reclaim_kicked;
745 
746 		j->reclaim_kicked = false;
747 
748 		mutex_lock(&j->reclaim_lock);
749 		ret = __bch2_journal_reclaim(j, false, kicked);
750 		mutex_unlock(&j->reclaim_lock);
751 
752 		now = jiffies;
753 		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
754 		j->next_reclaim = j->last_flushed + delay;
755 
756 		if (!time_in_range(j->next_reclaim, now, now + delay))
757 			j->next_reclaim = now + delay;
758 
759 		while (1) {
760 			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
761 			if (kthread_should_stop())
762 				break;
763 			if (j->reclaim_kicked)
764 				break;
765 
766 			spin_lock(&j->lock);
767 			journal_empty = fifo_empty(&j->pin);
768 			spin_unlock(&j->lock);
769 
770 			long timeout = j->next_reclaim - jiffies;
771 
772 			if (journal_empty)
773 				schedule();
774 			else if (timeout > 0)
775 				schedule_timeout(timeout);
776 			else
777 				break;
778 		}
779 		__set_current_state(TASK_RUNNING);
780 	}
781 
782 	return 0;
783 }
784 
785 void bch2_journal_reclaim_stop(struct journal *j)
786 {
787 	struct task_struct *p = j->reclaim_thread;
788 
789 	j->reclaim_thread = NULL;
790 
791 	if (p) {
792 		kthread_stop(p);
793 		put_task_struct(p);
794 	}
795 }
796 
797 int bch2_journal_reclaim_start(struct journal *j)
798 {
799 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
800 	struct task_struct *p;
801 	int ret;
802 
803 	if (j->reclaim_thread)
804 		return 0;
805 
806 	p = kthread_create(bch2_journal_reclaim_thread, j,
807 			   "bch-reclaim/%s", c->name);
808 	ret = PTR_ERR_OR_ZERO(p);
809 	bch_err_msg(c, ret, "creating journal reclaim thread");
810 	if (ret)
811 		return ret;
812 
813 	get_task_struct(p);
814 	j->reclaim_thread = p;
815 	wake_up_process(p);
816 	return 0;
817 }
818 
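/*
 * Condition helper for bch2_journal_flush_pins(): flushes what it can (key
 * cache and other pins first, then btree pins) and returns nonzero once
 * there's nothing left to wait on for @seq_to_flush, or on journal error.
 */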
819 static int journal_flush_done(struct journal *j, u64 seq_to_flush,
820 			      bool *did_work)
821 {
822 	int ret;
823 
824 	ret = bch2_journal_error(j);
825 	if (ret)
826 		return ret;
827 
828 	mutex_lock(&j->reclaim_lock);
829 
830 	if (journal_flush_pins(j, seq_to_flush,
831 			       (1U << JOURNAL_PIN_key_cache)|
832 			       (1U << JOURNAL_PIN_other), 0, 0, 0) ||
833 	    journal_flush_pins(j, seq_to_flush,
834 			       (1U << JOURNAL_PIN_btree), 0, 0, 0))
835 		*did_work = true;
836 
837 	if (seq_to_flush > journal_cur_seq(j))
838 		bch2_journal_entry_close(j);
839 
840 	spin_lock(&j->lock);
841 	/*
842 	 * If journal replay hasn't completed, the unreplayed journal entries
843 	 * hold refs on their corresponding sequence numbers
844 	 */
845 	ret = !test_bit(JOURNAL_replay_done, &j->flags) ||
846 		journal_last_seq(j) > seq_to_flush ||
847 		!fifo_used(&j->pin);
848 
849 	spin_unlock(&j->lock);
850 	mutex_unlock(&j->reclaim_lock);
851 
852 	return ret;
853 }
854 
855 bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
856 {
857 	/* time_stats this */
858 	bool did_work = false;
859 
860 	if (!test_bit(JOURNAL_running, &j->flags))
861 		return false;
862 
863 	closure_wait_event(&j->async_wait,
864 		journal_flush_done(j, seq_to_flush, &did_work));
865 
866 	return did_work;
867 }
868 
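/*
 * Flush all journal pins referencing device @dev_idx (or, if @dev_idx is
 * negative, all pins replicated on fewer than metadata_replicas devices),
 * then garbage collect the journal replicas entries - used e.g. when
 * removing a device or migrating data off of it.
 */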
869 int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
870 {
871 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
872 	struct journal_entry_pin_list *p;
873 	u64 iter, seq = 0;
874 	int ret = 0;
875 
876 	spin_lock(&j->lock);
877 	fifo_for_each_entry_ptr(p, &j->pin, iter)
878 		if (dev_idx >= 0
879 		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
880 		    : p->devs.nr < c->opts.metadata_replicas)
881 			seq = iter;
882 	spin_unlock(&j->lock);
883 
884 	bch2_journal_flush_pins(j, seq);
885 
886 	ret = bch2_journal_error(j);
887 	if (ret)
888 		return ret;
889 
890 	mutex_lock(&c->replicas_gc_lock);
891 	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
892 
893 	/*
894 	 * Now that we've populated replicas_gc, write to the journal to mark
895 	 * active journal devices. This handles the case where the journal might
896 	 * be empty. Otherwise we could clear all journal replicas and
897 	 * temporarily put the fs into an unrecoverable state. Journal recovery
898 	 * expects to find devices marked for journal data on unclean mount.
899 	 */
900 	ret = bch2_journal_meta(&c->journal);
901 	if (ret)
902 		goto err;
903 
904 	seq = 0;
905 	spin_lock(&j->lock);
906 	while (!ret) {
907 		struct bch_replicas_padded replicas;
908 
909 		seq = max(seq, journal_last_seq(j));
910 		if (seq >= j->pin.back)
911 			break;
912 		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
913 					 journal_seq_pin(j, seq)->devs);
914 		seq++;
915 
916 		if (replicas.e.nr_devs) {
917 			spin_unlock(&j->lock);
918 			ret = bch2_mark_replicas(c, &replicas.e);
919 			spin_lock(&j->lock);
920 		}
921 	}
922 	spin_unlock(&j->lock);
923 err:
924 	ret = bch2_replicas_gc_end(c, ret);
925 	mutex_unlock(&c->replicas_gc_lock);
926 
927 	return ret;
928 }
929