// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static const char * const bch2_journal_errors[] = {
#define x(n)	#n,
	JOURNAL_ERRORS()
#undef x
	NULL
};
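
/*
 * A sketch of what the x-macro above expands to - JOURNAL_ERRORS() supplies
 * the JOURNAL_ERR_* names, which x() stringifies in enum order (entries
 * elided here):
 *
 *	static const char * const bch2_journal_errors[] = {
 *		"journal_full",
 *		"journal_pin_full",
 *		...
 *		NULL
 *	};
 */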

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}
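
/*
 * cur_entry_offset doubles as the entry's state: any value below
 * JOURNAL_ENTRY_CLOSED_VAL is the write offset of an open entry, while
 * JOURNAL_ENTRY_CLOSED_VAL and the higher JOURNAL_ENTRY_ERROR_VAL are
 * sentinels - hence the single "<" comparison above. Roughly:
 *
 *	cur_entry_offset == 128				open, 128 u64s written
 *	cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL	closed
 *	cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL	journal halted
 */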

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(p->list); i++)
		INIT_LIST_HEAD(&p->list[i]);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, to discard journal buckets, nor acquire priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool stuck = false;
	struct printbuf buf = PRINTBUF;

	if (!(error == JOURNAL_ERR_journal_full ||
	      error == JOURNAL_ERR_journal_pin_full) ||
	    nr_unwritten_journal_entries(j) ||
	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
		return stuck;

	spin_lock(&j->lock);

	if (j->can_discard) {
		spin_unlock(&j->lock);
		return stuck;
	}

	stuck = true;

	/*
	 * The journal shutdown path will set ->err_seq, but do it here first to
	 * serialize against concurrent failures and avoid duplicate error
	 * reports.
	 */
	if (j->err_seq) {
		spin_unlock(&j->lock);
		return stuck;
	}
	j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);

	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
		bch2_journal_errors[error]);
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "%s", buf.buf);

	printbuf_reset(&buf);
	bch2_journal_pins_to_text(&buf, j);
	bch_err(c, "Journal pins:\n%s", buf.buf);
	printbuf_exit(&buf);

	bch2_fatal_error(c);
	dump_stack();

	return stuck;
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq, bool write)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	lockdep_assert_held(&j->lock);

	if (__bch2_journal_pin_put(j, seq))
		bch2_journal_reclaim_fast(j);
	if (write)
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

/*
 * Closes the current journal entry, if open:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (!__journal_entry_is_open(old))
		return;

	/* Close out old buffer: */
	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->last_seq		= journal_last_seq(j);
	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}

void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

static bool journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return JOURNAL_ERR_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return JOURNAL_ERR_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
		return JOURNAL_ERR_max_in_flight;

	BUG_ON(!j->cur_entry_sectors);

	buf->expires		=
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
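	/*
	 * e.g. (illustrative): a 256KiB entry (512 sectors) yields
	 * (512 << 9) / 8 = 32768 u64s of space before subtracting
	 * journal_entry_overhead(j).
	 */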

	if (u64s <= (ssize_t) j->early_journal_entries.nr)
		return JOURNAL_ERR_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush	= false;
	buf->must_flush	= false;
	buf->separate_flush = false;
	buf->flush_time	= 0;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;

	if (j->early_journal_entries.nr) {
		memcpy(buf->data->_data, j->early_journal_entries.data,
		       j->early_journal_entries.nr * sizeof(u64));
		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
	}

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		bch2_time_stats_update(j->blocked_time,
				       j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(c->io_complete_wq,
			 &j->write_work,
			 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);

	if (j->early_journal_entries.nr)
		darray_exit(&j->early_journal_entries);
	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	long delta;

	spin_lock(&j->lock);
	if (!__journal_entry_is_open(j->reservations))
		goto unlock;

	delta = journal_cur_buf(j)->expires - jiffies;

	if (delta > 0)
		mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
	else
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
unlock:
	spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -BCH_ERR_erofs_journal_err;

	spin_lock(&j->lock);

	/* check once more in case somebody else shut things down... */
	if (bch2_journal_error(j)) {
		spin_unlock(&j->lock);
		return -BCH_ERR_erofs_journal_err;
	}

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and end up calling
	 * journal_entry_close() unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = JOURNAL_ERR_journal_full;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	ret = journal_entry_open(j);

	if (ret == JOURNAL_ERR_max_in_flight)
		trace_and_count(c, journal_entry_full, c);
unlock:
	if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
	    !j->res_get_blocked_start) {
		j->res_get_blocked_start = local_clock() ?: 1;
		trace_and_count(c, journal_full, c);
	}

	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;
	if (journal_error_check_stuck(j, ret, flags))
		ret = -BCH_ERR_journal_res_get_blocked;

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == JOURNAL_ERR_journal_full ||
	     ret == JOURNAL_ERR_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == JOURNAL_ERR_insufficient_devices
		? -BCH_ERR_erofs_journal_err
		: -BCH_ERR_journal_res_get_blocked;
}

/*
 * Essentially the entry point to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * The journal write is the structure used to set up journal writes. The
 * calling function will then add its keys to the structure, queuing them for
 * the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
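
/*
 * A sketch of the typical reservation lifecycle, modelled on
 * bch2_journal_meta() below (illustrative only; u64s here stands for the
 * size of the caller's keys):
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *	if (ret)
 *		return ret;
 *	...copy keys into the reserved space...
 *	bch2_journal_res_put(j, &res);
 *	ret = bch2_journal_flush_seq(j, res.seq);	(optional: wait)
 */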

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}
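
/*
 * Illustrative use (a sketch; my_res is hypothetical): a subsystem that
 * reserves space in every journal entry adjusts its reservation with:
 *
 *	static struct journal_entry_res my_res;
 *
 *	bch2_journal_entry_res_resize(j, &my_res, new_u64s);
 *
 * Shrinking just returns the space; growing may have to close the current
 * entry if it can no longer accommodate the reservation.
 */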

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j:		journal object
 * @seq:	seq to flush
 * @parent:	closure object to wait with
 * Returns:	1 if @seq has already been flushed, 0 if @seq is being flushed,
 *		-EIO if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}
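
/*
 * Example of driving this from a closure (a sketch, assuming the caller owns
 * closure cl), keyed to the return values documented above:
 *
 *	ret = bch2_journal_flush_seq_async(j, seq, &cl);
 *	if (ret < 0)
 *		...seq will never be flushed, journal is in error...
 *	else if (!ret)
 *		...write in flight; cl is woken when it completes...
 *	else
 *		...seq was already flushed, nothing to wait for...
 */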

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * entry still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (seq <= c->journal.flushed_seq_ondisk)
		return false;

	spin_lock(&j->lock);
	if (seq <= c->journal.flushed_seq_ondisk)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < seq;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal write is already in flight, and was a flush write: */
		if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_buf *buf;
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time	= local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}
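
/*
 * Usage pattern (see __bch2_set_nr_journal_buckets() below for a real
 * caller):
 *
 *	bch2_journal_block(&c->journal);	quiesce; no new entries open
 *	...modify journal layout...
 *	bch2_journal_unblock(&c->journal);
 */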

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	struct open_bucket **ob = NULL;
	long *bu = NULL;
	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
	int ret = 0;

	BUG_ON(nr <= ja->nr);

	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err_free;
	}

	for (nr_got = 0; nr_got < nr_want; nr_got++) {
		if (new_fs) {
			bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
			if (bu[nr_got] < 0) {
				ret = -BCH_ERR_ENOSPC_bucket_alloc;
				break;
			}
		} else {
			ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
			if (ret)
				break;

			ret = bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						ob[nr_got]->bucket, BCH_DATA_journal,
						ca->mi.bucket_size));
			if (ret) {
				bch2_open_bucket_put(c, ob[nr_got]);
				bch_err_msg(c, ret, "marking new journal buckets");
				break;
			}

			bu[nr_got] = ob[nr_got]->bucket;
		}
	}

	if (!nr_got)
		goto err_free;

	/* Don't return an error if we successfully allocated some buckets: */
	ret = 0;

	if (c) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_block(&c->journal);
		mutex_lock(&c->sb_lock);
	}

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));

	BUG_ON(ja->discard_idx > ja->nr);

	pos = ja->discard_idx ?: ja->nr;

	memmove(new_buckets + pos + nr_got,
		new_buckets + pos,
		sizeof(new_buckets[0]) * (ja->nr - pos));
	memmove(new_bucket_seq + pos + nr_got,
		new_bucket_seq + pos,
		sizeof(new_bucket_seq[0]) * (ja->nr - pos));

	for (i = 0; i < nr_got; i++) {
		new_buckets[pos + i] = bu[i];
		new_bucket_seq[pos + i] = 0;
	}

	nr = ja->nr + nr_got;

	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
	if (ret)
		goto err_unblock;

	if (!new_fs)
		bch2_write_super(c);

	/* Commit: */
	if (c)
		spin_lock(&c->journal.lock);

	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);
	ja->nr = nr;

	if (pos <= ja->discard_idx)
		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx_ondisk)
		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx)
		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
	if (pos <= ja->cur_idx)
		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

	if (c)
		spin_unlock(&c->journal.lock);
err_unblock:
	if (c) {
		bch2_journal_unblock(&c->journal);
		mutex_unlock(&c->sb_lock);
	}

	if (ret && !new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						bu[i], BCH_DATA_free, 0));
err_free:
	if (!new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_open_bucket_put(c, ob[i]);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	kfree(ob);
	kfree(bu);
	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	int ret = 0;

	closure_init_stack(&cl);

	down_write(&c->state_lock);

	/* don't handle reducing nr of buckets yet: */
	if (nr < ja->nr)
		goto unlock;

	while (ja->nr < nr) {
		struct disk_reservation disk_res = { 0, 0, 0 };

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 *
		 * XXX: that's not right, disk reservations only ensure a
		 * filesystem-wide allocation will succeed, this is a device
		 * specific allocation - we can hang here:
		 */

		ret = bch2_disk_reservation_get(c, &disk_res,
						bucket_to_sector(ca, nr - ja->nr), 1, 0);
		if (ret)
			break;

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		closure_sync(&cl);

		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
			break;
	}

	if (ret)
		bch_err_fn(c, ret);
unlock:
	up_write(&c->state_lock);
	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;
	int ret;

	if (dynamic_fault("bcachefs:add:journal_alloc")) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err;
	}

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));
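	/*
	 * Worked example: with 1MiB buckets (2048 512-byte sectors) the cap
	 * is min(8192, (1 << 24) / 2048) = 8192 buckets, so the 8GB limit
	 * only kicks in for buckets larger than 1MiB.
	 */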

	ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
err:
	if (ret)
		bch_err_fn(ca, ret);
	return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	for_each_online_member(ca, c, i) {
		if (ca->journal.nr)
			continue;

		int ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       j->last_empty_seq != journal_cur_seq(j));

	cancel_delayed_work_sync(&j->write_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i, **_i;
	struct genradix_iter iter;
	bool had_entries = false;
	unsigned ptr;
	u64 last_seq = cur_seq, nr, seq;

	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		last_seq = le64_to_cpu(i->j.last_seq);
		break;
	}

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -BCH_ERR_ENOMEM_journal_pin_fifo;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->flushed_seq_ondisk	= cur_seq - 1;
	j->seq_ondisk		= cur_seq - 1;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);
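	/*
	 * e.g. (illustrative): if entries 5..9 were recovered from disk and
	 * the newest had last_seq 5, then cur_seq is 10: the pin fifo spans
	 * [5, 10), j->seq starts at 9, and the first entry opened for new
	 * writes will be seq 10.
	 */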

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);

		had_entries = true;
	}

	if (!had_entries)
		j->last_empty_seq = cur_seq;

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_field_get(sb, journal);
	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
		bch2_sb_field_get(sb, journal_v2);
	unsigned i, nr_bvecs;

	ja->nr = 0;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

		for (i = 0; i < nr; i++)
			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
	} else if (journal_buckets) {
		ja->nr = bch2_nr_journal_buckets(journal_buckets);
	}

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
	if (!ca->journal.bio)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
		unsigned j, dst = 0;

		for (i = 0; i < nr; i++)
			for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
				ja->buckets[dst++] =
					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
	} else if (journal_buckets) {
		for (i = 0; i < ja->nr; i++)
			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
	}
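	/*
	 * e.g. (illustrative): a v2 range entry { .start = 1024, .nr = 3 }
	 * expands to buckets 1024, 1025, 1026 in ja->buckets.
	 */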

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	unsigned i;

	darray_exit(&j->early_journal_entries);

	for (i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvpfree(j->buf[i].data, j->buf[i].buf_size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	static struct lock_class_key res_key;
	unsigned i;

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
		return -BCH_ERR_ENOMEM_journal_pin_fifo;

	for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data)
			return -BCH_ERR_ENOMEM_journal_buf;
	}

	j->pin.front = j->pin.back = 1;
	return 0;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	struct bch_dev *ca;
	unsigned long now = jiffies;
	u64 seq;
	unsigned i;

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);
	out->atomic++;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	prt_printf(out, "dirty journal entries:\t%llu/%llu\n",	fifo_used(&j->pin), j->pin.size);
	prt_printf(out, "seq:\t\t\t%llu\n",			journal_cur_seq(j));
	prt_printf(out, "seq_ondisk:\t\t%llu\n",		j->seq_ondisk);
	prt_printf(out, "last_seq:\t\t%llu\n",		journal_last_seq(j));
	prt_printf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
	prt_printf(out, "flushed_seq_ondisk:\t%llu\n",	j->flushed_seq_ondisk);
	prt_printf(out, "watermark:\t\t%s\n",		bch2_watermarks[j->watermark]);
	prt_printf(out, "each entry reserved:\t%u\n",	j->entry_u64s_reserved);
	prt_printf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
	prt_printf(out, "nr noflush writes:\t%llu\n",	j->nr_noflush_writes);
	prt_printf(out, "nr direct reclaim:\t%llu\n",	j->nr_direct_reclaim);
	prt_printf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
	prt_printf(out, "reclaim kicked:\t\t%u\n",		j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n",	time_after(j->next_reclaim, now)
	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	prt_printf(out, "current entry sectors:\t%u\n",	j->cur_entry_sectors);
	prt_printf(out, "current entry error:\t%s\n",	bch2_journal_errors[j->cur_entry_error]);
	prt_printf(out, "current entry:\t\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		prt_printf(out, "error");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		prt_printf(out, "closed");
		break;
	default:
		prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	prt_newline(out);

	for (seq = journal_cur_seq(j);
	     seq >= journal_last_unwritten_seq(j);
	     --seq) {
		i = seq & JOURNAL_BUF_MASK;

		prt_printf(out, "unwritten entry:");
		prt_tab(out);
		prt_printf(out, "%llu", seq);
		prt_newline(out);
		printbuf_indent_add(out, 2);

		prt_printf(out, "refcount:");
		prt_tab(out);
		prt_printf(out, "%u", journal_state_count(s, i));
		prt_newline(out);

		prt_printf(out, "sectors:");
		prt_tab(out);
		prt_printf(out, "%u", j->buf[i].sectors);
		prt_newline(out);

		prt_printf(out, "expires:");
		prt_tab(out);
		prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
		prt_newline(out);

		printbuf_indent_sub(out, 2);
	}

	prt_printf(out,
	       "replay done:\t\t%i\n",
	       test_bit(JOURNAL_REPLAY_DONE,	&j->flags));

	prt_printf(out, "space:\n");
	prt_printf(out, "\tdiscarded\t%u:%u\n",
	       j->space[journal_space_discarded].next_entry,
	       j->space[journal_space_discarded].total);
	prt_printf(out, "\tclean ondisk\t%u:%u\n",
	       j->space[journal_space_clean_ondisk].next_entry,
	       j->space[journal_space_clean_ondisk].total);
	prt_printf(out, "\tclean\t\t%u:%u\n",
	       j->space[journal_space_clean].next_entry,
	       j->space[journal_space_clean].total);
	prt_printf(out, "\ttotal\t\t%u:%u\n",
	       j->space[journal_space_total].next_entry,
	       j->space[journal_space_total].total);

	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		prt_printf(out, "dev %u:\n",		i);
		prt_printf(out, "\tnr\t\t%u\n",		ja->nr);
		prt_printf(out, "\tbucket size\t%u\n",	ca->mi.bucket_size);
		prt_printf(out, "\tavailable\t%u:%u\n",	bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		prt_printf(out, "\tdiscard_idx\t%u\n",	ja->discard_idx);
		prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,	ja->bucket_seq[ja->dirty_idx_ondisk]);
		prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,		ja->bucket_seq[ja->dirty_idx]);
		prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,		ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	unsigned i;

	spin_lock(&j->lock);
	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
	prt_newline(out);
	printbuf_indent_add(out, 2);

	for (i = 0; i < ARRAY_SIZE(pin_list->list); i++)
		list_for_each_entry(pin, &pin_list->list[i], list) {
			prt_printf(out, "\t%px %ps", pin, pin->flush);
			prt_newline(out);
		}

	if (!list_empty(&pin_list->flushed)) {
		prt_printf(out, "flushed:");
		prt_newline(out);
	}

	list_for_each_entry(pin, &pin_list->flushed, list) {
		prt_printf(out, "\t%px %ps", pin, pin->flush);
		prt_newline(out);
	}

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}