xref: /linux/fs/bcachefs/journal.c (revision 2d7f3d1a5866705be2393150e1ffdf67030ab88d)
// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

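/*
 * Expand the JOURNAL_ERRORS() x-macro into an array of error name strings,
 * indexed by the JOURNAL_ERR_* enum values, for the messages printed below:
 */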
static const char * const bch2_journal_errors[] = {
#define x(n)	#n,
	JOURNAL_ERRORS()
#undef x
	NULL
};

static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	unsigned i = seq & JOURNAL_BUF_MASK;
	struct journal_buf *buf = j->buf + i;

	prt_printf(out, "seq:");
	prt_tab(out);
	prt_printf(out, "%llu", seq);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "refcount:");
	prt_tab(out);
	prt_printf(out, "%u", journal_state_count(s, i));
	prt_newline(out);

	prt_printf(out, "size:");
	prt_tab(out);
	prt_human_readable_u64(out, vstruct_bytes(buf->data));
	prt_newline(out);

	prt_printf(out, "expires:");
	prt_tab(out);
	prt_printf(out, "%li jiffies", buf->expires - jiffies);
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
{
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++)
		bch2_journal_buf_to_text(out, j, seq);
}

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

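/*
 * The journal keeps a small ring of in-flight buffers (j->buf); a sequence
 * number maps to its slot via seq & JOURNAL_BUF_MASK, and the mapping is only
 * meaningful while that sequence number is still unwritten:
 */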
static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(p->list); i++)
		INIT_LIST_HEAD(&p->list[i]);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, discard journal buckets, or acquire a priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool stuck = false;
	struct printbuf buf = PRINTBUF;

	if (!(error == JOURNAL_ERR_journal_full ||
	      error == JOURNAL_ERR_journal_pin_full) ||
	    nr_unwritten_journal_entries(j) ||
	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
		return stuck;

	spin_lock(&j->lock);

	if (j->can_discard) {
		spin_unlock(&j->lock);
		return stuck;
	}

	stuck = true;

	/*
	 * The journal shutdown path will set ->err_seq, but do it here first to
	 * serialize against concurrent failures and avoid duplicate error
	 * reports.
	 */
	if (j->err_seq) {
		spin_unlock(&j->lock);
		return stuck;
	}
	j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);

	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
		bch2_journal_errors[error]);
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "%s", buf.buf);

	printbuf_reset(&buf);
	bch2_journal_pins_to_text(&buf, j);
	bch_err(c, "Journal pins:\n%s", buf.buf);
	printbuf_exit(&buf);

	bch2_fatal_error(c);
	dump_stack();

	return stuck;
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq, bool write)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	lockdep_assert_held(&j->lock);

	if (__bch2_journal_pin_put(j, seq))
		bch2_journal_reclaim_fast(j);
	if (write)
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

/*
 * Closes the currently open journal entry (or marks it as an error):
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

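	/*
	 * Mark the entry closed (or errored) with a lockless cmpxchg loop on
	 * the packed reservation state; bail out if another thread already
	 * moved it to the same value, or if it's already in the error state:
	 */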
	do {
		old.v = new.v = v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (!__journal_entry_is_open(old))
		return;

	/* Close out old buffer: */
	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	if (trace_journal_entry_close_enabled() && trace) {
		struct printbuf pbuf = PRINTBUF;
		pbuf.atomic++;

		prt_str(&pbuf, "entry size: ");
		prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
		prt_newline(&pbuf);
		bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
		trace_journal_entry_close(c, pbuf.buf);
		printbuf_exit(&pbuf);
	}

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry right
	 * before we open a new one:
	 */
	buf->last_seq		= journal_last_seq(j);
	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}

void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

bool bch2_journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return JOURNAL_ERR_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return JOURNAL_ERR_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
		return JOURNAL_ERR_max_in_flight;

	BUG_ON(!j->cur_entry_sectors);

	buf->expires		=
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= (ssize_t) j->early_journal_entries.nr)
		return JOURNAL_ERR_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush	= false;
	buf->must_flush	= false;
	buf->separate_flush = false;
	buf->flush_time	= 0;
	buf->need_flush_to_write_buffer = true;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;

	if (j->early_journal_entries.nr) {
		memcpy(buf->data->_data, j->early_journal_entries.data,
		       j->early_journal_entries.nr * sizeof(u64));
		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
	}

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

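	/*
	 * Atomically advance the buffer index and take a reference on the new
	 * buffer in the packed reservation state; concurrent updaters of
	 * j->reservations force a retry via the cmpxchg:
	 */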
	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	mod_delayed_work(c->io_complete_wq,
			 &j->write_work,
			 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);

	if (j->early_journal_entries.nr)
		darray_exit(&j->early_journal_entries);
	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		bch2_journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	long delta;

	spin_lock(&j->lock);
	if (!__journal_entry_is_open(j->reservations))
		goto unlock;

	delta = journal_cur_buf(j)->expires - jiffies;

	if (delta > 0)
		mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
	else
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
unlock:
	spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -BCH_ERR_erofs_journal_err;

	spin_lock(&j->lock);

	/* check once more in case somebody else shut things down... */
	if (bch2_journal_error(j)) {
		spin_unlock(&j->lock);
		return -BCH_ERR_erofs_journal_err;
	}

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open(), and don't call
	 * bch2_journal_entry_close() unnecessarily:
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = JOURNAL_ERR_journal_full;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
	ret = journal_entry_open(j);

	if (ret == JOURNAL_ERR_max_in_flight) {
		track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight],
				   &j->max_in_flight_start, true);
		if (trace_journal_entry_full_enabled()) {
			struct printbuf buf = PRINTBUF;
			buf.atomic++;

			bch2_journal_bufs_to_text(&buf, j);
			trace_journal_entry_full(c, buf.buf);
			printbuf_exit(&buf);
		}
		count_event(c, journal_entry_full);
	}
unlock:
	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;
	if (journal_error_check_stuck(j, ret, flags))
		ret = -BCH_ERR_journal_res_get_blocked;

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == JOURNAL_ERR_journal_full ||
	     ret == JOURNAL_ERR_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == JOURNAL_ERR_insufficient_devices
		? -BCH_ERR_erofs_journal_err
		: -BCH_ERR_journal_res_get_blocked;
}

/*
 * Essentially the entry point to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * Journal write is the structure used to set up journal writes. The calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
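
/*
 * Illustrative only - a minimal sketch of how callers use the reservation API
 * (bch2_journal_meta() below is a real in-tree example; the key-copying step
 * here is elided):
 *
 *	struct journal_res res = {};
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *	if (ret)
 *		return ret;
 *	...copy up to u64s worth of keys into the reserved space...
 *	bch2_journal_res_put(j, &res);
 */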

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
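	/*
	 * Order the store to j->cur_entry_u64s above against the read of
	 * j->reservations below, so we notice a reservation that raced with
	 * the resize:
	 */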
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j:		journal object
 * @seq:	seq to flush
 * @parent:	closure object to wait with
 * Returns:	1 if @seq has already been flushed, 0 if @seq is being flushed,
 *		-EIO if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (seq <= c->journal.flushed_seq_ondisk)
		return false;

	spin_lock(&j->lock);
	if (seq <= c->journal.flushed_seq_ondisk)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < seq;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal write is already in flight, and was a flush write: */
		if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_buf *buf;
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time	= local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}

static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	struct journal_buf *ret = NULL;

	mutex_lock(&j->buf_lock);
	spin_lock(&j->lock);
	max_seq = min(max_seq, journal_cur_seq(j));

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= max_seq;
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *buf = j->buf + idx;

		if (buf->need_flush_to_write_buffer) {
			if (seq == journal_cur_seq(j))
				__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

			union journal_res_state s;
			s.v = atomic64_read_acquire(&j->reservations.counter);

			ret = journal_state_count(s, idx)
				? ERR_PTR(-EAGAIN)
				: buf;
			break;
		}
	}

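	/*
	 * Note: on success we return with buf_lock still held - only an error
	 * or empty result unlocks it here, so the caller is responsible for
	 * dropping it when done with the buffer:
	 */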
	spin_unlock(&j->lock);
	if (IS_ERR_OR_NULL(ret))
		mutex_unlock(&j->buf_lock);
	return ret;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	struct journal_buf *ret;

	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));
	return ret;
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	struct open_bucket **ob = NULL;
	long *bu = NULL;
	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
	int ret = 0;

	BUG_ON(nr <= ja->nr);

	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err_free;
	}

	for (nr_got = 0; nr_got < nr_want; nr_got++) {
		if (new_fs) {
			bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
			if (bu[nr_got] < 0) {
				ret = -BCH_ERR_ENOSPC_bucket_alloc;
				break;
			}
		} else {
			ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
			if (ret)
				break;

			ret = bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						ob[nr_got]->bucket, BCH_DATA_journal,
						ca->mi.bucket_size));
			if (ret) {
				bch2_open_bucket_put(c, ob[nr_got]);
				bch_err_msg(c, ret, "marking new journal buckets");
				break;
			}

			bu[nr_got] = ob[nr_got]->bucket;
		}
	}

	if (!nr_got)
		goto err_free;

	/* Don't return an error if we successfully allocated some buckets: */
	ret = 0;

	if (c) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_block(&c->journal);
		mutex_lock(&c->sb_lock);
	}

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));

	BUG_ON(ja->discard_idx > ja->nr);

	pos = ja->discard_idx ?: ja->nr;

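	/*
	 * Open a gap of nr_got entries at pos in the copied arrays; the newly
	 * allocated buckets are spliced into the gap below:
	 */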
	memmove(new_buckets + pos + nr_got,
		new_buckets + pos,
		sizeof(new_buckets[0]) * (ja->nr - pos));
	memmove(new_bucket_seq + pos + nr_got,
		new_bucket_seq + pos,
		sizeof(new_bucket_seq[0]) * (ja->nr - pos));

	for (i = 0; i < nr_got; i++) {
		new_buckets[pos + i] = bu[i];
		new_bucket_seq[pos + i] = 0;
	}

	nr = ja->nr + nr_got;

	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
	if (ret)
		goto err_unblock;

	if (!new_fs)
		bch2_write_super(c);

	/* Commit: */
	if (c)
		spin_lock(&c->journal.lock);

	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);
	ja->nr = nr;

	if (pos <= ja->discard_idx)
		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx_ondisk)
		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx)
		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
	if (pos <= ja->cur_idx)
		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

	if (c)
		spin_unlock(&c->journal.lock);
err_unblock:
	if (c) {
		bch2_journal_unblock(&c->journal);
		mutex_unlock(&c->sb_lock);
	}

	if (ret && !new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						bu[i], BCH_DATA_free, 0));
err_free:
	if (!new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_open_bucket_put(c, ob[i]);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	kfree(ob);
	kfree(bu);
	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	int ret = 0;

	closure_init_stack(&cl);

	down_write(&c->state_lock);

	/* don't handle reducing nr of buckets yet: */
	if (nr < ja->nr)
		goto unlock;

	while (ja->nr < nr) {
		struct disk_reservation disk_res = { 0, 0, 0 };

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 *
		 * XXX: that's not right, disk reservations only ensure a
		 * filesystem-wide allocation will succeed, this is a device
		 * specific allocation - we can hang here:
		 */

		ret = bch2_disk_reservation_get(c, &disk_res,
						bucket_to_sector(ca, nr - ja->nr), 1, 0);
		if (ret)
			break;

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		closure_sync(&cl);

		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
			break;
	}

	bch_err_fn(c, ret);
unlock:
	up_write(&c->state_lock);
	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;
	int ret;

	if (dynamic_fault("bcachefs:add:journal_alloc")) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err;
	}

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
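	/*
	 * e.g. with 2048 sector (1MB) buckets, (1 << 24) / 2048 = 8192, so the
	 * two limits coincide; the 8GB cap (1 << 24 sectors) is what bites for
	 * buckets larger than 1MB:
	 */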
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));

	ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
err:
	bch_err_fn(ca, ret);
	return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
	for_each_online_member(c, ca) {
		if (ca->journal.nr)
			continue;

		int ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, bch2_journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       j->last_empty_seq != journal_cur_seq(j));

	cancel_delayed_work_sync(&j->write_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i, **_i;
	struct genradix_iter iter;
	bool had_entries = false;
	unsigned ptr;
	u64 last_seq = cur_seq, nr, seq;

	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		last_seq = le64_to_cpu(i->j.last_seq);
		break;
	}

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -BCH_ERR_ENOMEM_journal_pin_fifo;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->flushed_seq_ondisk	= cur_seq - 1;
	j->seq_ondisk		= cur_seq - 1;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);

		had_entries = true;
	}

	if (!had_entries)
		j->last_empty_seq = cur_seq;

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_field_get(sb, journal);
	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
		bch2_sb_field_get(sb, journal_v2);
	unsigned i, nr_bvecs;

	ja->nr = 0;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

		for (i = 0; i < nr; i++)
			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
	} else if (journal_buckets) {
		ja->nr = bch2_nr_journal_buckets(journal_buckets);
	}

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
	if (!ca->journal.bio)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -BCH_ERR_ENOMEM_dev_journal_init;

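	/*
	 * journal_v2 stores buckets as (start, nr) extents rather than one u64
	 * per bucket; expand each extent into individual bucket indices:
	 */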
	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
		unsigned j, dst = 0;

		for (i = 0; i < nr; i++)
			for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
				ja->buckets[dst++] =
					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
	} else if (journal_buckets) {
		for (i = 0; i < ja->nr; i++)
			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
	}

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	unsigned i;

	darray_exit(&j->early_journal_entries);

	for (i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvpfree(j->buf[i].data, j->buf[i].buf_size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	static struct lock_class_key res_key;
	unsigned i;

	mutex_init(&j->buf_lock);
	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
		return -BCH_ERR_ENOMEM_journal_pin_fifo;

	for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data)
			return -BCH_ERR_ENOMEM_journal_buf;
	}

	j->pin.front = j->pin.back = 1;
	return 0;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	unsigned long now = jiffies;
	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);
	out->atomic++;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	prt_printf(out, "dirty journal entries:\t%llu/%llu\n",	fifo_used(&j->pin), j->pin.size);
	prt_printf(out, "seq:\t\t\t%llu\n",			journal_cur_seq(j));
	prt_printf(out, "seq_ondisk:\t\t%llu\n",		j->seq_ondisk);
	prt_printf(out, "last_seq:\t\t%llu\n",			journal_last_seq(j));
	prt_printf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
	prt_printf(out, "flushed_seq_ondisk:\t%llu\n",		j->flushed_seq_ondisk);
	prt_printf(out, "watermark:\t\t%s\n",			bch2_watermarks[j->watermark]);
	prt_printf(out, "each entry reserved:\t%u\n",		j->entry_u64s_reserved);
	prt_printf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
	prt_printf(out, "nr noflush writes:\t%llu\n",		j->nr_noflush_writes);
	prt_printf(out, "average write size:\t");
	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
	prt_newline(out);
	prt_printf(out, "nr direct reclaim:\t%llu\n",		j->nr_direct_reclaim);
	prt_printf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
	prt_printf(out, "reclaim kicked:\t\t%u\n",		j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n",		time_after(j->next_reclaim, now)
	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	prt_printf(out, "current entry sectors:\t%u\n",		j->cur_entry_sectors);
	prt_printf(out, "current entry error:\t%s\n",		bch2_journal_errors[j->cur_entry_error]);
	prt_printf(out, "current entry:\t\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		prt_printf(out, "error");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		prt_printf(out, "closed");
		break;
	default:
		prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	prt_newline(out);
	prt_printf(out, "unwritten entries:");
	prt_newline(out);
	bch2_journal_bufs_to_text(out, j);

	prt_printf(out,
	       "replay done:\t\t%i\n",
	       test_bit(JOURNAL_REPLAY_DONE,	&j->flags));

	prt_printf(out, "space:\n");
	prt_printf(out, "\tdiscarded\t%u:%u\n",
	       j->space[journal_space_discarded].next_entry,
	       j->space[journal_space_discarded].total);
	prt_printf(out, "\tclean ondisk\t%u:%u\n",
	       j->space[journal_space_clean_ondisk].next_entry,
	       j->space[journal_space_clean_ondisk].total);
	prt_printf(out, "\tclean\t\t%u:%u\n",
	       j->space[journal_space_clean].next_entry,
	       j->space[journal_space_clean].total);
	prt_printf(out, "\ttotal\t\t%u:%u\n",
	       j->space[journal_space_total].next_entry,
	       j->space[journal_space_total].total);

	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		prt_printf(out, "dev %u:\n",		ca->dev_idx);
		prt_printf(out, "\tnr\t\t%u\n",		ja->nr);
		prt_printf(out, "\tbucket size\t%u\n",	ca->mi.bucket_size);
		prt_printf(out, "\tavailable\t%u:%u\n",	bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		prt_printf(out, "\tdiscard_idx\t%u\n",	ja->discard_idx);
		prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,	ja->bucket_seq[ja->dirty_idx_ondisk]);
		prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,		ja->bucket_seq[ja->dirty_idx]);
		prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,		ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	unsigned i;

	spin_lock(&j->lock);
	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
	prt_newline(out);
	printbuf_indent_add(out, 2);

	for (i = 0; i < ARRAY_SIZE(pin_list->list); i++)
		list_for_each_entry(pin, &pin_list->list[i], list) {
			prt_printf(out, "\t%px %ps", pin, pin->flush);
			prt_newline(out);
		}

	if (!list_empty(&pin_list->flushed)) {
		prt_printf(out, "flushed:");
		prt_newline(out);
	}

	list_for_each_entry(pin, &pin_list->flushed, list) {
		prt_printf(out, "\t%px %ps", pin, pin->flush);
		prt_newline(out);
	}

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}