xref: /linux/fs/bcachefs/journal.c (revision df2e3152f1cb798ed8ffa7e488c50261e6dc50e3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bcachefs journalling code, for btree insertions
4  *
5  * Copyright 2012 Google, Inc.
6  */
7 
8 #include "bcachefs.h"
9 #include "alloc_foreground.h"
10 #include "bkey_methods.h"
11 #include "btree_gc.h"
12 #include "btree_update.h"
13 #include "btree_write_buffer.h"
14 #include "buckets.h"
15 #include "error.h"
16 #include "journal.h"
17 #include "journal_io.h"
18 #include "journal_reclaim.h"
19 #include "journal_sb.h"
20 #include "journal_seq_blacklist.h"
21 #include "trace.h"
22 
23 static const char * const bch2_journal_errors[] = {
24 #define x(n)	#n,
25 	JOURNAL_ERRORS()
26 #undef x
27 	NULL
28 };
29 
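/*
 * A journal entry is considered unwritten while its sequence number is still
 * ahead of j->seq_ondisk, i.e. it has not yet been committed to disk:
 */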
30 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
31 {
32 	return seq > j->seq_ondisk;
33 }
34 
35 static bool __journal_entry_is_open(union journal_res_state state)
36 {
37 	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
38 }
39 
40 static inline unsigned nr_unwritten_journal_entries(struct journal *j)
41 {
42 	return atomic64_read(&j->seq) - j->seq_ondisk;
43 }
44 
45 static bool journal_entry_is_open(struct journal *j)
46 {
47 	return __journal_entry_is_open(j->reservations);
48 }
49 
50 static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
51 {
52 	union journal_res_state s = READ_ONCE(j->reservations);
53 	unsigned i = seq & JOURNAL_BUF_MASK;
54 	struct journal_buf *buf = j->buf + i;
55 
56 	prt_printf(out, "seq:\t%llu\n", seq);
57 	printbuf_indent_add(out, 2);
58 
59 	prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));
60 
61 	prt_printf(out, "size:\t");
62 	prt_human_readable_u64(out, vstruct_bytes(buf->data));
63 	prt_newline(out);
64 
65 	prt_printf(out, "expires:\t");
66 	prt_printf(out, "%li jiffies\n", buf->expires - jiffies);
67 
68 	prt_printf(out, "flags:\t");
69 	if (buf->noflush)
70 		prt_str(out, "noflush ");
71 	if (buf->must_flush)
72 		prt_str(out, "must_flush ");
73 	if (buf->separate_flush)
74 		prt_str(out, "separate_flush ");
75 	if (buf->need_flush_to_write_buffer)
76 		prt_str(out, "need_flush_to_write_buffer ");
77 	if (buf->write_started)
78 		prt_str(out, "write_started ");
79 	if (buf->write_allocated)
80 		prt_str(out, "write_allocated ");
81 	if (buf->write_done)
82 		prt_str(out, "write_done");
83 	prt_newline(out);
84 
85 	printbuf_indent_sub(out, 2);
86 }
87 
88 static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
89 {
90 	if (!out->nr_tabstops)
91 		printbuf_tabstop_push(out, 24);
92 
93 	for (u64 seq = journal_last_unwritten_seq(j);
94 	     seq <= journal_cur_seq(j);
95 	     seq++)
96 		bch2_journal_buf_to_text(out, j, seq);
97 	prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
98 }
99 
100 static inline struct journal_buf *
101 journal_seq_to_buf(struct journal *j, u64 seq)
102 {
103 	struct journal_buf *buf = NULL;
104 
105 	EBUG_ON(seq > journal_cur_seq(j));
106 
107 	if (journal_seq_unwritten(j, seq)) {
108 		buf = j->buf + (seq & JOURNAL_BUF_MASK);
109 		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
110 	}
111 	return buf;
112 }
113 
114 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
115 {
116 	for (unsigned i = 0; i < ARRAY_SIZE(p->unflushed); i++)
117 		INIT_LIST_HEAD(&p->unflushed[i]);
118 	for (unsigned i = 0; i < ARRAY_SIZE(p->flushed); i++)
119 		INIT_LIST_HEAD(&p->flushed[i]);
120 	atomic_set(&p->count, count);
121 	p->devs.nr = 0;
122 }
123 
124 /*
125  * Detect stuck journal conditions and trigger shutdown. Technically the journal
126  * can end up stuck for a variety of reasons, such as blocked I/O, a journal
127  * reservation lockup, etc. Since this is a fatal error with potentially
128  * unpredictable characteristics, we want to be fairly conservative before we
129  * decide to shut things down.
130  *
131  * Consider the journal stuck when it appears full with no ability to commit
132  * btree transactions, discard journal buckets, or acquire a priority
133  * (reserved watermark) reservation.
134  */
135 static inline bool
136 journal_error_check_stuck(struct journal *j, int error, unsigned flags)
137 {
138 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
139 	bool stuck = false;
140 	struct printbuf buf = PRINTBUF;
141 
142 	if (!(error == JOURNAL_ERR_journal_full ||
143 	      error == JOURNAL_ERR_journal_pin_full) ||
144 	    nr_unwritten_journal_entries(j) ||
145 	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
146 		return stuck;
147 
148 	spin_lock(&j->lock);
149 
150 	if (j->can_discard) {
151 		spin_unlock(&j->lock);
152 		return stuck;
153 	}
154 
155 	stuck = true;
156 
157 	/*
158 	 * The journal shutdown path will set ->err_seq, but do it here first to
159 	 * serialize against concurrent failures and avoid duplicate error
160 	 * reports.
161 	 */
162 	if (j->err_seq) {
163 		spin_unlock(&j->lock);
164 		return stuck;
165 	}
166 	j->err_seq = journal_cur_seq(j);
167 	spin_unlock(&j->lock);
168 
169 	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
170 		bch2_journal_errors[error]);
171 	bch2_journal_debug_to_text(&buf, j);
172 	bch_err(c, "%s", buf.buf);
173 
174 	printbuf_reset(&buf);
175 	bch2_journal_pins_to_text(&buf, j);
176 	bch_err(c, "Journal pins:\n%s", buf.buf);
177 	printbuf_exit(&buf);
178 
179 	bch2_fatal_error(c);
180 	dump_stack();
181 
182 	return stuck;
183 }
184 
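/*
 * Kick off writes for unwritten journal entries, in sequence number order:
 * wait for an in-flight write to finish allocating before starting another,
 * and only start a new write once the buffer has no outstanding reservations:
 */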
185 void bch2_journal_do_writes(struct journal *j)
186 {
187 	for (u64 seq = journal_last_unwritten_seq(j);
188 	     seq <= journal_cur_seq(j);
189 	     seq++) {
190 		unsigned idx = seq & JOURNAL_BUF_MASK;
191 		struct journal_buf *w = j->buf + idx;
192 
193 		if (w->write_started && !w->write_allocated)
194 			break;
195 		if (w->write_started)
196 			continue;
197 
198 		if (!journal_state_count(j->reservations, idx)) {
199 			w->write_started = true;
200 			closure_call(&w->io, bch2_journal_write, j->wq, NULL);
201 		}
202 
203 		break;
204 	}
205 }
206 
207 /*
208  * Final processing when the last reference of a journal buffer has been
209  * dropped. Drop the pin list reference acquired at journal entry open and write
210  * the buffer, if requested.
211  */
212 void bch2_journal_buf_put_final(struct journal *j, u64 seq)
213 {
214 	lockdep_assert_held(&j->lock);
215 
216 	if (__bch2_journal_pin_put(j, seq))
217 		bch2_journal_reclaim_fast(j);
218 	bch2_journal_do_writes(j);
219 
220 	/*
221 	 * for __bch2_next_write_buffer_flush_journal_buf(), when quiescing an
222 	 * open journal entry
223 	 */
224 	wake_up(&j->wait);
225 }
226 
227 /*
228  * Close the current journal entry:
229  *
230  * We don't close a journal_buf until the next journal_buf is finished writing,
231  * and can be opened again - this also initializes the next journal_buf:
232  */
233 static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
234 {
235 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
236 	struct journal_buf *buf = journal_cur_buf(j);
237 	union journal_res_state old, new;
238 	unsigned sectors;
239 
240 	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
241 	       closed_val != JOURNAL_ENTRY_ERROR_VAL);
242 
243 	lockdep_assert_held(&j->lock);
244 
245 	old.v = atomic64_read(&j->reservations.counter);
246 	do {
247 		new.v = old.v;
248 		new.cur_entry_offset = closed_val;
249 
250 		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
251 		    old.cur_entry_offset == new.cur_entry_offset)
252 			return;
253 	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
254 				       &old.v, new.v));
255 
256 	if (!__journal_entry_is_open(old))
257 		return;
258 
259 	if (old.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL)
260 		old.cur_entry_offset = j->cur_entry_offset_if_blocked;
261 
262 	/* Close out old buffer: */
263 	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);
264 
265 	if (trace_journal_entry_close_enabled() && trace) {
266 		struct printbuf pbuf = PRINTBUF;
267 		pbuf.atomic++;
268 
269 		prt_str(&pbuf, "entry size: ");
270 		prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
271 		prt_newline(&pbuf);
272 		bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
273 		trace_journal_entry_close(c, pbuf.buf);
274 		printbuf_exit(&pbuf);
275 	}
276 
277 	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
278 				      buf->u64s_reserved) << c->block_bits;
279 	BUG_ON(sectors > buf->sectors);
280 	buf->sectors = sectors;
281 
282 	/*
283 	 * We have to set last_seq here, _before_ opening a new journal entry:
284 	 *
285  * A thread may replace an old pin with a new pin on its current
286 	 * journal reservation - the expectation being that the journal will
287 	 * contain either what the old pin protected or what the new pin
288 	 * protects.
289 	 *
290 	 * After the old pin is dropped journal_last_seq() won't include the old
291 	 * pin, so we can only write the updated last_seq on the entry that
292 	 * contains whatever the new pin protects.
293 	 *
294 	 * Restated, we can _not_ update last_seq for a given entry if there
295 	 * could be a newer entry open with reservations/pins that have been
296 	 * taken against it.
297 	 *
298 	 * Hence, we want to update/set last_seq on the current journal entry right
299 	 * before we open a new one:
300 	 */
301 	buf->last_seq		= journal_last_seq(j);
302 	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
303 	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
304 
305 	cancel_delayed_work(&j->write_work);
306 
307 	bch2_journal_space_available(j);
308 
309 	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
310 }
311 
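/*
 * Shut the journal down: mark the current entry as errored so no new
 * reservations can be taken, and record the first sequence number that may
 * not make it to disk in ->err_seq:
 */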
312 void bch2_journal_halt(struct journal *j)
313 {
314 	spin_lock(&j->lock);
315 	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
316 	if (!j->err_seq)
317 		j->err_seq = journal_cur_seq(j);
318 	journal_wake(j);
319 	spin_unlock(&j->lock);
320 }
321 
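/*
 * Close the current journal entry unless there's already a write in flight;
 * in that case, arm the open entry to expire immediately instead. Returns
 * true if the current entry is now closed:
 */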
322 static bool journal_entry_want_write(struct journal *j)
323 {
324 	bool ret = !journal_entry_is_open(j) ||
325 		journal_cur_seq(j) == journal_last_unwritten_seq(j);
326 
327 	/* Don't close it yet if we already have a write in flight: */
328 	if (ret)
329 		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
330 	else if (nr_unwritten_journal_entries(j)) {
331 		struct journal_buf *buf = journal_cur_buf(j);
332 
333 		if (!buf->flush_time) {
334 			buf->flush_time	= local_clock() ?: 1;
335 			buf->expires = jiffies;
336 		}
337 	}
338 
339 	return ret;
340 }
341 
342 bool bch2_journal_entry_close(struct journal *j)
343 {
344 	bool ret;
345 
346 	spin_lock(&j->lock);
347 	ret = journal_entry_want_write(j);
348 	spin_unlock(&j->lock);
349 
350 	return ret;
351 }
352 
353 /*
354  * should _only_ be called from journal_res_get() - when we actually want a
355  * journal reservation - a journal entry being open means the journal is dirty:
356  */
357 static int journal_entry_open(struct journal *j)
358 {
359 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
360 	struct journal_buf *buf = j->buf +
361 		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
362 	union journal_res_state old, new;
363 	int u64s;
364 
365 	lockdep_assert_held(&j->lock);
366 	BUG_ON(journal_entry_is_open(j));
367 	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
368 
369 	if (j->blocked)
370 		return JOURNAL_ERR_blocked;
371 
372 	if (j->cur_entry_error)
373 		return j->cur_entry_error;
374 
375 	if (bch2_journal_error(j))
376 		return JOURNAL_ERR_insufficient_devices; /* -EROFS */
377 
378 	if (!fifo_free(&j->pin))
379 		return JOURNAL_ERR_journal_pin_full;
380 
381 	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
382 		return JOURNAL_ERR_max_in_flight;
383 
384 	if (bch2_fs_fatal_err_on(journal_cur_seq(j) >= JOURNAL_SEQ_MAX,
385 				 c, "cannot start: journal seq overflow"))
386 		return JOURNAL_ERR_insufficient_devices; /* -EROFS */
387 
388 	BUG_ON(!j->cur_entry_sectors);
389 
390 	buf->expires		=
391 		(journal_cur_seq(j) == j->flushed_seq_ondisk
392 		 ? jiffies
393 		 : j->last_flush_write) +
394 		msecs_to_jiffies(c->opts.journal_flush_delay);
395 
396 	buf->u64s_reserved	= j->entry_u64s_reserved;
397 	buf->disk_sectors	= j->cur_entry_sectors;
398 	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);
399 
400 	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
401 		journal_entry_overhead(j);
402 	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
403 
404 	if (u64s <= (ssize_t) j->early_journal_entries.nr)
405 		return JOURNAL_ERR_journal_full;
406 
407 	if (fifo_empty(&j->pin) && j->reclaim_thread)
408 		wake_up_process(j->reclaim_thread);
409 
410 	/*
411 	 * The fifo_push() needs to happen at the same time as j->seq is
412 	 * incremented for journal_last_seq() to be calculated correctly
413 	 */
414 	atomic64_inc(&j->seq);
415 	journal_pin_list_init(fifo_push_ref(&j->pin), 1);
416 
417 	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
418 
419 	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
420 
421 	bkey_extent_init(&buf->key);
422 	buf->noflush		= false;
423 	buf->must_flush		= false;
424 	buf->separate_flush	= false;
425 	buf->flush_time		= 0;
426 	buf->need_flush_to_write_buffer = true;
427 	buf->write_started	= false;
428 	buf->write_allocated	= false;
429 	buf->write_done		= false;
430 
431 	memset(buf->data, 0, sizeof(*buf->data));
432 	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
433 	buf->data->u64s	= 0;
434 
435 	if (j->early_journal_entries.nr) {
436 		memcpy(buf->data->_data, j->early_journal_entries.data,
437 		       j->early_journal_entries.nr * sizeof(u64));
438 		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
439 	}
440 
441 	/*
442 	 * Must be set before marking the journal entry as open:
443 	 */
444 	j->cur_entry_u64s = u64s;
445 
446 	old.v = atomic64_read(&j->reservations.counter);
447 	do {
448 		new.v = old.v;
449 
450 		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
451 
452 		new.idx++;
453 		BUG_ON(journal_state_count(new, new.idx));
454 		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
455 
456 		journal_state_inc(&new);
457 
458 		/* Handle any already added entries */
459 		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
460 	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
461 				       &old.v, new.v));
462 
463 	if (nr_unwritten_journal_entries(j) == 1)
464 		mod_delayed_work(j->wq,
465 				 &j->write_work,
466 				 msecs_to_jiffies(c->opts.journal_flush_delay));
467 	journal_wake(j);
468 
469 	if (j->early_journal_entries.nr)
470 		darray_exit(&j->early_journal_entries);
471 	return 0;
472 }
473 
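/*
 * The journal is quiesced once every entry, including the current one, has
 * been written to disk; closing the current entry nudges that along:
 */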
474 static bool journal_quiesced(struct journal *j)
475 {
476 	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
477 
478 	if (!ret)
479 		bch2_journal_entry_close(j);
480 	return ret;
481 }
482 
483 static void journal_quiesce(struct journal *j)
484 {
485 	wait_event(j->wait, journal_quiesced(j));
486 }
487 
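/*
 * Delayed work for writing out the current journal entry once its flush
 * timer (buf->expires) has elapsed; if it hasn't yet, rearm the timer:
 */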
488 static void journal_write_work(struct work_struct *work)
489 {
490 	struct journal *j = container_of(work, struct journal, write_work.work);
491 
492 	spin_lock(&j->lock);
493 	if (__journal_entry_is_open(j->reservations)) {
494 		long delta = journal_cur_buf(j)->expires - jiffies;
495 
496 		if (delta > 0)
497 			mod_delayed_work(j->wq, &j->write_work, delta);
498 		else
499 			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
500 	}
501 	spin_unlock(&j->lock);
502 }
503 
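/*
 * Reservation slowpath: retry the fastpath under the journal lock, closing
 * the current entry and opening a new one if it was full, and kick off
 * discards/reclaim when the journal itself is out of space:
 */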
504 static int __journal_res_get(struct journal *j, struct journal_res *res,
505 			     unsigned flags)
506 {
507 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
508 	struct journal_buf *buf;
509 	bool can_discard;
510 	int ret;
511 retry:
512 	if (journal_res_get_fast(j, res, flags))
513 		return 0;
514 
515 	if (bch2_journal_error(j))
516 		return -BCH_ERR_erofs_journal_err;
517 
518 	if (j->blocked)
519 		return -BCH_ERR_journal_res_get_blocked;
520 
521 	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
522 		ret = JOURNAL_ERR_journal_full;
523 		can_discard = j->can_discard;
524 		goto out;
525 	}
526 
527 	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
528 		ret = JOURNAL_ERR_max_in_flight;
529 		goto out;
530 	}
531 
532 	spin_lock(&j->lock);
533 
534 	/*
535 	 * Recheck after taking the lock, so we don't race with another thread
536 	 * that just did journal_entry_open() and call bch2_journal_entry_close()
537 	 * unnecessarily
538 	 */
539 	if (journal_res_get_fast(j, res, flags)) {
540 		ret = 0;
541 		goto unlock;
542 	}
543 
544 	/*
545 	 * If we couldn't get a reservation because the current buf filled up,
546 	 * and we had room for a bigger entry on disk, signal that we want to
547 	 * realloc the journal bufs:
548 	 */
549 	buf = journal_cur_buf(j);
550 	if (journal_entry_is_open(j) &&
551 	    buf->buf_size >> 9 < buf->disk_sectors &&
552 	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
553 		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
554 
555 	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
556 	ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
557 unlock:
558 	can_discard = j->can_discard;
559 	spin_unlock(&j->lock);
560 out:
561 	if (ret == JOURNAL_ERR_retry)
562 		goto retry;
563 	if (!ret)
564 		return 0;
565 
566 	if (journal_error_check_stuck(j, ret, flags))
567 		ret = -BCH_ERR_journal_res_get_blocked;
568 
569 	if (ret == JOURNAL_ERR_max_in_flight &&
570 	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {
571 
572 		struct printbuf buf = PRINTBUF;
573 		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
574 		bch2_journal_bufs_to_text(&buf, j);
575 		trace_journal_entry_full(c, buf.buf);
576 		printbuf_exit(&buf);
577 		count_event(c, journal_entry_full);
578 	}
579 
580 	/*
581 	 * Journal is full - can't rely on reclaim from work item due to
582 	 * freezing:
583 	 */
584 	if ((ret == JOURNAL_ERR_journal_full ||
585 	     ret == JOURNAL_ERR_journal_pin_full) &&
586 	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
587 		if (can_discard) {
588 			bch2_journal_do_discards(j);
589 			goto retry;
590 		}
591 
592 		if (mutex_trylock(&j->reclaim_lock)) {
593 			bch2_journal_reclaim(j);
594 			mutex_unlock(&j->reclaim_lock);
595 		}
596 	}
597 
598 	return ret == JOURNAL_ERR_insufficient_devices
599 		? -BCH_ERR_erofs_journal_err
600 		: -BCH_ERR_journal_res_get_blocked;
601 }
602 
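/* Worst-case recent write latency across rw member devices, in jiffies: */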
603 static unsigned max_dev_latency(struct bch_fs *c)
604 {
605 	u64 nsecs = 0;
606 
607 	for_each_rw_member(c, ca)
608 		nsecs = max(nsecs, ca->io_latency[WRITE].stats.max_duration);
609 
610 	return nsecs_to_jiffies(nsecs);
611 }
612 
613 /*
614  * Essentially the entry function to the journaling code. When bcachefs is doing
615  * a btree insert, it calls this function to get the current journal write.
616  * A journal write is the structure used to set up journal writes. The calling
617  * function will then add its keys to the structure, queuing them for the next
618  * write.
619  *
620  * To ensure forward progress, the current task must not be holding any
621  * btree node write locks.
622  */
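/*
 * A minimal usage sketch (mirroring __bch2_journal_meta() below; sizes and
 * flags will vary by caller):
 *
 *	struct journal_res res = {};
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
 *	if (!ret) {
 *		... add keys to the reservation ...
 *		bch2_journal_res_put(j, &res);
 *	}
 */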
623 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
624 				  unsigned flags,
625 				  struct btree_trans *trans)
626 {
627 	int ret;
628 
629 	if (closure_wait_event_timeout(&j->async_wait,
630 		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
631 		   (flags & JOURNAL_RES_GET_NONBLOCK),
632 		   HZ))
633 		return ret;
634 
635 	if (trans)
636 		bch2_trans_unlock_long(trans);
637 
638 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
639 	int remaining_wait = max(max_dev_latency(c) * 2, HZ * 10);
640 
641 	remaining_wait = max(0, remaining_wait - HZ);
642 
643 	if (closure_wait_event_timeout(&j->async_wait,
644 		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
645 		   (flags & JOURNAL_RES_GET_NONBLOCK),
646 		   remaining_wait))
647 		return ret;
648 
649 	struct printbuf buf = PRINTBUF;
650 	bch2_journal_debug_to_text(&buf, j);
651 	bch_err(c, "Journal stuck? Waited for 10 seconds...\n%s",
652 		buf.buf);
653 	printbuf_exit(&buf);
654 
655 	closure_wait_event(&j->async_wait,
656 		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
657 		   (flags & JOURNAL_RES_GET_NONBLOCK));
658 	return ret;
659 }
660 
661 /* journal_entry_res: */
662 
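/*
 * Resize a per-entry reservation: if it grew and the currently open entry no
 * longer has room for it, close that entry and let the new reservation take
 * effect in the next one:
 */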
663 void bch2_journal_entry_res_resize(struct journal *j,
664 				   struct journal_entry_res *res,
665 				   unsigned new_u64s)
666 {
667 	union journal_res_state state;
668 	int d = new_u64s - res->u64s;
669 
670 	spin_lock(&j->lock);
671 
672 	j->entry_u64s_reserved += d;
673 	if (d <= 0)
674 		goto out;
675 
676 	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
677 	smp_mb();
678 	state = READ_ONCE(j->reservations);
679 
680 	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
681 	    state.cur_entry_offset > j->cur_entry_u64s) {
682 		j->cur_entry_u64s += d;
683 		/*
684 		 * Not enough room in current journal entry, have to flush it:
685 		 */
686 		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
687 	} else {
688 		journal_cur_buf(j)->u64s_reserved += d;
689 	}
690 out:
691 	spin_unlock(&j->lock);
692 	res->u64s += d;
693 }
694 
695 /* journal flushing: */
696 
697 /**
698  * bch2_journal_flush_seq_async - wait for a journal entry to be written
699  * @j:		journal object
700  * @seq:	seq to flush
701  * @parent:	closure object to wait with
702  * Returns:	1 if @seq has already been flushed, 0 if @seq is being flushed,
703  *		-BCH_ERR_journal_flush_err if @seq will never be flushed
704  *
705  * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
706  * necessary
707  */
708 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
709 				 struct closure *parent)
710 {
711 	struct journal_buf *buf;
712 	int ret = 0;
713 
714 	if (seq <= j->flushed_seq_ondisk)
715 		return 1;
716 
717 	spin_lock(&j->lock);
718 
719 	if (WARN_ONCE(seq > journal_cur_seq(j),
720 		      "requested to flush journal seq %llu, but currently at %llu",
721 		      seq, journal_cur_seq(j)))
722 		goto out;
723 
724 	/* Recheck under lock: */
725 	if (j->err_seq && seq >= j->err_seq) {
726 		ret = -BCH_ERR_journal_flush_err;
727 		goto out;
728 	}
729 
730 	if (seq <= j->flushed_seq_ondisk) {
731 		ret = 1;
732 		goto out;
733 	}
734 
735 	/* if seq was written, but not flushed - flush a newer one instead */
736 	seq = max(seq, journal_last_unwritten_seq(j));
737 
738 recheck_need_open:
739 	if (seq > journal_cur_seq(j)) {
740 		struct journal_res res = { 0 };
741 
742 		if (journal_entry_is_open(j))
743 			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
744 
745 		spin_unlock(&j->lock);
746 
747 		/*
748 		 * We're called from bch2_journal_flush_seq() -> wait_event();
749 		 * but this might block. We won't usually block, so we won't
750 		 * livelock:
751 		 */
752 		sched_annotate_sleep();
753 		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
754 		if (ret)
755 			return ret;
756 
757 		seq = res.seq;
758 		buf = journal_seq_to_buf(j, seq);
759 		buf->must_flush = true;
760 
761 		if (!buf->flush_time) {
762 			buf->flush_time	= local_clock() ?: 1;
763 			buf->expires = jiffies;
764 		}
765 
766 		if (parent && !closure_wait(&buf->wait, parent))
767 			BUG();
768 
769 		bch2_journal_res_put(j, &res);
770 
771 		spin_lock(&j->lock);
772 		goto want_write;
773 	}
774 
775 	/*
776 	 * if write was kicked off without a flush, or if we promised it
777 	 * wouldn't be a flush, flush the next sequence number instead
778 	 */
779 	buf = journal_seq_to_buf(j, seq);
780 	if (buf->noflush) {
781 		seq++;
782 		goto recheck_need_open;
783 	}
784 
785 	buf->must_flush = true;
786 
787 	if (parent && !closure_wait(&buf->wait, parent))
788 		BUG();
789 want_write:
790 	if (seq == journal_cur_seq(j))
791 		journal_entry_want_write(j);
792 out:
793 	spin_unlock(&j->lock);
794 	return ret;
795 }
796 
797 int bch2_journal_flush_seq(struct journal *j, u64 seq, unsigned task_state)
798 {
799 	u64 start_time = local_clock();
800 	int ret, ret2;
801 
802 	/*
803 	 * Don't update time_stats when @seq is already flushed:
804 	 */
805 	if (seq <= j->flushed_seq_ondisk)
806 		return 0;
807 
808 	ret = wait_event_state(j->wait,
809 			       (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)),
810 			       task_state);
811 
812 	if (!ret)
813 		bch2_time_stats_update(j->flush_seq_time, start_time);
814 
815 	return ret ?: ret2 < 0 ? ret2 : 0;
816 }
817 
818 /*
819  * bch2_journal_flush_async - if there is an open journal entry, or a journal
820  * still being written, write it and wait for the write to complete
821  */
822 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
823 {
824 	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
825 }
826 
827 int bch2_journal_flush(struct journal *j)
828 {
829 	return bch2_journal_flush_seq(j, atomic64_read(&j->seq), TASK_UNINTERRUPTIBLE);
830 }
831 
832 /*
833  * bch2_journal_noflush_seq - ask the journal not to issue any flushes in the
834  * range [start, end)
836  */
837 bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end)
838 {
839 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
840 	u64 unwritten_seq;
841 	bool ret = false;
842 
843 	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
844 		return false;
845 
846 	if (c->journal.flushed_seq_ondisk >= start)
847 		return false;
848 
849 	spin_lock(&j->lock);
850 	if (c->journal.flushed_seq_ondisk >= start)
851 		goto out;
852 
853 	for (unwritten_seq = journal_last_unwritten_seq(j);
854 	     unwritten_seq < end;
855 	     unwritten_seq++) {
856 		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);
857 
858 		/* journal flush already in flight, or flush requested */
859 		if (buf->must_flush)
860 			goto out;
861 
862 		buf->noflush = true;
863 	}
864 
865 	ret = true;
866 out:
867 	spin_unlock(&j->lock);
868 	return ret;
869 }
870 
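/*
 * Write and flush an (otherwise empty) journal entry: used to persist
 * journal state without writing any new keys:
 */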
871 static int __bch2_journal_meta(struct journal *j)
872 {
873 	struct journal_res res = {};
874 	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
875 	if (ret)
876 		return ret;
877 
878 	struct journal_buf *buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
879 	buf->must_flush = true;
880 
881 	if (!buf->flush_time) {
882 		buf->flush_time	= local_clock() ?: 1;
883 		buf->expires = jiffies;
884 	}
885 
886 	bch2_journal_res_put(j, &res);
887 
888 	return bch2_journal_flush_seq(j, res.seq, TASK_UNINTERRUPTIBLE);
889 }
890 
891 int bch2_journal_meta(struct journal *j)
892 {
893 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
894 
895 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal))
896 		return -EROFS;
897 
898 	int ret = __bch2_journal_meta(j);
899 	bch2_write_ref_put(c, BCH_WRITE_REF_journal);
900 	return ret;
901 }
902 
903 /* block/unblock the journal: */
904 
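/*
 * While the journal is blocked, the current entry's offset is stashed in
 * j->cur_entry_offset_if_blocked and replaced with JOURNAL_ENTRY_BLOCKED_VAL,
 * preventing new reservations; the last unblocker restores it:
 */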
905 void bch2_journal_unblock(struct journal *j)
906 {
907 	spin_lock(&j->lock);
908 	if (!--j->blocked &&
909 	    j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL &&
910 	    j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) {
911 		union journal_res_state old, new;
912 
913 		old.v = atomic64_read(&j->reservations.counter);
914 		do {
915 			new.v = old.v;
916 			new.cur_entry_offset = j->cur_entry_offset_if_blocked;
917 		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
918 	}
919 	spin_unlock(&j->lock);
920 
921 	journal_wake(j);
922 }
923 
924 static void __bch2_journal_block(struct journal *j)
925 {
926 	if (!j->blocked++) {
927 		union journal_res_state old, new;
928 
929 		old.v = atomic64_read(&j->reservations.counter);
930 		do {
931 			j->cur_entry_offset_if_blocked = old.cur_entry_offset;
932 
933 			if (j->cur_entry_offset_if_blocked >= JOURNAL_ENTRY_CLOSED_VAL)
934 				break;
935 
936 			new.v = old.v;
937 			new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL;
938 		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
939 
940 		journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset);
941 	}
942 }
943 
944 void bch2_journal_block(struct journal *j)
945 {
946 	spin_lock(&j->lock);
947 	__bch2_journal_block(j);
948 	spin_unlock(&j->lock);
949 
950 	journal_quiesce(j);
951 }
952 
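/*
 * Find the next journal buffer that still needs flushing to the btree write
 * buffer: blocks the journal if that buffer is the currently open entry, and
 * returns ERR_PTR(-EAGAIN) while it still has outstanding reservations:
 */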
953 static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j,
954 						u64 max_seq, bool *blocked)
955 {
956 	struct journal_buf *ret = NULL;
957 
958 	/* We're inside wait_event(), but using mutex_lock(): */
959 	sched_annotate_sleep();
960 	mutex_lock(&j->buf_lock);
961 	spin_lock(&j->lock);
962 	max_seq = min(max_seq, journal_cur_seq(j));
963 
964 	for (u64 seq = journal_last_unwritten_seq(j);
965 	     seq <= max_seq;
966 	     seq++) {
967 		unsigned idx = seq & JOURNAL_BUF_MASK;
968 		struct journal_buf *buf = j->buf + idx;
969 
970 		if (buf->need_flush_to_write_buffer) {
971 			union journal_res_state s;
972 			s.v = atomic64_read_acquire(&j->reservations.counter);
973 
974 			unsigned open = seq == journal_cur_seq(j) && __journal_entry_is_open(s);
975 
976 			if (open && !*blocked) {
977 				__bch2_journal_block(j);
978 				*blocked = true;
979 			}
980 
981 			ret = journal_state_count(s, idx) > open
982 				? ERR_PTR(-EAGAIN)
983 				: buf;
984 			break;
985 		}
986 	}
987 
988 	spin_unlock(&j->lock);
989 	if (IS_ERR_OR_NULL(ret))
990 		mutex_unlock(&j->buf_lock);
991 	return ret;
992 }
993 
994 struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j,
995 							     u64 max_seq, bool *blocked)
996 {
997 	struct journal_buf *ret;
998 	*blocked = false;
999 
1000 	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j,
1001 						max_seq, blocked)) != ERR_PTR(-EAGAIN));
1002 	if (IS_ERR_OR_NULL(ret) && *blocked)
1003 		bch2_journal_unblock(j);
1004 
1005 	return ret;
1006 }
1007 
1008 /* allocate journal on a device: */
1009 
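/*
 * Grow a device's journal to @nr buckets: allocate the new buckets, mark
 * them as journal data, splice them into ja->buckets at the discard cursor,
 * and write out the updated superblock journal fields:
 */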
1010 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
1011 					 bool new_fs, struct closure *cl)
1012 {
1013 	struct bch_fs *c = ca->fs;
1014 	struct journal_device *ja = &ca->journal;
1015 	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
1016 	struct open_bucket **ob = NULL;
1017 	long *bu = NULL;
1018 	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
1019 	int ret = 0;
1020 
1021 	BUG_ON(nr <= ja->nr);
1022 
1023 	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
1024 	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
1025 	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
1026 	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
1027 	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
1028 		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
1029 		goto err_free;
1030 	}
1031 
1032 	for (nr_got = 0; nr_got < nr_want; nr_got++) {
1033 		enum bch_watermark watermark = new_fs
1034 			? BCH_WATERMARK_btree
1035 			: BCH_WATERMARK_normal;
1036 
1037 		ob[nr_got] = bch2_bucket_alloc(c, ca, watermark,
1038 					       BCH_DATA_journal, cl);
1039 		ret = PTR_ERR_OR_ZERO(ob[nr_got]);
1040 		if (ret)
1041 			break;
1042 
1043 		if (!new_fs) {
1044 			ret = bch2_trans_run(c,
1045 				bch2_trans_mark_metadata_bucket(trans, ca,
1046 						ob[nr_got]->bucket, BCH_DATA_journal,
1047 						ca->mi.bucket_size, BTREE_TRIGGER_transactional));
1048 			if (ret) {
1049 				bch2_open_bucket_put(c, ob[nr_got]);
1050 				bch_err_msg(c, ret, "marking new journal buckets");
1051 				break;
1052 			}
1053 		}
1054 
1055 		bu[nr_got] = ob[nr_got]->bucket;
1056 	}
1057 
1058 	if (!nr_got)
1059 		goto err_free;
1060 
1061 	/* Don't return an error if we successfully allocated some buckets: */
1062 	ret = 0;
1063 
1064 	if (c) {
1065 		bch2_journal_flush_all_pins(&c->journal);
1066 		bch2_journal_block(&c->journal);
1067 		mutex_lock(&c->sb_lock);
1068 	}
1069 
1070 	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
1071 	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
1072 
1073 	BUG_ON(ja->discard_idx > ja->nr);
1074 
1075 	pos = ja->discard_idx ?: ja->nr;
1076 
1077 	memmove(new_buckets + pos + nr_got,
1078 		new_buckets + pos,
1079 		sizeof(new_buckets[0]) * (ja->nr - pos));
1080 	memmove(new_bucket_seq + pos + nr_got,
1081 		new_bucket_seq + pos,
1082 		sizeof(new_bucket_seq[0]) * (ja->nr - pos));
1083 
1084 	for (i = 0; i < nr_got; i++) {
1085 		new_buckets[pos + i] = bu[i];
1086 		new_bucket_seq[pos + i] = 0;
1087 	}
1088 
1089 	nr = ja->nr + nr_got;
1090 
1091 	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
1092 	if (ret)
1093 		goto err_unblock;
1094 
1095 	bch2_write_super(c);
1096 
1097 	/* Commit: */
1098 	if (c)
1099 		spin_lock(&c->journal.lock);
1100 
1101 	swap(new_buckets,	ja->buckets);
1102 	swap(new_bucket_seq,	ja->bucket_seq);
1103 	ja->nr = nr;
1104 
1105 	if (pos <= ja->discard_idx)
1106 		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
1107 	if (pos <= ja->dirty_idx_ondisk)
1108 		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
1109 	if (pos <= ja->dirty_idx)
1110 		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
1111 	if (pos <= ja->cur_idx)
1112 		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
1113 
1114 	if (c)
1115 		spin_unlock(&c->journal.lock);
1116 err_unblock:
1117 	if (c) {
1118 		bch2_journal_unblock(&c->journal);
1119 		mutex_unlock(&c->sb_lock);
1120 	}
1121 
1122 	if (ret && !new_fs)
1123 		for (i = 0; i < nr_got; i++)
1124 			bch2_trans_run(c,
1125 				bch2_trans_mark_metadata_bucket(trans, ca,
1126 						bu[i], BCH_DATA_free, 0,
1127 						BTREE_TRIGGER_transactional));
1128 err_free:
1129 	for (i = 0; i < nr_got; i++)
1130 		bch2_open_bucket_put(c, ob[i]);
1131 
1132 	kfree(new_bucket_seq);
1133 	kfree(new_buckets);
1134 	kfree(ob);
1135 	kfree(bu);
1136 	return ret;
1137 }
1138 
1139 /*
1140  * Allocate more journal space at runtime - not currently making use of it, but
1141  * the code works:
1142  */
1143 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
1144 				unsigned nr)
1145 {
1146 	struct journal_device *ja = &ca->journal;
1147 	struct closure cl;
1148 	int ret = 0;
1149 
1150 	closure_init_stack(&cl);
1151 
1152 	down_write(&c->state_lock);
1153 
1154 	/* don't handle reducing nr of buckets yet: */
1155 	if (nr < ja->nr)
1156 		goto unlock;
1157 
1158 	while (ja->nr < nr) {
1159 		struct disk_reservation disk_res = { 0, 0, 0 };
1160 
1161 		/*
1162 		 * note: journal buckets aren't really counted as _sectors_ used yet, so
1163 		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
1164 		 * when space used goes up without a reservation - but we do need the
1165 		 * reservation to ensure we'll actually be able to allocate:
1166 		 *
1167 		 * XXX: that's not right, disk reservations only ensure a
1168 		 * filesystem-wide allocation will succeed, this is a device
1169 		 * specific allocation - we can hang here:
1170 		 */
1171 
1172 		ret = bch2_disk_reservation_get(c, &disk_res,
1173 						bucket_to_sector(ca, nr - ja->nr), 1, 0);
1174 		if (ret)
1175 			break;
1176 
1177 		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
1178 
1179 		bch2_disk_reservation_put(c, &disk_res);
1180 
1181 		closure_sync(&cl);
1182 
1183 		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
1184 			break;
1185 	}
1186 
1187 	bch_err_fn(c, ret);
1188 unlock:
1189 	up_write(&c->state_lock);
1190 	return ret;
1191 }
1192 
1193 int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
1194 {
1195 	unsigned nr;
1196 	int ret;
1197 
1198 	if (dynamic_fault("bcachefs:add:journal_alloc")) {
1199 		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
1200 		goto err;
1201 	}
1202 
1203 	/* 1/128th of the device by default: */
1204 	nr = ca->mi.nbuckets >> 7;
1205 
1206 	/*
1207 	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
1208 	 * is smaller:
1209 	 */
1210 	nr = clamp_t(unsigned, nr,
1211 		     BCH_JOURNAL_BUCKETS_MIN,
1212 		     min(1 << 13,
1213 			 (1 << 24) / ca->mi.bucket_size));
1214 
1215 	ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
1216 err:
1217 	bch_err_fn(ca, ret);
1218 	return ret;
1219 }
1220 
1221 int bch2_fs_journal_alloc(struct bch_fs *c)
1222 {
1223 	for_each_online_member(c, ca) {
1224 		if (ca->journal.nr)
1225 			continue;
1226 
1227 		int ret = bch2_dev_journal_alloc(ca, true);
1228 		if (ret) {
1229 			percpu_ref_put(&ca->io_ref);
1230 			return ret;
1231 		}
1232 	}
1233 
1234 	return 0;
1235 }
1236 
1237 /* startup/shutdown: */
1238 
1239 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
1240 {
1241 	bool ret = false;
1242 	u64 seq;
1243 
1244 	spin_lock(&j->lock);
1245 	for (seq = journal_last_unwritten_seq(j);
1246 	     seq <= journal_cur_seq(j) && !ret;
1247 	     seq++) {
1248 		struct journal_buf *buf = journal_seq_to_buf(j, seq);
1249 
1250 		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
1251 			ret = true;
1252 	}
1253 	spin_unlock(&j->lock);
1254 
1255 	return ret;
1256 }
1257 
1258 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
1259 {
1260 	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
1261 }
1262 
1263 void bch2_fs_journal_stop(struct journal *j)
1264 {
1265 	if (!test_bit(JOURNAL_running, &j->flags))
1266 		return;
1267 
1268 	bch2_journal_reclaim_stop(j);
1269 	bch2_journal_flush_all_pins(j);
1270 
1271 	wait_event(j->wait, bch2_journal_entry_close(j));
1272 
1273 	/*
1274 	 * Always write a new journal entry, to make sure the clock hands are up
1275 	 * to date (and match the superblock)
1276 	 */
1277 	__bch2_journal_meta(j);
1278 
1279 	journal_quiesce(j);
1280 	cancel_delayed_work_sync(&j->write_work);
1281 
1282 	WARN(!bch2_journal_error(j) &&
1283 	     test_bit(JOURNAL_replay_done, &j->flags) &&
1284 	     j->last_empty_seq != journal_cur_seq(j),
1285 	     "journal shutdown error: cur seq %llu but last empty seq %llu",
1286 	     journal_cur_seq(j), j->last_empty_seq);
1287 
1288 	if (!bch2_journal_error(j))
1289 		clear_bit(JOURNAL_running, &j->flags);
1290 }
1291 
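/*
 * Start the journal after recovery: size the pin fifo for the sequence
 * numbers found in the journal we just read, initialize pin lists and
 * per-entry device lists from those entries, and mark the journal as
 * running:
 */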
1292 int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
1293 {
1294 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
1295 	struct journal_entry_pin_list *p;
1296 	struct journal_replay *i, **_i;
1297 	struct genradix_iter iter;
1298 	bool had_entries = false;
1299 	u64 last_seq = cur_seq, nr, seq;
1300 
1301 	if (cur_seq >= JOURNAL_SEQ_MAX) {
1302 		bch_err(c, "cannot start: journal seq overflow");
1303 		return -EINVAL;
1304 	}
1305 
1306 	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
1307 		i = *_i;
1308 
1309 		if (journal_replay_ignore(i))
1310 			continue;
1311 
1312 		last_seq = le64_to_cpu(i->j.last_seq);
1313 		break;
1314 	}
1315 
1316 	nr = cur_seq - last_seq;
1317 
1318 	if (nr + 1 > j->pin.size) {
1319 		free_fifo(&j->pin);
1320 		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
1321 		if (!j->pin.data) {
1322 			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
1323 			return -BCH_ERR_ENOMEM_journal_pin_fifo;
1324 		}
1325 	}
1326 
1327 	j->replay_journal_seq	= last_seq;
1328 	j->replay_journal_seq_end = cur_seq;
1329 	j->last_seq_ondisk	= last_seq;
1330 	j->flushed_seq_ondisk	= cur_seq - 1;
1331 	j->seq_ondisk		= cur_seq - 1;
1332 	j->pin.front		= last_seq;
1333 	j->pin.back		= cur_seq;
1334 	atomic64_set(&j->seq, cur_seq - 1);
1335 
1336 	fifo_for_each_entry_ptr(p, &j->pin, seq)
1337 		journal_pin_list_init(p, 1);
1338 
1339 	genradix_for_each(&c->journal_entries, iter, _i) {
1340 		i = *_i;
1341 
1342 		if (journal_replay_ignore(i))
1343 			continue;
1344 
1345 		seq = le64_to_cpu(i->j.seq);
1346 		BUG_ON(seq >= cur_seq);
1347 
1348 		if (seq < last_seq)
1349 			continue;
1350 
1351 		if (journal_entry_empty(&i->j))
1352 			j->last_empty_seq = le64_to_cpu(i->j.seq);
1353 
1354 		p = journal_seq_pin(j, seq);
1355 
1356 		p->devs.nr = 0;
1357 		darray_for_each(i->ptrs, ptr)
1358 			bch2_dev_list_add_dev(&p->devs, ptr->dev);
1359 
1360 		had_entries = true;
1361 	}
1362 
1363 	if (!had_entries)
1364 		j->last_empty_seq = cur_seq - 1; /* to match j->seq */
1365 
1366 	spin_lock(&j->lock);
1367 
1368 	set_bit(JOURNAL_running, &j->flags);
1369 	j->last_flush_write = jiffies;
1370 
1371 	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
1372 	j->reservations.unwritten_idx++;
1373 
1374 	c->last_bucket_seq_cleanup = journal_cur_seq(j);
1375 
1376 	bch2_journal_space_available(j);
1377 	spin_unlock(&j->lock);
1378 
1379 	return bch2_journal_reclaim_start(j);
1380 }
1381 
1382 /* init/exit: */
1383 
1384 void bch2_dev_journal_exit(struct bch_dev *ca)
1385 {
1386 	struct journal_device *ja = &ca->journal;
1387 
1388 	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
1389 		kfree(ja->bio[i]);
1390 		ja->bio[i] = NULL;
1391 	}
1392 
1393 	kfree(ja->buckets);
1394 	kfree(ja->bucket_seq);
1395 	ja->buckets	= NULL;
1396 	ja->bucket_seq	= NULL;
1397 }
1398 
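/*
 * Read a device's journal bucket list from its superblock - from either the
 * original journal field or the journal_v2 ranges - and allocate the bios
 * used for journal writes:
 */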
1399 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1400 {
1401 	struct journal_device *ja = &ca->journal;
1402 	struct bch_sb_field_journal *journal_buckets =
1403 		bch2_sb_field_get(sb, journal);
1404 	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
1405 		bch2_sb_field_get(sb, journal_v2);
1406 
1407 	ja->nr = 0;
1408 
1409 	if (journal_buckets_v2) {
1410 		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1411 
1412 		for (unsigned i = 0; i < nr; i++)
1413 			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
1414 	} else if (journal_buckets) {
1415 		ja->nr = bch2_nr_journal_buckets(journal_buckets);
1416 	}
1417 
1418 	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1419 	if (!ja->bucket_seq)
1420 		return -BCH_ERR_ENOMEM_dev_journal_init;
1421 
1422 	unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
1423 
1424 	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
1425 		ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
1426 				     nr_bvecs), GFP_KERNEL);
1427 		if (!ja->bio[i])
1428 			return -BCH_ERR_ENOMEM_dev_journal_init;
1429 
1430 		ja->bio[i]->ca = ca;
1431 		ja->bio[i]->buf_idx = i;
1432 		bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
1433 	}
1434 
1435 	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1436 	if (!ja->buckets)
1437 		return -BCH_ERR_ENOMEM_dev_journal_init;
1438 
1439 	if (journal_buckets_v2) {
1440 		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1441 		unsigned dst = 0;
1442 
1443 		for (unsigned i = 0; i < nr; i++)
1444 			for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
1445 				ja->buckets[dst++] =
1446 					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
1447 	} else if (journal_buckets) {
1448 		for (unsigned i = 0; i < ja->nr; i++)
1449 			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1450 	}
1451 
1452 	return 0;
1453 }
1454 
1455 void bch2_fs_journal_exit(struct journal *j)
1456 {
1457 	if (j->wq)
1458 		destroy_workqueue(j->wq);
1459 
1460 	darray_exit(&j->early_journal_entries);
1461 
1462 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
1463 		kvfree(j->buf[i].data);
1464 	free_fifo(&j->pin);
1465 }
1466 
1467 int bch2_fs_journal_init(struct journal *j)
1468 {
1469 	static struct lock_class_key res_key;
1470 
1471 	mutex_init(&j->buf_lock);
1472 	spin_lock_init(&j->lock);
1473 	spin_lock_init(&j->err_lock);
1474 	init_waitqueue_head(&j->wait);
1475 	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1476 	init_waitqueue_head(&j->reclaim_wait);
1477 	init_waitqueue_head(&j->pin_flush_wait);
1478 	mutex_init(&j->reclaim_lock);
1479 	mutex_init(&j->discard_lock);
1480 
1481 	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1482 
1483 	atomic64_set(&j->reservations.counter,
1484 		((union journal_res_state)
1485 		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1486 
1487 	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
1488 		return -BCH_ERR_ENOMEM_journal_pin_fifo;
1489 
1490 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
1491 		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
1492 		j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
1493 		if (!j->buf[i].data)
1494 			return -BCH_ERR_ENOMEM_journal_buf;
1495 		j->buf[i].idx = i;
1496 	}
1497 
1498 	j->pin.front = j->pin.back = 1;
1499 
1500 	j->wq = alloc_workqueue("bcachefs_journal",
1501 				WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
1502 	if (!j->wq)
1503 		return -BCH_ERR_ENOMEM_fs_other_alloc;
1504 	return 0;
1505 }
1506 
1507 /* debug: */
1508 
1509 static const char * const bch2_journal_flags_strs[] = {
1510 #define x(n)	#n,
1511 	JOURNAL_FLAGS()
1512 #undef x
1513 	NULL
1514 };
1515 
1516 void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1517 {
1518 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
1519 	union journal_res_state s;
1520 	unsigned long now = jiffies;
1521 	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
1522 
1523 	printbuf_tabstops_reset(out);
1524 	printbuf_tabstop_push(out, 28);
1525 	out->atomic++;
1526 
1527 	rcu_read_lock();
1528 	s = READ_ONCE(j->reservations);
1529 
1530 	prt_printf(out, "flags:\t");
1531 	prt_bitflags(out, bch2_journal_flags_strs, j->flags);
1532 	prt_newline(out);
1533 	prt_printf(out, "dirty journal entries:\t%llu/%llu\n",	fifo_used(&j->pin), j->pin.size);
1534 	prt_printf(out, "seq:\t%llu\n",				journal_cur_seq(j));
1535 	prt_printf(out, "seq_ondisk:\t%llu\n",			j->seq_ondisk);
1536 	prt_printf(out, "last_seq:\t%llu\n",			journal_last_seq(j));
1537 	prt_printf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
1538 	prt_printf(out, "flushed_seq_ondisk:\t%llu\n",		j->flushed_seq_ondisk);
1539 	prt_printf(out, "watermark:\t%s\n",			bch2_watermarks[j->watermark]);
1540 	prt_printf(out, "each entry reserved:\t%u\n",		j->entry_u64s_reserved);
1541 	prt_printf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
1542 	prt_printf(out, "nr noflush writes:\t%llu\n",		j->nr_noflush_writes);
1543 	prt_printf(out, "average write size:\t");
1544 	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
1545 	prt_newline(out);
1546 	prt_printf(out, "nr direct reclaim:\t%llu\n",		j->nr_direct_reclaim);
1547 	prt_printf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
1548 	prt_printf(out, "reclaim kicked:\t%u\n",		j->reclaim_kicked);
1549 	prt_printf(out, "reclaim runs in:\t%u ms\n",		time_after(j->next_reclaim, now)
1550 	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
1551 	prt_printf(out, "blocked:\t%u\n",			j->blocked);
1552 	prt_printf(out, "current entry sectors:\t%u\n",		j->cur_entry_sectors);
1553 	prt_printf(out, "current entry error:\t%s\n",		bch2_journal_errors[j->cur_entry_error]);
1554 	prt_printf(out, "current entry:\t");
1555 
1556 	switch (s.cur_entry_offset) {
1557 	case JOURNAL_ENTRY_ERROR_VAL:
1558 		prt_printf(out, "error\n");
1559 		break;
1560 	case JOURNAL_ENTRY_CLOSED_VAL:
1561 		prt_printf(out, "closed\n");
1562 		break;
1563 	case JOURNAL_ENTRY_BLOCKED_VAL:
1564 		prt_printf(out, "blocked\n");
1565 		break;
1566 	default:
1567 		prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
1568 		break;
1569 	}
1570 
1571 	prt_printf(out, "unwritten entries:\n");
1572 	bch2_journal_bufs_to_text(out, j);
1573 
1574 	prt_printf(out, "space:\n");
1575 	printbuf_indent_add(out, 2);
1576 	prt_printf(out, "discarded\t%u:%u\n",
1577 	       j->space[journal_space_discarded].next_entry,
1578 	       j->space[journal_space_discarded].total);
1579 	prt_printf(out, "clean ondisk\t%u:%u\n",
1580 	       j->space[journal_space_clean_ondisk].next_entry,
1581 	       j->space[journal_space_clean_ondisk].total);
1582 	prt_printf(out, "clean\t%u:%u\n",
1583 	       j->space[journal_space_clean].next_entry,
1584 	       j->space[journal_space_clean].total);
1585 	prt_printf(out, "total\t%u:%u\n",
1586 	       j->space[journal_space_total].next_entry,
1587 	       j->space[journal_space_total].total);
1588 	printbuf_indent_sub(out, 2);
1589 
1590 	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
1591 		if (!ca->mi.durability)
1592 			continue;
1593 
1594 		struct journal_device *ja = &ca->journal;
1595 
1596 		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
1597 			continue;
1598 
1599 		if (!ja->nr)
1600 			continue;
1601 
1602 		prt_printf(out, "dev %u:\n",			ca->dev_idx);
1603 		prt_printf(out, "durability %u:\n",		ca->mi.durability);
1604 		printbuf_indent_add(out, 2);
1605 		prt_printf(out, "nr\t%u\n",			ja->nr);
1606 		prt_printf(out, "bucket size\t%u\n",		ca->mi.bucket_size);
1607 		prt_printf(out, "available\t%u:%u\n",		bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
1608 		prt_printf(out, "discard_idx\t%u\n",		ja->discard_idx);
1609 		prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n",ja->dirty_idx_ondisk,	ja->bucket_seq[ja->dirty_idx_ondisk]);
1610 		prt_printf(out, "dirty_idx\t%u (seq %llu)\n",	ja->dirty_idx,		ja->bucket_seq[ja->dirty_idx]);
1611 		prt_printf(out, "cur_idx\t%u (seq %llu)\n",	ja->cur_idx,		ja->bucket_seq[ja->cur_idx]);
1612 		printbuf_indent_sub(out, 2);
1613 	}
1614 
1615 	prt_printf(out, "replicas want %u need %u\n", c->opts.metadata_replicas, c->opts.metadata_replicas_required);
1616 
1617 	rcu_read_unlock();
1618 
1619 	--out->atomic;
1620 }
1621 
1622 void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1623 {
1624 	spin_lock(&j->lock);
1625 	__bch2_journal_debug_to_text(out, j);
1626 	spin_unlock(&j->lock);
1627 }
1628