xref: /linux/fs/bcachefs/journal.c (revision 8f5b5f78113e881cb8570c961b0dc42b218a1b9e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bcachefs journalling code, for btree insertions
4  *
5  * Copyright 2012 Google, Inc.
6  */
7 
8 #include "bcachefs.h"
9 #include "alloc_foreground.h"
10 #include "bkey_methods.h"
11 #include "btree_gc.h"
12 #include "btree_update.h"
13 #include "btree_write_buffer.h"
14 #include "buckets.h"
15 #include "error.h"
16 #include "journal.h"
17 #include "journal_io.h"
18 #include "journal_reclaim.h"
19 #include "journal_sb.h"
20 #include "journal_seq_blacklist.h"
21 #include "trace.h"
22 
23 static const char * const bch2_journal_errors[] = {
24 #define x(n)	#n,
25 	JOURNAL_ERRORS()
26 #undef x
27 	NULL
28 };
29 
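/* A sequence number is unwritten if it's newer than the last entry known to be on disk: */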
30 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
31 {
32 	return seq > j->seq_ondisk;
33 }
34 
35 static bool __journal_entry_is_open(union journal_res_state state)
36 {
37 	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
38 }
39 
40 static inline unsigned nr_unwritten_journal_entries(struct journal *j)
41 {
42 	return atomic64_read(&j->seq) - j->seq_ondisk;
43 }
44 
45 static bool journal_entry_is_open(struct journal *j)
46 {
47 	return __journal_entry_is_open(j->reservations);
48 }
49 
50 static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
51 {
52 	union journal_res_state s = READ_ONCE(j->reservations);
53 	unsigned i = seq & JOURNAL_BUF_MASK;
54 	struct journal_buf *buf = j->buf + i;
55 
56 	prt_str(out, "seq:");
57 	prt_tab(out);
58 	prt_printf(out, "%llu", seq);
59 	prt_newline(out);
60 	printbuf_indent_add(out, 2);
61 
62 	prt_str(out, "refcount:");
63 	prt_tab(out);
64 	prt_printf(out, "%u", journal_state_count(s, i));
65 	prt_newline(out);
66 
67 	prt_str(out, "size:");
68 	prt_tab(out);
69 	prt_human_readable_u64(out, vstruct_bytes(buf->data));
70 	prt_newline(out);
71 
72 	prt_str(out, "expires:");
73 	prt_tab(out);
74 	prt_printf(out, "%li jiffies", buf->expires - jiffies);
75 	prt_newline(out);
76 
77 	prt_str(out, "flags:");
78 	prt_tab(out);
79 	if (buf->noflush)
80 		prt_str(out, "noflush ");
81 	if (buf->must_flush)
82 		prt_str(out, "must_flush ");
83 	if (buf->separate_flush)
84 		prt_str(out, "separate_flush ");
85 	if (buf->need_flush_to_write_buffer)
86 		prt_str(out, "need_flush_to_write_buffer ");
87 	if (buf->write_started)
88 		prt_str(out, "write_started ");
89 	if (buf->write_allocated)
90 		prt_str(out, "write allocated ");
91 	if (buf->write_done)
92 		prt_str(out, "write done");
93 	prt_newline(out);
94 
95 	printbuf_indent_sub(out, 2);
96 }
97 
98 static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
99 {
100 	if (!out->nr_tabstops)
101 		printbuf_tabstop_push(out, 24);
102 
103 	for (u64 seq = journal_last_unwritten_seq(j);
104 	     seq <= journal_cur_seq(j);
105 	     seq++)
106 		bch2_journal_buf_to_text(out, j, seq);
107 	prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
108 }
109 
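/*
 * Map @seq to its journal_buf: only unwritten entries still have a buffer, and
 * in-flight buffers live in j->buf[], indexed by the low bits of seq:
 */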
110 static inline struct journal_buf *
111 journal_seq_to_buf(struct journal *j, u64 seq)
112 {
113 	struct journal_buf *buf = NULL;
114 
115 	EBUG_ON(seq > journal_cur_seq(j));
116 
117 	if (journal_seq_unwritten(j, seq)) {
118 		buf = j->buf + (seq & JOURNAL_BUF_MASK);
119 		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
120 	}
121 	return buf;
122 }
123 
124 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
125 {
126 	unsigned i;
127 
128 	for (i = 0; i < ARRAY_SIZE(p->list); i++)
129 		INIT_LIST_HEAD(&p->list[i]);
130 	INIT_LIST_HEAD(&p->flushed);
131 	atomic_set(&p->count, count);
132 	p->devs.nr = 0;
133 }
134 
135 /*
136  * Detect stuck journal conditions and trigger shutdown. Technically the journal
137  * can end up stuck for a variety of reasons, such as blocked I/O, a journal
138  * reservation lockup, etc. Since this is a fatal error with potentially
139  * unpredictable characteristics, we want to be fairly conservative before we
140  * decide to shut things down.
141  *
142  * Consider the journal stuck when it appears full with no ability to commit
143  * btree transactions, to discard journal buckets, or to acquire a priority
144  * (reserved watermark) reservation.
145  */
146 static inline bool
147 journal_error_check_stuck(struct journal *j, int error, unsigned flags)
148 {
149 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
150 	bool stuck = false;
151 	struct printbuf buf = PRINTBUF;
152 
153 	if (!(error == JOURNAL_ERR_journal_full ||
154 	      error == JOURNAL_ERR_journal_pin_full) ||
155 	    nr_unwritten_journal_entries(j) ||
156 	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
157 		return stuck;
158 
159 	spin_lock(&j->lock);
160 
161 	if (j->can_discard) {
162 		spin_unlock(&j->lock);
163 		return stuck;
164 	}
165 
166 	stuck = true;
167 
168 	/*
169 	 * The journal shutdown path will set ->err_seq, but do it here first to
170 	 * serialize against concurrent failures and avoid duplicate error
171 	 * reports.
172 	 */
173 	if (j->err_seq) {
174 		spin_unlock(&j->lock);
175 		return stuck;
176 	}
177 	j->err_seq = journal_cur_seq(j);
178 	spin_unlock(&j->lock);
179 
180 	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
181 		bch2_journal_errors[error]);
182 	bch2_journal_debug_to_text(&buf, j);
183 	bch_err(c, "%s", buf.buf);
184 
185 	printbuf_reset(&buf);
186 	bch2_journal_pins_to_text(&buf, j);
187 	bch_err(c, "Journal pins:\n%s", buf.buf);
188 	printbuf_exit(&buf);
189 
190 	bch2_fatal_error(c);
191 	dump_stack();
192 
193 	return stuck;
194 }
195 
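/*
 * Kick off writes for any unwritten journal entries that are ready: walk the
 * unwritten entries in order, and stop at the first buffer that still has
 * outstanding reservations (journal_state_count() != 0):
 */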
196 void bch2_journal_do_writes(struct journal *j)
197 {
198 	for (u64 seq = journal_last_unwritten_seq(j);
199 	     seq <= journal_cur_seq(j);
200 	     seq++) {
201 		unsigned idx = seq & JOURNAL_BUF_MASK;
202 		struct journal_buf *w = j->buf + idx;
203 
204 		if (w->write_started && !w->write_allocated)
205 			break;
206 		if (w->write_started)
207 			continue;
208 
209 		if (!journal_state_count(j->reservations, idx)) {
210 			w->write_started = true;
211 			closure_call(&w->io, bch2_journal_write, j->wq, NULL);
212 		}
213 
214 		break;
215 	}
216 }
217 
218 /*
219  * Final processing when the last reference of a journal buffer has been
220  * dropped. Drop the pin list reference acquired when the journal entry was
221  * opened, and write the buffer, if requested.
222  */
223 void bch2_journal_buf_put_final(struct journal *j, u64 seq)
224 {
225 	lockdep_assert_held(&j->lock);
226 
227 	if (__bch2_journal_pin_put(j, seq))
228 		bch2_journal_reclaim_fast(j);
229 	bch2_journal_do_writes(j);
230 }
231 
232 /*
233  * Close the currently open journal entry, if there is one:
234  *
235  * We don't close a journal_buf until the next journal_buf is finished writing,
236  * and can be opened again - this also initializes the next journal_buf:
237  */
238 static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
239 {
240 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
241 	struct journal_buf *buf = journal_cur_buf(j);
242 	union journal_res_state old, new;
243 	u64 v = atomic64_read(&j->reservations.counter);
244 	unsigned sectors;
245 
246 	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
247 	       closed_val != JOURNAL_ENTRY_ERROR_VAL);
248 
249 	lockdep_assert_held(&j->lock);
250 
251 	do {
252 		old.v = new.v = v;
253 		new.cur_entry_offset = closed_val;
254 
255 		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
256 		    old.cur_entry_offset == new.cur_entry_offset)
257 			return;
258 	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
259 				       old.v, new.v)) != old.v);
260 
261 	if (!__journal_entry_is_open(old))
262 		return;
263 
264 	/* Close out old buffer: */
265 	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);
266 
267 	if (trace_journal_entry_close_enabled() && trace) {
268 		struct printbuf pbuf = PRINTBUF;
269 		pbuf.atomic++;
270 
271 		prt_str(&pbuf, "entry size: ");
272 		prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
273 		prt_newline(&pbuf);
274 		bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
275 		trace_journal_entry_close(c, pbuf.buf);
276 		printbuf_exit(&pbuf);
277 	}
278 
279 	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
280 				      buf->u64s_reserved) << c->block_bits;
281 	BUG_ON(sectors > buf->sectors);
282 	buf->sectors = sectors;
283 
284 	/*
285 	 * We have to set last_seq here, _before_ opening a new journal entry:
286 	 *
287 	 * A thread may replace an old pin with a new pin on its current
288 	 * journal reservation - the expectation being that the journal will
289 	 * contain either what the old pin protected or what the new pin
290 	 * protects.
291 	 *
292 	 * After the old pin is dropped journal_last_seq() won't include the old
293 	 * pin, so we can only write the updated last_seq on the entry that
294 	 * contains whatever the new pin protects.
295 	 *
296 	 * Restated, we can _not_ update last_seq for a given entry if there
297 	 * could be a newer entry open with reservations/pins that have been
298 	 * taken against it.
299 	 *
300 	 * Hence, we want to update/set last_seq on the current journal entry right
301 	 * before we open a new one:
302 	 */
303 	buf->last_seq		= journal_last_seq(j);
304 	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
305 	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
306 
307 	cancel_delayed_work(&j->write_work);
308 
309 	bch2_journal_space_available(j);
310 
311 	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
312 }
313 
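/*
 * Put the journal into an error state: close the current entry with the ERROR
 * sentinel so no further reservations can be taken, and record err_seq:
 */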
314 void bch2_journal_halt(struct journal *j)
315 {
316 	spin_lock(&j->lock);
317 	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
318 	if (!j->err_seq)
319 		j->err_seq = journal_cur_seq(j);
320 	journal_wake(j);
321 	spin_unlock(&j->lock);
322 }
323 
324 static bool journal_entry_want_write(struct journal *j)
325 {
326 	bool ret = !journal_entry_is_open(j) ||
327 		journal_cur_seq(j) == journal_last_unwritten_seq(j);
328 
329 	/* Don't close it yet if we already have a write in flight: */
330 	if (ret)
331 		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
332 	else if (nr_unwritten_journal_entries(j)) {
333 		struct journal_buf *buf = journal_cur_buf(j);
334 
335 		if (!buf->flush_time) {
336 			buf->flush_time	= local_clock() ?: 1;
337 			buf->expires = jiffies;
338 		}
339 	}
340 
341 	return ret;
342 }
343 
344 bool bch2_journal_entry_close(struct journal *j)
345 {
346 	bool ret;
347 
348 	spin_lock(&j->lock);
349 	ret = journal_entry_want_write(j);
350 	spin_unlock(&j->lock);
351 
352 	return ret;
353 }
354 
355 /*
356  * should _only_ be called from journal_res_get() - when we actually want a
357  * journal reservation - a journal entry being open means the journal is dirty:
358  */
359 static int journal_entry_open(struct journal *j)
360 {
361 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
362 	struct journal_buf *buf = j->buf +
363 		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
364 	union journal_res_state old, new;
365 	int u64s;
366 	u64 v;
367 
368 	lockdep_assert_held(&j->lock);
369 	BUG_ON(journal_entry_is_open(j));
370 	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
371 
372 	if (j->blocked)
373 		return JOURNAL_ERR_blocked;
374 
375 	if (j->cur_entry_error)
376 		return j->cur_entry_error;
377 
378 	if (bch2_journal_error(j))
379 		return JOURNAL_ERR_insufficient_devices; /* -EROFS */
380 
381 	if (!fifo_free(&j->pin))
382 		return JOURNAL_ERR_journal_pin_full;
383 
384 	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
385 		return JOURNAL_ERR_max_in_flight;
386 
387 	BUG_ON(!j->cur_entry_sectors);
388 
389 	buf->expires		=
390 		(journal_cur_seq(j) == j->flushed_seq_ondisk
391 		 ? jiffies
392 		 : j->last_flush_write) +
393 		msecs_to_jiffies(c->opts.journal_flush_delay);
394 
395 	buf->u64s_reserved	= j->entry_u64s_reserved;
396 	buf->disk_sectors	= j->cur_entry_sectors;
397 	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);
398 
399 	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
400 		journal_entry_overhead(j);
401 	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
402 
403 	if (u64s <= (ssize_t) j->early_journal_entries.nr)
404 		return JOURNAL_ERR_journal_full;
405 
406 	if (fifo_empty(&j->pin) && j->reclaim_thread)
407 		wake_up_process(j->reclaim_thread);
408 
409 	/*
410 	 * The fifo_push() needs to happen at the same time as j->seq is
411 	 * incremented for journal_last_seq() to be calculated correctly
412 	 */
413 	atomic64_inc(&j->seq);
414 	journal_pin_list_init(fifo_push_ref(&j->pin), 1);
415 
416 	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
417 
418 	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
419 
420 	bkey_extent_init(&buf->key);
421 	buf->noflush		= false;
422 	buf->must_flush		= false;
423 	buf->separate_flush	= false;
424 	buf->flush_time		= 0;
425 	buf->need_flush_to_write_buffer = true;
426 	buf->write_started	= false;
427 	buf->write_allocated	= false;
428 	buf->write_done		= false;
429 
430 	memset(buf->data, 0, sizeof(*buf->data));
431 	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
432 	buf->data->u64s	= 0;
433 
434 	if (j->early_journal_entries.nr) {
435 		memcpy(buf->data->_data, j->early_journal_entries.data,
436 		       j->early_journal_entries.nr * sizeof(u64));
437 		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
438 	}
439 
440 	/*
441 	 * Must be set before marking the journal entry as open:
442 	 */
443 	j->cur_entry_u64s = u64s;
444 
445 	v = atomic64_read(&j->reservations.counter);
446 	do {
447 		old.v = new.v = v;
448 
449 		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
450 
451 		new.idx++;
452 		BUG_ON(journal_state_count(new, new.idx));
453 		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
454 
455 		journal_state_inc(&new);
456 
457 		/* Handle any already added entries */
458 		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
459 	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
460 				       old.v, new.v)) != old.v);
461 
462 	if (nr_unwritten_journal_entries(j) == 1)
463 		mod_delayed_work(j->wq,
464 				 &j->write_work,
465 				 msecs_to_jiffies(c->opts.journal_flush_delay));
466 	journal_wake(j);
467 
468 	if (j->early_journal_entries.nr)
469 		darray_exit(&j->early_journal_entries);
470 	return 0;
471 }
472 
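/*
 * The journal is quiesced once every entry has been written; if not, make sure
 * the current entry gets closed so the remaining writes can complete:
 */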
473 static bool journal_quiesced(struct journal *j)
474 {
475 	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
476 
477 	if (!ret)
478 		bch2_journal_entry_close(j);
479 	return ret;
480 }
481 
482 static void journal_quiesce(struct journal *j)
483 {
484 	wait_event(j->wait, journal_quiesced(j));
485 }
486 
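/*
 * Delayed work that closes the current journal entry once it's past its expiry
 * time (kicking off a write), or re-arms itself for the time remaining:
 */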
487 static void journal_write_work(struct work_struct *work)
488 {
489 	struct journal *j = container_of(work, struct journal, write_work.work);
490 
491 	spin_lock(&j->lock);
492 	if (__journal_entry_is_open(j->reservations)) {
493 		long delta = journal_cur_buf(j)->expires - jiffies;
494 
495 		if (delta > 0)
496 			mod_delayed_work(j->wq, &j->write_work, delta);
497 		else
498 			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
499 	}
500 	spin_unlock(&j->lock);
501 }
502 
503 static int __journal_res_get(struct journal *j, struct journal_res *res,
504 			     unsigned flags)
505 {
506 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
507 	struct journal_buf *buf;
508 	bool can_discard;
509 	int ret;
510 retry:
511 	if (journal_res_get_fast(j, res, flags))
512 		return 0;
513 
514 	if (bch2_journal_error(j))
515 		return -BCH_ERR_erofs_journal_err;
516 
517 	if (j->blocked)
518 		return -BCH_ERR_journal_res_get_blocked;
519 
520 	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
521 		ret = JOURNAL_ERR_journal_full;
522 		can_discard = j->can_discard;
523 		goto out;
524 	}
525 
526 	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
527 		ret = JOURNAL_ERR_max_in_flight;
528 		goto out;
529 	}
530 
531 	spin_lock(&j->lock);
532 
533 	/*
534 	 * Recheck after taking the lock, so we don't race with another thread
535 	 * that just did journal_entry_open(), and end up calling
536 	 * bch2_journal_entry_close() unnecessarily
537 	 */
538 	if (journal_res_get_fast(j, res, flags)) {
539 		ret = 0;
540 		goto unlock;
541 	}
542 
543 	/*
544 	 * If we couldn't get a reservation because the current buf filled up,
545 	 * and we had room for a bigger entry on disk, signal that we want to
546 	 * realloc the journal bufs:
547 	 */
548 	buf = journal_cur_buf(j);
549 	if (journal_entry_is_open(j) &&
550 	    buf->buf_size >> 9 < buf->disk_sectors &&
551 	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
552 		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
553 
554 	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
555 	ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
556 unlock:
557 	can_discard = j->can_discard;
558 	spin_unlock(&j->lock);
559 out:
560 	if (ret == JOURNAL_ERR_retry)
561 		goto retry;
562 	if (!ret)
563 		return 0;
564 
565 	if (journal_error_check_stuck(j, ret, flags))
566 		ret = -BCH_ERR_journal_res_get_blocked;
567 
568 	if (ret == JOURNAL_ERR_max_in_flight &&
569 	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {
570 
571 		struct printbuf buf = PRINTBUF;
572 		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
573 		bch2_journal_bufs_to_text(&buf, j);
574 		trace_journal_entry_full(c, buf.buf);
575 		printbuf_exit(&buf);
576 		count_event(c, journal_entry_full);
577 	}
578 
579 	/*
580 	 * Journal is full - can't rely on reclaim from work item due to
581 	 * freezing:
582 	 */
583 	if ((ret == JOURNAL_ERR_journal_full ||
584 	     ret == JOURNAL_ERR_journal_pin_full) &&
585 	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
586 		if (can_discard) {
587 			bch2_journal_do_discards(j);
588 			goto retry;
589 		}
590 
591 		if (mutex_trylock(&j->reclaim_lock)) {
592 			bch2_journal_reclaim(j);
593 			mutex_unlock(&j->reclaim_lock);
594 		}
595 	}
596 
597 	return ret == JOURNAL_ERR_insufficient_devices
598 		? -BCH_ERR_erofs_journal_err
599 		: -BCH_ERR_journal_res_get_blocked;
600 }
601 
602 /*
603  * Essentially the entry point to the journalling code. When bcachefs is doing
604  * a btree insert, it calls this function to get the current journal write.
605  * The journal write is the structure used to set up journal writes. The
606  * calling function will then add its keys to the structure, queuing them for
607  * the next write.
608  *
609  * To ensure forward progress, the current task must not be holding any
610  * btree node write locks.
611  */
612 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
613 				  unsigned flags)
614 {
615 	int ret;
616 
617 	closure_wait_event(&j->async_wait,
618 		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
619 		   (flags & JOURNAL_RES_GET_NONBLOCK));
620 	return ret;
621 }
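/*
 * A sketch of the reservation lifecycle, following the pattern used by
 * bch2_journal_meta() below (illustrative only):
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
 *	if (ret)
 *		return ret;
 *	...copy keys into the reserved space...
 *	bch2_journal_res_put(j, &res);
 *	ret = bch2_journal_flush_seq(j, res.seq);
 */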
622 
623 /* journal_entry_res: */
624 
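/*
 * Resize a standing per-entry reservation: if the reservation grows and the
 * currently open entry no longer has room for it, the entry is closed:
 */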
625 void bch2_journal_entry_res_resize(struct journal *j,
626 				   struct journal_entry_res *res,
627 				   unsigned new_u64s)
628 {
629 	union journal_res_state state;
630 	int d = new_u64s - res->u64s;
631 
632 	spin_lock(&j->lock);
633 
634 	j->entry_u64s_reserved += d;
635 	if (d <= 0)
636 		goto out;
637 
638 	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
639 	smp_mb();
640 	state = READ_ONCE(j->reservations);
641 
642 	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
643 	    state.cur_entry_offset > j->cur_entry_u64s) {
644 		j->cur_entry_u64s += d;
645 		/*
646 		 * Not enough room in current journal entry, have to flush it:
647 		 */
648 		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
649 	} else {
650 		journal_cur_buf(j)->u64s_reserved += d;
651 	}
652 out:
653 	spin_unlock(&j->lock);
654 	res->u64s += d;
655 }
656 
657 /* journal flushing: */
658 
659 /**
660  * bch2_journal_flush_seq_async - wait for a journal entry to be written
661  * @j:		journal object
662  * @seq:	seq to flush
663  * @parent:	closure object to wait with
664  * Returns:	1 if @seq has already been flushed, 0 if @seq is being flushed,
665  *		-EIO if @seq will never be flushed
666  *
667  * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
668  * necessary
669  */
670 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
671 				 struct closure *parent)
672 {
673 	struct journal_buf *buf;
674 	int ret = 0;
675 
676 	if (seq <= j->flushed_seq_ondisk)
677 		return 1;
678 
679 	spin_lock(&j->lock);
680 
681 	if (WARN_ONCE(seq > journal_cur_seq(j),
682 		      "requested to flush journal seq %llu, but currently at %llu",
683 		      seq, journal_cur_seq(j)))
684 		goto out;
685 
686 	/* Recheck under lock: */
687 	if (j->err_seq && seq >= j->err_seq) {
688 		ret = -EIO;
689 		goto out;
690 	}
691 
692 	if (seq <= j->flushed_seq_ondisk) {
693 		ret = 1;
694 		goto out;
695 	}
696 
697 	/* if seq was written, but not flushed - flush a newer one instead */
698 	seq = max(seq, journal_last_unwritten_seq(j));
699 
700 recheck_need_open:
701 	if (seq > journal_cur_seq(j)) {
702 		struct journal_res res = { 0 };
703 
704 		if (journal_entry_is_open(j))
705 			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
706 
707 		spin_unlock(&j->lock);
708 
709 		/*
710 		 * We're called from bch2_journal_flush_seq() -> wait_event();
711 		 * but this might block. We won't usually block, so we won't
712 		 * livelock:
713 		 */
714 		sched_annotate_sleep();
715 		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
716 		if (ret)
717 			return ret;
718 
719 		seq = res.seq;
720 		buf = journal_seq_to_buf(j, seq);
721 		buf->must_flush = true;
722 
723 		if (!buf->flush_time) {
724 			buf->flush_time	= local_clock() ?: 1;
725 			buf->expires = jiffies;
726 		}
727 
728 		if (parent && !closure_wait(&buf->wait, parent))
729 			BUG();
730 
731 		bch2_journal_res_put(j, &res);
732 
733 		spin_lock(&j->lock);
734 		goto want_write;
735 	}
736 
737 	/*
738 	 * if write was kicked off without a flush, or if we promised it
739 	 * wouldn't be a flush, flush the next sequence number instead
740 	 */
741 	buf = journal_seq_to_buf(j, seq);
742 	if (buf->noflush) {
743 		seq++;
744 		goto recheck_need_open;
745 	}
746 
747 	buf->must_flush = true;
748 
749 	if (parent && !closure_wait(&buf->wait, parent))
750 		BUG();
751 want_write:
752 	if (seq == journal_cur_seq(j))
753 		journal_entry_want_write(j);
754 out:
755 	spin_unlock(&j->lock);
756 	return ret;
757 }
758 
759 int bch2_journal_flush_seq(struct journal *j, u64 seq)
760 {
761 	u64 start_time = local_clock();
762 	int ret, ret2;
763 
764 	/*
765 	 * Don't update time_stats when @seq is already flushed:
766 	 */
767 	if (seq <= j->flushed_seq_ondisk)
768 		return 0;
769 
770 	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));
771 
772 	if (!ret)
773 		bch2_time_stats_update(j->flush_seq_time, start_time);
774 
775 	return ret ?: ret2 < 0 ? ret2 : 0;
776 }
777 
778 /*
779  * bch2_journal_flush_async - if there is an open journal entry, or a journal
780  * entry still being written, write it and wait for the write to complete
781  */
782 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
783 {
784 	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
785 }
786 
787 int bch2_journal_flush(struct journal *j)
788 {
789 	return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
790 }
791 
792 /*
793  * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
794  * @seq
795  */
796 bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
797 {
798 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
799 	u64 unwritten_seq;
800 	bool ret = false;
801 
802 	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
803 		return false;
804 
805 	if (seq <= c->journal.flushed_seq_ondisk)
806 		return false;
807 
808 	spin_lock(&j->lock);
809 	if (seq <= c->journal.flushed_seq_ondisk)
810 		goto out;
811 
812 	for (unwritten_seq = journal_last_unwritten_seq(j);
813 	     unwritten_seq < seq;
814 	     unwritten_seq++) {
815 		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);
816 
817 		/* journal flush already in flight, or flush requested */
818 		if (buf->must_flush)
819 			goto out;
820 
821 		buf->noflush = true;
822 	}
823 
824 	ret = true;
825 out:
826 	spin_unlock(&j->lock);
827 	return ret;
828 }
829 
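/*
 * Get and flush an (empty) journal reservation, forcing a flush write of the
 * resulting sequence number:
 */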
830 int bch2_journal_meta(struct journal *j)
831 {
832 	struct journal_buf *buf;
833 	struct journal_res res;
834 	int ret;
835 
836 	memset(&res, 0, sizeof(res));
837 
838 	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
839 	if (ret)
840 		return ret;
841 
842 	buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
843 	buf->must_flush = true;
844 
845 	if (!buf->flush_time) {
846 		buf->flush_time	= local_clock() ?: 1;
847 		buf->expires = jiffies;
848 	}
849 
850 	bch2_journal_res_put(j, &res);
851 
852 	return bch2_journal_flush_seq(j, res.seq);
853 }
854 
855 /* block/unblock the journal: */
856 
857 void bch2_journal_unblock(struct journal *j)
858 {
859 	spin_lock(&j->lock);
860 	j->blocked--;
861 	spin_unlock(&j->lock);
862 
863 	journal_wake(j);
864 }
865 
866 void bch2_journal_block(struct journal *j)
867 {
868 	spin_lock(&j->lock);
869 	j->blocked++;
870 	spin_unlock(&j->lock);
871 
872 	journal_quiesce(j);
873 }
874 
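/*
 * Find the oldest unwritten journal buffer (up to @max_seq) that still needs
 * flushing to the btree write buffer: returns -EAGAIN if that buffer still has
 * outstanding reservations, and returns with j->buf_lock held on success:
 */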
875 static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
876 {
877 	struct journal_buf *ret = NULL;
878 
879 	/* We're inside wait_event(), but using mutex_lock(): */
880 	sched_annotate_sleep();
881 	mutex_lock(&j->buf_lock);
882 	spin_lock(&j->lock);
883 	max_seq = min(max_seq, journal_cur_seq(j));
884 
885 	for (u64 seq = journal_last_unwritten_seq(j);
886 	     seq <= max_seq;
887 	     seq++) {
888 		unsigned idx = seq & JOURNAL_BUF_MASK;
889 		struct journal_buf *buf = j->buf + idx;
890 
891 		if (buf->need_flush_to_write_buffer) {
892 			if (seq == journal_cur_seq(j))
893 				__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
894 
895 			union journal_res_state s;
896 			s.v = atomic64_read_acquire(&j->reservations.counter);
897 
898 			ret = journal_state_count(s, idx)
899 				? ERR_PTR(-EAGAIN)
900 				: buf;
901 			break;
902 		}
903 	}
904 
905 	spin_unlock(&j->lock);
906 	if (IS_ERR_OR_NULL(ret))
907 		mutex_unlock(&j->buf_lock);
908 	return ret;
909 }
910 
911 struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
912 {
913 	struct journal_buf *ret;
914 
915 	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));
916 	return ret;
917 }
918 
919 /* allocate journal on a device: */
920 
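/*
 * Grow a device's journal to @nr buckets: allocate the new buckets, splice
 * them into ja->buckets at the discard index, and update the superblock:
 */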
921 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
922 					 bool new_fs, struct closure *cl)
923 {
924 	struct bch_fs *c = ca->fs;
925 	struct journal_device *ja = &ca->journal;
926 	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
927 	struct open_bucket **ob = NULL;
928 	long *bu = NULL;
929 	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
930 	int ret = 0;
931 
932 	BUG_ON(nr <= ja->nr);
933 
934 	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
935 	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
936 	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
937 	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
938 	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
939 		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
940 		goto err_free;
941 	}
942 
943 	for (nr_got = 0; nr_got < nr_want; nr_got++) {
944 		if (new_fs) {
945 			bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
946 			if (bu[nr_got] < 0) {
947 				ret = -BCH_ERR_ENOSPC_bucket_alloc;
948 				break;
949 			}
950 		} else {
951 			ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
952 			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
953 			if (ret)
954 				break;
955 
956 			ret = bch2_trans_run(c,
957 				bch2_trans_mark_metadata_bucket(trans, ca,
958 						ob[nr_got]->bucket, BCH_DATA_journal,
959 						ca->mi.bucket_size));
960 			if (ret) {
961 				bch2_open_bucket_put(c, ob[nr_got]);
962 				bch_err_msg(c, ret, "marking new journal buckets");
963 				break;
964 			}
965 
966 			bu[nr_got] = ob[nr_got]->bucket;
967 		}
968 	}
969 
970 	if (!nr_got)
971 		goto err_free;
972 
973 	/* Don't return an error if we successfully allocated some buckets: */
974 	ret = 0;
975 
976 	if (c) {
977 		bch2_journal_flush_all_pins(&c->journal);
978 		bch2_journal_block(&c->journal);
979 		mutex_lock(&c->sb_lock);
980 	}
981 
982 	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
983 	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
984 
985 	BUG_ON(ja->discard_idx > ja->nr);
986 
987 	pos = ja->discard_idx ?: ja->nr;
988 
989 	memmove(new_buckets + pos + nr_got,
990 		new_buckets + pos,
991 		sizeof(new_buckets[0]) * (ja->nr - pos));
992 	memmove(new_bucket_seq + pos + nr_got,
993 		new_bucket_seq + pos,
994 		sizeof(new_bucket_seq[0]) * (ja->nr - pos));
995 
996 	for (i = 0; i < nr_got; i++) {
997 		new_buckets[pos + i] = bu[i];
998 		new_bucket_seq[pos + i] = 0;
999 	}
1000 
1001 	nr = ja->nr + nr_got;
1002 
1003 	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
1004 	if (ret)
1005 		goto err_unblock;
1006 
1007 	if (!new_fs)
1008 		bch2_write_super(c);
1009 
1010 	/* Commit: */
1011 	if (c)
1012 		spin_lock(&c->journal.lock);
1013 
1014 	swap(new_buckets,	ja->buckets);
1015 	swap(new_bucket_seq,	ja->bucket_seq);
1016 	ja->nr = nr;
1017 
1018 	if (pos <= ja->discard_idx)
1019 		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
1020 	if (pos <= ja->dirty_idx_ondisk)
1021 		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
1022 	if (pos <= ja->dirty_idx)
1023 		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
1024 	if (pos <= ja->cur_idx)
1025 		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
1026 
1027 	if (c)
1028 		spin_unlock(&c->journal.lock);
1029 err_unblock:
1030 	if (c) {
1031 		bch2_journal_unblock(&c->journal);
1032 		mutex_unlock(&c->sb_lock);
1033 	}
1034 
1035 	if (ret && !new_fs)
1036 		for (i = 0; i < nr_got; i++)
1037 			bch2_trans_run(c,
1038 				bch2_trans_mark_metadata_bucket(trans, ca,
1039 						bu[i], BCH_DATA_free, 0));
1040 err_free:
1041 	if (!new_fs)
1042 		for (i = 0; i < nr_got; i++)
1043 			bch2_open_bucket_put(c, ob[i]);
1044 
1045 	kfree(new_bucket_seq);
1046 	kfree(new_buckets);
1047 	kfree(ob);
1048 	kfree(bu);
1049 	return ret;
1050 }
1051 
1052 /*
1053  * Allocate more journal space at runtime - not currently making use of it, but
1054  * the code works:
1055  */
1056 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
1057 				unsigned nr)
1058 {
1059 	struct journal_device *ja = &ca->journal;
1060 	struct closure cl;
1061 	int ret = 0;
1062 
1063 	closure_init_stack(&cl);
1064 
1065 	down_write(&c->state_lock);
1066 
1067 	/* don't handle reducing nr of buckets yet: */
1068 	if (nr < ja->nr)
1069 		goto unlock;
1070 
1071 	while (ja->nr < nr) {
1072 		struct disk_reservation disk_res = { 0, 0, 0 };
1073 
1074 		/*
1075 		 * note: journal buckets aren't really counted as _sectors_ used yet, so
1076 		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
1077 		 * when space used goes up without a reservation - but we do need the
1078 		 * reservation to ensure we'll actually be able to allocate:
1079 		 *
1080 		 * XXX: that's not right, disk reservations only ensure a
1081 		 * filesystem-wide allocation will succeed, this is a device
1082 		 * specific allocation - we can hang here:
1083 		 */
1084 
1085 		ret = bch2_disk_reservation_get(c, &disk_res,
1086 						bucket_to_sector(ca, nr - ja->nr), 1, 0);
1087 		if (ret)
1088 			break;
1089 
1090 		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
1091 
1092 		bch2_disk_reservation_put(c, &disk_res);
1093 
1094 		closure_sync(&cl);
1095 
1096 		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
1097 			break;
1098 	}
1099 
1100 	bch_err_fn(c, ret);
1101 unlock:
1102 	up_write(&c->state_lock);
1103 	return ret;
1104 }
1105 
1106 int bch2_dev_journal_alloc(struct bch_dev *ca)
1107 {
1108 	unsigned nr;
1109 	int ret;
1110 
1111 	if (dynamic_fault("bcachefs:add:journal_alloc")) {
1112 		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
1113 		goto err;
1114 	}
1115 
1116 	/* 1/128th of the device by default: */
1117 	nr = ca->mi.nbuckets >> 7;
1118 
1119 	/*
1120 	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
1121 	 * is smaller:
1122 	 */
1123 	nr = clamp_t(unsigned, nr,
1124 		     BCH_JOURNAL_BUCKETS_MIN,
1125 		     min(1 << 13,
1126 			 (1 << 24) / ca->mi.bucket_size));
1127 
1128 	ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
1129 err:
1130 	bch_err_fn(ca, ret);
1131 	return ret;
1132 }
1133 
1134 int bch2_fs_journal_alloc(struct bch_fs *c)
1135 {
1136 	for_each_online_member(c, ca) {
1137 		if (ca->journal.nr)
1138 			continue;
1139 
1140 		int ret = bch2_dev_journal_alloc(ca);
1141 		if (ret) {
1142 			percpu_ref_put(&ca->io_ref);
1143 			return ret;
1144 		}
1145 	}
1146 
1147 	return 0;
1148 }
1149 
1150 /* startup/shutdown: */
1151 
1152 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
1153 {
1154 	bool ret = false;
1155 	u64 seq;
1156 
1157 	spin_lock(&j->lock);
1158 	for (seq = journal_last_unwritten_seq(j);
1159 	     seq <= journal_cur_seq(j) && !ret;
1160 	     seq++) {
1161 		struct journal_buf *buf = journal_seq_to_buf(j, seq);
1162 
1163 		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
1164 			ret = true;
1165 	}
1166 	spin_unlock(&j->lock);
1167 
1168 	return ret;
1169 }
1170 
1171 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
1172 {
1173 	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
1174 }
1175 
1176 void bch2_fs_journal_stop(struct journal *j)
1177 {
1178 	bch2_journal_reclaim_stop(j);
1179 	bch2_journal_flush_all_pins(j);
1180 
1181 	wait_event(j->wait, bch2_journal_entry_close(j));
1182 
1183 	/*
1184 	 * Always write a new journal entry, to make sure the clock hands are up
1185 	 * to date (and match the superblock)
1186 	 */
1187 	bch2_journal_meta(j);
1188 
1189 	journal_quiesce(j);
1190 
1191 	BUG_ON(!bch2_journal_error(j) &&
1192 	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
1193 	       j->last_empty_seq != journal_cur_seq(j));
1194 
1195 	cancel_delayed_work_sync(&j->write_work);
1196 }
1197 
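/*
 * Initialize journal state from the entries found by recovery: work out
 * last_seq, size the pin fifo to cover [last_seq, cur_seq), and set up pin
 * lists for the entries that will be replayed:
 */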
1198 int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
1199 {
1200 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
1201 	struct journal_entry_pin_list *p;
1202 	struct journal_replay *i, **_i;
1203 	struct genradix_iter iter;
1204 	bool had_entries = false;
1205 	u64 last_seq = cur_seq, nr, seq;
1206 
1207 	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
1208 		i = *_i;
1209 
1210 		if (journal_replay_ignore(i))
1211 			continue;
1212 
1213 		last_seq = le64_to_cpu(i->j.last_seq);
1214 		break;
1215 	}
1216 
1217 	nr = cur_seq - last_seq;
1218 
1219 	if (nr + 1 > j->pin.size) {
1220 		free_fifo(&j->pin);
1221 		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
1222 		if (!j->pin.data) {
1223 			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
1224 			return -BCH_ERR_ENOMEM_journal_pin_fifo;
1225 		}
1226 	}
1227 
1228 	j->replay_journal_seq	= last_seq;
1229 	j->replay_journal_seq_end = cur_seq;
1230 	j->last_seq_ondisk	= last_seq;
1231 	j->flushed_seq_ondisk	= cur_seq - 1;
1232 	j->seq_ondisk		= cur_seq - 1;
1233 	j->pin.front		= last_seq;
1234 	j->pin.back		= cur_seq;
1235 	atomic64_set(&j->seq, cur_seq - 1);
1236 
1237 	fifo_for_each_entry_ptr(p, &j->pin, seq)
1238 		journal_pin_list_init(p, 1);
1239 
1240 	genradix_for_each(&c->journal_entries, iter, _i) {
1241 		i = *_i;
1242 
1243 		if (journal_replay_ignore(i))
1244 			continue;
1245 
1246 		seq = le64_to_cpu(i->j.seq);
1247 		BUG_ON(seq >= cur_seq);
1248 
1249 		if (seq < last_seq)
1250 			continue;
1251 
1252 		if (journal_entry_empty(&i->j))
1253 			j->last_empty_seq = le64_to_cpu(i->j.seq);
1254 
1255 		p = journal_seq_pin(j, seq);
1256 
1257 		p->devs.nr = 0;
1258 		darray_for_each(i->ptrs, ptr)
1259 			bch2_dev_list_add_dev(&p->devs, ptr->dev);
1260 
1261 		had_entries = true;
1262 	}
1263 
1264 	if (!had_entries)
1265 		j->last_empty_seq = cur_seq;
1266 
1267 	spin_lock(&j->lock);
1268 
1269 	set_bit(JOURNAL_STARTED, &j->flags);
1270 	j->last_flush_write = jiffies;
1271 
1272 	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
1273 	j->reservations.unwritten_idx++;
1274 
1275 	c->last_bucket_seq_cleanup = journal_cur_seq(j);
1276 
1277 	bch2_journal_space_available(j);
1278 	spin_unlock(&j->lock);
1279 
1280 	return bch2_journal_reclaim_start(j);
1281 }
1282 
1283 /* init/exit: */
1284 
1285 void bch2_dev_journal_exit(struct bch_dev *ca)
1286 {
1287 	struct journal_device *ja = &ca->journal;
1288 
1289 	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
1290 		kfree(ja->bio[i]);
1291 		ja->bio[i] = NULL;
1292 	}
1293 
1294 	kfree(ja->buckets);
1295 	kfree(ja->bucket_seq);
1296 	ja->buckets	= NULL;
1297 	ja->bucket_seq	= NULL;
1298 }
1299 
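/*
 * Read a device's journal bucket layout from the superblock (either the
 * journal or journal_v2 field) and allocate the per-device journal state:
 */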
1300 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1301 {
1302 	struct journal_device *ja = &ca->journal;
1303 	struct bch_sb_field_journal *journal_buckets =
1304 		bch2_sb_field_get(sb, journal);
1305 	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
1306 		bch2_sb_field_get(sb, journal_v2);
1307 
1308 	ja->nr = 0;
1309 
1310 	if (journal_buckets_v2) {
1311 		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1312 
1313 		for (unsigned i = 0; i < nr; i++)
1314 			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
1315 	} else if (journal_buckets) {
1316 		ja->nr = bch2_nr_journal_buckets(journal_buckets);
1317 	}
1318 
1319 	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1320 	if (!ja->bucket_seq)
1321 		return -BCH_ERR_ENOMEM_dev_journal_init;
1322 
1323 	unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
1324 
1325 	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
1326 		ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
1327 				     nr_bvecs), GFP_KERNEL);
1328 		if (!ja->bio[i])
1329 			return -BCH_ERR_ENOMEM_dev_journal_init;
1330 
1331 		ja->bio[i]->ca = ca;
1332 		ja->bio[i]->buf_idx = i;
1333 		bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
1334 	}
1335 
1336 	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1337 	if (!ja->buckets)
1338 		return -BCH_ERR_ENOMEM_dev_journal_init;
1339 
1340 	if (journal_buckets_v2) {
1341 		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1342 		unsigned dst = 0;
1343 
1344 		for (unsigned i = 0; i < nr; i++)
1345 			for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
1346 				ja->buckets[dst++] =
1347 					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
1348 	} else if (journal_buckets) {
1349 		for (unsigned i = 0; i < ja->nr; i++)
1350 			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1351 	}
1352 
1353 	return 0;
1354 }
1355 
1356 void bch2_fs_journal_exit(struct journal *j)
1357 {
1358 	if (j->wq)
1359 		destroy_workqueue(j->wq);
1360 
1361 	darray_exit(&j->early_journal_entries);
1362 
1363 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
1364 		kvfree(j->buf[i].data);
1365 	free_fifo(&j->pin);
1366 }
1367 
1368 int bch2_fs_journal_init(struct journal *j)
1369 {
1370 	static struct lock_class_key res_key;
1371 
1372 	mutex_init(&j->buf_lock);
1373 	spin_lock_init(&j->lock);
1374 	spin_lock_init(&j->err_lock);
1375 	init_waitqueue_head(&j->wait);
1376 	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1377 	init_waitqueue_head(&j->reclaim_wait);
1378 	init_waitqueue_head(&j->pin_flush_wait);
1379 	mutex_init(&j->reclaim_lock);
1380 	mutex_init(&j->discard_lock);
1381 
1382 	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1383 
1384 	atomic64_set(&j->reservations.counter,
1385 		((union journal_res_state)
1386 		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1387 
1388 	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
1389 		return -BCH_ERR_ENOMEM_journal_pin_fifo;
1390 
1391 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
1392 		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
1393 		j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
1394 		if (!j->buf[i].data)
1395 			return -BCH_ERR_ENOMEM_journal_buf;
1396 		j->buf[i].idx = i;
1397 	}
1398 
1399 	j->pin.front = j->pin.back = 1;
1400 
1401 	j->wq = alloc_workqueue("bcachefs_journal",
1402 				WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
1403 	if (!j->wq)
1404 		return -BCH_ERR_ENOMEM_fs_other_alloc;
1405 	return 0;
1406 }
1407 
1408 /* debug: */
1409 
1410 void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1411 {
1412 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
1413 	union journal_res_state s;
1414 	unsigned long now = jiffies;
1415 	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
1416 
1417 	if (!out->nr_tabstops)
1418 		printbuf_tabstop_push(out, 24);
1419 	out->atomic++;
1420 
1421 	rcu_read_lock();
1422 	s = READ_ONCE(j->reservations);
1423 
1424 	prt_printf(out, "dirty journal entries:\t%llu/%llu\n",	fifo_used(&j->pin), j->pin.size);
1425 	prt_printf(out, "seq:\t\t\t%llu\n",			journal_cur_seq(j));
1426 	prt_printf(out, "seq_ondisk:\t\t%llu\n",		j->seq_ondisk);
1427 	prt_printf(out, "last_seq:\t\t%llu\n",			journal_last_seq(j));
1428 	prt_printf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
1429 	prt_printf(out, "flushed_seq_ondisk:\t%llu\n",		j->flushed_seq_ondisk);
1430 	prt_printf(out, "watermark:\t\t%s\n",			bch2_watermarks[j->watermark]);
1431 	prt_printf(out, "each entry reserved:\t%u\n",		j->entry_u64s_reserved);
1432 	prt_printf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
1433 	prt_printf(out, "nr noflush writes:\t%llu\n",		j->nr_noflush_writes);
1434 	prt_printf(out, "average write size:\t");
1435 	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
1436 	prt_newline(out);
1437 	prt_printf(out, "nr direct reclaim:\t%llu\n",		j->nr_direct_reclaim);
1438 	prt_printf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
1439 	prt_printf(out, "reclaim kicked:\t\t%u\n",		j->reclaim_kicked);
1440 	prt_printf(out, "reclaim runs in:\t%u ms\n",		time_after(j->next_reclaim, now)
1441 	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
1442 	prt_printf(out, "blocked:\t\t%u\n",			j->blocked);
1443 	prt_printf(out, "current entry sectors:\t%u\n",		j->cur_entry_sectors);
1444 	prt_printf(out, "current entry error:\t%s\n",		bch2_journal_errors[j->cur_entry_error]);
1445 	prt_printf(out, "current entry:\t\t");
1446 
1447 	switch (s.cur_entry_offset) {
1448 	case JOURNAL_ENTRY_ERROR_VAL:
1449 		prt_printf(out, "error");
1450 		break;
1451 	case JOURNAL_ENTRY_CLOSED_VAL:
1452 		prt_printf(out, "closed");
1453 		break;
1454 	default:
1455 		prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
1456 		break;
1457 	}
1458 
1459 	prt_newline(out);
1460 	prt_printf(out, "unwritten entries:");
1461 	prt_newline(out);
1462 	bch2_journal_bufs_to_text(out, j);
1463 
1464 	prt_printf(out,
1465 	       "replay done:\t\t%i\n",
1466 	       test_bit(JOURNAL_REPLAY_DONE,	&j->flags));
1467 
1468 	prt_printf(out, "space:\n");
1469 	prt_printf(out, "\tdiscarded\t%u:%u\n",
1470 	       j->space[journal_space_discarded].next_entry,
1471 	       j->space[journal_space_discarded].total);
1472 	prt_printf(out, "\tclean ondisk\t%u:%u\n",
1473 	       j->space[journal_space_clean_ondisk].next_entry,
1474 	       j->space[journal_space_clean_ondisk].total);
1475 	prt_printf(out, "\tclean\t\t%u:%u\n",
1476 	       j->space[journal_space_clean].next_entry,
1477 	       j->space[journal_space_clean].total);
1478 	prt_printf(out, "\ttotal\t\t%u:%u\n",
1479 	       j->space[journal_space_total].next_entry,
1480 	       j->space[journal_space_total].total);
1481 
1482 	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
1483 		struct journal_device *ja = &ca->journal;
1484 
1485 		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
1486 			continue;
1487 
1488 		if (!ja->nr)
1489 			continue;
1490 
1491 		prt_printf(out, "dev %u:\n",		ca->dev_idx);
1492 		prt_printf(out, "\tnr\t\t%u\n",		ja->nr);
1493 		prt_printf(out, "\tbucket size\t%u\n",	ca->mi.bucket_size);
1494 		prt_printf(out, "\tavailable\t%u:%u\n",	bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
1495 		prt_printf(out, "\tdiscard_idx\t%u\n",	ja->discard_idx);
1496 		prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,	ja->bucket_seq[ja->dirty_idx_ondisk]);
1497 		prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,		ja->bucket_seq[ja->dirty_idx]);
1498 		prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,		ja->bucket_seq[ja->cur_idx]);
1499 	}
1500 
1501 	rcu_read_unlock();
1502 
1503 	--out->atomic;
1504 }
1505 
1506 void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1507 {
1508 	spin_lock(&j->lock);
1509 	__bch2_journal_debug_to_text(out, j);
1510 	spin_unlock(&j->lock);
1511 }
1512 
1513 bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
1514 {
1515 	struct journal_entry_pin_list *pin_list;
1516 	struct journal_entry_pin *pin;
1517 
1518 	spin_lock(&j->lock);
1519 	*seq = max(*seq, j->pin.front);
1520 
1521 	if (*seq >= j->pin.back) {
1522 		spin_unlock(&j->lock);
1523 		return true;
1524 	}
1525 
1526 	out->atomic++;
1527 
1528 	pin_list = journal_seq_pin(j, *seq);
1529 
1530 	prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
1531 	prt_newline(out);
1532 	printbuf_indent_add(out, 2);
1533 
1534 	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
1535 		list_for_each_entry(pin, &pin_list->list[i], list) {
1536 			prt_printf(out, "\t%px %ps", pin, pin->flush);
1537 			prt_newline(out);
1538 		}
1539 
1540 	if (!list_empty(&pin_list->flushed)) {
1541 		prt_printf(out, "flushed:");
1542 		prt_newline(out);
1543 	}
1544 
1545 	list_for_each_entry(pin, &pin_list->flushed, list) {
1546 		prt_printf(out, "\t%px %ps", pin, pin->flush);
1547 		prt_newline(out);
1548 	}
1549 
1550 	printbuf_indent_sub(out, 2);
1551 
1552 	--out->atomic;
1553 	spin_unlock(&j->lock);
1554 
1555 	return false;
1556 }
1557 
1558 void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
1559 {
1560 	u64 seq = 0;
1561 
1562 	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
1563 		seq++;
1564 }
1565