xref: /linux/fs/bcachefs/journal.c (revision 56770e24f678a84a21f21bcc1ae9cbc1364677bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bcachefs journalling code, for btree insertions
4  *
5  * Copyright 2012 Google, Inc.
6  */
7 
8 #include "bcachefs.h"
9 #include "alloc_foreground.h"
10 #include "bkey_methods.h"
11 #include "btree_gc.h"
12 #include "btree_update.h"
13 #include "btree_write_buffer.h"
14 #include "buckets.h"
15 #include "error.h"
16 #include "journal.h"
17 #include "journal_io.h"
18 #include "journal_reclaim.h"
19 #include "journal_sb.h"
20 #include "journal_seq_blacklist.h"
21 #include "trace.h"
22 
23 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
24 {
25 	return seq > j->seq_ondisk;
26 }
27 
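/*
 * cur_entry_offset encoding, as inferred from how it's used in this file:
 * while an entry is open it holds the entry's fill offset in u64s; values at
 * or above JOURNAL_ENTRY_CLOSED_VAL are sentinels (closed/error).
 * JOURNAL_ENTRY_BLOCKED_VAL sorts below JOURNAL_ENTRY_CLOSED_VAL, so a blocked
 * entry still reads as open here - its real offset is stashed in
 * j->cur_entry_offset_if_blocked (see __bch2_journal_block() and
 * __journal_entry_close()).
 */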
28 static bool __journal_entry_is_open(union journal_res_state state)
29 {
30 	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
31 }
32 
33 static inline unsigned nr_unwritten_journal_entries(struct journal *j)
34 {
35 	return atomic64_read(&j->seq) - j->seq_ondisk;
36 }
37 
38 static bool journal_entry_is_open(struct journal *j)
39 {
40 	return __journal_entry_is_open(j->reservations);
41 }
42 
43 static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
44 {
45 	union journal_res_state s = READ_ONCE(j->reservations);
46 	unsigned i = seq & JOURNAL_BUF_MASK;
47 	struct journal_buf *buf = j->buf + i;
48 
49 	prt_printf(out, "seq:\t%llu\n", seq);
50 	printbuf_indent_add(out, 2);
51 
52 	if (!buf->write_started)
53 		prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i & JOURNAL_STATE_BUF_MASK));
54 
55 	struct closure *cl = &buf->io;
56 	int r = atomic_read(&cl->remaining);
57 	prt_printf(out, "io:\t%pS r %i\n", cl->fn, r & CLOSURE_REMAINING_MASK);
58 
59 	if (buf->data) {
60 		prt_printf(out, "size:\t");
61 		prt_human_readable_u64(out, vstruct_bytes(buf->data));
62 		prt_newline(out);
63 	}
64 
65 	prt_printf(out, "expires:\t%li jiffies\n", buf->expires - jiffies);
66 
67 	prt_printf(out, "flags:\t");
68 	if (buf->noflush)
69 		prt_str(out, "noflush ");
70 	if (buf->must_flush)
71 		prt_str(out, "must_flush ");
72 	if (buf->separate_flush)
73 		prt_str(out, "separate_flush ");
74 	if (buf->need_flush_to_write_buffer)
75 		prt_str(out, "need_flush_to_write_buffer ");
76 	if (buf->write_started)
77 		prt_str(out, "write_started ");
78 	if (buf->write_allocated)
79 		prt_str(out, "write_allocated ");
80 	if (buf->write_done)
81 		prt_str(out, "write_done");
82 	prt_newline(out);
83 
84 	printbuf_indent_sub(out, 2);
85 }
86 
87 static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
88 {
89 	lockdep_assert_held(&j->lock);
90 	out->atomic++;
91 
92 	if (!out->nr_tabstops)
93 		printbuf_tabstop_push(out, 24);
94 
95 	for (u64 seq = journal_last_unwritten_seq(j);
96 	     seq <= journal_cur_seq(j);
97 	     seq++)
98 		bch2_journal_buf_to_text(out, j, seq);
99 	prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
100 
101 	--out->atomic;
102 }
103 
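/*
 * j->buf is a small power-of-two ring of journal buffers, indexed by
 * seq & JOURNAL_BUF_MASK; a sequence number maps to a valid buffer only while
 * that entry is still unwritten:
 */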
104 static inline struct journal_buf *
105 journal_seq_to_buf(struct journal *j, u64 seq)
106 {
107 	struct journal_buf *buf = NULL;
108 
109 	EBUG_ON(seq > journal_cur_seq(j));
110 
111 	if (journal_seq_unwritten(j, seq))
112 		buf = j->buf + (seq & JOURNAL_BUF_MASK);
113 	return buf;
114 }
115 
116 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
117 {
118 	for (unsigned i = 0; i < ARRAY_SIZE(p->unflushed); i++)
119 		INIT_LIST_HEAD(&p->unflushed[i]);
120 	for (unsigned i = 0; i < ARRAY_SIZE(p->flushed); i++)
121 		INIT_LIST_HEAD(&p->flushed[i]);
122 	atomic_set(&p->count, count);
123 	p->devs.nr = 0;
124 }
125 
126 /*
127  * Detect stuck journal conditions and trigger shutdown. Technically the journal
128  * can end up stuck for a variety of reasons, such as a blocked I/O, journal
129  * reservation lockup, etc. Since this is a fatal error with potentially
130  * unpredictable characteristics, we want to be fairly conservative before we
131  * decide to shut things down.
132  *
133  * Consider the journal stuck when it appears full with no ability to commit
134  * btree transactions, discard journal buckets, or acquire a priority
135  * (reserved watermark) reservation.
136  */
137 static inline bool
138 journal_error_check_stuck(struct journal *j, int error, unsigned flags)
139 {
140 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
141 	bool stuck = false;
142 	struct printbuf buf = PRINTBUF;
143 
144 	buf.atomic++;
145 
146 	if (!(error == -BCH_ERR_journal_full ||
147 	      error == -BCH_ERR_journal_pin_full) ||
148 	    nr_unwritten_journal_entries(j) ||
149 	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
150 		return stuck;
151 
152 	spin_lock(&j->lock);
153 
154 	if (j->can_discard) {
155 		spin_unlock(&j->lock);
156 		return stuck;
157 	}
158 
159 	stuck = true;
160 
161 	/*
162 	 * The journal shutdown path will set ->err_seq, but do it here first to
163 	 * serialize against concurrent failures and avoid duplicate error
164 	 * reports.
165 	 */
166 	if (j->err_seq) {
167 		spin_unlock(&j->lock);
168 		return stuck;
169 	}
170 	j->err_seq = journal_cur_seq(j);
171 
172 	__bch2_journal_debug_to_text(&buf, j);
173 	spin_unlock(&j->lock);
174 	prt_printf(&buf, bch2_fmt(c, "Journal stuck! Have a pre-reservation but journal full (error %s)"),
175 				  bch2_err_str(error));
176 	bch2_print_string_as_lines(KERN_ERR, buf.buf);
177 
178 	printbuf_reset(&buf);
179 	bch2_journal_pins_to_text(&buf, j);
180 	bch_err(c, "Journal pins:\n%s", buf.buf);
181 	printbuf_exit(&buf);
182 
183 	bch2_fatal_error(c);
184 	dump_stack();
185 
186 	return stuck;
187 }
188 
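/*
 * Kick off writes for any journal entries that are ready: entries must be
 * written strictly in sequence order, so we stop at the first buffer that
 * still has an in-flight write or outstanding reservations:
 */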
189 void bch2_journal_do_writes(struct journal *j)
190 {
191 	for (u64 seq = journal_last_unwritten_seq(j);
192 	     seq <= journal_cur_seq(j);
193 	     seq++) {
194 		unsigned idx = seq & JOURNAL_BUF_MASK;
195 		struct journal_buf *w = j->buf + idx;
196 
197 		if (w->write_started && !w->write_allocated)
198 			break;
199 		if (w->write_started)
200 			continue;
201 
202 		if (!journal_state_seq_count(j, j->reservations, seq)) {
203 			j->seq_write_started = seq;
204 			w->write_started = true;
205 			closure_call(&w->io, bch2_journal_write, j->wq, NULL);
206 		}
207 
208 		break;
209 	}
210 }
211 
212 /*
213  * Final processing when the last reference of a journal buffer has been
214  * dropped. Drop the pin list reference acquired at journal entry open and write
215  * the buffer, if requested.
216  */
217 void bch2_journal_buf_put_final(struct journal *j, u64 seq)
218 {
219 	lockdep_assert_held(&j->lock);
220 
221 	if (__bch2_journal_pin_put(j, seq))
222 		bch2_journal_reclaim_fast(j);
223 	bch2_journal_do_writes(j);
224 
225 	/*
226 	 * for __bch2_next_write_buffer_flush_journal_buf(), when quiescing an
227 	 * open journal entry
228 	 */
229 	wake_up(&j->wait);
230 }
231 
232 /*
233  * Close the currently open journal entry: mark it closed (or errored) in the
234  * reservation state, record its final size and last_seq, and drop the
235  * reference taken at open time so the write can be kicked off once all
236  * outstanding reservations are released:
237  */
238 static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
239 {
240 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
241 	struct journal_buf *buf = journal_cur_buf(j);
242 	union journal_res_state old, new;
243 	unsigned sectors;
244 
245 	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
246 	       closed_val != JOURNAL_ENTRY_ERROR_VAL);
247 
248 	lockdep_assert_held(&j->lock);
249 
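	/*
	 * Swap in the closed/error sentinel; bail out if the entry was already
	 * errored or already in the target state. old.cur_entry_offset then
	 * tells us how full the entry was when we closed it:
	 */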
250 	old.v = atomic64_read(&j->reservations.counter);
251 	do {
252 		new.v = old.v;
253 		new.cur_entry_offset = closed_val;
254 
255 		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
256 		    old.cur_entry_offset == new.cur_entry_offset)
257 			return;
258 	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
259 				       &old.v, new.v));
260 
261 	if (!__journal_entry_is_open(old))
262 		return;
263 
264 	if (old.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL)
265 		old.cur_entry_offset = j->cur_entry_offset_if_blocked;
266 
267 	/* Close out old buffer: */
268 	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);
269 
270 	if (trace_journal_entry_close_enabled() && trace) {
271 		struct printbuf pbuf = PRINTBUF;
272 		pbuf.atomic++;
273 
274 		prt_str(&pbuf, "entry size: ");
275 		prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
276 		prt_newline(&pbuf);
277 		bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
278 		trace_journal_entry_close(c, pbuf.buf);
279 		printbuf_exit(&pbuf);
280 	}
281 
282 	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
283 				      buf->u64s_reserved) << c->block_bits;
284 	BUG_ON(sectors > buf->sectors);
285 	buf->sectors = sectors;
286 
287 	/*
288 	 * We have to set last_seq here, _before_ opening a new journal entry:
289 	 *
290 	 * A thread may replace an old pin with a new pin on its current
291 	 * journal reservation - the expectation being that the journal will
292 	 * contain either what the old pin protected or what the new pin
293 	 * protects.
294 	 *
295 	 * After the old pin is dropped journal_last_seq() won't include the old
296 	 * pin, so we can only write the updated last_seq on the entry that
297 	 * contains whatever the new pin protects.
298 	 *
299 	 * Restated, we can _not_ update last_seq for a given entry if there
300 	 * could be a newer entry open with reservations/pins that have been
301 	 * taken against it.
302 	 *
303 	 * Hence, we want to update/set last_seq on the current journal entry right
304 	 * before we open a new one:
305 	 */
306 	buf->last_seq		= journal_last_seq(j);
307 	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
308 	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
309 
310 	cancel_delayed_work(&j->write_work);
311 
312 	bch2_journal_space_available(j);
313 
314 	__bch2_journal_buf_put(j, le64_to_cpu(buf->data->seq));
315 }
316 
317 void bch2_journal_halt(struct journal *j)
318 {
319 	spin_lock(&j->lock);
320 	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
321 	if (!j->err_seq)
322 		j->err_seq = journal_cur_seq(j);
323 	journal_wake(j);
324 	spin_unlock(&j->lock);
325 }
326 
327 void bch2_journal_halt_locked(struct journal *j)
328 {
329 	lockdep_assert_held(&j->lock);
330 
331 	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
332 	if (!j->err_seq)
333 		j->err_seq = journal_cur_seq(j);
334 	journal_wake(j);
335 }
336 
337 static bool journal_entry_want_write(struct journal *j)
338 {
339 	bool ret = !journal_entry_is_open(j) ||
340 		journal_cur_seq(j) == journal_last_unwritten_seq(j);
341 
342 	/* Don't close it yet if we already have a write in flight: */
343 	if (ret)
344 		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
345 	else if (nr_unwritten_journal_entries(j)) {
346 		struct journal_buf *buf = journal_cur_buf(j);
347 
348 		if (!buf->flush_time) {
349 			buf->flush_time	= local_clock() ?: 1;
350 			buf->expires = jiffies;
351 		}
352 	}
353 
354 	return ret;
355 }
356 
357 bool bch2_journal_entry_close(struct journal *j)
358 {
359 	bool ret;
360 
361 	spin_lock(&j->lock);
362 	ret = journal_entry_want_write(j);
363 	spin_unlock(&j->lock);
364 
365 	return ret;
366 }
367 
368 /*
369  * Should _only_ be called from journal_res_get() - when we actually want a
370  * journal reservation - a journal entry being open means the journal is dirty:
371  */
372 static int journal_entry_open(struct journal *j)
373 {
374 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
375 	struct journal_buf *buf = j->buf +
376 		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
377 	union journal_res_state old, new;
378 	int u64s;
379 
380 	lockdep_assert_held(&j->lock);
381 	BUG_ON(journal_entry_is_open(j));
382 	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
383 
384 	if (j->blocked)
385 		return -BCH_ERR_journal_blocked;
386 
387 	if (j->cur_entry_error)
388 		return j->cur_entry_error;
389 
390 	int ret = bch2_journal_error(j);
391 	if (unlikely(ret))
392 		return ret;
393 
394 	if (!fifo_free(&j->pin))
395 		return -BCH_ERR_journal_pin_full;
396 
397 	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
398 		return -BCH_ERR_journal_max_in_flight;
399 
400 	if (atomic64_read(&j->seq) - j->seq_write_started == JOURNAL_STATE_BUF_NR)
401 		return -BCH_ERR_journal_max_open;
402 
403 	if (journal_cur_seq(j) >= JOURNAL_SEQ_MAX) {
404 		bch_err(c, "cannot start: journal seq overflow");
405 		if (bch2_fs_emergency_read_only_locked(c))
406 			bch_err(c, "fatal error - emergency read only");
407 		return -BCH_ERR_journal_shutdown;
408 	}
409 
410 	if (!j->free_buf && !buf->data)
411 		return -BCH_ERR_journal_buf_enomem; /* will retry after write completion frees up a buf */
412 
413 	BUG_ON(!j->cur_entry_sectors);
414 
415 	if (!buf->data) {
416 		swap(buf->data,		j->free_buf);
417 		swap(buf->buf_size,	j->free_buf_size);
418 	}
419 
420 	buf->expires		=
421 		(journal_cur_seq(j) == j->flushed_seq_ondisk
422 		 ? jiffies
423 		 : j->last_flush_write) +
424 		msecs_to_jiffies(c->opts.journal_flush_delay);
425 
426 	buf->u64s_reserved	= j->entry_u64s_reserved;
427 	buf->disk_sectors	= j->cur_entry_sectors;
428 	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);
429 
430 	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
431 		journal_entry_overhead(j);
432 	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
433 
434 	if (u64s <= (ssize_t) j->early_journal_entries.nr)
435 		return -BCH_ERR_journal_full;
436 
437 	if (fifo_empty(&j->pin) && j->reclaim_thread)
438 		wake_up_process(j->reclaim_thread);
439 
440 	/*
441 	 * The fifo_push() needs to happen at the same time as j->seq is
442 	 * incremented for journal_last_seq() to be calculated correctly
443 	 */
444 	atomic64_inc(&j->seq);
445 	journal_pin_list_init(fifo_push_ref(&j->pin), 1);
446 
447 	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
448 
449 	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
450 
451 	bkey_extent_init(&buf->key);
452 	buf->noflush		= false;
453 	buf->must_flush		= false;
454 	buf->separate_flush	= false;
455 	buf->flush_time		= 0;
456 	buf->need_flush_to_write_buffer = true;
457 	buf->write_started	= false;
458 	buf->write_allocated	= false;
459 	buf->write_done		= false;
460 
461 	memset(buf->data, 0, sizeof(*buf->data));
462 	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
463 	buf->data->u64s	= 0;
464 
465 	if (j->early_journal_entries.nr) {
466 		memcpy(buf->data->_data, j->early_journal_entries.data,
467 		       j->early_journal_entries.nr * sizeof(u64));
468 		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
469 	}
470 
471 	/*
472 	 * Must be set before marking the journal entry as open:
473 	 */
474 	j->cur_entry_u64s = u64s;
475 
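	/*
	 * Atomically switch the reservation state to the new buffer: bump the
	 * buffer index, take the initial reference that is dropped when the
	 * entry is closed, and publish the current fill offset - at which
	 * point other threads can start taking reservations against it:
	 */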
476 	old.v = atomic64_read(&j->reservations.counter);
477 	do {
478 		new.v = old.v;
479 
480 		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
481 
482 		new.idx++;
483 		BUG_ON(journal_state_count(new, new.idx));
484 		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_STATE_BUF_MASK));
485 
486 		journal_state_inc(&new);
487 
488 		/* Handle any already added entries */
489 		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
490 	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
491 				       &old.v, new.v));
492 
493 	if (nr_unwritten_journal_entries(j) == 1)
494 		mod_delayed_work(j->wq,
495 				 &j->write_work,
496 				 msecs_to_jiffies(c->opts.journal_flush_delay));
497 	journal_wake(j);
498 
499 	if (j->early_journal_entries.nr)
500 		darray_exit(&j->early_journal_entries);
501 	return 0;
502 }
503 
504 static bool journal_quiesced(struct journal *j)
505 {
506 	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
507 
508 	if (!ret)
509 		bch2_journal_entry_close(j);
510 	return ret;
511 }
512 
513 static void journal_quiesce(struct journal *j)
514 {
515 	wait_event(j->wait, journal_quiesced(j));
516 }
517 
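/*
 * Timer for the periodic journal flush: if the currently open entry hasn't
 * reached its expiry time yet, re-arm the delayed work; otherwise close the
 * entry so it gets written out:
 */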
518 static void journal_write_work(struct work_struct *work)
519 {
520 	struct journal *j = container_of(work, struct journal, write_work.work);
521 
522 	spin_lock(&j->lock);
523 	if (__journal_entry_is_open(j->reservations)) {
524 		long delta = journal_cur_buf(j)->expires - jiffies;
525 
526 		if (delta > 0)
527 			mod_delayed_work(j->wq, &j->write_work, delta);
528 		else
529 			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
530 	}
531 	spin_unlock(&j->lock);
532 }
533 
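/*
 * Ensure j->free_buf is at least as big as we currently want journal entries
 * to be: called with j->lock held, which we drop around the (possibly
 * sleeping) kvmalloc/kvfree:
 */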
534 static void journal_buf_prealloc(struct journal *j)
535 {
536 	if (j->free_buf &&
537 	    j->free_buf_size >= j->buf_size_want)
538 		return;
539 
540 	unsigned buf_size = j->buf_size_want;
541 
542 	spin_unlock(&j->lock);
543 	void *buf = kvmalloc(buf_size, GFP_NOFS);
544 	spin_lock(&j->lock);
545 
546 	if (buf &&
547 	    (!j->free_buf ||
548 	     buf_size > j->free_buf_size)) {
549 		swap(buf,	j->free_buf);
550 		swap(buf_size,	j->free_buf_size);
551 	}
552 
553 	if (unlikely(buf)) {
554 		spin_unlock(&j->lock);
555 		/* kvfree can sleep */
556 		kvfree(buf);
557 		spin_lock(&j->lock);
558 	}
559 }
560 
561 static int __journal_res_get(struct journal *j, struct journal_res *res,
562 			     unsigned flags)
563 {
564 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
565 	struct journal_buf *buf;
566 	bool can_discard;
567 	int ret;
568 retry:
569 	if (journal_res_get_fast(j, res, flags))
570 		return 0;
571 
572 	ret = bch2_journal_error(j);
573 	if (unlikely(ret))
574 		return ret;
575 
576 	if (j->blocked)
577 		return -BCH_ERR_journal_blocked;
578 
579 	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
580 		ret = -BCH_ERR_journal_full;
581 		can_discard = j->can_discard;
582 		goto out;
583 	}
584 
585 	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
586 		ret = -BCH_ERR_journal_max_in_flight;
587 		goto out;
588 	}
589 
590 	spin_lock(&j->lock);
591 
592 	journal_buf_prealloc(j);
593 
594 	/*
595 	 * Recheck after taking the lock, so we don't race with another thread
596 	 * that just did journal_entry_open() and call bch2_journal_entry_close()
597 	 * unnecessarily
598 	 */
599 	if (journal_res_get_fast(j, res, flags)) {
600 		ret = 0;
601 		goto unlock;
602 	}
603 
604 	/*
605 	 * If we couldn't get a reservation because the current buf filled up,
606 	 * and we had room for a bigger entry on disk, signal that we want to
607 	 * realloc the journal bufs:
608 	 */
609 	buf = journal_cur_buf(j);
610 	if (journal_entry_is_open(j) &&
611 	    buf->buf_size >> 9 < buf->disk_sectors &&
612 	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
613 		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
614 
615 	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
616 	ret = journal_entry_open(j) ?: -BCH_ERR_journal_retry_open;
617 unlock:
618 	can_discard = j->can_discard;
619 	spin_unlock(&j->lock);
620 out:
621 	if (likely(!ret))
622 		return 0;
623 	if (ret == -BCH_ERR_journal_retry_open)
624 		goto retry;
625 
626 	if (journal_error_check_stuck(j, ret, flags))
627 		ret = -BCH_ERR_journal_stuck;
628 
629 	if (ret == -BCH_ERR_journal_max_in_flight &&
630 	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true) &&
631 	    trace_journal_entry_full_enabled()) {
632 		struct printbuf buf = PRINTBUF;
633 
634 		bch2_printbuf_make_room(&buf, 4096);
635 
636 		spin_lock(&j->lock);
637 		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
638 		bch2_journal_bufs_to_text(&buf, j);
639 		spin_unlock(&j->lock);
640 
641 		trace_journal_entry_full(c, buf.buf);
642 		printbuf_exit(&buf);
643 		count_event(c, journal_entry_full);
644 	}
645 
646 	if (ret == -BCH_ERR_journal_max_open &&
647 	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_open], true) &&
648 	    trace_journal_entry_full_enabled()) {
649 		struct printbuf buf = PRINTBUF;
650 
651 		bch2_printbuf_make_room(&buf, 4096);
652 
653 		spin_lock(&j->lock);
654 		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
655 		bch2_journal_bufs_to_text(&buf, j);
656 		spin_unlock(&j->lock);
657 
658 		trace_journal_entry_full(c, buf.buf);
659 		printbuf_exit(&buf);
660 		count_event(c, journal_entry_full);
661 	}
662 
663 	/*
664 	 * Journal is full - can't rely on reclaim from work item due to
665 	 * freezing:
666 	 */
667 	if ((ret == -BCH_ERR_journal_full ||
668 	     ret == -BCH_ERR_journal_pin_full) &&
669 	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
670 		if (can_discard) {
671 			bch2_journal_do_discards(j);
672 			goto retry;
673 		}
674 
675 		if (mutex_trylock(&j->reclaim_lock)) {
676 			bch2_journal_reclaim(j);
677 			mutex_unlock(&j->reclaim_lock);
678 		}
679 	}
680 
681 	return ret;
682 }
683 
684 static unsigned max_dev_latency(struct bch_fs *c)
685 {
686 	u64 nsecs = 0;
687 
688 	for_each_rw_member(c, ca)
689 		nsecs = max(nsecs, ca->io_latency[WRITE].stats.max_duration);
690 
691 	return nsecs_to_jiffies(nsecs);
692 }
693 
694 /*
695  * Essentially the entry function to the journaling code. When bcachefs is doing
696  * a btree insert, it calls this function to get the current journal write.
697  * The journal write is the structure used to set up journal writes. The calling
698  * function will then add its keys to the structure, queuing them for the next
699  * write.
700  *
701  * To ensure forward progress, the current task must not be holding any
702  * btree node write locks.
703  */
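/*
 * Minimal usage sketch (cf. __bch2_journal_meta() below; callers normally go
 * through the bch2_journal_res_get() fastpath, which falls back to this
 * slowpath):
 *
 *	struct journal_res res = {};
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
 *	if (ret)
 *		return ret;
 *	...copy keys into the reserved space...
 *	bch2_journal_res_put(j, &res);
 */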
704 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
705 				  unsigned flags,
706 				  struct btree_trans *trans)
707 {
708 	int ret;
709 
710 	if (closure_wait_event_timeout(&j->async_wait,
711 		   !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) ||
712 		   (flags & JOURNAL_RES_GET_NONBLOCK),
713 		   HZ))
714 		return ret;
715 
716 	if (trans)
717 		bch2_trans_unlock_long(trans);
718 
719 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
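	/*
	 * We already waited one second above: wait up to
	 * max(2 * worst observed device write latency, 10s) in total before
	 * complaining that the journal looks stuck:
	 */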
720 	int remaining_wait = max(max_dev_latency(c) * 2, HZ * 10);
721 
722 	remaining_wait = max(0, remaining_wait - HZ);
723 
724 	if (closure_wait_event_timeout(&j->async_wait,
725 		   !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) ||
726 		   (flags & JOURNAL_RES_GET_NONBLOCK),
727 		   remaining_wait))
728 		return ret;
729 
730 	struct printbuf buf = PRINTBUF;
731 	bch2_journal_debug_to_text(&buf, j);
732 	prt_printf(&buf, bch2_fmt(c, "Journal stuck? Waited for 10 seconds, err %s"), bch2_err_str(ret));
733 	bch2_print_string_as_lines(KERN_ERR, buf.buf);
734 	printbuf_exit(&buf);
735 
736 	closure_wait_event(&j->async_wait,
737 		   !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) ||
738 		   (flags & JOURNAL_RES_GET_NONBLOCK));
739 	return ret;
740 }
741 
742 /* journal_entry_res: */
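/*
 * A journal_entry_res is a standing reservation carved out of every journal
 * entry (e.g. space for btree roots, which may need to be written in any
 * entry); growing it may shrink the space available in the currently open
 * entry, forcing it to be closed, as below:
 */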
743 
744 void bch2_journal_entry_res_resize(struct journal *j,
745 				   struct journal_entry_res *res,
746 				   unsigned new_u64s)
747 {
748 	union journal_res_state state;
749 	int d = new_u64s - res->u64s;
750 
751 	spin_lock(&j->lock);
752 
753 	j->entry_u64s_reserved += d;
754 	if (d <= 0)
755 		goto out;
756 
757 	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
758 	state = READ_ONCE(j->reservations);
759 
760 	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
761 	    state.cur_entry_offset > j->cur_entry_u64s) {
762 		j->cur_entry_u64s += d;
763 		/*
764 		 * Not enough room in current journal entry, have to flush it:
765 		 */
766 		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
767 	} else {
768 		journal_cur_buf(j)->u64s_reserved += d;
769 	}
770 out:
771 	spin_unlock(&j->lock);
772 	res->u64s += d;
773 }
774 
775 /* journal flushing: */
776 
777 /**
778  * bch2_journal_flush_seq_async - wait for a journal entry to be written
779  * @j:		journal object
780  * @seq:	seq to flush
781  * @parent:	closure object to wait with
782  * Returns:	1 if @seq has already been flushed, 0 if @seq is being flushed,
783  *		-BCH_ERR_journal_flush_err if @seq will never be flushed
784  *
785  * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
786  * necessary
787  */
788 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
789 				 struct closure *parent)
790 {
791 	struct journal_buf *buf;
792 	int ret = 0;
793 
794 	if (seq <= j->flushed_seq_ondisk)
795 		return 1;
796 
797 	spin_lock(&j->lock);
798 
799 	if (WARN_ONCE(seq > journal_cur_seq(j),
800 		      "requested to flush journal seq %llu, but currently at %llu",
801 		      seq, journal_cur_seq(j)))
802 		goto out;
803 
804 	/* Recheck under lock: */
805 	if (j->err_seq && seq >= j->err_seq) {
806 		ret = -BCH_ERR_journal_flush_err;
807 		goto out;
808 	}
809 
810 	if (seq <= j->flushed_seq_ondisk) {
811 		ret = 1;
812 		goto out;
813 	}
814 
815 	/* if seq was written, but not flushed - flush a newer one instead */
816 	seq = max(seq, journal_last_unwritten_seq(j));
817 
818 recheck_need_open:
819 	if (seq > journal_cur_seq(j)) {
820 		struct journal_res res = { 0 };
821 
822 		if (journal_entry_is_open(j))
823 			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
824 
825 		spin_unlock(&j->lock);
826 
827 		/*
828 		 * We're called from bch2_journal_flush_seq() -> wait_event();
829 		 * but this might block. We won't usually block, so we won't
830 		 * livelock:
831 		 */
832 		sched_annotate_sleep();
833 		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
834 		if (ret)
835 			return ret;
836 
837 		seq = res.seq;
838 		buf = journal_seq_to_buf(j, seq);
839 		buf->must_flush = true;
840 
841 		if (!buf->flush_time) {
842 			buf->flush_time	= local_clock() ?: 1;
843 			buf->expires = jiffies;
844 		}
845 
846 		if (parent && !closure_wait(&buf->wait, parent))
847 			BUG();
848 
849 		bch2_journal_res_put(j, &res);
850 
851 		spin_lock(&j->lock);
852 		goto want_write;
853 	}
854 
855 	/*
856 	 * if write was kicked off without a flush, or if we promised it
857 	 * wouldn't be a flush, flush the next sequence number instead
858 	 */
859 	buf = journal_seq_to_buf(j, seq);
860 	if (buf->noflush) {
861 		seq++;
862 		goto recheck_need_open;
863 	}
864 
865 	buf->must_flush = true;
866 	j->flushing_seq = max(j->flushing_seq, seq);
867 
868 	if (parent && !closure_wait(&buf->wait, parent))
869 		BUG();
870 want_write:
871 	if (seq == journal_cur_seq(j))
872 		journal_entry_want_write(j);
873 out:
874 	spin_unlock(&j->lock);
875 	return ret;
876 }
877 
878 int bch2_journal_flush_seq(struct journal *j, u64 seq, unsigned task_state)
879 {
880 	u64 start_time = local_clock();
881 	int ret, ret2;
882 
883 	/*
884 	 * Don't update time_stats when @seq is already flushed:
885 	 */
886 	if (seq <= j->flushed_seq_ondisk)
887 		return 0;
888 
889 	ret = wait_event_state(j->wait,
890 			       (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)),
891 			       task_state);
892 
893 	if (!ret)
894 		bch2_time_stats_update(j->flush_seq_time, start_time);
895 
896 	return ret ?: ret2 < 0 ? ret2 : 0;
897 }
898 
899 /*
900  * bch2_journal_flush_async - if there is an open journal entry, or a journal
901  * entry still being written, write it and wait for the write to complete
902  */
903 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
904 {
905 	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
906 }
907 
908 int bch2_journal_flush(struct journal *j)
909 {
910 	return bch2_journal_flush_seq(j, atomic64_read(&j->seq), TASK_UNINTERRUPTIBLE);
911 }
912 
913 /*
914  * bch2_journal_noflush_seq - ask the journal not to issue any flushes in the
915  * range [start, end)
916  * @j: journal, @start: start of seq range, @end: end of seq range (exclusive)
917  */
918 bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end)
919 {
920 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
921 	u64 unwritten_seq;
922 	bool ret = false;
923 
924 	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
925 		return false;
926 
927 	if (c->journal.flushed_seq_ondisk >= start)
928 		return false;
929 
930 	spin_lock(&j->lock);
931 	if (c->journal.flushed_seq_ondisk >= start)
932 		goto out;
933 
934 	for (unwritten_seq = journal_last_unwritten_seq(j);
935 	     unwritten_seq < end;
936 	     unwritten_seq++) {
937 		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);
938 
939 		/* journal flush already in flight, or flush requested */
940 		if (buf->must_flush)
941 			goto out;
942 
943 		buf->noflush = true;
944 	}
945 
946 	ret = true;
947 out:
948 	spin_unlock(&j->lock);
949 	return ret;
950 }
951 
952 static int __bch2_journal_meta(struct journal *j)
953 {
954 	struct journal_res res = {};
955 	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
956 	if (ret)
957 		return ret;
958 
959 	struct journal_buf *buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
960 	buf->must_flush = true;
961 
962 	if (!buf->flush_time) {
963 		buf->flush_time	= local_clock() ?: 1;
964 		buf->expires = jiffies;
965 	}
966 
967 	bch2_journal_res_put(j, &res);
968 
969 	return bch2_journal_flush_seq(j, res.seq, TASK_UNINTERRUPTIBLE);
970 }
971 
972 int bch2_journal_meta(struct journal *j)
973 {
974 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
975 
976 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal))
977 		return -BCH_ERR_erofs_no_writes;
978 
979 	int ret = __bch2_journal_meta(j);
980 	bch2_write_ref_put(c, BCH_WRITE_REF_journal);
981 	return ret;
982 }
983 
984 /* block/unblock the journal: */
985 
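/*
 * Blocking the journal (j->blocked is a count of blockers) parks the open
 * entry: its offset is stashed in j->cur_entry_offset_if_blocked and the
 * reservation state is set to JOURNAL_ENTRY_BLOCKED_VAL, so no new
 * reservations can be taken until the last blocker calls
 * bch2_journal_unblock(), which restores the saved offset:
 */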
986 void bch2_journal_unblock(struct journal *j)
987 {
988 	spin_lock(&j->lock);
989 	if (!--j->blocked &&
990 	    j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL &&
991 	    j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) {
992 		union journal_res_state old, new;
993 
994 		old.v = atomic64_read(&j->reservations.counter);
995 		do {
996 			new.v = old.v;
997 			new.cur_entry_offset = j->cur_entry_offset_if_blocked;
998 		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
999 	}
1000 	spin_unlock(&j->lock);
1001 
1002 	journal_wake(j);
1003 }
1004 
1005 static void __bch2_journal_block(struct journal *j)
1006 {
1007 	if (!j->blocked++) {
1008 		union journal_res_state old, new;
1009 
1010 		old.v = atomic64_read(&j->reservations.counter);
1011 		do {
1012 			j->cur_entry_offset_if_blocked = old.cur_entry_offset;
1013 
1014 			if (j->cur_entry_offset_if_blocked >= JOURNAL_ENTRY_CLOSED_VAL)
1015 				break;
1016 
1017 			new.v = old.v;
1018 			new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL;
1019 		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
1020 
1021 		if (old.cur_entry_offset < JOURNAL_ENTRY_BLOCKED_VAL)
1022 			journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset);
1023 	}
1024 }
1025 
1026 void bch2_journal_block(struct journal *j)
1027 {
1028 	spin_lock(&j->lock);
1029 	__bch2_journal_block(j);
1030 	spin_unlock(&j->lock);
1031 
1032 	journal_quiesce(j);
1033 }
1034 
1035 static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j,
1036 						u64 max_seq, bool *blocked)
1037 {
1038 	struct journal_buf *ret = NULL;
1039 
1040 	/* We're inside wait_event(), but using mutex_lock(): */
1041 	sched_annotate_sleep();
1042 	mutex_lock(&j->buf_lock);
1043 	spin_lock(&j->lock);
1044 	max_seq = min(max_seq, journal_cur_seq(j));
1045 
1046 	for (u64 seq = journal_last_unwritten_seq(j);
1047 	     seq <= max_seq;
1048 	     seq++) {
1049 		unsigned idx = seq & JOURNAL_BUF_MASK;
1050 		struct journal_buf *buf = j->buf + idx;
1051 
1052 		if (buf->need_flush_to_write_buffer) {
1053 			union journal_res_state s;
1054 			s.v = atomic64_read_acquire(&j->reservations.counter);
1055 
1056 			unsigned open = seq == journal_cur_seq(j) && __journal_entry_is_open(s);
1057 
1058 			if (open && !*blocked) {
1059 				__bch2_journal_block(j);
1060 				*blocked = true;
1061 			}
1062 
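			/*
			 * If there are outstanding reservations against this
			 * buffer - beyond the base reference held while the
			 * entry is open - return -EAGAIN so the caller's
			 * wait_event() retries once they're dropped:
			 */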
1063 			ret = journal_state_count(s, idx & JOURNAL_STATE_BUF_MASK) > open
1064 				? ERR_PTR(-EAGAIN)
1065 				: buf;
1066 			break;
1067 		}
1068 	}
1069 
1070 	spin_unlock(&j->lock);
1071 	if (IS_ERR_OR_NULL(ret))
1072 		mutex_unlock(&j->buf_lock);
1073 	return ret;
1074 }
1075 
1076 struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j,
1077 							     u64 max_seq, bool *blocked)
1078 {
1079 	struct journal_buf *ret;
1080 	*blocked = false;
1081 
1082 	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j,
1083 						max_seq, blocked)) != ERR_PTR(-EAGAIN));
1084 	if (IS_ERR_OR_NULL(ret) && *blocked)
1085 		bch2_journal_unblock(j);
1086 
1087 	return ret;
1088 }
1089 
1090 /* allocate journal on a device: */
1091 
1092 static int bch2_set_nr_journal_buckets_iter(struct bch_dev *ca, unsigned nr,
1093 					    bool new_fs, struct closure *cl)
1094 {
1095 	struct bch_fs *c = ca->fs;
1096 	struct journal_device *ja = &ca->journal;
1097 	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
1098 	struct open_bucket **ob = NULL;
1099 	long *bu = NULL;
1100 	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
1101 	int ret = 0;
1102 
1103 	BUG_ON(nr <= ja->nr);
1104 
1105 	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
1106 	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
1107 	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
1108 	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
1109 	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
1110 		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
1111 		goto err_free;
1112 	}
1113 
1114 	for (nr_got = 0; nr_got < nr_want; nr_got++) {
1115 		enum bch_watermark watermark = new_fs
1116 			? BCH_WATERMARK_btree
1117 			: BCH_WATERMARK_normal;
1118 
1119 		ob[nr_got] = bch2_bucket_alloc(c, ca, watermark,
1120 					       BCH_DATA_journal, cl);
1121 		ret = PTR_ERR_OR_ZERO(ob[nr_got]);
1122 		if (ret)
1123 			break;
1124 
1125 		if (!new_fs) {
1126 			ret = bch2_trans_run(c,
1127 				bch2_trans_mark_metadata_bucket(trans, ca,
1128 						ob[nr_got]->bucket, BCH_DATA_journal,
1129 						ca->mi.bucket_size, BTREE_TRIGGER_transactional));
1130 			if (ret) {
1131 				bch2_open_bucket_put(c, ob[nr_got]);
1132 				bch_err_msg(c, ret, "marking new journal buckets");
1133 				break;
1134 			}
1135 		}
1136 
1137 		bu[nr_got] = ob[nr_got]->bucket;
1138 	}
1139 
1140 	if (!nr_got)
1141 		goto err_free;
1142 
1143 	/* Don't return an error if we successfully allocated some buckets: */
1144 	ret = 0;
1145 
1146 	if (c) {
1147 		bch2_journal_flush_all_pins(&c->journal);
1148 		bch2_journal_block(&c->journal);
1149 		mutex_lock(&c->sb_lock);
1150 	}
1151 
1152 	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
1153 	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
1154 
1155 	BUG_ON(ja->discard_idx > ja->nr);
1156 
1157 	pos = ja->discard_idx ?: ja->nr;
1158 
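	/*
	 * Splice the new buckets in at the discard pointer: entries at or
	 * after pos are shifted up, and the ring indices (discard/dirty/cur)
	 * are rotated to match below:
	 */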
1159 	memmove(new_buckets + pos + nr_got,
1160 		new_buckets + pos,
1161 		sizeof(new_buckets[0]) * (ja->nr - pos));
1162 	memmove(new_bucket_seq + pos + nr_got,
1163 		new_bucket_seq + pos,
1164 		sizeof(new_bucket_seq[0]) * (ja->nr - pos));
1165 
1166 	for (i = 0; i < nr_got; i++) {
1167 		new_buckets[pos + i] = bu[i];
1168 		new_bucket_seq[pos + i] = 0;
1169 	}
1170 
1171 	nr = ja->nr + nr_got;
1172 
1173 	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
1174 	if (ret)
1175 		goto err_unblock;
1176 
1177 	bch2_write_super(c);
1178 
1179 	/* Commit: */
1180 	if (c)
1181 		spin_lock(&c->journal.lock);
1182 
1183 	swap(new_buckets,	ja->buckets);
1184 	swap(new_bucket_seq,	ja->bucket_seq);
1185 	ja->nr = nr;
1186 
1187 	if (pos <= ja->discard_idx)
1188 		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
1189 	if (pos <= ja->dirty_idx_ondisk)
1190 		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
1191 	if (pos <= ja->dirty_idx)
1192 		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
1193 	if (pos <= ja->cur_idx)
1194 		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
1195 
1196 	if (c)
1197 		spin_unlock(&c->journal.lock);
1198 err_unblock:
1199 	if (c) {
1200 		bch2_journal_unblock(&c->journal);
1201 		mutex_unlock(&c->sb_lock);
1202 	}
1203 
1204 	if (ret && !new_fs)
1205 		for (i = 0; i < nr_got; i++)
1206 			bch2_trans_run(c,
1207 				bch2_trans_mark_metadata_bucket(trans, ca,
1208 						bu[i], BCH_DATA_free, 0,
1209 						BTREE_TRIGGER_transactional));
1210 err_free:
1211 	for (i = 0; i < nr_got; i++)
1212 		bch2_open_bucket_put(c, ob[i]);
1213 
1214 	kfree(new_bucket_seq);
1215 	kfree(new_buckets);
1216 	kfree(ob);
1217 	kfree(bu);
1218 	return ret;
1219 }
1220 
1221 static int bch2_set_nr_journal_buckets_loop(struct bch_fs *c, struct bch_dev *ca,
1222 					    unsigned nr, bool new_fs)
1223 {
1224 	struct journal_device *ja = &ca->journal;
1225 	int ret = 0;
1226 
1227 	struct closure cl;
1228 	closure_init_stack(&cl);
1229 
1230 	/* don't handle reducing nr of buckets yet: */
1231 	if (nr < ja->nr)
1232 		return 0;
1233 
1234 	while (!ret && ja->nr < nr) {
1235 		struct disk_reservation disk_res = { 0, 0, 0 };
1236 
1237 		/*
1238 		 * note: journal buckets aren't really counted as _sectors_ used yet, so
1239 		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
1240 		 * when space used goes up without a reservation - but we do need the
1241 		 * reservation to ensure we'll actually be able to allocate:
1242 		 *
1243 		 * XXX: that's not right, disk reservations only ensure a
1244 		 * filesystem-wide allocation will succeed, this is a device
1245 		 * specific allocation - we can hang here:
1246 		 */
1247 		if (!new_fs) {
1248 			ret = bch2_disk_reservation_get(c, &disk_res,
1249 							bucket_to_sector(ca, nr - ja->nr), 1, 0);
1250 			if (ret)
1251 				break;
1252 		}
1253 
1254 		ret = bch2_set_nr_journal_buckets_iter(ca, nr, new_fs, &cl);
1255 
1256 		if (ret == -BCH_ERR_bucket_alloc_blocked ||
1257 		    ret == -BCH_ERR_open_buckets_empty)
1258 			ret = 0; /* wait and retry */
1259 
1260 		bch2_disk_reservation_put(c, &disk_res);
1261 		closure_sync(&cl);
1262 	}
1263 
1264 	return ret;
1265 }
1266 
1267 /*
1268  * Allocate more journal space at runtime - not currently making use of it, but
1269  * the code works:
1270  */
1271 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
1272 				unsigned nr)
1273 {
1274 	down_write(&c->state_lock);
1275 	int ret = bch2_set_nr_journal_buckets_loop(c, ca, nr, false);
1276 	up_write(&c->state_lock);
1277 
1278 	bch_err_fn(c, ret);
1279 	return ret;
1280 }
1281 
1282 int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
1283 {
1284 	unsigned nr;
1285 	int ret;
1286 
1287 	if (dynamic_fault("bcachefs:add:journal_alloc")) {
1288 		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
1289 		goto err;
1290 	}
1291 
1292 	/* 1/128th of the device by default: */
1293 	nr = ca->mi.nbuckets >> 7;
1294 
1295 	/*
1296 	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
1297 	 * is smaller:
1298 	 */
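	/*
	 * Worked example (assuming 512-byte sectors): with 2MiB buckets
	 * (4096 sectors), (1 << 24) / 4096 = 4096 buckets, so the 8GB cap
	 * binds; with 256KiB buckets it would allow 32768 buckets, so the
	 * 8192 (1 << 13) bucket cap binds instead.
	 */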
1299 	nr = clamp_t(unsigned, nr,
1300 		     BCH_JOURNAL_BUCKETS_MIN,
1301 		     min(1 << 13,
1302 			 (1 << 24) / ca->mi.bucket_size));
1303 
1304 	ret = bch2_set_nr_journal_buckets_loop(ca->fs, ca, nr, new_fs);
1305 err:
1306 	bch_err_fn(ca, ret);
1307 	return ret;
1308 }
1309 
1310 int bch2_fs_journal_alloc(struct bch_fs *c)
1311 {
1312 	for_each_online_member(c, ca) {
1313 		if (ca->journal.nr)
1314 			continue;
1315 
1316 		int ret = bch2_dev_journal_alloc(ca, true);
1317 		if (ret) {
1318 			percpu_ref_put(&ca->io_ref[READ]);
1319 			return ret;
1320 		}
1321 	}
1322 
1323 	return 0;
1324 }
1325 
1326 /* startup/shutdown: */
1327 
1328 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
1329 {
1330 	bool ret = false;
1331 	u64 seq;
1332 
1333 	spin_lock(&j->lock);
1334 	for (seq = journal_last_unwritten_seq(j);
1335 	     seq <= journal_cur_seq(j) && !ret;
1336 	     seq++) {
1337 		struct journal_buf *buf = journal_seq_to_buf(j, seq);
1338 
1339 		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
1340 			ret = true;
1341 	}
1342 	spin_unlock(&j->lock);
1343 
1344 	return ret;
1345 }
1346 
1347 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
1348 {
1349 	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
1350 }
1351 
1352 void bch2_fs_journal_stop(struct journal *j)
1353 {
1354 	if (!test_bit(JOURNAL_running, &j->flags))
1355 		return;
1356 
1357 	bch2_journal_reclaim_stop(j);
1358 	bch2_journal_flush_all_pins(j);
1359 
1360 	wait_event(j->wait, bch2_journal_entry_close(j));
1361 
1362 	/*
1363 	 * Always write a new journal entry, to make sure the clock hands are up
1364 	 * to date (and match the superblock)
1365 	 */
1366 	__bch2_journal_meta(j);
1367 
1368 	journal_quiesce(j);
1369 	cancel_delayed_work_sync(&j->write_work);
1370 
1371 	WARN(!bch2_journal_error(j) &&
1372 	     test_bit(JOURNAL_replay_done, &j->flags) &&
1373 	     j->last_empty_seq != journal_cur_seq(j),
1374 	     "journal shutdown error: cur seq %llu but last empty seq %llu",
1375 	     journal_cur_seq(j), j->last_empty_seq);
1376 
1377 	if (!bch2_journal_error(j))
1378 		clear_bit(JOURNAL_running, &j->flags);
1379 }
1380 
1381 int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
1382 {
1383 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
1384 	struct journal_entry_pin_list *p;
1385 	struct journal_replay *i, **_i;
1386 	struct genradix_iter iter;
1387 	bool had_entries = false;
1388 	u64 last_seq = cur_seq, nr, seq;
1389 
1390 	if (cur_seq >= JOURNAL_SEQ_MAX) {
1391 		bch_err(c, "cannot start: journal seq overflow");
1392 		return -EINVAL;
1393 	}
1394 
1395 	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
1396 		i = *_i;
1397 
1398 		if (journal_replay_ignore(i))
1399 			continue;
1400 
1401 		last_seq = le64_to_cpu(i->j.last_seq);
1402 		break;
1403 	}
1404 
1405 	nr = cur_seq - last_seq;
1406 
1407 	/*
1408 	 * Extra fudge factor, in case we crashed when the journal pin fifo was
1409 	 * nearly or completely full. We'll need to be able to open additional
1410 	 * journal entries (at least a few) in order for journal replay to get
1411 	 * going:
1412 	 */
1413 	nr += nr / 4;
1414 
1415 	if (nr + 1 > j->pin.size) {
1416 		free_fifo(&j->pin);
1417 		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
1418 		if (!j->pin.data) {
1419 			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
1420 			return -BCH_ERR_ENOMEM_journal_pin_fifo;
1421 		}
1422 	}
1423 
1424 	j->replay_journal_seq	= last_seq;
1425 	j->replay_journal_seq_end = cur_seq;
1426 	j->last_seq_ondisk	= last_seq;
1427 	j->flushed_seq_ondisk	= cur_seq - 1;
1428 	j->seq_write_started	= cur_seq - 1;
1429 	j->seq_ondisk		= cur_seq - 1;
1430 	j->pin.front		= last_seq;
1431 	j->pin.back		= cur_seq;
1432 	atomic64_set(&j->seq, cur_seq - 1);
1433 
1434 	fifo_for_each_entry_ptr(p, &j->pin, seq)
1435 		journal_pin_list_init(p, 1);
1436 
1437 	genradix_for_each(&c->journal_entries, iter, _i) {
1438 		i = *_i;
1439 
1440 		if (journal_replay_ignore(i))
1441 			continue;
1442 
1443 		seq = le64_to_cpu(i->j.seq);
1444 		BUG_ON(seq >= cur_seq);
1445 
1446 		if (seq < last_seq)
1447 			continue;
1448 
1449 		if (journal_entry_empty(&i->j))
1450 			j->last_empty_seq = le64_to_cpu(i->j.seq);
1451 
1452 		p = journal_seq_pin(j, seq);
1453 
1454 		p->devs.nr = 0;
1455 		darray_for_each(i->ptrs, ptr)
1456 			bch2_dev_list_add_dev(&p->devs, ptr->dev);
1457 
1458 		had_entries = true;
1459 	}
1460 
1461 	if (!had_entries)
1462 		j->last_empty_seq = cur_seq - 1; /* to match j->seq */
1463 
1464 	spin_lock(&j->lock);
1465 
1466 	set_bit(JOURNAL_running, &j->flags);
1467 	j->last_flush_write = jiffies;
1468 
1469 	j->reservations.idx = journal_cur_seq(j);
1470 
1471 	c->last_bucket_seq_cleanup = journal_cur_seq(j);
1472 	spin_unlock(&j->lock);
1473 
1474 	return 0;
1475 }
1476 
1477 /* init/exit: */
1478 
1479 void bch2_dev_journal_exit(struct bch_dev *ca)
1480 {
1481 	struct journal_device *ja = &ca->journal;
1482 
1483 	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
1484 		kfree(ja->bio[i]);
1485 		ja->bio[i] = NULL;
1486 	}
1487 
1488 	kfree(ja->buckets);
1489 	kfree(ja->bucket_seq);
1490 	ja->buckets	= NULL;
1491 	ja->bucket_seq	= NULL;
1492 }
1493 
1494 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1495 {
1496 	struct journal_device *ja = &ca->journal;
1497 	struct bch_sb_field_journal *journal_buckets =
1498 		bch2_sb_field_get(sb, journal);
1499 	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
1500 		bch2_sb_field_get(sb, journal_v2);
1501 
1502 	ja->nr = 0;
1503 
1504 	if (journal_buckets_v2) {
1505 		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1506 
1507 		for (unsigned i = 0; i < nr; i++)
1508 			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
1509 	} else if (journal_buckets) {
1510 		ja->nr = bch2_nr_journal_buckets(journal_buckets);
1511 	}
1512 
1513 	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1514 	if (!ja->bucket_seq)
1515 		return -BCH_ERR_ENOMEM_dev_journal_init;
1516 
1517 	unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
1518 
1519 	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
1520 		ja->bio[i] = kzalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
1521 				     nr_bvecs), GFP_KERNEL);
1522 		if (!ja->bio[i])
1523 			return -BCH_ERR_ENOMEM_dev_journal_init;
1524 
1525 		ja->bio[i]->ca = ca;
1526 		ja->bio[i]->buf_idx = i;
1527 		bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
1528 	}
1529 
1530 	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1531 	if (!ja->buckets)
1532 		return -BCH_ERR_ENOMEM_dev_journal_init;
1533 
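	/*
	 * The v2 superblock field stores journal buckets as (start, nr)
	 * ranges; expand them into the flat per-bucket array the runtime
	 * code uses:
	 */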
1534 	if (journal_buckets_v2) {
1535 		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1536 		unsigned dst = 0;
1537 
1538 		for (unsigned i = 0; i < nr; i++)
1539 			for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
1540 				ja->buckets[dst++] =
1541 					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
1542 	} else if (journal_buckets) {
1543 		for (unsigned i = 0; i < ja->nr; i++)
1544 			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1545 	}
1546 
1547 	return 0;
1548 }
1549 
1550 void bch2_fs_journal_exit(struct journal *j)
1551 {
1552 	if (j->wq)
1553 		destroy_workqueue(j->wq);
1554 
1555 	darray_exit(&j->early_journal_entries);
1556 
1557 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
1558 		kvfree(j->buf[i].data);
1559 	kvfree(j->free_buf);
1560 	free_fifo(&j->pin);
1561 }
1562 
1563 int bch2_fs_journal_init(struct journal *j)
1564 {
1565 	static struct lock_class_key res_key;
1566 
1567 	mutex_init(&j->buf_lock);
1568 	spin_lock_init(&j->lock);
1569 	spin_lock_init(&j->err_lock);
1570 	init_waitqueue_head(&j->wait);
1571 	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1572 	init_waitqueue_head(&j->reclaim_wait);
1573 	init_waitqueue_head(&j->pin_flush_wait);
1574 	mutex_init(&j->reclaim_lock);
1575 	mutex_init(&j->discard_lock);
1576 
1577 	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1578 
1579 	atomic64_set(&j->reservations.counter,
1580 		((union journal_res_state)
1581 		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1582 
1583 	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
1584 		return -BCH_ERR_ENOMEM_journal_pin_fifo;
1585 
1586 	j->free_buf_size = j->buf_size_want = JOURNAL_ENTRY_SIZE_MIN;
1587 	j->free_buf = kvmalloc(j->free_buf_size, GFP_KERNEL);
1588 	if (!j->free_buf)
1589 		return -BCH_ERR_ENOMEM_journal_buf;
1590 
1591 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
1592 		j->buf[i].idx = i;
1593 
1594 	j->pin.front = j->pin.back = 1;
1595 
1596 	j->wq = alloc_workqueue("bcachefs_journal",
1597 				WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
1598 	if (!j->wq)
1599 		return -BCH_ERR_ENOMEM_fs_other_alloc;
1600 	return 0;
1601 }
1602 
1603 /* debug: */
1604 
1605 static const char * const bch2_journal_flags_strs[] = {
1606 #define x(n)	#n,
1607 	JOURNAL_FLAGS()
1608 #undef x
1609 	NULL
1610 };
1611 
1612 void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1613 {
1614 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
1615 	union journal_res_state s;
1616 	unsigned long now = jiffies;
1617 	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
1618 
1619 	printbuf_tabstops_reset(out);
1620 	printbuf_tabstop_push(out, 28);
1621 	out->atomic++;
1622 
1623 	rcu_read_lock();
1624 	s = READ_ONCE(j->reservations);
1625 
1626 	prt_printf(out, "flags:\t");
1627 	prt_bitflags(out, bch2_journal_flags_strs, j->flags);
1628 	prt_newline(out);
1629 	prt_printf(out, "dirty journal entries:\t%llu/%llu\n",	fifo_used(&j->pin), j->pin.size);
1630 	prt_printf(out, "seq:\t%llu\n",				journal_cur_seq(j));
1631 	prt_printf(out, "seq_ondisk:\t%llu\n",			j->seq_ondisk);
1632 	prt_printf(out, "last_seq:\t%llu\n",			journal_last_seq(j));
1633 	prt_printf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
1634 	prt_printf(out, "flushed_seq_ondisk:\t%llu\n",		j->flushed_seq_ondisk);
1635 	prt_printf(out, "watermark:\t%s\n",			bch2_watermarks[j->watermark]);
1636 	prt_printf(out, "each entry reserved:\t%u\n",		j->entry_u64s_reserved);
1637 	prt_printf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
1638 	prt_printf(out, "nr noflush writes:\t%llu\n",		j->nr_noflush_writes);
1639 	prt_printf(out, "average write size:\t");
1640 	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
1641 	prt_newline(out);
1642 	prt_printf(out, "free buf:\t%u\n",			j->free_buf ? j->free_buf_size : 0);
1643 	prt_printf(out, "nr direct reclaim:\t%llu\n",		j->nr_direct_reclaim);
1644 	prt_printf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
1645 	prt_printf(out, "reclaim kicked:\t%u\n",		j->reclaim_kicked);
1646 	prt_printf(out, "reclaim runs in:\t%u ms\n",		time_after(j->next_reclaim, now)
1647 	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
1648 	prt_printf(out, "blocked:\t%u\n",			j->blocked);
1649 	prt_printf(out, "current entry sectors:\t%u\n",		j->cur_entry_sectors);
1650 	prt_printf(out, "current entry error:\t%s\n",		bch2_err_str(j->cur_entry_error));
1651 	prt_printf(out, "current entry:\t");
1652 
1653 	switch (s.cur_entry_offset) {
1654 	case JOURNAL_ENTRY_ERROR_VAL:
1655 		prt_printf(out, "error\n");
1656 		break;
1657 	case JOURNAL_ENTRY_CLOSED_VAL:
1658 		prt_printf(out, "closed\n");
1659 		break;
1660 	case JOURNAL_ENTRY_BLOCKED_VAL:
1661 		prt_printf(out, "blocked\n");
1662 		break;
1663 	default:
1664 		prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
1665 		break;
1666 	}
1667 
1668 	prt_printf(out, "unwritten entries:\n");
1669 	bch2_journal_bufs_to_text(out, j);
1670 
1671 	prt_printf(out, "space:\n");
1672 	printbuf_indent_add(out, 2);
1673 	prt_printf(out, "discarded\t%u:%u\n",
1674 	       j->space[journal_space_discarded].next_entry,
1675 	       j->space[journal_space_discarded].total);
1676 	prt_printf(out, "clean ondisk\t%u:%u\n",
1677 	       j->space[journal_space_clean_ondisk].next_entry,
1678 	       j->space[journal_space_clean_ondisk].total);
1679 	prt_printf(out, "clean\t%u:%u\n",
1680 	       j->space[journal_space_clean].next_entry,
1681 	       j->space[journal_space_clean].total);
1682 	prt_printf(out, "total\t%u:%u\n",
1683 	       j->space[journal_space_total].next_entry,
1684 	       j->space[journal_space_total].total);
1685 	printbuf_indent_sub(out, 2);
1686 
1687 	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
1688 		if (!ca->mi.durability)
1689 			continue;
1690 
1691 		struct journal_device *ja = &ca->journal;
1692 
1693 		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
1694 			continue;
1695 
1696 		if (!ja->nr)
1697 			continue;
1698 
1699 		prt_printf(out, "dev %u:\n",			ca->dev_idx);
1700 		prt_printf(out, "durability %u:\n",		ca->mi.durability);
1701 		printbuf_indent_add(out, 2);
1702 		prt_printf(out, "nr\t%u\n",			ja->nr);
1703 		prt_printf(out, "bucket size\t%u\n",		ca->mi.bucket_size);
1704 		prt_printf(out, "available\t%u:%u\n",		bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
1705 		prt_printf(out, "discard_idx\t%u\n",		ja->discard_idx);
1706 		prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n",ja->dirty_idx_ondisk,	ja->bucket_seq[ja->dirty_idx_ondisk]);
1707 		prt_printf(out, "dirty_idx\t%u (seq %llu)\n",	ja->dirty_idx,		ja->bucket_seq[ja->dirty_idx]);
1708 		prt_printf(out, "cur_idx\t%u (seq %llu)\n",	ja->cur_idx,		ja->bucket_seq[ja->cur_idx]);
1709 		printbuf_indent_sub(out, 2);
1710 	}
1711 
1712 	prt_printf(out, "replicas want %u need %u\n", c->opts.metadata_replicas, c->opts.metadata_replicas_required);
1713 
1714 	rcu_read_unlock();
1715 
1716 	--out->atomic;
1717 }
1718 
1719 void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1720 {
1721 	spin_lock(&j->lock);
1722 	__bch2_journal_debug_to_text(out, j);
1723 	spin_unlock(&j->lock);
1724 }
1725