// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static const char * const bch2_journal_errors[] = {
#define x(n)	#n,
	JOURNAL_ERRORS()
#undef x
	NULL
};

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	unsigned i = seq & JOURNAL_BUF_MASK;
	struct journal_buf *buf = j->buf + i;

	prt_printf(out, "seq:\t%llu\n", seq);
	printbuf_indent_add(out, 2);

	prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));

	prt_printf(out, "size:\t");
	prt_human_readable_u64(out, vstruct_bytes(buf->data));
	prt_newline(out);

	prt_printf(out, "expires:\t");
	prt_printf(out, "%li jiffies\n", buf->expires - jiffies);

	prt_printf(out, "flags:\t");
	if (buf->noflush)
		prt_str(out, "noflush ");
	if (buf->must_flush)
		prt_str(out, "must_flush ");
	if (buf->separate_flush)
		prt_str(out, "separate_flush ");
	if (buf->need_flush_to_write_buffer)
		prt_str(out, "need_flush_to_write_buffer ");
	if (buf->write_started)
		prt_str(out, "write_started ");
	if (buf->write_allocated)
		prt_str(out, "write_allocated ");
	if (buf->write_done)
		prt_str(out, "write_done");
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
{
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++)
		bch2_journal_buf_to_text(out, j, seq);
	prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}
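
/*
 * Note: seq & JOURNAL_BUF_MASK maps a sequence number onto the small
 * power-of-two ring of journal_bufs, so sequence numbers ARRAY_SIZE(j->buf)
 * apart alias to the same slot - which is why at most ARRAY_SIZE(j->buf)
 * entries may ever be unwritten at once (see the JOURNAL_ERR_max_in_flight
 * checks below).
 */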

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(p->list); i++)
		INIT_LIST_HEAD(&p->list[i]);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, to discard journal buckets, nor acquire priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool stuck = false;
	struct printbuf buf = PRINTBUF;

	if (!(error == JOURNAL_ERR_journal_full ||
	      error == JOURNAL_ERR_journal_pin_full) ||
	    nr_unwritten_journal_entries(j) ||
	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
		return stuck;

	spin_lock(&j->lock);

	if (j->can_discard) {
		spin_unlock(&j->lock);
		return stuck;
	}

	stuck = true;

	/*
	 * The journal shutdown path will set ->err_seq, but do it here first to
	 * serialize against concurrent failures and avoid duplicate error
	 * reports.
	 */
	if (j->err_seq) {
		spin_unlock(&j->lock);
		return stuck;
	}
	j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);

	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
		bch2_journal_errors[error]);
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "%s", buf.buf);

	printbuf_reset(&buf);
	bch2_journal_pins_to_text(&buf, j);
	bch_err(c, "Journal pins:\n%s", buf.buf);
	printbuf_exit(&buf);

	bch2_fatal_error(c);
	dump_stack();

	return stuck;
}

void bch2_journal_do_writes(struct journal *j)
{
	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *w = j->buf + idx;

		if (w->write_started && !w->write_allocated)
			break;
		if (w->write_started)
			continue;

		if (!journal_state_count(j->reservations, idx)) {
			w->write_started = true;
			closure_call(&w->io, bch2_journal_write, j->wq, NULL);
		}

		break;
	}
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq)
{
	lockdep_assert_held(&j->lock);

	if (__bch2_journal_pin_put(j, seq))
		bch2_journal_reclaim_fast(j);
	bch2_journal_do_writes(j);
}

/*
 * Close the currently open journal entry, if any:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	old.v = atomic64_read(&j->reservations.counter);
	do {
		new.v = old.v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
				       &old.v, new.v));

	if (!__journal_entry_is_open(old))
		return;

	/* Close out old buffer: */
	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	if (trace_journal_entry_close_enabled() && trace) {
		struct printbuf pbuf = PRINTBUF;
		pbuf.atomic++;

		prt_str(&pbuf, "entry size: ");
		prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
		prt_newline(&pbuf);
		bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
		trace_journal_entry_close(c, pbuf.buf);
		printbuf_exit(&pbuf);
	}

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry right
	 * before we open a new one (see the worked example after this function):
	 */
	buf->last_seq		= journal_last_seq(j);
	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}
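
/*
 * Worked example of the last_seq rule above (illustrative sequence numbers):
 * entry 9 is closed and entry 10 then opens; a thread takes a reservation
 * against entry 10, pins seq 10 and drops its old pin at seq 8, advancing
 * journal_last_seq(). Had entry 9's last_seq been sampled after that, it
 * could claim seq 8 is no longer needed even though the re-pinned data exists
 * only in entry 10, which may not yet be on disk. Sampling last_seq at close
 * time, before the next entry can take reservations, rules this out.
 */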

void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

bool bch2_journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return JOURNAL_ERR_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return JOURNAL_ERR_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
		return JOURNAL_ERR_max_in_flight;

	BUG_ON(!j->cur_entry_sectors);

	buf->expires		=
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= (ssize_t) j->early_journal_entries.nr)
		return JOURNAL_ERR_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush		= false;
	buf->must_flush		= false;
	buf->separate_flush	= false;
	buf->flush_time		= 0;
	buf->need_flush_to_write_buffer = true;
	buf->write_started	= false;
	buf->write_allocated	= false;
	buf->write_done		= false;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;

	if (j->early_journal_entries.nr) {
		memcpy(buf->data->_data, j->early_journal_entries.data,
		       j->early_journal_entries.nr * sizeof(u64));
		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
	}

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	old.v = atomic64_read(&j->reservations.counter);
	do {
		new.v = old.v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
				       &old.v, new.v));

	if (nr_unwritten_journal_entries(j) == 1)
		mod_delayed_work(j->wq,
				 &j->write_work,
				 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);

	if (j->early_journal_entries.nr)
		darray_exit(&j->early_journal_entries);
	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		bch2_journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	spin_lock(&j->lock);
	if (__journal_entry_is_open(j->reservations)) {
		long delta = journal_cur_buf(j)->expires - jiffies;

		if (delta > 0)
			mod_delayed_work(j->wq, &j->write_work, delta);
		else
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	}
	spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -BCH_ERR_erofs_journal_err;

	if (j->blocked)
		return -BCH_ERR_journal_res_get_blocked;

	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
		ret = JOURNAL_ERR_journal_full;
		can_discard = j->can_discard;
		goto out;
	}

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
		ret = JOURNAL_ERR_max_in_flight;
		goto out;
	}

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and end up calling
	 * bch2_journal_entry_close() unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		ret = 0;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
	ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
unlock:
	can_discard = j->can_discard;
	spin_unlock(&j->lock);
out:
	if (ret == JOURNAL_ERR_retry)
		goto retry;
	if (!ret)
		return 0;

	if (journal_error_check_stuck(j, ret, flags))
		ret = -BCH_ERR_journal_res_get_blocked;

	if (ret == JOURNAL_ERR_max_in_flight &&
	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {

		struct printbuf buf = PRINTBUF;
		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
		bch2_journal_bufs_to_text(&buf, j);
		trace_journal_entry_full(c, buf.buf);
		printbuf_exit(&buf);
		count_event(c, journal_entry_full);
	}

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == JOURNAL_ERR_journal_full ||
	     ret == JOURNAL_ERR_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == JOURNAL_ERR_insufficient_devices
		? -BCH_ERR_erofs_journal_err
		: -BCH_ERR_journal_res_get_blocked;
}

/*
 * Essentially the entry point to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * The journal write is the structure used to set up journal writes. The
 * calling function will then add its keys to the structure, queuing them for
 * the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	if (closure_wait_event_timeout(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK),
		   HZ * 10))
		return ret;

	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct printbuf buf = PRINTBUF;
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "Journal stuck? Waited for 10 seconds...\n%s",
		buf.buf);
	printbuf_exit(&buf);

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
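
/*
 * Illustrative reservation pattern (a sketch, not a verbatim caller; see
 * bch2_journal_meta() below for a real minimal user):
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *	if (ret)
 *		return ret;
 *	...copy keys into the space the reservation points at...
 *	bch2_journal_res_put(j, &res);
 */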

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}
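
/*
 * Illustrative use (hypothetical caller): a subsystem that must be able to
 * add a fixed-size entry to every journal write keeps a long-lived
 * journal_entry_res and resizes it when its space requirement changes
 * (my_res and new_nr_u64s are hypothetical names):
 *
 *	bch2_journal_entry_res_resize(j, &my_res, new_nr_u64s);
 */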

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j:		journal object
 * @seq:	seq to flush
 * @parent:	closure object to wait with
 * Returns:	1 if @seq has already been flushed, 0 if @seq is being flushed,
 *		-EIO if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

		spin_unlock(&j->lock);

		/*
		 * We're called from bch2_journal_flush_seq() -> wait_event();
		 * but this might block. We won't usually block, so we won't
		 * livelock:
		 */
		sched_annotate_sleep();
		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = journal_seq_to_buf(j, seq);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, or if we promised it
	 * wouldn't be a flush, flush the next sequence number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}
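
/*
 * Illustrative async caller (a sketch): wait for @seq with a closure instead
 * of blocking the task - cf. bch2_journal_flush_seq() below, which wraps this
 * in wait_event():
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch2_journal_flush_seq_async(j, seq, &cl);
 *	closure_sync(&cl);
 *
 * closure_sync() returns once the flush completes, or immediately if the
 * call returned nonzero (already flushed, or error).
 */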

int bch2_journal_flush_seq(struct journal *j, u64 seq, unsigned task_state)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_state(j->wait,
			       (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)),
			       task_state);

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq), TASK_UNINTERRUPTIBLE);
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (seq <= c->journal.flushed_seq_ondisk)
		return false;

	spin_lock(&j->lock);
	if (seq <= c->journal.flushed_seq_ondisk)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < seq;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal flush already in flight, or flush requested */
		if (buf->must_flush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_buf *buf;
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time	= local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq, TASK_UNINTERRUPTIBLE);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}
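
/*
 * bch2_journal_block()/bch2_journal_unblock() nest, since j->blocked is a
 * counter; see __bch2_set_nr_journal_buckets() below for the typical pairing,
 * around changes to the journal's on-disk layout.
 */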

static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	struct journal_buf *ret = NULL;

	/* We're inside wait_event(), but using mutex_lock(): */
	sched_annotate_sleep();
	mutex_lock(&j->buf_lock);
	spin_lock(&j->lock);
	max_seq = min(max_seq, journal_cur_seq(j));

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= max_seq;
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *buf = j->buf + idx;

		if (buf->need_flush_to_write_buffer) {
			if (seq == journal_cur_seq(j))
				__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

			union journal_res_state s;
			s.v = atomic64_read_acquire(&j->reservations.counter);

			ret = journal_state_count(s, idx)
				? ERR_PTR(-EAGAIN)
				: buf;
			break;
		}
	}

	spin_unlock(&j->lock);
	if (IS_ERR_OR_NULL(ret))
		mutex_unlock(&j->buf_lock);
	return ret;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	struct journal_buf *ret;

	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));
	return ret;
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	struct open_bucket **ob = NULL;
	long *bu = NULL;
	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
	int ret = 0;

	BUG_ON(nr <= ja->nr);

	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err_free;
	}

	for (nr_got = 0; nr_got < nr_want; nr_got++) {
		if (new_fs) {
			bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
			if (bu[nr_got] < 0) {
				ret = -BCH_ERR_ENOSPC_bucket_alloc;
				break;
			}
		} else {
			ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
						       BCH_DATA_journal, cl);
			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
			if (ret)
				break;

			ret = bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						ob[nr_got]->bucket, BCH_DATA_journal,
						ca->mi.bucket_size, BTREE_TRIGGER_transactional));
			if (ret) {
				bch2_open_bucket_put(c, ob[nr_got]);
				bch_err_msg(c, ret, "marking new journal buckets");
				break;
			}

			bu[nr_got] = ob[nr_got]->bucket;
		}
	}

	if (!nr_got)
		goto err_free;

	/* Don't return an error if we successfully allocated some buckets: */
	ret = 0;

	if (c) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_block(&c->journal);
		mutex_lock(&c->sb_lock);
	}

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));

	BUG_ON(ja->discard_idx > ja->nr);

	pos = ja->discard_idx ?: ja->nr;

	memmove(new_buckets + pos + nr_got,
		new_buckets + pos,
		sizeof(new_buckets[0]) * (ja->nr - pos));
	memmove(new_bucket_seq + pos + nr_got,
		new_bucket_seq + pos,
		sizeof(new_bucket_seq[0]) * (ja->nr - pos));

	for (i = 0; i < nr_got; i++) {
		new_buckets[pos + i] = bu[i];
		new_bucket_seq[pos + i] = 0;
	}

	nr = ja->nr + nr_got;

	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
	if (ret)
		goto err_unblock;

	if (!new_fs)
		bch2_write_super(c);

	/* Commit: */
	if (c)
		spin_lock(&c->journal.lock);

	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);
	ja->nr = nr;

	if (pos <= ja->discard_idx)
		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx_ondisk)
		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx)
		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
	if (pos <= ja->cur_idx)
		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

	if (c)
		spin_unlock(&c->journal.lock);
err_unblock:
	if (c) {
		bch2_journal_unblock(&c->journal);
		mutex_unlock(&c->sb_lock);
	}

	if (ret && !new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						bu[i], BCH_DATA_free, 0,
						BTREE_TRIGGER_transactional));
err_free:
	if (!new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_open_bucket_put(c, ob[i]);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	kfree(ob);
	kfree(bu);
	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	int ret = 0;

	closure_init_stack(&cl);

	down_write(&c->state_lock);

	/* don't handle reducing nr of buckets yet: */
	if (nr < ja->nr)
		goto unlock;

	while (ja->nr < nr) {
		struct disk_reservation disk_res = { 0, 0, 0 };

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 *
		 * XXX: that's not right, disk reservations only ensure a
		 * filesystem-wide allocation will succeed, this is a device
		 * specific allocation - we can hang here:
		 */

		ret = bch2_disk_reservation_get(c, &disk_res,
						bucket_to_sector(ca, nr - ja->nr), 1, 0);
		if (ret)
			break;

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		closure_sync(&cl);

		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
			break;
	}

	bch_err_fn(c, ret);
unlock:
	up_write(&c->state_lock);
	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
{
	unsigned nr;
	int ret;

	if (dynamic_fault("bcachefs:add:journal_alloc")) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err;
	}

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
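	/*
	 * e.g. (illustrative numbers): with 1024-sector (512 KiB) buckets,
	 * (1 << 24) / 1024 = 16384, so the 8192-bucket cap wins (4 GiB of
	 * journal); with 4096-sector (2 MiB) buckets, (1 << 24) / 4096 = 4096
	 * buckets, so the 8 GB cap wins.
	 */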
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));

	ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
err:
	bch_err_fn(ca, ret);
	return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
	for_each_online_member(c, ca) {
		if (ca->journal.nr)
			continue;

		int ret = bch2_dev_journal_alloc(ca, true);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	if (!test_bit(JOURNAL_running, &j->flags))
		return;

	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, bch2_journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);
	cancel_delayed_work_sync(&j->write_work);

	WARN(!bch2_journal_error(j) &&
	     test_bit(JOURNAL_replay_done, &j->flags) &&
	     j->last_empty_seq != journal_cur_seq(j),
	     "journal shutdown error: cur seq %llu but last empty seq %llu",
	     journal_cur_seq(j), j->last_empty_seq);

	if (!bch2_journal_error(j))
		clear_bit(JOURNAL_running, &j->flags);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i, **_i;
	struct genradix_iter iter;
	bool had_entries = false;
	u64 last_seq = cur_seq, nr, seq;

	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		last_seq = le64_to_cpu(i->j.last_seq);
		break;
	}

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -BCH_ERR_ENOMEM_journal_pin_fifo;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->flushed_seq_ondisk	= cur_seq - 1;
	j->seq_ondisk		= cur_seq - 1;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		darray_for_each(i->ptrs, ptr)
			bch2_dev_list_add_dev(&p->devs, ptr->dev);

		had_entries = true;
	}

	if (!had_entries)
		j->last_empty_seq = cur_seq - 1; /* to match j->seq */

	spin_lock(&j->lock);

	set_bit(JOURNAL_running, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	struct journal_device *ja = &ca->journal;

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		kfree(ja->bio[i]);
		ja->bio[i] = NULL;
	}

	kfree(ja->buckets);
	kfree(ja->bucket_seq);
	ja->buckets	= NULL;
	ja->bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_field_get(sb, journal);
	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
		bch2_sb_field_get(sb, journal_v2);

	ja->nr = 0;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

		for (unsigned i = 0; i < nr; i++)
			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
	} else if (journal_buckets) {
		ja->nr = bch2_nr_journal_buckets(journal_buckets);
	}

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
				     nr_bvecs), GFP_KERNEL);
		if (!ja->bio[i])
			return -BCH_ERR_ENOMEM_dev_journal_init;

		ja->bio[i]->ca = ca;
		ja->bio[i]->buf_idx = i;
		bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
	}

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
		unsigned dst = 0;

		for (unsigned i = 0; i < nr; i++)
			for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
				ja->buckets[dst++] =
					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
	} else if (journal_buckets) {
		for (unsigned i = 0; i < ja->nr; i++)
			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
	}

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	if (j->wq)
		destroy_workqueue(j->wq);

	darray_exit(&j->early_journal_entries);

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvfree(j->buf[i].data);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	static struct lock_class_key res_key;

	mutex_init(&j->buf_lock);
	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
		return -BCH_ERR_ENOMEM_journal_pin_fifo;

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data)
			return -BCH_ERR_ENOMEM_journal_buf;
		j->buf[i].idx = i;
	}

	j->pin.front = j->pin.back = 1;

	j->wq = alloc_workqueue("bcachefs_journal",
				WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
	if (!j->wq)
		return -BCH_ERR_ENOMEM_fs_other_alloc;
	return 0;
}

/* debug: */

static const char * const bch2_journal_flags_strs[] = {
#define x(n)	#n,
	JOURNAL_FLAGS()
#undef x
	NULL
};

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	unsigned long now = jiffies;
	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 28);
	out->atomic++;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	prt_printf(out, "flags:\t");
	prt_bitflags(out, bch2_journal_flags_strs, j->flags);
	prt_newline(out);
	prt_printf(out, "dirty journal entries:\t%llu/%llu\n",	fifo_used(&j->pin), j->pin.size);
	prt_printf(out, "seq:\t%llu\n",				journal_cur_seq(j));
	prt_printf(out, "seq_ondisk:\t%llu\n",			j->seq_ondisk);
	prt_printf(out, "last_seq:\t%llu\n",			journal_last_seq(j));
	prt_printf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
	prt_printf(out, "flushed_seq_ondisk:\t%llu\n",		j->flushed_seq_ondisk);
	prt_printf(out, "watermark:\t%s\n",			bch2_watermarks[j->watermark]);
	prt_printf(out, "each entry reserved:\t%u\n",		j->entry_u64s_reserved);
	prt_printf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
	prt_printf(out, "nr noflush writes:\t%llu\n",		j->nr_noflush_writes);
	prt_printf(out, "average write size:\t");
	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
	prt_newline(out);
	prt_printf(out, "nr direct reclaim:\t%llu\n",		j->nr_direct_reclaim);
	prt_printf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
	prt_printf(out, "reclaim kicked:\t%u\n",		j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n",		time_after(j->next_reclaim, now)
	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	prt_printf(out, "blocked:\t%u\n",			j->blocked);
	prt_printf(out, "current entry sectors:\t%u\n",		j->cur_entry_sectors);
	prt_printf(out, "current entry error:\t%s\n",		bch2_journal_errors[j->cur_entry_error]);
	prt_printf(out, "current entry:\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		prt_printf(out, "error\n");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		prt_printf(out, "closed\n");
		break;
	default:
		prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	prt_printf(out, "unwritten entries:\n");
	bch2_journal_bufs_to_text(out, j);

	prt_printf(out, "space:\n");
	printbuf_indent_add(out, 2);
	prt_printf(out, "discarded\t%u:%u\n",
	       j->space[journal_space_discarded].next_entry,
	       j->space[journal_space_discarded].total);
	prt_printf(out, "clean ondisk\t%u:%u\n",
	       j->space[journal_space_clean_ondisk].next_entry,
	       j->space[journal_space_clean_ondisk].total);
	prt_printf(out, "clean\t%u:%u\n",
	       j->space[journal_space_clean].next_entry,
	       j->space[journal_space_clean].total);
	prt_printf(out, "total\t%u:%u\n",
	       j->space[journal_space_total].next_entry,
	       j->space[journal_space_total].total);
	printbuf_indent_sub(out, 2);

	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		prt_printf(out, "dev %u:\n",			ca->dev_idx);
		printbuf_indent_add(out, 2);
		prt_printf(out, "nr\t%u\n",			ja->nr);
		prt_printf(out, "bucket size\t%u\n",		ca->mi.bucket_size);
		prt_printf(out, "available\t%u:%u\n",		bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		prt_printf(out, "discard_idx\t%u\n",		ja->discard_idx);
		prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n",ja->dirty_idx_ondisk,	ja->bucket_seq[ja->dirty_idx_ondisk]);
		prt_printf(out, "dirty_idx\t%u (seq %llu)\n",	ja->dirty_idx,		ja->bucket_seq[ja->dirty_idx]);
		prt_printf(out, "cur_idx\t%u (seq %llu)\n",	ja->cur_idx,		ja->bucket_seq[ja->cur_idx]);
		printbuf_indent_sub(out, 2);
	}

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;

	spin_lock(&j->lock);
	if (!test_bit(JOURNAL_running, &j->flags)) {
		spin_unlock(&j->lock);
		return true;
	}

	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
	printbuf_indent_add(out, 2);

	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
		list_for_each_entry(pin, &pin_list->list[i], list)
			prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	if (!list_empty(&pin_list->flushed))
		prt_printf(out, "flushed:\n");

	list_for_each_entry(pin, &pin_list->flushed, list)
		prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}