// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static const char * const bch2_journal_errors[] = {
#define x(n) #n,
	JOURNAL_ERRORS()
#undef x
	NULL
};

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	unsigned i = seq & JOURNAL_BUF_MASK;
	struct journal_buf *buf = j->buf + i;

	prt_printf(out, "seq:\t%llu\n", seq);
	printbuf_indent_add(out, 2);

	prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));

	prt_printf(out, "size:\t");
	prt_human_readable_u64(out, vstruct_bytes(buf->data));
	prt_newline(out);

	prt_printf(out, "expires:\t");
	prt_printf(out, "%li jiffies\n", buf->expires - jiffies);

	prt_printf(out, "flags:\t");
	if (buf->noflush)
		prt_str(out, "noflush ");
	if (buf->must_flush)
		prt_str(out, "must_flush ");
	if (buf->separate_flush)
		prt_str(out, "separate_flush ");
	if (buf->need_flush_to_write_buffer)
		prt_str(out, "need_flush_to_write_buffer ");
	if (buf->write_started)
		prt_str(out, "write_started ");
	if (buf->write_allocated)
		prt_str(out, "write_allocated ");
	if (buf->write_done)
		prt_str(out, "write_done");
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
{
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++)
		bch2_journal_buf_to_text(out, j, seq);
	prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	for (unsigned i = 0; i < ARRAY_SIZE(p->unflushed); i++)
		INIT_LIST_HEAD(&p->unflushed[i]);
	for (unsigned i = 0; i < ARRAY_SIZE(p->flushed); i++)
		INIT_LIST_HEAD(&p->flushed[i]);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, discard journal buckets, or acquire a priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool stuck = false;
	struct printbuf buf = PRINTBUF;

	if (!(error == JOURNAL_ERR_journal_full ||
	      error == JOURNAL_ERR_journal_pin_full) ||
	    nr_unwritten_journal_entries(j) ||
	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
		return stuck;

	spin_lock(&j->lock);

	if (j->can_discard) {
		spin_unlock(&j->lock);
		return stuck;
	}

	stuck = true;

	/*
	 * The journal shutdown path will set ->err_seq, but do it here first to
	 * serialize against concurrent failures and avoid duplicate error
	 * reports.
	 */
	if (j->err_seq) {
		spin_unlock(&j->lock);
		return stuck;
	}
	j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);

	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
		bch2_journal_errors[error]);
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "%s", buf.buf);

	printbuf_reset(&buf);
	bch2_journal_pins_to_text(&buf, j);
	bch_err(c, "Journal pins:\n%s", buf.buf);
	printbuf_exit(&buf);

	bch2_fatal_error(c);
	dump_stack();

	return stuck;
}

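/*
 * Kick off journal writes that are ready: walk the unwritten entries in
 * sequence order and start the write on the first buffer whose reservations
 * have all been dropped. Writes are issued strictly in order, so we stop at
 * the first entry with a write in flight that hasn't been allocated yet, or
 * one that still holds reservations.
 */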
void bch2_journal_do_writes(struct journal *j)
{
	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *w = j->buf + idx;

		if (w->write_started && !w->write_allocated)
			break;
		if (w->write_started)
			continue;

		if (!journal_state_count(j->reservations, idx)) {
			w->write_started = true;
			closure_call(&w->io, bch2_journal_write, j->wq, NULL);
		}

		break;
	}
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq)
{
	lockdep_assert_held(&j->lock);

	if (__bch2_journal_pin_put(j, seq))
		bch2_journal_reclaim_fast(j);
	bch2_journal_do_writes(j);

	/*
	 * for __bch2_next_write_buffer_flush_journal_buf(), when quiescing an
	 * open journal entry
	 */
	wake_up(&j->wait);
}

/*
 * Close the currently open journal entry:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	old.v = atomic64_read(&j->reservations.counter);
	do {
		new.v = old.v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
				       &old.v, new.v));

	if (!__journal_entry_is_open(old))
		return;

	if (old.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL)
		old.cur_entry_offset = j->cur_entry_offset_if_blocked;

	/* Close out old buffer: */
	buf->data->u64s = cpu_to_le32(old.cur_entry_offset);

	if (trace_journal_entry_close_enabled() && trace) {
		struct printbuf pbuf = PRINTBUF;
		pbuf.atomic++;

		prt_str(&pbuf, "entry size: ");
		prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
		prt_newline(&pbuf);
		bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
		trace_journal_entry_close(c, pbuf.buf);
		printbuf_exit(&pbuf);
	}

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->last_seq = journal_last_seq(j);
	buf->data->last_seq = cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}

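/*
 * Put the journal into an error state: close the current entry with
 * JOURNAL_ENTRY_ERROR_VAL so no further reservations can be taken, record
 * the sequence number the error occurred at, and wake up any waiters.
 */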
void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	spin_unlock(&j->lock);
}

void bch2_journal_halt_locked(struct journal *j)
{
	lockdep_assert_held(&j->lock);

	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time = local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

bool bch2_journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return JOURNAL_ERR_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return JOURNAL_ERR_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
		return JOURNAL_ERR_max_in_flight;

	if (journal_cur_seq(j) >= JOURNAL_SEQ_MAX) {
		bch_err(c, "cannot start: journal seq overflow");
		if (bch2_fs_emergency_read_only_locked(c))
			bch_err(c, "fatal error - emergency read only");
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */
	}

	BUG_ON(!j->cur_entry_sectors);

	buf->expires =
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved = j->entry_u64s_reserved;
	buf->disk_sectors = j->cur_entry_sectors;
	buf->sectors = min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= (ssize_t) j->early_journal_entries.nr)
		return JOURNAL_ERR_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush = false;
	buf->must_flush = false;
	buf->separate_flush = false;
	buf->flush_time = 0;
	buf->need_flush_to_write_buffer = true;
	buf->write_started = false;
	buf->write_allocated = false;
	buf->write_done = false;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq = cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s = 0;

	if (j->early_journal_entries.nr) {
		memcpy(buf->data->_data, j->early_journal_entries.data,
		       j->early_journal_entries.nr * sizeof(u64));
		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
	}

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	old.v = atomic64_read(&j->reservations.counter);
	do {
		new.v = old.v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
				       &old.v, new.v));

	if (nr_unwritten_journal_entries(j) == 1)
		mod_delayed_work(j->wq,
				 &j->write_work,
				 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);

	if (j->early_journal_entries.nr)
		darray_exit(&j->early_journal_entries);
	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		bch2_journal_entry_close(j);
	return ret;
}

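/*
 * Wait until every journal entry, including the currently open one, has been
 * written to disk - closing the open entry to get writes moving, if
 * necessary.
 */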
static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

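/*
 * Delayed work callback for journal_flush_delay: if the open journal entry
 * has reached its expiry time, close it so it gets written; otherwise re-arm
 * the work for the remaining interval.
 */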
static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	spin_lock(&j->lock);
	if (__journal_entry_is_open(j->reservations)) {
		long delta = journal_cur_buf(j)->expires - jiffies;

		if (delta > 0)
			mod_delayed_work(j->wq, &j->write_work, delta);
		else
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	}
	spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -BCH_ERR_erofs_journal_err;

	if (j->blocked)
		return -BCH_ERR_journal_res_get_blocked;

	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
		ret = JOURNAL_ERR_journal_full;
		can_discard = j->can_discard;
		goto out;
	}

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
		ret = JOURNAL_ERR_max_in_flight;
		goto out;
	}

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and end up calling
	 * bch2_journal_entry_close() unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		ret = 0;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
	ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
unlock:
	can_discard = j->can_discard;
	spin_unlock(&j->lock);
out:
	if (ret == JOURNAL_ERR_retry)
		goto retry;
	if (!ret)
		return 0;

	if (journal_error_check_stuck(j, ret, flags))
		ret = -BCH_ERR_journal_res_get_blocked;

	if (ret == JOURNAL_ERR_max_in_flight &&
	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {

		struct printbuf buf = PRINTBUF;
		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
		bch2_journal_bufs_to_text(&buf, j);
		trace_journal_entry_full(c, buf.buf);
		printbuf_exit(&buf);
		count_event(c, journal_entry_full);
	}

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == JOURNAL_ERR_journal_full ||
	     ret == JOURNAL_ERR_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == JOURNAL_ERR_insufficient_devices
		? -BCH_ERR_erofs_journal_err
		: -BCH_ERR_journal_res_get_blocked;
}

static unsigned max_dev_latency(struct bch_fs *c)
{
	u64 nsecs = 0;

	for_each_rw_member(c, ca)
		nsecs = max(nsecs, ca->io_latency[WRITE].stats.max_duration);

	return nsecs_to_jiffies(nsecs);
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * Journal write is the structure used to set up journal writes. The calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
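
/*
 * A minimal usage sketch (error handling elided), following the same pattern
 * as __bch2_journal_meta() below:
 *
 *	struct journal_res res = {};
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0, NULL);
 *	if (ret)
 *		return ret;
 *	...copy keys into the reservation's space in the journal buffer...
 *	bch2_journal_res_put(j, &res);
 */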
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags,
				  struct btree_trans *trans)
{
	int ret;

	if (closure_wait_event_timeout(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK),
		   HZ))
		return ret;

	if (trans)
		bch2_trans_unlock_long(trans);

	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	int remaining_wait = max(max_dev_latency(c) * 2, HZ * 10);

	remaining_wait = max(0, remaining_wait - HZ);

	if (closure_wait_event_timeout(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK),
		   remaining_wait))
		return ret;

	struct printbuf buf = PRINTBUF;
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "Journal stuck? Waited for 10 seconds...\n%s",
		buf.buf);
	printbuf_exit(&buf);

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}

/* journal_entry_res: */

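/*
 * Change the amount of space reserved in every journal entry for a given
 * consumer. If the reservation is growing and the currently open entry no
 * longer has room for it, close the entry so the new reservation takes
 * effect starting with the next one.
 */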
void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j: journal object
 * @seq: seq to flush
 * @parent: closure object to wait with
 * Returns: 1 if @seq has already been flushed, 0 if @seq is being flushed,
 * -BCH_ERR_journal_flush_err if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -BCH_ERR_journal_flush_err;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

		spin_unlock(&j->lock);

		/*
		 * We're called from bch2_journal_flush_seq() -> wait_event();
		 * but this might block. We won't usually block, so we won't
		 * livelock:
		 */
		sched_annotate_sleep();
		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
		if (ret)
			return ret;

		seq = res.seq;
		buf = journal_seq_to_buf(j, seq);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time = local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, or if we promised it
	 * wouldn't be a flush, flush the next sequence number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;
	j->flushing_seq = max(j->flushing_seq, seq);

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq, unsigned task_state)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_state(j->wait,
			       (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)),
			       task_state);

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq), TASK_UNINTERRUPTIBLE);
}

/*
 * bch2_journal_noflush_seq - ask the journal not to issue any flushes in the
 * range [start, end)
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (c->journal.flushed_seq_ondisk >= start)
		return false;

	spin_lock(&j->lock);
	if (c->journal.flushed_seq_ondisk >= start)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < end;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal flush already in flight, or flush requested */
		if (buf->must_flush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

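/*
 * Write an empty flush entry: take a zero-size journal reservation, mark the
 * entry it lands in must_flush, then wait for that sequence number to be
 * written and flushed to disk.
 */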
static int __bch2_journal_meta(struct journal *j)
{
	struct journal_res res = {};
	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
	if (ret)
		return ret;

	struct journal_buf *buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time = local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq, TASK_UNINTERRUPTIBLE);
}

int bch2_journal_meta(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal))
		return -EROFS;

	int ret = __bch2_journal_meta(j);
	bch2_write_ref_put(c, BCH_WRITE_REF_journal);
	return ret;
}

/* block/unlock the journal: */

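/*
 * Blocking the journal prevents new reservations from being taken:
 * __bch2_journal_block() saves the current entry offset and substitutes
 * JOURNAL_ENTRY_BLOCKED_VAL, and bch2_journal_unblock() restores the saved
 * offset once the last blocker is gone - j->blocked counts nested blockers.
 */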
void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	if (!--j->blocked &&
	    j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL &&
	    j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) {
		union journal_res_state old, new;

		old.v = atomic64_read(&j->reservations.counter);
		do {
			new.v = old.v;
			new.cur_entry_offset = j->cur_entry_offset_if_blocked;
		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
	}
	spin_unlock(&j->lock);

	journal_wake(j);
}

static void __bch2_journal_block(struct journal *j)
{
	if (!j->blocked++) {
		union journal_res_state old, new;

		old.v = atomic64_read(&j->reservations.counter);
		do {
			j->cur_entry_offset_if_blocked = old.cur_entry_offset;

			if (j->cur_entry_offset_if_blocked >= JOURNAL_ENTRY_CLOSED_VAL)
				break;

			new.v = old.v;
			new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL;
		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));

		journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset);
	}
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_block(j);
	spin_unlock(&j->lock);

	journal_quiesce(j);
}

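/*
 * Find the oldest journal buffer, up to @max_seq, that still needs to be
 * flushed to the btree write buffer. If that buffer is the currently open
 * entry, block the journal first so its contents stop changing; -EAGAIN
 * means the buffer still has outstanding reservations and the caller should
 * wait and retry.
 */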
static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j,
								      u64 max_seq, bool *blocked)
{
	struct journal_buf *ret = NULL;

	/* We're inside wait_event(), but using mutex_lock(): */
	sched_annotate_sleep();
	mutex_lock(&j->buf_lock);
	spin_lock(&j->lock);
	max_seq = min(max_seq, journal_cur_seq(j));

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= max_seq;
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *buf = j->buf + idx;

		if (buf->need_flush_to_write_buffer) {
			union journal_res_state s;
			s.v = atomic64_read_acquire(&j->reservations.counter);

			unsigned open = seq == journal_cur_seq(j) && __journal_entry_is_open(s);

			if (open && !*blocked) {
				__bch2_journal_block(j);
				*blocked = true;
			}

			ret = journal_state_count(s, idx) > open
				? ERR_PTR(-EAGAIN)
				: buf;
			break;
		}
	}

	spin_unlock(&j->lock);
	if (IS_ERR_OR_NULL(ret))
		mutex_unlock(&j->buf_lock);
	return ret;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j,
							     u64 max_seq, bool *blocked)
{
	struct journal_buf *ret;
	*blocked = false;

	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j,
				max_seq, blocked)) != ERR_PTR(-EAGAIN));
	if (IS_ERR_OR_NULL(ret) && *blocked)
		bch2_journal_unblock(j);

	return ret;
}

/* allocate journal on a device: */

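/*
 * Try to grow the journal on @ca to @nr buckets: allocate the new buckets,
 * mark them as journal data, splice them into the device's journal bucket
 * array at the discard position, and write out the updated superblock.
 * Allocating fewer buckets than requested is not an error.
 */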
static int bch2_set_nr_journal_buckets_iter(struct bch_dev *ca, unsigned nr,
					    bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	struct open_bucket **ob = NULL;
	long *bu = NULL;
	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
	int ret = 0;

	BUG_ON(nr <= ja->nr);

	bu = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
	ob = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
	new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
	new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err_free;
	}

	for (nr_got = 0; nr_got < nr_want; nr_got++) {
		enum bch_watermark watermark = new_fs
			? BCH_WATERMARK_btree
			: BCH_WATERMARK_normal;

		ob[nr_got] = bch2_bucket_alloc(c, ca, watermark,
					       BCH_DATA_journal, cl);
		ret = PTR_ERR_OR_ZERO(ob[nr_got]);
		if (ret)
			break;

		if (!new_fs) {
			ret = bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
					ob[nr_got]->bucket, BCH_DATA_journal,
					ca->mi.bucket_size, BTREE_TRIGGER_transactional));
			if (ret) {
				bch2_open_bucket_put(c, ob[nr_got]);
				bch_err_msg(c, ret, "marking new journal buckets");
				break;
			}
		}

		bu[nr_got] = ob[nr_got]->bucket;
	}

	if (!nr_got)
		goto err_free;

	/* Don't return an error if we successfully allocated some buckets: */
	ret = 0;

	if (c) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_block(&c->journal);
		mutex_lock(&c->sb_lock);
	}

	memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
	memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));

	BUG_ON(ja->discard_idx > ja->nr);

	pos = ja->discard_idx ?: ja->nr;

	memmove(new_buckets + pos + nr_got,
		new_buckets + pos,
		sizeof(new_buckets[0]) * (ja->nr - pos));
	memmove(new_bucket_seq + pos + nr_got,
		new_bucket_seq + pos,
		sizeof(new_bucket_seq[0]) * (ja->nr - pos));

	for (i = 0; i < nr_got; i++) {
		new_buckets[pos + i] = bu[i];
		new_bucket_seq[pos + i] = 0;
	}

	nr = ja->nr + nr_got;

	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
	if (ret)
		goto err_unblock;

	bch2_write_super(c);

	/* Commit: */
	if (c)
		spin_lock(&c->journal.lock);

	swap(new_buckets, ja->buckets);
	swap(new_bucket_seq, ja->bucket_seq);
	ja->nr = nr;

	if (pos <= ja->discard_idx)
		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx_ondisk)
		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx)
		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
	if (pos <= ja->cur_idx)
		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

	if (c)
		spin_unlock(&c->journal.lock);
err_unblock:
	if (c) {
		bch2_journal_unblock(&c->journal);
		mutex_unlock(&c->sb_lock);
	}

	if (ret && !new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
					bu[i], BCH_DATA_free, 0,
					BTREE_TRIGGER_transactional));
err_free:
	for (i = 0; i < nr_got; i++)
		bch2_open_bucket_put(c, ob[i]);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	kfree(ob);
	kfree(bu);
	return ret;
}

static int bch2_set_nr_journal_buckets_loop(struct bch_fs *c, struct bch_dev *ca,
					    unsigned nr, bool new_fs)
{
	struct journal_device *ja = &ca->journal;
	int ret = 0;

	struct closure cl;
	closure_init_stack(&cl);

	/* don't handle reducing nr of buckets yet: */
	if (nr < ja->nr)
		return 0;

	while (!ret && ja->nr < nr) {
		struct disk_reservation disk_res = { 0, 0, 0 };

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 *
		 * XXX: that's not right, disk reservations only ensure a
		 * filesystem-wide allocation will succeed, this is a device
		 * specific allocation - we can hang here:
		 */
		if (!new_fs) {
			ret = bch2_disk_reservation_get(c, &disk_res,
							bucket_to_sector(ca, nr - ja->nr), 1, 0);
			if (ret)
				break;
		}

		ret = bch2_set_nr_journal_buckets_iter(ca, nr, new_fs, &cl);

		if (ret == -BCH_ERR_bucket_alloc_blocked ||
		    ret == -BCH_ERR_open_buckets_empty)
			ret = 0; /* wait and retry */

		bch2_disk_reservation_put(c, &disk_res);
		closure_sync(&cl);
	}

	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	down_write(&c->state_lock);
	int ret = bch2_set_nr_journal_buckets_loop(c, ca, nr, false);
	up_write(&c->state_lock);

	bch_err_fn(c, ret);
	return ret;
}

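/*
 * Worked example of the sizing logic below, assuming 512-byte sectors: on a
 * device with 2^20 buckets of 1MiB (2048 sectors) each, 1/128th of the device
 * is 8192 buckets, and the cap is min(1 << 13, (1 << 24) / 2048) = 8192
 * buckets, so the journal gets 8192 buckets = 8GB.
 */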
int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
{
	unsigned nr;
	int ret;

	if (dynamic_fault("bcachefs:add:journal_alloc")) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err;
	}

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));

	ret = bch2_set_nr_journal_buckets_loop(ca->fs, ca, nr, new_fs);
err:
	bch_err_fn(ca, ret);
	return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
	for_each_online_member(c, ca) {
		if (ca->journal.nr)
			continue;

		int ret = bch2_dev_journal_alloc(ca, true);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	if (!test_bit(JOURNAL_running, &j->flags))
		return;

	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, bch2_journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	__bch2_journal_meta(j);

	journal_quiesce(j);
	cancel_delayed_work_sync(&j->write_work);

	WARN(!bch2_journal_error(j) &&
	     test_bit(JOURNAL_replay_done, &j->flags) &&
	     j->last_empty_seq != journal_cur_seq(j),
	     "journal shutdown error: cur seq %llu but last empty seq %llu",
	     journal_cur_seq(j), j->last_empty_seq);

	if (!bch2_journal_error(j))
		clear_bit(JOURNAL_running, &j->flags);
}

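/*
 * Start the journal after recovery: @cur_seq is the sequence number the first
 * new entry will be written with. Resize and initialize the pin FIFO to cover
 * everything from the last_seq of the newest replayed entry up to @cur_seq,
 * and record which devices each replayed entry lives on.
 */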
int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i, **_i;
	struct genradix_iter iter;
	bool had_entries = false;
	u64 last_seq = cur_seq, nr, seq;

	if (cur_seq >= JOURNAL_SEQ_MAX) {
		bch_err(c, "cannot start: journal seq overflow");
		return -EINVAL;
	}

	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		last_seq = le64_to_cpu(i->j.last_seq);
		break;
	}

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -BCH_ERR_ENOMEM_journal_pin_fifo;
		}
	}

	j->replay_journal_seq = last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk = last_seq;
	j->flushed_seq_ondisk = cur_seq - 1;
	j->seq_ondisk = cur_seq - 1;
	j->pin.front = last_seq;
	j->pin.back = cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		darray_for_each(i->ptrs, ptr)
			bch2_dev_list_add_dev(&p->devs, ptr->dev);

		had_entries = true;
	}

	if (!had_entries)
		j->last_empty_seq = cur_seq - 1; /* to match j->seq */

	spin_lock(&j->lock);

	set_bit(JOURNAL_running, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	struct journal_device *ja = &ca->journal;

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		kfree(ja->bio[i]);
		ja->bio[i] = NULL;
	}

	kfree(ja->buckets);
	kfree(ja->bucket_seq);
	ja->buckets = NULL;
	ja->bucket_seq = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_field_get(sb, journal);
	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
		bch2_sb_field_get(sb, journal_v2);

	ja->nr = 0;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

		for (unsigned i = 0; i < nr; i++)
			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
	} else if (journal_buckets) {
		ja->nr = bch2_nr_journal_buckets(journal_buckets);
	}

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
						 nr_bvecs), GFP_KERNEL);
		if (!ja->bio[i])
			return -BCH_ERR_ENOMEM_dev_journal_init;

		ja->bio[i]->ca = ca;
		ja->bio[i]->buf_idx = i;
		bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
	}

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
		unsigned dst = 0;

		for (unsigned i = 0; i < nr; i++)
			for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
				ja->buckets[dst++] =
					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
	} else if (journal_buckets) {
		for (unsigned i = 0; i < ja->nr; i++)
			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
	}

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	if (j->wq)
		destroy_workqueue(j->wq);

	darray_exit(&j->early_journal_entries);

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvfree(j->buf[i].data);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	static struct lock_class_key res_key;

	mutex_init(&j->buf_lock);
	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		     ((union journal_res_state)
		      { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
		return -BCH_ERR_ENOMEM_journal_pin_fifo;

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data)
			return -BCH_ERR_ENOMEM_journal_buf;
		j->buf[i].idx = i;
	}

	j->pin.front = j->pin.back = 1;

	j->wq = alloc_workqueue("bcachefs_journal",
				WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
	if (!j->wq)
		return -BCH_ERR_ENOMEM_fs_other_alloc;
	return 0;
}

/* debug: */

static const char * const bch2_journal_flags_strs[] = {
#define x(n) #n,
	JOURNAL_FLAGS()
#undef x
	NULL
};

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	unsigned long now = jiffies;
	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 28);
	out->atomic++;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	prt_printf(out, "flags:\t");
	prt_bitflags(out, bch2_journal_flags_strs, j->flags);
	prt_newline(out);
	prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
	prt_printf(out, "seq:\t%llu\n", journal_cur_seq(j));
	prt_printf(out, "seq_ondisk:\t%llu\n", j->seq_ondisk);
	prt_printf(out, "last_seq:\t%llu\n", journal_last_seq(j));
	prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
	prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
	prt_printf(out, "watermark:\t%s\n", bch2_watermarks[j->watermark]);
	prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
	prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
	prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
	prt_printf(out, "average write size:\t");
	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
	prt_newline(out);
	prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
	prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
	prt_printf(out, "reclaim kicked:\t%u\n", j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
		   ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	prt_printf(out, "blocked:\t%u\n", j->blocked);
	prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
	prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
	prt_printf(out, "current entry:\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		prt_printf(out, "error\n");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		prt_printf(out, "closed\n");
		break;
	case JOURNAL_ENTRY_BLOCKED_VAL:
		prt_printf(out, "blocked\n");
		break;
	default:
		prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	prt_printf(out, "unwritten entries:\n");
	bch2_journal_bufs_to_text(out, j);

	prt_printf(out, "space:\n");
	printbuf_indent_add(out, 2);
	prt_printf(out, "discarded\t%u:%u\n",
		   j->space[journal_space_discarded].next_entry,
		   j->space[journal_space_discarded].total);
	prt_printf(out, "clean ondisk\t%u:%u\n",
		   j->space[journal_space_clean_ondisk].next_entry,
		   j->space[journal_space_clean_ondisk].total);
	prt_printf(out, "clean\t%u:%u\n",
		   j->space[journal_space_clean].next_entry,
		   j->space[journal_space_clean].total);
	prt_printf(out, "total\t%u:%u\n",
		   j->space[journal_space_total].next_entry,
		   j->space[journal_space_total].total);
	printbuf_indent_sub(out, 2);

	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->mi.durability)
			continue;

		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		prt_printf(out, "dev %u:\n", ca->dev_idx);
		prt_printf(out, "durability %u:\n", ca->mi.durability);
		printbuf_indent_add(out, 2);
		prt_printf(out, "nr\t%u\n", ja->nr);
		prt_printf(out, "bucket size\t%u\n", ca->mi.bucket_size);
		prt_printf(out, "available\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		prt_printf(out, "discard_idx\t%u\n", ja->discard_idx);
		prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
		prt_printf(out, "dirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
		prt_printf(out, "cur_idx\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
		printbuf_indent_sub(out, 2);
	}

	prt_printf(out, "replicas want %u need %u\n", c->opts.metadata_replicas, c->opts.metadata_replicas_required);

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}