// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"

#include <linux/prefetch.h>
#include <linux/sort.h>

static int bch2_btree_write_buffer_journal_flush(struct journal *,
				struct journal_entry_pin *, u64);

static int bch2_journal_keys_to_write_buffer(struct bch_fs *, struct journal_buf *);

static inline bool __wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{
	return (cmp_int(l->hi, r->hi) ?:
		cmp_int(l->mi, r->mi) ?:
		cmp_int(l->lo, r->lo)) >= 0;
}

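/*
 * Branchless 192-bit unsigned compare on x86-64: wb_key_ref is visible
 * through a union as three u64s (lo, mi, hi) at offsets 0/8/16, least
 * significant word first, so the sub/sbb chain below performs a single wide
 * subtraction and "=@ccae" extracts the "above or equal" condition (carry
 * clear), i.e. l >= r, matching __wb_key_ref_cmp() as the EBUG_ON() asserts:
 */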
static inline bool wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{
#ifdef CONFIG_X86_64
	int cmp;

	asm("mov   (%[l]), %%rax;"
	    "sub   (%[r]), %%rax;"
	    "mov  8(%[l]), %%rax;"
	    "sbb  8(%[r]), %%rax;"
	    "mov 16(%[l]), %%rax;"
	    "sbb 16(%[r]), %%rax;"
	    : "=@ccae" (cmp)
	    : [l] "r" (l), [r] "r" (r)
	    : "rax", "cc");

	EBUG_ON(cmp != __wb_key_ref_cmp(l, r));
	return cmp;
#else
	return __wb_key_ref_cmp(l, r);
#endif
}

static int wb_key_seq_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return cmp_int(l->journal_seq, r->journal_seq);
}

/*
 * Equality test excluding idx, the low 24 bits: idx is a key's index in
 * wb->flushing.keys, so two refs to the same (btree, pos) must still compare
 * equal here for the dedup pass in the flush path to catch them:
 */
static inline bool wb_key_eq(const void *_l, const void *_r)
{
	const struct wb_key_ref *l = _l;
	const struct wb_key_ref *r = _r;

	return !((l->hi ^ r->hi)|
		 (l->mi ^ r->mi)|
		 ((l->lo >> 24) ^ (r->lo >> 24)));
}

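/*
 * Bottom-up heapsort over the wb_key_ref array, open-coded rather than going
 * through the generic sort(), presumably so that wb_key_ref_cmp() (including
 * the asm fast path above) is inlined instead of called through a function
 * pointer per comparison:
 */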
static noinline void wb_sort(struct wb_key_ref *base, size_t num)
{
	size_t n = num, a = num / 2;

	if (!a)		/* num < 2 || size == 0 */
		return;

	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down --a */
			--a;
		else if (--n)		/* Sorting: Extract root to --n */
			swap(base[0], base[n]);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap.  This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down.  (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + 1, (d = c + 1) < n;)
			b = wb_key_ref_cmp(base + c, base + d) ? c : d;
		if (d == n)		/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && wb_key_ref_cmp(base + a, base + b))
			b = (b - 1) / 2;
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = (b - 1) / 2;
			swap(base[b], base[c]);
		}
	}
}

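/*
 * Slowpath for when a key didn't fit in the write-locked leaf node: drop the
 * write lock and go through the regular transaction commit path, which can
 * split the node, reusing the key's original journal seq instead of taking a
 * new journal reservation:
 */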
static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
					  struct btree_iter *iter,
					  struct btree_write_buffered_key *wb)
{
	struct btree_path *path = btree_iter_path(trans, iter);

	bch2_btree_node_unlock_write(trans, path, path->l[0].b);

	trans->journal_res.seq = wb->journal_seq;

	return bch2_trans_update(trans, iter, &wb->k,
				 BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_journal_res|
				  BCH_TRANS_COMMIT_journal_reclaim);
}

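/*
 * Fastpath: insert the key directly into the leaf node while holding a write
 * lock on it, bypassing the transaction commit machinery; bumps *fast on
 * success, and falls back to wb_flush_one_slowpath() if the key doesn't fit:
 */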
static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *iter,
			       struct btree_write_buffered_key *wb,
			       bool *write_locked, size_t *fast)
{
	struct btree_path *path;
	int ret;

	EBUG_ON(!wb->journal_seq);
	EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
	EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	/*
	 * We can't clone a path that has write locks: unshare it now, before
	 * set_pos and traverse():
	 */
	if (btree_iter_path(trans, iter)->ref > 1)
		iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);

	path = btree_iter_path(trans, iter);

	if (!*write_locked) {
		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
		if (ret)
			return ret;

		bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
		*write_locked = true;
	}

	if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) {
		*write_locked = false;
		return wb_flush_one_slowpath(trans, iter, wb);
	}

	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
	(*fast)++;
	return 0;
}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
			  struct btree_write_buffered_key *wb)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_cached|BTREE_ITER_intent);

	trans->journal_res.seq = wb->journal_seq;

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &wb->k,
				  BTREE_UPDATE_internal_snapshot_node);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

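/*
 * Move as many keys as will fit from wb->inc (where new keys accumulate) to
 * wb->flushing, transferring the journal pin along with them; caller holds
 * both wb->inc.lock and wb->flushing.lock:
 */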
static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb)
{
	struct bch_fs *c = container_of(wb, struct bch_fs, btree_write_buffer);
	struct journal *j = &c->journal;

	if (!wb->inc.keys.nr)
		return;

	bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin,
			     bch2_btree_write_buffer_journal_flush);

	darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr));
	darray_resize(&wb->sorted, wb->flushing.keys.size);

	if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) {
		swap(wb->flushing.keys, wb->inc.keys);
		goto out;
	}

	size_t nr = min(darray_room(wb->flushing.keys),
			wb->sorted.size - wb->flushing.keys.nr);
	nr = min(nr, wb->inc.keys.nr);

	memcpy(&darray_top(wb->flushing.keys),
	       wb->inc.keys.data,
	       sizeof(wb->inc.keys.data[0]) * nr);

	memmove(wb->inc.keys.data,
		wb->inc.keys.data + nr,
		sizeof(wb->inc.keys.data[0]) * (wb->inc.keys.nr - nr));

	wb->flushing.keys.nr	+= nr;
	wb->inc.keys.nr		-= nr;
out:
	if (!wb->inc.keys.nr)
		bch2_journal_pin_drop(j, &wb->inc.pin);
	else
		bch2_journal_pin_update(j, wb->inc.keys.data[0].journal_seq, &wb->inc.pin,
					bch2_btree_write_buffer_journal_flush);

	if (j->watermark) {
		spin_lock(&j->lock);
		bch2_journal_set_watermark(j);
		spin_unlock(&j->lock);
	}

	BUG_ON(wb->sorted.size < wb->flushing.keys.nr);
}

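/*
 * Flush wb->flushing (plus whatever can be moved over from wb->inc) to the
 * btree: build an array of sorted key references, dedupe updates to the same
 * position, then insert via the locked-leaf fastpath where possible; keys
 * that would deadlock against journal reclaim are retried afterwards in
 * journal-seq order so the journal pin can be released:
 */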
static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct btree_iter iter = { NULL };
	size_t skipped = 0, fast = 0, slowpath = 0;
	bool write_locked = false;
	int ret = 0;

	bch2_trans_unlock(trans);
	bch2_trans_begin(trans);

	mutex_lock(&wb->inc.lock);
	move_keys_from_inc_to_flushing(wb);
	mutex_unlock(&wb->inc.lock);

	for (size_t i = 0; i < wb->flushing.keys.nr; i++) {
		wb->sorted.data[i].idx = i;
		wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree;
		memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos));
	}
	wb->sorted.nr = wb->flushing.keys.nr;

	/*
	 * We first sort so that we can detect and skip redundant updates, and
	 * then we attempt to flush in sorted btree order, as this is most
	 * efficient.
	 *
	 * However, since we're not flushing in the order they appear in the
	 * journal we won't be able to drop our journal pin until everything is
	 * flushed - which means this could deadlock the journal if we weren't
	 * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail
	 * if it would block taking a journal reservation.
	 *
	 * If that happens, simply skip the key so we can optimistically insert
	 * as many keys as possible in the fast path.
	 */
	wb_sort(wb->sorted.data, wb->sorted.nr);

	darray_for_each(wb->sorted, i) {
		struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx];

		for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++)
			prefetch(&wb->flushing.keys.data[n->idx]);

		BUG_ON(!k->journal_seq);

		if (i + 1 < &darray_top(wb->sorted) &&
		    wb_key_eq(i, i + 1)) {
			struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx];

			skipped++;
			n->journal_seq = min_t(u64, n->journal_seq, k->journal_seq);
			k->journal_seq = 0;
			continue;
		}

		if (write_locked) {
			struct btree_path *path = btree_iter_path(trans, &iter);

			if (path->btree_id != i->btree ||
			    bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
				bch2_btree_node_unlock_write(trans, path, path->l[0].b);
				write_locked = false;

				ret = lockrestart_do(trans,
					bch2_btree_iter_traverse(&iter) ?:
					bch2_foreground_maybe_merge(trans, iter.path, 0,
							BCH_WATERMARK_reclaim|
							BCH_TRANS_COMMIT_journal_reclaim|
							BCH_TRANS_COMMIT_no_check_rw|
							BCH_TRANS_COMMIT_no_enospc));
				if (ret)
					goto err;
			}
		}

		if (!iter.path || iter.btree_id != k->btree) {
			bch2_trans_iter_exit(trans, &iter);
			bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
					     BTREE_ITER_intent|BTREE_ITER_all_snapshots);
		}

		bch2_btree_iter_set_pos(&iter, k->k.k.p);
		btree_iter_path(trans, &iter)->preserve = false;

		do {
			if (race_fault()) {
				ret = -BCH_ERR_journal_reclaim_would_deadlock;
				break;
			}

			ret = wb_flush_one(trans, &iter, k, &write_locked, &fast);
			if (!write_locked)
				bch2_trans_begin(trans);
		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (!ret) {
			k->journal_seq = 0;
		} else if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
			slowpath++;
			ret = 0;
		} else
			break;
	}

	if (write_locked) {
		struct btree_path *path = btree_iter_path(trans, &iter);
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret)
		goto err;

	if (slowpath) {
		/*
		 * Flush in the order the keys were present in the journal, so
		 * that we can release journal pins: the fastpath zeroed
		 * journal_seq on keys it flushed successfully, so we can skip
		 * those here.
		 */
		trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);

		sort(wb->flushing.keys.data,
		     wb->flushing.keys.nr,
		     sizeof(wb->flushing.keys.data[0]),
		     wb_key_seq_cmp, NULL);

		darray_for_each(wb->flushing.keys, i) {
			if (!i->journal_seq)
				continue;

			bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin,
						bch2_btree_write_buffer_journal_flush);

			bch2_trans_begin(trans);

			ret = commit_do(trans, NULL, NULL,
					BCH_WATERMARK_reclaim|
					BCH_TRANS_COMMIT_journal_reclaim|
					BCH_TRANS_COMMIT_no_check_rw|
					BCH_TRANS_COMMIT_no_enospc|
					BCH_TRANS_COMMIT_no_journal_res,
					btree_write_buffered_insert(trans, i));
			if (ret)
				goto err;
		}
	}
err:
	bch2_fs_fatal_err_on(ret, c, "%s", bch2_err_str(ret));
	trace_write_buffer_flush(trans, wb->flushing.keys.nr, skipped, fast, 0);
	bch2_journal_pin_drop(j, &wb->flushing.pin);
	wb->flushing.keys.nr = 0;
	return ret;
}

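/*
 * Copy write buffer keys out of any journal buffers that still need flushing,
 * up to and including @seq:
 */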
static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq)
{
	struct journal *j = &c->journal;
	struct journal_buf *buf;
	int ret = 0;

	while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, seq))) {
		ret = bch2_journal_keys_to_write_buffer(c, buf);
		mutex_unlock(&j->buf_lock);
	}

	return ret;
}

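/*
 * Flush the write buffer until all keys up to and including @seq have hit the
 * btree; this loops because a single flush pass is not guaranteed to empty
 * wb->inc (see below), and more keys may still need to be fetched from the
 * journal:
 */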
static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret = 0, fetch_from_journal_err;

	do {
		bch2_trans_unlock(trans);

		fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq);

		/*
		 * On memory allocation failure, bch2_btree_write_buffer_flush_locked()
		 * is not guaranteed to empty wb->inc:
		 */
		mutex_lock(&wb->flushing.lock);
		ret = bch2_btree_write_buffer_flush_locked(trans);
		mutex_unlock(&wb->flushing.lock);
	} while (!ret &&
		 (fetch_from_journal_err ||
		  (wb->inc.pin.seq && wb->inc.pin.seq <= seq) ||
		  (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq)));

	return ret;
}

static int bch2_btree_write_buffer_journal_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq));
}

int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);

	return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal));
}

int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret = 0;

	if (mutex_trylock(&wb->flushing.lock)) {
		ret = bch2_btree_write_buffer_flush_locked(trans);
		mutex_unlock(&wb->flushing.lock);
	}

	return ret;
}

int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
		return -BCH_ERR_erofs_no_writes;

	int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
	return ret;
}

/**
 * bch2_btree_write_buffer_maybe_flush() - flush the write buffer before
 * reporting an error against a key that may still be sitting in it
 * @trans:		btree transaction
 * @referring_k:	key being checked, which references a write buffer btree
 * @last_flushed:	last key we flushed for, updated here after flushing
 *
 * In check and repair code, when checking references to write buffer btrees
 * we need to issue a flush before we have a definitive error: this issues a
 * flush if this is a key we haven't yet checked.
 */
int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans,
					struct bkey_s_c referring_k,
					struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct bkey_buf tmp;
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) {
		bch2_bkey_buf_reassemble(&tmp, c, referring_k);

		if (bkey_is_btree_ptr(referring_k.k)) {
			bch2_trans_unlock(trans);
			bch2_btree_interior_updates_flush(c);
		}

		ret = bch2_btree_write_buffer_flush_sync(trans);
		if (ret)
			goto err;

		bch2_bkey_buf_copy(last_flushed, c, tmp.k);
		ret = -BCH_ERR_transaction_restart_write_buffer_flush;
	}
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

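/*
 * Background flush, queued from bch2_journal_keys_to_write_buffer_end() when
 * the buffer is getting full; the queueing side took a write ref, which we
 * drop here when done:
 */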
static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work);
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret;

	mutex_lock(&wb->flushing.lock);
	do {
		ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans));
	} while (!ret && bch2_btree_write_buffer_should_flush(c));
	mutex_unlock(&wb->flushing.lock);

	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
}

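/*
 * Slowpath for bch2_journal_key_to_wb(): called when the destination buffer
 * is out of room; grows it, falling back from direct appends to wb->flushing
 * to appending to wb->inc if the resize fails:
 */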
int bch2_journal_key_to_wb_slowpath(struct bch_fs *c,
			     struct journal_keys_to_wb *dst,
			     enum btree_id btree, struct bkey_i *k)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret;
retry:
	ret = darray_make_room_gfp(&dst->wb->keys, 1, GFP_KERNEL);
	if (!ret && dst->wb == &wb->flushing)
		ret = darray_resize(&wb->sorted, wb->flushing.keys.size);

	if (unlikely(ret)) {
		if (dst->wb == &c->btree_write_buffer.flushing) {
			mutex_unlock(&dst->wb->lock);
			dst->wb = &c->btree_write_buffer.inc;
			bch2_journal_pin_add(&c->journal, dst->seq, &dst->wb->pin,
					     bch2_btree_write_buffer_journal_flush);
			goto retry;
		}

		return ret;
	}

	dst->room = darray_room(dst->wb->keys);
	if (dst->wb == &wb->flushing)
		dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
	BUG_ON(!dst->room);
	BUG_ON(!dst->seq);

	struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
	wb_k->journal_seq	= dst->seq;
	wb_k->btree		= btree;
	bkey_copy(&wb_k->k, k);
	dst->wb->keys.nr++;
	dst->room--;
	return 0;
}

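/*
 * Begin moving keys from a journal buffer into the write buffer: if we can
 * take wb->flushing.lock and wb->inc is empty, keys are appended straight to
 * wb->flushing; otherwise they go through wb->inc:
 */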
void bch2_journal_keys_to_write_buffer_start(struct bch_fs *c, struct journal_keys_to_wb *dst, u64 seq)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	if (mutex_trylock(&wb->flushing.lock)) {
		mutex_lock(&wb->inc.lock);
		move_keys_from_inc_to_flushing(wb);

		/*
		 * Attempt to skip wb->inc, and add keys directly to
		 * wb->flushing, saving us a copy later:
		 */

		if (!wb->inc.keys.nr) {
			dst->wb = &wb->flushing;
		} else {
			mutex_unlock(&wb->flushing.lock);
			dst->wb = &wb->inc;
		}
	} else {
		mutex_lock(&wb->inc.lock);
		dst->wb = &wb->inc;
	}

	dst->room = darray_room(dst->wb->keys);
	if (dst->wb == &wb->flushing)
		dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
	dst->seq = seq;

	bch2_journal_pin_add(&c->journal, seq, &dst->wb->pin,
			     bch2_btree_write_buffer_journal_flush);
}

void bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_to_wb *dst)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	if (!dst->wb->keys.nr)
		bch2_journal_pin_drop(&c->journal, &dst->wb->pin);

	if (bch2_btree_write_buffer_should_flush(c) &&
	    __bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer) &&
	    !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);

	if (dst->wb == &wb->flushing)
		mutex_unlock(&wb->flushing.lock);
	mutex_unlock(&wb->inc.lock);
}

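/*
 * Copy every write buffer key in a journal buffer into the write buffer;
 * entries are retagged as BCH_JSET_ENTRY_btree_keys once copied, and the
 * buffer's need_flush_to_write_buffer flag is cleared:
 */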
static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf)
{
	struct journal_keys_to_wb dst;
	int ret = 0;

	bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq));

	for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) {
		jset_entry_for_each_key(entry, k) {
			ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k);
			if (ret)
				goto out;
		}

		entry->type = BCH_JSET_ENTRY_btree_keys;
	}

	spin_lock(&c->journal.lock);
	buf->need_flush_to_write_buffer = false;
	spin_unlock(&c->journal.lock);
out:
	bch2_journal_keys_to_write_buffer_end(c, &dst);
	return ret;
}

static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size)
{
	if (wb->keys.size >= new_size)
		return 0;

	if (!mutex_trylock(&wb->lock))
		return -EINTR;

	int ret = darray_resize(&wb->keys, new_size);
	mutex_unlock(&wb->lock);
	return ret;
}

int bch2_btree_write_buffer_resize(struct bch_fs *c, size_t new_size)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	return wb_keys_resize(&wb->flushing, new_size) ?:
		wb_keys_resize(&wb->inc, new_size);
}

void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) &&
	       !bch2_journal_error(&c->journal));

	darray_exit(&wb->sorted);
	darray_exit(&wb->flushing.keys);
	darray_exit(&wb->inc.keys);
}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_init(&wb->inc.lock);
	mutex_init(&wb->flushing.lock);
	INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work);

	/* Will be resized by journal as needed: */
	unsigned initial_size = 1 << 16;

	return  darray_make_room(&wb->inc.keys, initial_size) ?:
		darray_make_room(&wb->flushing.keys, initial_size) ?:
		darray_make_room(&wb->sorted, initial_size);
}