xref: /linux/fs/bcachefs/btree_write_buffer.c (revision f96a974170b749e3a56844e25b31d46a7233b6f6)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "btree_locking.h"
6 #include "btree_update.h"
7 #include "btree_update_interior.h"
8 #include "btree_write_buffer.h"
9 #include "disk_accounting.h"
10 #include "error.h"
11 #include "extents.h"
12 #include "journal.h"
13 #include "journal_io.h"
14 #include "journal_reclaim.h"
15 
16 #include <linux/prefetch.h>
17 #include <linux/sort.h>
18 
19 static int bch2_btree_write_buffer_journal_flush(struct journal *,
20 				struct journal_entry_pin *, u64);
21 
22 static inline bool __wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
23 {
24 	return (cmp_int(l->hi, r->hi) ?:
25 		cmp_int(l->mi, r->mi) ?:
26 		cmp_int(l->lo, r->lo)) >= 0;
27 }
28 
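/*
 * A wb_key_ref packs the btree id, the key's position and a 24-bit index into
 * the flushing array into three u64s; __wb_key_ref_cmp() orders refs by
 * comparing those words as a single unsigned value, hi word first. On x86-64,
 * wb_key_ref_cmp() does the same 192-bit comparison with a sub/sbb borrow
 * chain and reads the "above or equal" condition code - i.e. it returns true
 * when l >= r - with the EBUG_ON() checking it against the portable version.
 */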
29 static inline bool wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
30 {
31 #ifdef CONFIG_X86_64
32 	int cmp;
33 
34 	asm("mov   (%[l]), %%rax;"
35 	    "sub   (%[r]), %%rax;"
36 	    "mov  8(%[l]), %%rax;"
37 	    "sbb  8(%[r]), %%rax;"
38 	    "mov 16(%[l]), %%rax;"
39 	    "sbb 16(%[r]), %%rax;"
40 	    : "=@ccae" (cmp)
41 	    : [l] "r" (l), [r] "r" (r)
42 	    : "rax", "cc");
43 
44 	EBUG_ON(cmp != __wb_key_ref_cmp(l, r));
45 	return cmp;
46 #else
47 	return __wb_key_ref_cmp(l, r);
48 #endif
49 }
50 
51 static int wb_key_seq_cmp(const void *_l, const void *_r)
52 {
53 	const struct btree_write_buffered_key *l = _l;
54 	const struct btree_write_buffered_key *r = _r;
55 
56 	return cmp_int(l->journal_seq, r->journal_seq);
57 }
58 
59 /* Compare excluding idx, the low 24 bits: */
60 static inline bool wb_key_eq(const void *_l, const void *_r)
61 {
62 	const struct wb_key_ref *l = _l;
63 	const struct wb_key_ref *r = _r;
64 
65 	return !((l->hi ^ r->hi)|
66 		 (l->mi ^ r->mi)|
67 		 ((l->lo >> 24) ^ (r->lo >> 24)));
68 }
69 
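/*
 * Bottom-up heapsort, specialized for wb_key_ref so that wb_key_ref_cmp()
 * (and its inline asm fast path) gets inlined; the structure and comments
 * follow the generic heapsort in lib/sort.c.
 */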
70 static noinline void wb_sort(struct wb_key_ref *base, size_t num)
71 {
72 	size_t n = num, a = num / 2;
73 
 74 	if (!a)		/* num < 2 */
75 		return;
76 
77 	for (;;) {
78 		size_t b, c, d;
79 
80 		if (a)			/* Building heap: sift down --a */
81 			--a;
82 		else if (--n)		/* Sorting: Extract root to --n */
83 			swap(base[0], base[n]);
84 		else			/* Sort complete */
85 			break;
86 
87 		/*
88 		 * Sift element at "a" down into heap.  This is the
89 		 * "bottom-up" variant, which significantly reduces
90 		 * calls to cmp_func(): we find the sift-down path all
91 		 * the way to the leaves (one compare per level), then
92 		 * backtrack to find where to insert the target element.
93 		 *
94 		 * Because elements tend to sift down close to the leaves,
95 		 * this uses fewer compares than doing two per level
96 		 * on the way down.  (A bit more than half as many on
97 		 * average, 3/4 worst-case.)
98 		 */
99 		for (b = a; c = 2*b + 1, (d = c + 1) < n;)
100 			b = wb_key_ref_cmp(base + c, base + d) ? c : d;
101 		if (d == n)		/* Special case last leaf with no sibling */
102 			b = c;
103 
104 		/* Now backtrack from "b" to the correct location for "a" */
105 		while (b != a && wb_key_ref_cmp(base + a, base + b))
106 			b = (b - 1) / 2;
107 		c = b;			/* Where "a" belongs */
108 		while (b != a) {	/* Shift it into place */
109 			b = (b - 1) / 2;
110 			swap(base[b], base[c]);
111 		}
112 	}
113 }
114 
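/*
 * Slow path for flushing one key: drop the node write lock taken by
 * wb_flush_one() and do a full transaction commit instead, reusing the key's
 * original journal sequence number (BCH_TRANS_COMMIT_no_journal_res) so the
 * update isn't journalled a second time.
 */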
115 static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
116 					  struct btree_iter *iter,
117 					  struct btree_write_buffered_key *wb)
118 {
119 	struct btree_path *path = btree_iter_path(trans, iter);
120 
121 	bch2_btree_node_unlock_write(trans, path, path->l[0].b);
122 
123 	trans->journal_res.seq = wb->journal_seq;
124 
125 	return bch2_trans_update(trans, iter, &wb->k,
126 				 BTREE_UPDATE_internal_snapshot_node) ?:
127 		bch2_trans_commit(trans, NULL, NULL,
128 				  BCH_TRANS_COMMIT_no_enospc|
129 				  BCH_TRANS_COMMIT_no_check_rw|
130 				  BCH_TRANS_COMMIT_no_journal_res|
131 				  BCH_TRANS_COMMIT_journal_reclaim);
132 }
133 
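/*
 * Fast path for flushing one key: traverse to the leaf, take (or keep) the
 * node write lock, and insert with bch2_btree_insert_key_leaf() - no
 * transaction commit, no new journal reservation. Accounting keys are first
 * accumulated against the existing key at that position; if the insert
 * doesn't fit in the node we fall back to wb_flush_one_slowpath().
 */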
134 static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *iter,
135 			       struct btree_write_buffered_key *wb,
136 			       bool *write_locked,
137 			       bool *accounting_accumulated,
138 			       size_t *fast)
139 {
140 	struct btree_path *path;
141 	int ret;
142 
143 	EBUG_ON(!wb->journal_seq);
144 	EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
145 	EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
146 
147 	ret = bch2_btree_iter_traverse(iter);
148 	if (ret)
149 		return ret;
150 
151 	if (!*accounting_accumulated && wb->k.k.type == KEY_TYPE_accounting) {
152 		struct bkey u;
153 		struct bkey_s_c k = bch2_btree_path_peek_slot_exact(btree_iter_path(trans, iter), &u);
154 
155 		if (k.k->type == KEY_TYPE_accounting)
156 			bch2_accounting_accumulate(bkey_i_to_accounting(&wb->k),
157 						   bkey_s_c_to_accounting(k));
158 	}
159 	*accounting_accumulated = true;
160 
161 	/*
162 	 * We can't clone a path that has write locks: unshare it now, before
163 	 * set_pos and traverse():
164 	 */
165 	if (btree_iter_path(trans, iter)->ref > 1)
166 		iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);
167 
168 	path = btree_iter_path(trans, iter);
169 
170 	if (!*write_locked) {
171 		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
172 		if (ret)
173 			return ret;
174 
175 		bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
176 		*write_locked = true;
177 	}
178 
179 	if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) {
180 		*write_locked = false;
181 		return wb_flush_one_slowpath(trans, iter, wb);
182 	}
183 
184 	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
185 	(*fast)++;
186 	return 0;
187 }
188 
189 /*
190  * Update a btree with a write buffered key using the journal seq of the
191  * original write buffer insert.
192  *
193  * It is not safe to rejournal the key once it has been inserted into the write
194  * buffer because that may break recovery ordering. For example, the key may
195  * have already been modified in the active write buffer in a seq that comes
196  * before the current transaction. If we were to journal this key again and
197  * crash, recovery would process updates in the wrong order.
198  */
199 static int
200 btree_write_buffered_insert(struct btree_trans *trans,
201 			  struct btree_write_buffered_key *wb)
202 {
203 	struct btree_iter iter;
204 	int ret;
205 
206 	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
207 			     BTREE_ITER_cached|BTREE_ITER_intent);
208 
209 	trans->journal_res.seq = wb->journal_seq;
210 
211 	ret   = bch2_btree_iter_traverse(&iter) ?:
212 		bch2_trans_update(trans, &iter, &wb->k,
213 				  BTREE_UPDATE_internal_snapshot_node);
214 	bch2_trans_iter_exit(trans, &iter);
215 	return ret;
216 }
217 
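/*
 * The write buffer is double buffered: new keys are appended to wb->inc,
 * which has its own lock and journal pin, and are moved into wb->flushing
 * (under both locks) before a flush - so keys can keep arriving while
 * wb->flushing is being drained.
 */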
218 static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb)
219 {
220 	struct bch_fs *c = container_of(wb, struct bch_fs, btree_write_buffer);
221 	struct journal *j = &c->journal;
222 
223 	if (!wb->inc.keys.nr)
224 		return;
225 
226 	bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin,
227 			     bch2_btree_write_buffer_journal_flush);
228 
229 	darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr));
230 	darray_resize(&wb->sorted, wb->flushing.keys.size);
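	/*
	 * The resizes above are best effort: if they fail, the code below
	 * only moves as many keys as fit in the room actually available in
	 * flushing.keys and sorted.
	 */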
231 
232 	if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) {
233 		swap(wb->flushing.keys, wb->inc.keys);
234 		goto out;
235 	}
236 
237 	size_t nr = min(darray_room(wb->flushing.keys),
238 			wb->sorted.size - wb->flushing.keys.nr);
239 	nr = min(nr, wb->inc.keys.nr);
240 
241 	memcpy(&darray_top(wb->flushing.keys),
242 	       wb->inc.keys.data,
243 	       sizeof(wb->inc.keys.data[0]) * nr);
244 
245 	memmove(wb->inc.keys.data,
246 		wb->inc.keys.data + nr,
247 	       sizeof(wb->inc.keys.data[0]) * (wb->inc.keys.nr - nr));
248 
249 	wb->flushing.keys.nr	+= nr;
250 	wb->inc.keys.nr		-= nr;
251 out:
252 	if (!wb->inc.keys.nr)
253 		bch2_journal_pin_drop(j, &wb->inc.pin);
254 	else
255 		bch2_journal_pin_update(j, wb->inc.keys.data[0].journal_seq, &wb->inc.pin,
256 					bch2_btree_write_buffer_journal_flush);
257 
258 	if (j->watermark) {
259 		spin_lock(&j->lock);
260 		bch2_journal_set_watermark(j);
261 		spin_unlock(&j->lock);
262 	}
263 
264 	BUG_ON(wb->sorted.size < wb->flushing.keys.nr);
265 }
266 
267 static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
268 {
269 	struct bch_fs *c = trans->c;
270 	struct journal *j = &c->journal;
271 	struct btree_write_buffer *wb = &c->btree_write_buffer;
272 	struct btree_iter iter = { NULL };
273 	size_t overwritten = 0, fast = 0, slowpath = 0, could_not_insert = 0;
274 	bool write_locked = false;
275 	bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags);
276 	int ret = 0;
277 
278 	ret = bch2_journal_error(&c->journal);
279 	if (ret)
280 		return ret;
281 
282 	bch2_trans_unlock(trans);
283 	bch2_trans_begin(trans);
284 
285 	mutex_lock(&wb->inc.lock);
286 	move_keys_from_inc_to_flushing(wb);
287 	mutex_unlock(&wb->inc.lock);
288 
289 	for (size_t i = 0; i < wb->flushing.keys.nr; i++) {
290 		wb->sorted.data[i].idx = i;
291 		wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree;
292 		memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos));
293 	}
294 	wb->sorted.nr = wb->flushing.keys.nr;
295 
296 	/*
297 	 * We first sort so that we can detect and skip redundant updates, and
298 	 * then we attempt to flush in sorted btree order, as this is most
299 	 * efficient.
300 	 *
301 	 * However, since we're not flushing in the order they appear in the
302 	 * journal we won't be able to drop our journal pin until everything is
303 	 * flushed - which means this could deadlock the journal if we weren't
304 	 * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail
305 	 * if it would block taking a journal reservation.
306 	 *
307 	 * If that happens, simply skip the key so we can optimistically insert
308 	 * as many keys as possible in the fast path.
309 	 */
310 	wb_sort(wb->sorted.data, wb->sorted.nr);
311 
312 	darray_for_each(wb->sorted, i) {
313 		struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx];
314 
315 		BUG_ON(!btree_type_uses_write_buffer(k->btree));
316 
317 		for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++)
318 			prefetch(&wb->flushing.keys.data[n->idx]);
319 
320 		BUG_ON(!k->journal_seq);
321 
322 		if (!accounting_replay_done &&
323 		    k->k.k.type == KEY_TYPE_accounting) {
324 			slowpath++;
325 			continue;
326 		}
327 
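		/*
		 * After sorting, updates to the same position are adjacent:
		 * all but the newest can be dropped, except that accounting
		 * keys are deltas and get accumulated into the newer key.
		 */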
328 		if (i + 1 < &darray_top(wb->sorted) &&
329 		    wb_key_eq(i, i + 1)) {
330 			struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx];
331 
332 			if (k->k.k.type == KEY_TYPE_accounting &&
333 			    n->k.k.type == KEY_TYPE_accounting)
334 				bch2_accounting_accumulate(bkey_i_to_accounting(&n->k),
335 							   bkey_i_to_s_c_accounting(&k->k));
336 
337 			overwritten++;
338 			n->journal_seq = min_t(u64, n->journal_seq, k->journal_seq);
339 			k->journal_seq = 0;
340 			continue;
341 		}
342 
343 		if (write_locked) {
344 			struct btree_path *path = btree_iter_path(trans, &iter);
345 
346 			if (path->btree_id != i->btree ||
347 			    bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
348 				bch2_btree_node_unlock_write(trans, path, path->l[0].b);
349 				write_locked = false;
350 
351 				ret = lockrestart_do(trans,
352 					bch2_btree_iter_traverse(&iter) ?:
353 					bch2_foreground_maybe_merge(trans, iter.path, 0,
354 							BCH_WATERMARK_reclaim|
355 							BCH_TRANS_COMMIT_journal_reclaim|
356 							BCH_TRANS_COMMIT_no_check_rw|
357 							BCH_TRANS_COMMIT_no_enospc));
358 				if (ret)
359 					goto err;
360 			}
361 		}
362 
363 		if (!iter.path || iter.btree_id != k->btree) {
364 			bch2_trans_iter_exit(trans, &iter);
365 			bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
366 					     BTREE_ITER_intent|BTREE_ITER_all_snapshots);
367 		}
368 
369 		bch2_btree_iter_set_pos(&iter, k->k.k.p);
370 		btree_iter_path(trans, &iter)->preserve = false;
371 
372 		bool accounting_accumulated = false;
373 		do {
374 			if (race_fault()) {
375 				ret = -BCH_ERR_journal_reclaim_would_deadlock;
376 				break;
377 			}
378 
379 			ret = wb_flush_one(trans, &iter, k, &write_locked,
380 					   &accounting_accumulated, &fast);
381 			if (!write_locked)
382 				bch2_trans_begin(trans);
383 		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
384 
385 		if (!ret) {
386 			k->journal_seq = 0;
387 		} else if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
388 			slowpath++;
389 			ret = 0;
390 		} else
391 			break;
392 	}
393 
394 	if (write_locked) {
395 		struct btree_path *path = btree_iter_path(trans, &iter);
396 		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
397 	}
398 	bch2_trans_iter_exit(trans, &iter);
399 
400 	if (ret)
401 		goto err;
402 
403 	if (slowpath) {
404 		/*
405 		 * Flush in the order they were present in the journal, so that
406 		 * we can release journal pins:
 407 		 * The fast path zeroed journal_seq for keys that were
 408 		 * successfully flushed, so we can skip those here.
409 		 */
410 		trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
411 
412 		sort(wb->flushing.keys.data,
413 		     wb->flushing.keys.nr,
414 		     sizeof(wb->flushing.keys.data[0]),
415 		     wb_key_seq_cmp, NULL);
416 
417 		darray_for_each(wb->flushing.keys, i) {
418 			if (!i->journal_seq)
419 				continue;
420 
421 			if (!accounting_replay_done &&
422 			    i->k.k.type == KEY_TYPE_accounting) {
423 				could_not_insert++;
424 				continue;
425 			}
426 
427 			if (!could_not_insert)
428 				bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin,
429 							bch2_btree_write_buffer_journal_flush);
430 
431 			bch2_trans_begin(trans);
432 
433 			ret = commit_do(trans, NULL, NULL,
434 					BCH_WATERMARK_reclaim|
435 					BCH_TRANS_COMMIT_journal_reclaim|
436 					BCH_TRANS_COMMIT_no_check_rw|
437 					BCH_TRANS_COMMIT_no_enospc|
 438 					BCH_TRANS_COMMIT_no_journal_res,
439 					btree_write_buffered_insert(trans, i));
440 			if (ret)
441 				goto err;
442 
443 			i->journal_seq = 0;
444 		}
445 
446 		/*
447 		 * If journal replay hasn't finished with accounting keys we
448 		 * can't flush accounting keys at all - condense them and leave
449 		 * them for next time.
450 		 *
451 		 * Q: Can the write buffer overflow?
 452 		 * A: Shouldn't be any actual risk. It's just new accounting
453 		 * updates that the write buffer can't flush, and those are only
454 		 * going to be generated by interior btree node updates as
455 		 * journal replay has to split/rewrite nodes to make room for
456 		 * its updates.
457 		 *
 458 		 * And for those new accounting updates, updates to the same
 459 		 * counters get accumulated as they're flushed from the journal
 460 		 * to the write buffer - see the eytzinger-sorted wb->accounting
 461 		 * darray. So we could only overflow if the number of distinct
 462 		 * counters touched somehow was very large.
463 		 */
464 		if (could_not_insert) {
465 			struct btree_write_buffered_key *dst = wb->flushing.keys.data;
466 
467 			darray_for_each(wb->flushing.keys, i)
468 				if (i->journal_seq)
469 					*dst++ = *i;
470 			wb->flushing.keys.nr = dst - wb->flushing.keys.data;
471 		}
472 	}
473 err:
474 	if (ret || !could_not_insert) {
475 		bch2_journal_pin_drop(j, &wb->flushing.pin);
476 		wb->flushing.keys.nr = 0;
477 	}
478 
479 	bch2_fs_fatal_err_on(ret, c, "%s", bch2_err_str(ret));
480 	trace_write_buffer_flush(trans, wb->flushing.keys.nr, overwritten, fast, 0);
481 	return ret;
482 }
483 
484 static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf)
485 {
486 	struct journal_keys_to_wb dst;
487 	int ret = 0;
488 
489 	bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq));
490 
491 	for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) {
492 		jset_entry_for_each_key(entry, k) {
493 			ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k);
494 			if (ret)
495 				goto out;
496 		}
497 
498 		entry->type = BCH_JSET_ENTRY_btree_keys;
499 	}
500 out:
501 	ret = bch2_journal_keys_to_write_buffer_end(c, &dst) ?: ret;
502 	return ret;
503 }
504 
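/*
 * Copy keys out of journal buffers (up to max_seq) into the write buffer. If
 * the journal had to be blocked to hand us a buffer, unblock it and stop
 * early.
 */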
505 static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 max_seq)
506 {
507 	struct journal *j = &c->journal;
508 	struct journal_buf *buf;
509 	bool blocked;
510 	int ret = 0;
511 
512 	while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, max_seq, &blocked))) {
513 		ret = bch2_journal_keys_to_write_buffer(c, buf);
514 
515 		if (!blocked && !ret) {
516 			spin_lock(&j->lock);
517 			buf->need_flush_to_write_buffer = false;
518 			spin_unlock(&j->lock);
519 		}
520 
521 		mutex_unlock(&j->buf_lock);
522 
523 		if (blocked) {
524 			bch2_journal_unblock(j);
525 			break;
526 		}
527 	}
528 
529 	return ret;
530 }
531 
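/*
 * Flush enough of the write buffer to release journal pins at or below
 * max_seq: keep fetching keys from the journal and flushing until neither
 * wb->inc nor wb->flushing still pins a sequence number <= max_seq.
 */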
532 static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 max_seq,
533 					bool *did_work)
534 {
535 	struct bch_fs *c = trans->c;
536 	struct btree_write_buffer *wb = &c->btree_write_buffer;
537 	int ret = 0, fetch_from_journal_err;
538 
539 	do {
540 		bch2_trans_unlock(trans);
541 
542 		fetch_from_journal_err = fetch_wb_keys_from_journal(c, max_seq);
543 
544 		*did_work |= wb->inc.keys.nr || wb->flushing.keys.nr;
545 
546 		/*
547 		 * On memory allocation failure, bch2_btree_write_buffer_flush_locked()
548 		 * is not guaranteed to empty wb->inc:
549 		 */
550 		mutex_lock(&wb->flushing.lock);
551 		ret = bch2_btree_write_buffer_flush_locked(trans);
552 		mutex_unlock(&wb->flushing.lock);
553 	} while (!ret &&
554 		 (fetch_from_journal_err ||
555 		  (wb->inc.pin.seq && wb->inc.pin.seq <= max_seq) ||
556 		  (wb->flushing.pin.seq && wb->flushing.pin.seq <= max_seq)));
557 
558 	return ret;
559 }
560 
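/* Journal pin flush callback: flush the write buffer up to the pinned seq. */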
561 static int bch2_btree_write_buffer_journal_flush(struct journal *j,
562 				struct journal_entry_pin *_pin, u64 seq)
563 {
564 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
565 	bool did_work = false;
566 
567 	return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq, &did_work));
568 }
569 
570 int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
571 {
572 	struct bch_fs *c = trans->c;
573 	bool did_work = false;
574 
575 	trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);
576 
577 	return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal), &did_work);
578 }
579 
580 /*
581  * The write buffer requires flushing when going RO: keys in the journal for the
582  * write buffer don't have a journal pin yet
583  */
584 bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *c)
585 {
586 	if (bch2_journal_error(&c->journal))
587 		return false;
588 
589 	bool did_work = false;
590 	bch2_trans_run(c, btree_write_buffer_flush_seq(trans,
591 				journal_cur_seq(&c->journal), &did_work));
592 	return did_work;
593 }
594 
595 int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
596 {
597 	struct bch_fs *c = trans->c;
598 	struct btree_write_buffer *wb = &c->btree_write_buffer;
599 	int ret = 0;
600 
601 	if (mutex_trylock(&wb->flushing.lock)) {
602 		ret = bch2_btree_write_buffer_flush_locked(trans);
603 		mutex_unlock(&wb->flushing.lock);
604 	}
605 
606 	return ret;
607 }
608 
609 int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
610 {
611 	struct bch_fs *c = trans->c;
612 
613 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
614 		return -BCH_ERR_erofs_no_writes;
615 
616 	int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
617 	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
618 	return ret;
619 }
620 
621 /*
622  * In check and repair code, when checking references to write buffer btrees we
623  * need to issue a flush before we have a definitive error: this issues a flush
624  * if this is a key we haven't yet checked.
625  */
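/*
 * Rough usage sketch (hypothetical caller, not from this file): fsck code
 * that finds a missing backpointer flushes and retries before declaring an
 * inconsistency:
 *
 *	ret = bch2_btree_write_buffer_maybe_flush(trans, extent_k, &last_flushed);
 *	if (ret)
 *		return ret;	// may be transaction_restart_write_buffer_flush
 *	// still missing after the flush: report it as a real error
 */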
626 int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans,
627 					struct bkey_s_c referring_k,
628 					struct bkey_buf *last_flushed)
629 {
630 	struct bch_fs *c = trans->c;
631 	struct bkey_buf tmp;
632 	int ret = 0;
633 
634 	bch2_bkey_buf_init(&tmp);
635 
636 	if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) {
637 		if (trace_write_buffer_maybe_flush_enabled()) {
638 			struct printbuf buf = PRINTBUF;
639 
640 			bch2_bkey_val_to_text(&buf, c, referring_k);
641 			trace_write_buffer_maybe_flush(trans, _RET_IP_, buf.buf);
642 			printbuf_exit(&buf);
643 		}
644 
645 		bch2_bkey_buf_reassemble(&tmp, c, referring_k);
646 
647 		if (bkey_is_btree_ptr(referring_k.k)) {
648 			bch2_trans_unlock(trans);
649 			bch2_btree_interior_updates_flush(c);
650 		}
651 
652 		ret = bch2_btree_write_buffer_flush_sync(trans);
653 		if (ret)
654 			goto err;
655 
656 		bch2_bkey_buf_copy(last_flushed, c, tmp.k);
657 		ret = -BCH_ERR_transaction_restart_write_buffer_flush;
658 	}
659 err:
660 	bch2_bkey_buf_exit(&tmp, c);
661 	return ret;
662 }
663 
664 static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
665 {
666 	struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work);
667 	struct btree_write_buffer *wb = &c->btree_write_buffer;
668 	int ret;
669 
670 	mutex_lock(&wb->flushing.lock);
671 	do {
672 		ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans));
673 	} while (!ret && bch2_btree_write_buffer_should_flush(c));
674 	mutex_unlock(&wb->flushing.lock);
675 
676 	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
677 }
678 
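/*
 * Accounting keys are kept out of the normal key stream: wb->accounting has
 * one entry per distinct counter, kept in eytzinger order, and incoming
 * deltas are accumulated into the matching entry. The entries are zeroed at
 * the start of each journal buffer and the accumulated values emitted at the
 * end - see bch2_journal_keys_to_write_buffer_start()/_end().
 */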
679 static void wb_accounting_sort(struct btree_write_buffer *wb)
680 {
681 	eytzinger0_sort(wb->accounting.data, wb->accounting.nr,
682 			sizeof(wb->accounting.data[0]),
683 			wb_key_cmp, NULL);
684 }
685 
686 int bch2_accounting_key_to_wb_slowpath(struct bch_fs *c, enum btree_id btree,
687 				       struct bkey_i_accounting *k)
688 {
689 	struct btree_write_buffer *wb = &c->btree_write_buffer;
690 	struct btree_write_buffered_key new = { .btree = btree };
691 
692 	bkey_copy(&new.k, &k->k_i);
693 
694 	int ret = darray_push(&wb->accounting, new);
695 	if (ret)
696 		return ret;
697 
698 	wb_accounting_sort(wb);
699 	return 0;
700 }
701 
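/*
 * Slow path for appending a key to the write buffer: the destination darray
 * is out of room. If we were writing directly into wb->flushing and the
 * allocation fails, fall back to wb->inc (taking a journal pin for it) and
 * retry there instead of failing.
 */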
702 int bch2_journal_key_to_wb_slowpath(struct bch_fs *c,
703 			     struct journal_keys_to_wb *dst,
704 			     enum btree_id btree, struct bkey_i *k)
705 {
706 	struct btree_write_buffer *wb = &c->btree_write_buffer;
707 	int ret;
708 retry:
709 	ret = darray_make_room_gfp(&dst->wb->keys, 1, GFP_KERNEL);
710 	if (!ret && dst->wb == &wb->flushing)
711 		ret = darray_resize(&wb->sorted, wb->flushing.keys.size);
712 
713 	if (unlikely(ret)) {
714 		if (dst->wb == &c->btree_write_buffer.flushing) {
715 			mutex_unlock(&dst->wb->lock);
716 			dst->wb = &c->btree_write_buffer.inc;
717 			bch2_journal_pin_add(&c->journal, dst->seq, &dst->wb->pin,
718 					     bch2_btree_write_buffer_journal_flush);
719 			goto retry;
720 		}
721 
722 		return ret;
723 	}
724 
725 	dst->room = darray_room(dst->wb->keys);
726 	if (dst->wb == &wb->flushing)
727 		dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
728 	BUG_ON(!dst->room);
729 	BUG_ON(!dst->seq);
730 
731 	struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
732 	wb_k->journal_seq	= dst->seq;
733 	wb_k->btree		= btree;
734 	bkey_copy(&wb_k->k, k);
735 	dst->wb->keys.nr++;
736 	dst->room--;
737 	return 0;
738 }
739 
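/*
 * start/end bracket copying one journal buffer's keys into the write buffer:
 * start picks the destination (wb->flushing if it's idle, otherwise wb->inc)
 * and takes the needed locks and a journal pin; end emits the accumulated
 * accounting keys, drops the journal pin if the destination ended up empty,
 * kicks the background flush worker if the buffer is getting full, and
 * unlocks.
 */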
740 void bch2_journal_keys_to_write_buffer_start(struct bch_fs *c, struct journal_keys_to_wb *dst, u64 seq)
741 {
742 	struct btree_write_buffer *wb = &c->btree_write_buffer;
743 
744 	if (mutex_trylock(&wb->flushing.lock)) {
745 		mutex_lock(&wb->inc.lock);
746 		move_keys_from_inc_to_flushing(wb);
747 
748 		/*
749 		 * Attempt to skip wb->inc, and add keys directly to
750 		 * wb->flushing, saving us a copy later:
751 		 */
752 
753 		if (!wb->inc.keys.nr) {
754 			dst->wb = &wb->flushing;
755 		} else {
756 			mutex_unlock(&wb->flushing.lock);
757 			dst->wb = &wb->inc;
758 		}
759 	} else {
760 		mutex_lock(&wb->inc.lock);
761 		dst->wb = &wb->inc;
762 	}
763 
764 	dst->room = darray_room(dst->wb->keys);
765 	if (dst->wb == &wb->flushing)
766 		dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
767 	dst->seq = seq;
768 
769 	bch2_journal_pin_add(&c->journal, seq, &dst->wb->pin,
770 			     bch2_btree_write_buffer_journal_flush);
771 
772 	darray_for_each(wb->accounting, i)
773 		memset(&i->k.v, 0, bkey_val_bytes(&i->k.k));
774 }
775 
776 int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_to_wb *dst)
777 {
778 	struct btree_write_buffer *wb = &c->btree_write_buffer;
779 	unsigned live_accounting_keys = 0;
780 	int ret = 0;
781 
782 	darray_for_each(wb->accounting, i)
783 		if (!bch2_accounting_key_is_zero(bkey_i_to_s_c_accounting(&i->k))) {
784 			i->journal_seq = dst->seq;
785 			live_accounting_keys++;
786 			ret = __bch2_journal_key_to_wb(c, dst, i->btree, &i->k);
787 			if (ret)
788 				break;
789 		}
790 
791 	if (live_accounting_keys * 2 < wb->accounting.nr) {
792 		struct btree_write_buffered_key *dst = wb->accounting.data;
793 
794 		darray_for_each(wb->accounting, src)
795 			if (!bch2_accounting_key_is_zero(bkey_i_to_s_c_accounting(&src->k)))
796 				*dst++ = *src;
797 		wb->accounting.nr = dst - wb->accounting.data;
798 		wb_accounting_sort(wb);
799 	}
800 
801 	if (!dst->wb->keys.nr)
802 		bch2_journal_pin_drop(&c->journal, &dst->wb->pin);
803 
804 	if (bch2_btree_write_buffer_should_flush(c) &&
805 	    __bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer) &&
806 	    !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work))
807 		bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
808 
809 	if (dst->wb == &wb->flushing)
810 		mutex_unlock(&wb->flushing.lock);
811 	mutex_unlock(&wb->inc.lock);
812 
813 	return ret;
814 }
815 
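/* Best effort: returns -EINTR rather than waiting if the buffer is busy. */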
816 static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size)
817 {
818 	if (wb->keys.size >= new_size)
819 		return 0;
820 
821 	if (!mutex_trylock(&wb->lock))
822 		return -EINTR;
823 
824 	int ret = darray_resize(&wb->keys, new_size);
825 	mutex_unlock(&wb->lock);
826 	return ret;
827 }
828 
829 int bch2_btree_write_buffer_resize(struct bch_fs *c, size_t new_size)
830 {
831 	struct btree_write_buffer *wb = &c->btree_write_buffer;
832 
833 	return wb_keys_resize(&wb->flushing, new_size) ?:
834 		wb_keys_resize(&wb->inc, new_size);
835 }
836 
837 void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
838 {
839 	struct btree_write_buffer *wb = &c->btree_write_buffer;
840 
841 	BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) &&
842 	       !bch2_journal_error(&c->journal));
843 
844 	darray_exit(&wb->accounting);
845 	darray_exit(&wb->sorted);
846 	darray_exit(&wb->flushing.keys);
847 	darray_exit(&wb->inc.keys);
848 }
849 
850 int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
851 {
852 	struct btree_write_buffer *wb = &c->btree_write_buffer;
853 
854 	mutex_init(&wb->inc.lock);
855 	mutex_init(&wb->flushing.lock);
856 	INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work);
857 
858 	/* Will be resized by journal as needed: */
859 	unsigned initial_size = 1 << 16;
860 
861 	return  darray_make_room(&wb->inc.keys, initial_size) ?:
862 		darray_make_room(&wb->flushing.keys, initial_size) ?:
863 		darray_make_room(&wb->sorted, initial_size);
864 }
865