xref: /linux/fs/bcachefs/btree_io.c (revision 59fff63cc2b75dcfe08f9eeb4b2187d73e53843d)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_sort.h"
6 #include "btree_cache.h"
7 #include "btree_io.h"
8 #include "btree_iter.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "debug.h"
15 #include "error.h"
16 #include "extents.h"
17 #include "io_write.h"
18 #include "journal_reclaim.h"
19 #include "journal_seq_blacklist.h"
20 #include "recovery.h"
21 #include "super-io.h"
22 #include "trace.h"
23 
24 #include <linux/sched/mm.h>
25 
26 void bch2_btree_node_io_unlock(struct btree *b)
27 {
28 	EBUG_ON(!btree_node_write_in_flight(b));
29 
30 	clear_btree_node_write_in_flight_inner(b);
31 	clear_btree_node_write_in_flight(b);
32 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
33 }
34 
35 void bch2_btree_node_io_lock(struct btree *b)
36 {
37 	bch2_assert_btree_nodes_not_locked();
38 
39 	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
40 			    TASK_UNINTERRUPTIBLE);
41 }
42 
43 void __bch2_btree_node_wait_on_read(struct btree *b)
44 {
45 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
46 		       TASK_UNINTERRUPTIBLE);
47 }
48 
49 void __bch2_btree_node_wait_on_write(struct btree *b)
50 {
51 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
52 		       TASK_UNINTERRUPTIBLE);
53 }
54 
55 void bch2_btree_node_wait_on_read(struct btree *b)
56 {
57 	bch2_assert_btree_nodes_not_locked();
58 
59 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
60 		       TASK_UNINTERRUPTIBLE);
61 }
62 
63 void bch2_btree_node_wait_on_write(struct btree *b)
64 {
65 	bch2_assert_btree_nodes_not_locked();
66 
67 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
68 		       TASK_UNINTERRUPTIBLE);
69 }
70 
71 static void verify_no_dups(struct btree *b,
72 			   struct bkey_packed *start,
73 			   struct bkey_packed *end)
74 {
75 #ifdef CONFIG_BCACHEFS_DEBUG
76 	struct bkey_packed *k, *p;
77 
78 	if (start == end)
79 		return;
80 
81 	for (p = start, k = bkey_p_next(start);
82 	     k != end;
83 	     p = k, k = bkey_p_next(k)) {
84 		struct bkey l = bkey_unpack_key(b, p);
85 		struct bkey r = bkey_unpack_key(b, k);
86 
87 		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
88 	}
89 #endif
90 }
91 
92 static void set_needs_whiteout(struct bset *i, int v)
93 {
94 	struct bkey_packed *k;
95 
96 	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
97 		k->needs_whiteout = v;
98 }
99 
100 static void btree_bounce_free(struct bch_fs *c, size_t size,
101 			      bool used_mempool, void *p)
102 {
103 	if (used_mempool)
104 		mempool_free(p, &c->btree_bounce_pool);
105 	else
106 		vpfree(p, size);
107 }
108 
109 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
110 				bool *used_mempool)
111 {
112 	unsigned flags = memalloc_nofs_save();
113 	void *p;
114 
115 	BUG_ON(size > btree_bytes(c));
116 
117 	*used_mempool = false;
118 	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
119 	if (!p) {
120 		*used_mempool = true;
121 		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
122 	}
123 	memalloc_nofs_restore(flags);
124 	return p;
125 }
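
/*
 * A note on the allocation strategy above: we first try an opportunistic
 * vpmalloc() with GFP_NOWAIT (never blocks, never warns) and only fall back
 * to the preallocated btree_bounce_pool, which may sleep but won't fail.
 * The whole thing runs under memalloc_nofs_save() so that any reclaim we
 * trigger here can't recurse back into the filesystem.
 */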
126 
127 static void sort_bkey_ptrs(const struct btree *bt,
128 			   struct bkey_packed **ptrs, unsigned nr)
129 {
130 	unsigned n = nr, a = nr / 2, b, c, d;
131 
132 	if (!a)
133 		return;
134 
135 	/* Heap sort: see lib/sort.c: */
136 	while (1) {
137 		if (a)
138 			a--;
139 		else if (--n)
140 			swap(ptrs[0], ptrs[n]);
141 		else
142 			break;
143 
144 		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
145 			b = bch2_bkey_cmp_packed(bt,
146 					    ptrs[c],
147 					    ptrs[d]) >= 0 ? c : d;
148 		if (d == n)
149 			b = c;
150 
151 		while (b != a &&
152 		       bch2_bkey_cmp_packed(bt,
153 				       ptrs[a],
154 				       ptrs[b]) >= 0)
155 			b = (b - 1) / 2;
156 		c = b;
157 		while (b != a) {
158 			b = (b - 1) / 2;
159 			swap(ptrs[b], ptrs[c]);
160 		}
161 	}
162 }
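
/*
 * The loop above is essentially the bottom-up heapsort from lib/sort.c,
 * specialized to an array of bkey pointers: the first phase (while @a counts
 * down from nr/2) builds a max-heap, then the second phase repeatedly swaps
 * the maximum ptrs[0] to the end of the shrinking array.  The inner loops
 * sift an element down by always following the larger child (slots 2*b+1 and
 * 2*b+2 - e.g. with n = 6, slot 1's children are slots 3 and 4, and slot 2's
 * only in-range child is slot 5), then walk back up to find its final
 * position and rotate that path by one.
 */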
163 
164 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
165 {
166 	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
167 	bool used_mempool = false;
168 	size_t bytes = b->whiteout_u64s * sizeof(u64);
169 
170 	if (!b->whiteout_u64s)
171 		return;
172 
173 	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
174 
175 	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
176 
177 	for (k = unwritten_whiteouts_start(c, b);
178 	     k != unwritten_whiteouts_end(c, b);
179 	     k = bkey_p_next(k))
180 		*--ptrs = k;
181 
182 	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
183 
184 	k = new_whiteouts;
185 
186 	while (ptrs != ptrs_end) {
187 		bkey_copy(k, *ptrs);
188 		k = bkey_p_next(k);
189 		ptrs++;
190 	}
191 
192 	verify_no_dups(b, new_whiteouts,
193 		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
194 
195 	memcpy_u64s(unwritten_whiteouts_start(c, b),
196 		    new_whiteouts, b->whiteout_u64s);
197 
198 	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
199 }
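
/*
 * Note the trick above: rather than sorting the variable-size whiteouts in
 * place, we fill the tail of the bounce buffer with an array of pointers to
 * them, sort the pointers, and then copy the keys back into the node's
 * unwritten whiteout area in sorted order.
 */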
200 
201 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
202 				bool compacting, enum compact_mode mode)
203 {
204 	if (!bset_dead_u64s(b, t))
205 		return false;
206 
207 	switch (mode) {
208 	case COMPACT_LAZY:
209 		return should_compact_bset_lazy(b, t) ||
210 			(compacting && !bset_written(b, bset(b, t)));
211 	case COMPACT_ALL:
212 		return true;
213 	default:
214 		BUG();
215 	}
216 }
217 
218 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
219 {
220 	struct bset_tree *t;
221 	bool ret = false;
222 
223 	for_each_bset(b, t) {
224 		struct bset *i = bset(b, t);
225 		struct bkey_packed *k, *n, *out, *start, *end;
226 		struct btree_node_entry *src = NULL, *dst = NULL;
227 
228 		if (t != b->set && !bset_written(b, i)) {
229 			src = container_of(i, struct btree_node_entry, keys);
230 			dst = max(write_block(b),
231 				  (void *) btree_bkey_last(b, t - 1));
232 		}
233 
234 		if (src != dst)
235 			ret = true;
236 
237 		if (!should_compact_bset(b, t, ret, mode)) {
238 			if (src != dst) {
239 				memmove(dst, src, sizeof(*src) +
240 					le16_to_cpu(src->keys.u64s) *
241 					sizeof(u64));
242 				i = &dst->keys;
243 				set_btree_bset(b, t, i);
244 			}
245 			continue;
246 		}
247 
248 		start	= btree_bkey_first(b, t);
249 		end	= btree_bkey_last(b, t);
250 
251 		if (src != dst) {
252 			memmove(dst, src, sizeof(*src));
253 			i = &dst->keys;
254 			set_btree_bset(b, t, i);
255 		}
256 
257 		out = i->start;
258 
259 		for (k = start; k != end; k = n) {
260 			n = bkey_p_next(k);
261 
262 			if (!bkey_deleted(k)) {
263 				bkey_copy(out, k);
264 				out = bkey_p_next(out);
265 			} else {
266 				BUG_ON(k->needs_whiteout);
267 			}
268 		}
269 
270 		i->u64s = cpu_to_le16((u64 *) out - i->_data);
271 		set_btree_bset_end(b, t);
272 		bch2_bset_set_no_aux_tree(b, t);
273 		ret = true;
274 	}
275 
276 	bch2_verify_btree_nr_keys(b);
277 
278 	bch2_btree_build_aux_trees(b);
279 
280 	return ret;
281 }
282 
283 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
284 			    enum compact_mode mode)
285 {
286 	return bch2_drop_whiteouts(b, mode);
287 }
288 
289 static void btree_node_sort(struct bch_fs *c, struct btree *b,
290 			    unsigned start_idx,
291 			    unsigned end_idx,
292 			    bool filter_whiteouts)
293 {
294 	struct btree_node *out;
295 	struct sort_iter_stack sort_iter;
296 	struct bset_tree *t;
297 	struct bset *start_bset = bset(b, &b->set[start_idx]);
298 	bool used_mempool = false;
299 	u64 start_time, seq = 0;
300 	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
301 	bool sorting_entire_node = start_idx == 0 &&
302 		end_idx == b->nsets;
303 
304 	sort_iter_stack_init(&sort_iter, b);
305 
306 	for (t = b->set + start_idx;
307 	     t < b->set + end_idx;
308 	     t++) {
309 		u64s += le16_to_cpu(bset(b, t)->u64s);
310 		sort_iter_add(&sort_iter.iter,
311 			      btree_bkey_first(b, t),
312 			      btree_bkey_last(b, t));
313 	}
314 
315 	bytes = sorting_entire_node
316 		? btree_bytes(c)
317 		: __vstruct_bytes(struct btree_node, u64s);
318 
319 	out = btree_bounce_alloc(c, bytes, &used_mempool);
320 
321 	start_time = local_clock();
322 
323 	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);
324 
325 	out->keys.u64s = cpu_to_le16(u64s);
326 
327 	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
328 
329 	if (sorting_entire_node)
330 		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
331 				       start_time);
332 
333 	/* Make sure we preserve bset journal_seq: */
334 	for (t = b->set + start_idx; t < b->set + end_idx; t++)
335 		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
336 	start_bset->journal_seq = cpu_to_le64(seq);
337 
338 	if (sorting_entire_node) {
339 		u64s = le16_to_cpu(out->keys.u64s);
340 
341 		BUG_ON(bytes != btree_bytes(c));
342 
343 		/*
344 		 * Our temporary buffer is the same size as the btree node's
345 		 * buffer, so we can just swap buffers instead of doing a big
346 		 * memcpy()
347 		 */
348 		*out = *b->data;
349 		out->keys.u64s = cpu_to_le16(u64s);
350 		swap(out, b->data);
351 		set_btree_bset(b, b->set, &b->data->keys);
352 	} else {
353 		start_bset->u64s = out->keys.u64s;
354 		memcpy_u64s(start_bset->start,
355 			    out->keys.start,
356 			    le16_to_cpu(out->keys.u64s));
357 	}
358 
359 	for (i = start_idx + 1; i < end_idx; i++)
360 		b->nr.bset_u64s[start_idx] +=
361 			b->nr.bset_u64s[i];
362 
363 	b->nsets -= shift;
364 
365 	for (i = start_idx + 1; i < b->nsets; i++) {
366 		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
367 		b->set[i]		= b->set[i + shift];
368 	}
369 
370 	for (i = b->nsets; i < MAX_BSETS; i++)
371 		b->nr.bset_u64s[i] = 0;
372 
373 	set_btree_bset_end(b, &b->set[start_idx]);
374 	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
375 
376 	btree_bounce_free(c, bytes, used_mempool, out);
377 
378 	bch2_verify_btree_nr_keys(b);
379 }
380 
381 void bch2_btree_sort_into(struct bch_fs *c,
382 			 struct btree *dst,
383 			 struct btree *src)
384 {
385 	struct btree_nr_keys nr;
386 	struct btree_node_iter src_iter;
387 	u64 start_time = local_clock();
388 
389 	BUG_ON(dst->nsets != 1);
390 
391 	bch2_bset_set_no_aux_tree(dst, dst->set);
392 
393 	bch2_btree_node_iter_init_from_start(&src_iter, src);
394 
395 	nr = bch2_sort_repack(btree_bset_first(dst),
396 			src, &src_iter,
397 			&dst->format,
398 			true);
399 
400 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
401 			       start_time);
402 
403 	set_btree_bset_end(dst, dst->set);
404 
405 	dst->nr.live_u64s	+= nr.live_u64s;
406 	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
407 	dst->nr.packed_keys	+= nr.packed_keys;
408 	dst->nr.unpacked_keys	+= nr.unpacked_keys;
409 
410 	bch2_verify_btree_nr_keys(dst);
411 }
412 
413 /*
414  * We're about to add another bset to the btree node, so if there are currently
415  * too many bsets - sort some of them together:
416  */
417 static bool btree_node_compact(struct bch_fs *c, struct btree *b)
418 {
419 	unsigned unwritten_idx;
420 	bool ret = false;
421 
422 	for (unwritten_idx = 0;
423 	     unwritten_idx < b->nsets;
424 	     unwritten_idx++)
425 		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
426 			break;
427 
428 	if (b->nsets - unwritten_idx > 1) {
429 		btree_node_sort(c, b, unwritten_idx,
430 				b->nsets, false);
431 		ret = true;
432 	}
433 
434 	if (unwritten_idx > 1) {
435 		btree_node_sort(c, b, 0, unwritten_idx, false);
436 		ret = true;
437 	}
438 
439 	return ret;
440 }
441 
442 void bch2_btree_build_aux_trees(struct btree *b)
443 {
444 	struct bset_tree *t;
445 
446 	for_each_bset(b, t)
447 		bch2_bset_build_aux_tree(b, t,
448 				!bset_written(b, bset(b, t)) &&
449 				t == bset_tree_last(b));
450 }
451 
452 /*
453  * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
454  *
455  * The first bset is going to be of similar order to the size of the node, the
456  * last bset is bounded by btree_write_set_buffer(), which is set to keep the
457  * memmove on insert from being too expensive: the middle bset should, ideally,
458  * be the geometric mean of the first and the last.
459  *
460  * Returns true if the middle bset is greater than that geometric mean:
461  */
462 static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
463 {
464 	unsigned mid_u64s_bits =
465 		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
466 
467 	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
468 }
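
/*
 * That is, the threshold is the geometric mean computed in log2 space:
 * sqrt(first * last) = 2^((log2(first) + log2(last)) / 2).  As a purely
 * hypothetical example, if a node held on the order of 2^15 u64s and
 * BTREE_WRITE_SET_U64s_BITS were 9, the middle bset would trigger a full
 * compaction once it grew past 2^((15 + 9) / 2) = 2^12 u64s.
 */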
469 
470 /*
471  * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
472  * inserted into
473  *
474  * Safe to call if there already is an unwritten bset - will only add a new bset
475  * if @b doesn't already have one.
476  *
477  * If we sorted (invalidating iterators), bch2_trans_node_reinit_iter() fixes them up.
478  */
479 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
480 {
481 	struct bch_fs *c = trans->c;
482 	struct btree_node_entry *bne;
483 	bool reinit_iter = false;
484 
485 	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
486 	BUG_ON(bset_written(b, bset(b, &b->set[1])));
487 	BUG_ON(btree_node_just_written(b));
488 
489 	if (b->nsets == MAX_BSETS &&
490 	    !btree_node_write_in_flight(b) &&
491 	    should_compact_all(c, b)) {
492 		bch2_btree_node_write(c, b, SIX_LOCK_write,
493 				      BTREE_WRITE_init_next_bset);
494 		reinit_iter = true;
495 	}
496 
497 	if (b->nsets == MAX_BSETS &&
498 	    btree_node_compact(c, b))
499 		reinit_iter = true;
500 
501 	BUG_ON(b->nsets >= MAX_BSETS);
502 
503 	bne = want_new_bset(c, b);
504 	if (bne)
505 		bch2_bset_init_next(c, b, bne);
506 
507 	bch2_btree_build_aux_trees(b);
508 
509 	if (reinit_iter)
510 		bch2_trans_node_reinit_iter(trans, b);
511 }
512 
513 static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
514 			  struct btree *b)
515 {
516 	prt_printf(out, "%s level %u/%u\n  ",
517 	       bch2_btree_ids[b->c.btree_id],
518 	       b->c.level,
519 	       bch2_btree_id_root(c, b->c.btree_id)->level);
520 	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
521 }
522 
523 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
524 			  struct bch_dev *ca,
525 			  struct btree *b, struct bset *i,
526 			  unsigned offset, int write)
527 {
528 	prt_printf(out, bch2_log_msg(c, "%s"),
529 		   write == READ
530 		   ? "error validating btree node "
531 		   : "corrupt btree node before write ");
532 	if (ca)
533 		prt_printf(out, "on %s ", ca->name);
534 	prt_printf(out, "at btree ");
535 	btree_pos_to_text(out, c, b);
536 
537 	prt_printf(out, "\n  node offset %u", b->written);
538 	if (i)
539 		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
540 	prt_str(out, ": ");
541 }
542 
543 __printf(8, 9)
544 static int __btree_err(int ret,
545 		       struct bch_fs *c,
546 		       struct bch_dev *ca,
547 		       struct btree *b,
548 		       struct bset *i,
549 		       int write,
550 		       bool have_retry,
551 		       const char *fmt, ...)
552 {
553 	struct printbuf out = PRINTBUF;
554 	va_list args;
555 
556 	btree_err_msg(&out, c, ca, b, i, b->written, write);
557 
558 	va_start(args, fmt);
559 	prt_vprintf(&out, fmt, args);
560 	va_end(args);
561 
562 	if (write == WRITE) {
563 		bch2_print_string_as_lines(KERN_ERR, out.buf);
564 		ret = c->opts.errors == BCH_ON_ERROR_continue
565 			? 0
566 			: -BCH_ERR_fsck_errors_not_fixed;
567 		goto out;
568 	}
569 
570 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
571 		ret = -BCH_ERR_btree_node_read_err_fixable;
572 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
573 		ret = -BCH_ERR_btree_node_read_err_bad_node;
574 
575 	switch (ret) {
576 	case -BCH_ERR_btree_node_read_err_fixable:
577 		mustfix_fsck_err(c, "%s", out.buf);
578 		ret = -BCH_ERR_fsck_fix;
579 		break;
580 	case -BCH_ERR_btree_node_read_err_want_retry:
581 	case -BCH_ERR_btree_node_read_err_must_retry:
582 		bch2_print_string_as_lines(KERN_ERR, out.buf);
583 		break;
584 	case -BCH_ERR_btree_node_read_err_bad_node:
585 		bch2_print_string_as_lines(KERN_ERR, out.buf);
586 		bch2_topology_error(c);
587 		ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?: -EIO;
588 		break;
589 	case -BCH_ERR_btree_node_read_err_incompatible:
590 		bch2_print_string_as_lines(KERN_ERR, out.buf);
591 		ret = -BCH_ERR_fsck_errors_not_fixed;
592 		break;
593 	default:
594 		BUG();
595 	}
596 out:
597 fsck_err:
598 	printbuf_exit(&out);
599 	return ret;
600 }
601 
602 #define btree_err(type, c, ca, b, i, msg, ...)				\
603 ({									\
604 	int _ret = __btree_err(type, c, ca, b, i, write, have_retry, msg, ##__VA_ARGS__);\
605 									\
606 	if (_ret != -BCH_ERR_fsck_fix) {				\
607 		ret = _ret;						\
608 		goto fsck_err;						\
609 	}								\
610 									\
611 	*saw_error = true;						\
612 })
613 
614 #define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
615 
616 /*
617  * When btree topology repair changes the start or end of a node, that might
618  * mean we have to drop keys that are no longer inside the node:
619  */
620 __cold
621 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
622 {
623 	struct bset_tree *t;
624 
625 	for_each_bset(b, t) {
626 		struct bset *i = bset(b, t);
627 		struct bkey_packed *k;
628 
629 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
630 			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
631 				break;
632 
633 		if (k != i->start) {
634 			unsigned shift = (u64 *) k - (u64 *) i->start;
635 
636 			memmove_u64s_down(i->start, k,
637 					  (u64 *) vstruct_end(i) - (u64 *) k);
638 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
639 			set_btree_bset_end(b, t);
640 		}
641 
642 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
643 			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
644 				break;
645 
646 		if (k != vstruct_last(i)) {
647 			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
648 			set_btree_bset_end(b, t);
649 		}
650 	}
651 
652 	/*
653 	 * Always rebuild search trees: eytzinger search tree nodes directly
654 	 * depend on the values of min/max key:
655 	 */
656 	bch2_bset_set_no_aux_tree(b, b->set);
657 	bch2_btree_build_aux_trees(b);
658 
659 	struct bkey_s_c k;
660 	struct bkey unpacked;
661 	struct btree_node_iter iter;
662 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
663 		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
664 		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
665 	}
666 }
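
/*
 * For example, if topology repair moved min_key forward, a bset that was
 *
 *	|k0|k1|k2|k3|	(k0, k1 now compare < min_key)
 *
 * is trimmed by memmove_u64s_down() to
 *
 *	|k2|k3|		(u64s reduced by the size of k0 and k1)
 *
 * while keys past the new max_key are dropped simply by shrinking u64s so
 * that the bset ends just before the first such key.
 */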
667 
668 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
669 			 struct btree *b, struct bset *i,
670 			 unsigned offset, unsigned sectors,
671 			 int write, bool have_retry, bool *saw_error)
672 {
673 	unsigned version = le16_to_cpu(i->version);
674 	struct printbuf buf1 = PRINTBUF;
675 	struct printbuf buf2 = PRINTBUF;
676 	int ret = 0;
677 
678 	btree_err_on(!bch2_version_compatible(version),
679 		     -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i,
680 		     "unsupported bset version %u.%u",
681 		     BCH_VERSION_MAJOR(version),
682 		     BCH_VERSION_MINOR(version));
683 
684 	if (btree_err_on(version < c->sb.version_min,
685 			 -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
686 			 "bset version %u older than superblock version_min %u",
687 			 version, c->sb.version_min)) {
688 		mutex_lock(&c->sb_lock);
689 		c->disk_sb.sb->version_min = cpu_to_le16(version);
690 		bch2_write_super(c);
691 		mutex_unlock(&c->sb_lock);
692 	}
693 
694 	if (btree_err_on(BCH_VERSION_MAJOR(version) >
695 			 BCH_VERSION_MAJOR(c->sb.version),
696 			 -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
697 			 "bset version %u newer than superblock version %u",
698 			 version, c->sb.version)) {
699 		mutex_lock(&c->sb_lock);
700 		c->disk_sb.sb->version = cpu_to_le16(version);
701 		bch2_write_super(c);
702 		mutex_unlock(&c->sb_lock);
703 	}
704 
705 	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
706 		     -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i,
707 		     "BSET_SEPARATE_WHITEOUTS no longer supported");
708 
709 	if (btree_err_on(offset + sectors > btree_sectors(c),
710 			 -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
711 			 "bset past end of btree node")) {
712 		i->u64s = 0;
713 		ret = 0;
714 		goto out;
715 	}
716 
717 	btree_err_on(offset && !i->u64s,
718 		     -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
719 		     "empty bset");
720 
721 	btree_err_on(BSET_OFFSET(i) &&
722 		     BSET_OFFSET(i) != offset,
723 		     -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
724 		     "bset at wrong sector offset");
725 
726 	if (!offset) {
727 		struct btree_node *bn =
728 			container_of(i, struct btree_node, keys);
729 		/* These indicate that we read the wrong btree node: */
730 
731 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
732 			struct bch_btree_ptr_v2 *bp =
733 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
734 
735 			/* XXX endianness */
736 			btree_err_on(bp->seq != bn->keys.seq,
737 				     -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
738 				     "incorrect sequence number (wrong btree node)");
739 		}
740 
741 		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
742 			     -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
743 			     "incorrect btree id");
744 
745 		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
746 			     -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
747 			     "incorrect level");
748 
749 		if (!write)
750 			compat_btree_node(b->c.level, b->c.btree_id, version,
751 					  BSET_BIG_ENDIAN(i), write, bn);
752 
753 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
754 			struct bch_btree_ptr_v2 *bp =
755 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
756 
757 			if (BTREE_PTR_RANGE_UPDATED(bp)) {
758 				b->data->min_key = bp->min_key;
759 				b->data->max_key = b->key.k.p;
760 			}
761 
762 			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
763 				     -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
764 				     "incorrect min_key: got %s should be %s",
765 				     (printbuf_reset(&buf1),
766 				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
767 				     (printbuf_reset(&buf2),
768 				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
769 		}
770 
771 		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
772 			     -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
773 			     "incorrect max key %s",
774 			     (printbuf_reset(&buf1),
775 			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
776 
777 		if (write)
778 			compat_btree_node(b->c.level, b->c.btree_id, version,
779 					  BSET_BIG_ENDIAN(i), write, bn);
780 
781 		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
782 			     -BCH_ERR_btree_node_read_err_bad_node, c, ca, b, i,
783 			     "invalid bkey format: %s\n  %s", buf1.buf,
784 			     (printbuf_reset(&buf2),
785 			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
786 		printbuf_reset(&buf1);
787 
788 		compat_bformat(b->c.level, b->c.btree_id, version,
789 			       BSET_BIG_ENDIAN(i), write,
790 			       &bn->format);
791 	}
792 out:
793 fsck_err:
794 	printbuf_exit(&buf2);
795 	printbuf_exit(&buf1);
796 	return ret;
797 }
798 
799 static int bset_key_invalid(struct bch_fs *c, struct btree *b,
800 			    struct bkey_s_c k,
801 			    bool updated_range, int rw,
802 			    struct printbuf *err)
803 {
804 	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
805 		(!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?:
806 		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
807 }
808 
809 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
810 			 struct bset *i, int write,
811 			 bool have_retry, bool *saw_error)
812 {
813 	unsigned version = le16_to_cpu(i->version);
814 	struct bkey_packed *k, *prev = NULL;
815 	struct printbuf buf = PRINTBUF;
816 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
817 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
818 	int ret = 0;
819 
820 	for (k = i->start;
821 	     k != vstruct_last(i);) {
822 		struct bkey_s u;
823 		struct bkey tmp;
824 
825 		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
826 				 -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
827 				 "key extends past end of bset")) {
828 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
829 			break;
830 		}
831 
832 		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
833 				 -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
834 				 "invalid bkey format %u", k->format)) {
835 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
836 			memmove_u64s_down(k, bkey_p_next(k),
837 					  (u64 *) vstruct_end(i) - (u64 *) k);
838 			continue;
839 		}
840 
841 		/* XXX: validate k->u64s */
842 		if (!write)
843 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
844 				    BSET_BIG_ENDIAN(i), write,
845 				    &b->format, k);
846 
847 		u = __bkey_disassemble(b, k, &tmp);
848 
849 		printbuf_reset(&buf);
850 		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
851 			printbuf_reset(&buf);
852 			prt_printf(&buf, "invalid bkey:  ");
853 			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
854 			prt_printf(&buf, "\n  ");
855 			bch2_bkey_val_to_text(&buf, c, u.s_c);
856 
857 			btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf);
858 
859 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
860 			memmove_u64s_down(k, bkey_p_next(k),
861 					  (u64 *) vstruct_end(i) - (u64 *) k);
862 			continue;
863 		}
864 
865 		if (write)
866 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
867 				    BSET_BIG_ENDIAN(i), write,
868 				    &b->format, k);
869 
870 		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
871 			struct bkey up = bkey_unpack_key(b, prev);
872 
873 			printbuf_reset(&buf);
874 			prt_printf(&buf, "keys out of order: ");
875 			bch2_bkey_to_text(&buf, &up);
876 			prt_printf(&buf, " > ");
877 			bch2_bkey_to_text(&buf, u.k);
878 
879 			bch2_dump_bset(c, b, i, 0);
880 
881 			if (btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf)) {
882 				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
883 				memmove_u64s_down(k, bkey_p_next(k),
884 						  (u64 *) vstruct_end(i) - (u64 *) k);
885 				continue;
886 			}
887 		}
888 
889 		prev = k;
890 		k = bkey_p_next(k);
891 	}
892 fsck_err:
893 	printbuf_exit(&buf);
894 	return ret;
895 }
896 
897 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
898 			      struct btree *b, bool have_retry, bool *saw_error)
899 {
900 	struct btree_node_entry *bne;
901 	struct sort_iter *iter;
902 	struct btree_node *sorted;
903 	struct bkey_packed *k;
904 	struct bch_extent_ptr *ptr;
905 	struct bset *i;
906 	bool used_mempool, blacklisted;
907 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
908 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
909 	unsigned u64s;
910 	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
911 	struct printbuf buf = PRINTBUF;
912 	int ret = 0, retry_read = 0, write = READ;
913 
914 	b->version_ondisk = U16_MAX;
915 	/* We might get called multiple times on read retry: */
916 	b->written = 0;
917 
918 	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
919 	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
920 
921 	if (bch2_meta_read_fault("btree"))
922 		btree_err(-BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
923 			  "dynamic fault");
924 
925 	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
926 		     -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
927 		     "bad magic: want %llx, got %llx",
928 		     bset_magic(c), le64_to_cpu(b->data->magic));
929 
930 	btree_err_on(!b->data->keys.seq,
931 		     -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
932 		     "bad btree header: seq 0");
933 
934 	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
935 		struct bch_btree_ptr_v2 *bp =
936 			&bkey_i_to_btree_ptr_v2(&b->key)->v;
937 
938 		btree_err_on(b->data->keys.seq != bp->seq,
939 			     -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
940 			     "got wrong btree node (seq %llx want %llx)",
941 			     b->data->keys.seq, bp->seq);
942 	}
943 
944 	while (b->written < (ptr_written ?: btree_sectors(c))) {
945 		unsigned sectors;
946 		struct nonce nonce;
947 		struct bch_csum csum;
948 		bool first = !b->written;
949 
950 		if (!b->written) {
951 			i = &b->data->keys;
952 
953 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
954 				     -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
955 				     "unknown checksum type %llu",
956 				     BSET_CSUM_TYPE(i));
957 
958 			nonce = btree_nonce(i, b->written << 9);
959 			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
960 
961 			btree_err_on(bch2_crc_cmp(csum, b->data->csum),
962 				     -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
963 				     "invalid checksum");
964 
965 			ret = bset_encrypt(c, i, b->written << 9);
966 			if (bch2_fs_fatal_err_on(ret, c,
967 					"error decrypting btree node: %i", ret))
968 				goto fsck_err;
969 
970 			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
971 				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
972 				     -BCH_ERR_btree_node_read_err_incompatible, c, NULL, b, NULL,
973 				     "btree node does not have NEW_EXTENT_OVERWRITE set");
974 
975 			sectors = vstruct_sectors(b->data, c->block_bits);
976 		} else {
977 			bne = write_block(b);
978 			i = &bne->keys;
979 
980 			if (i->seq != b->data->keys.seq)
981 				break;
982 
983 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
984 				     -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
985 				     "unknown checksum type %llu",
986 				     BSET_CSUM_TYPE(i));
987 
988 			nonce = btree_nonce(i, b->written << 9);
989 			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
990 
991 			btree_err_on(bch2_crc_cmp(csum, bne->csum),
992 				     -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
993 				     "invalid checksum");
994 
995 			ret = bset_encrypt(c, i, b->written << 9);
996 			if (bch2_fs_fatal_err_on(ret, c,
997 					"error decrypting btree node: %i\n", ret))
998 				goto fsck_err;
999 
1000 			sectors = vstruct_sectors(bne, c->block_bits);
1001 		}
1002 
1003 		b->version_ondisk = min(b->version_ondisk,
1004 					le16_to_cpu(i->version));
1005 
1006 		ret = validate_bset(c, ca, b, i, b->written, sectors,
1007 				    READ, have_retry, saw_error);
1008 		if (ret)
1009 			goto fsck_err;
1010 
1011 		if (!b->written)
1012 			btree_node_set_format(b, b->data->format);
1013 
1014 		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
1015 		if (ret)
1016 			goto fsck_err;
1017 
1018 		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
1019 
1020 		blacklisted = bch2_journal_seq_is_blacklisted(c,
1021 					le64_to_cpu(i->journal_seq),
1022 					true);
1023 
1024 		btree_err_on(blacklisted && first,
1025 			     -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
1026 			     "first btree node bset has blacklisted journal seq (%llu)",
1027 			     le64_to_cpu(i->journal_seq));
1028 
1029 		btree_err_on(blacklisted && ptr_written,
1030 			     -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
1031 			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
1032 			     le64_to_cpu(i->journal_seq),
1033 			     b->written, b->written + sectors, ptr_written);
1034 
1035 		b->written += sectors;
1036 
1037 		if (blacklisted && !first)
1038 			continue;
1039 
1040 		sort_iter_add(iter,
1041 			      vstruct_idx(i, 0),
1042 			      vstruct_last(i));
1043 	}
1044 
1045 	if (ptr_written) {
1046 		btree_err_on(b->written < ptr_written,
1047 			     -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
1048 			     "btree node data missing: expected %u sectors, found %u",
1049 			     ptr_written, b->written);
1050 	} else {
1051 		for (bne = write_block(b);
1052 		     bset_byte_offset(b, bne) < btree_bytes(c);
1053 		     bne = (void *) bne + block_bytes(c))
1054 			btree_err_on(bne->keys.seq == b->data->keys.seq &&
1055 				     !bch2_journal_seq_is_blacklisted(c,
1056 								      le64_to_cpu(bne->keys.journal_seq),
1057 								      true),
1058 				     -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
1059 				     "found bset signature after last bset");
1060 	}
1061 
1062 	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
1063 	sorted->keys.u64s = 0;
1064 
1065 	set_btree_bset(b, b->set, &b->data->keys);
1066 
1067 	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
1068 
1069 	u64s = le16_to_cpu(sorted->keys.u64s);
1070 	*sorted = *b->data;
1071 	sorted->keys.u64s = cpu_to_le16(u64s);
1072 	swap(sorted, b->data);
1073 	set_btree_bset(b, b->set, &b->data->keys);
1074 	b->nsets = 1;
1075 
1076 	BUG_ON(b->nr.live_u64s != u64s);
1077 
1078 	btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
1079 
1080 	if (updated_range)
1081 		bch2_btree_node_drop_keys_outside_node(b);
1082 
1083 	i = &b->data->keys;
1084 	for (k = i->start; k != vstruct_last(i);) {
1085 		struct bkey tmp;
1086 		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
1087 
1088 		printbuf_reset(&buf);
1089 
1090 		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
1091 		    (bch2_inject_invalid_keys &&
1092 		     !bversion_cmp(u.k->version, MAX_VERSION))) {
1093 			printbuf_reset(&buf);
1094 
1095 			prt_printf(&buf, "invalid bkey: ");
1096 			bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
1097 			prt_printf(&buf, "\n  ");
1098 			bch2_bkey_val_to_text(&buf, c, u.s_c);
1099 
1100 			btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf);
1101 
1102 			btree_keys_account_key_drop(&b->nr, 0, k);
1103 
1104 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1105 			memmove_u64s_down(k, bkey_p_next(k),
1106 					  (u64 *) vstruct_end(i) - (u64 *) k);
1107 			set_btree_bset_end(b, b->set);
1108 			continue;
1109 		}
1110 
1111 		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
1112 			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
1113 
1114 			bp.v->mem_ptr = 0;
1115 		}
1116 
1117 		k = bkey_p_next(k);
1118 	}
1119 
1120 	bch2_bset_build_aux_tree(b, b->set, false);
1121 
1122 	set_needs_whiteout(btree_bset_first(b), true);
1123 
1124 	btree_node_reset_sib_u64s(b);
1125 
1126 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
1127 		struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);
1128 
1129 		if (ca2->mi.state != BCH_MEMBER_STATE_rw)
1130 			set_btree_node_need_rewrite(b);
1131 	}
1132 
1133 	if (!ptr_written)
1134 		set_btree_node_need_rewrite(b);
1135 out:
1136 	mempool_free(iter, &c->fill_iter);
1137 	printbuf_exit(&buf);
1138 	return retry_read;
1139 fsck_err:
1140 	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
1141 	    ret == -BCH_ERR_btree_node_read_err_must_retry)
1142 		retry_read = 1;
1143 	else
1144 		set_btree_node_read_error(b);
1145 	goto out;
1146 }
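
/*
 * To summarize the read path above: each bset is checksummed (and, on
 * encrypted filesystems, decrypted) and validated individually; bsets whose
 * journal_seq is blacklisted are skipped; and whatever survives is merged
 * into a single sorted bset with bch2_key_sort_fix_overlapping().  After the
 * merge each key's value is validated once more, and bad keys are dropped
 * rather than failing the whole node.
 */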
1147 
1148 static void btree_node_read_work(struct work_struct *work)
1149 {
1150 	struct btree_read_bio *rb =
1151 		container_of(work, struct btree_read_bio, work);
1152 	struct bch_fs *c	= rb->c;
1153 	struct btree *b		= rb->b;
1154 	struct bch_dev *ca	= bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1155 	struct bio *bio		= &rb->bio;
1156 	struct bch_io_failures failed = { .nr = 0 };
1157 	struct printbuf buf = PRINTBUF;
1158 	bool saw_error = false;
1159 	bool retry = false;
1160 	bool can_retry;
1161 
1162 	goto start;
1163 	while (1) {
1164 		retry = true;
1165 		bch_info(c, "retrying read");
1166 		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1167 		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
1168 		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1169 		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
1170 		bio->bi_iter.bi_size	= btree_bytes(c);
1171 
1172 		if (rb->have_ioref) {
1173 			bio_set_dev(bio, ca->disk_sb.bdev);
1174 			submit_bio_wait(bio);
1175 		} else {
1176 			bio->bi_status = BLK_STS_REMOVED;
1177 		}
1178 start:
1179 		printbuf_reset(&buf);
1180 		btree_pos_to_text(&buf, c, b);
1181 		bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
1182 				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
1183 		if (rb->have_ioref)
1184 			percpu_ref_put(&ca->io_ref);
1185 		rb->have_ioref = false;
1186 
1187 		bch2_mark_io_failure(&failed, &rb->pick);
1188 
1189 		can_retry = bch2_bkey_pick_read_device(c,
1190 				bkey_i_to_s_c(&b->key),
1191 				&failed, &rb->pick) > 0;
1192 
1193 		if (!bio->bi_status &&
1194 		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
1195 			if (retry)
1196 				bch_info(c, "retry success");
1197 			break;
1198 		}
1199 
1200 		saw_error = true;
1201 
1202 		if (!can_retry) {
1203 			set_btree_node_read_error(b);
1204 			break;
1205 		}
1206 	}
1207 
1208 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1209 			       rb->start_time);
1210 	bio_put(&rb->bio);
1211 
1212 	if (saw_error && !btree_node_read_error(b)) {
1213 		printbuf_reset(&buf);
1214 		bch2_bpos_to_text(&buf, b->key.k.p);
1215 		bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
1216 			 __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
1217 
1218 		bch2_btree_node_rewrite_async(c, b);
1219 	}
1220 
1221 	printbuf_exit(&buf);
1222 	clear_btree_node_read_in_flight(b);
1223 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1224 }
1225 
1226 static void btree_node_read_endio(struct bio *bio)
1227 {
1228 	struct btree_read_bio *rb =
1229 		container_of(bio, struct btree_read_bio, bio);
1230 	struct bch_fs *c	= rb->c;
1231 
1232 	if (rb->have_ioref) {
1233 		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1234 
1235 		bch2_latency_acct(ca, rb->start_time, READ);
1236 	}
1237 
1238 	queue_work(c->io_complete_wq, &rb->work);
1239 }
1240 
1241 struct btree_node_read_all {
1242 	struct closure		cl;
1243 	struct bch_fs		*c;
1244 	struct btree		*b;
1245 	unsigned		nr;
1246 	void			*buf[BCH_REPLICAS_MAX];
1247 	struct bio		*bio[BCH_REPLICAS_MAX];
1248 	blk_status_t		err[BCH_REPLICAS_MAX];
1249 };
1250 
1251 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1252 {
1253 	struct btree_node *bn = data;
1254 	struct btree_node_entry *bne;
1255 	unsigned offset = 0;
1256 
1257 	if (le64_to_cpu(bn->magic) != bset_magic(c))
1258 		return 0;
1259 
1260 	while (offset < btree_sectors(c)) {
1261 		if (!offset) {
1262 			offset += vstruct_sectors(bn, c->block_bits);
1263 		} else {
1264 			bne = data + (offset << 9);
1265 			if (bne->keys.seq != bn->keys.seq)
1266 				break;
1267 			offset += vstruct_sectors(bne, c->block_bits);
1268 		}
1269 	}
1270 
1271 	return offset;
1272 }
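
/*
 * The walk above mirrors the on-disk layout of a btree node: the first bset
 * is embedded in struct btree_node at sector 0, and each subsequent bset is
 * wrapped in a struct btree_node_entry starting at the next block boundary.
 * Roughly:
 *
 *	sector 0:	struct btree_node	(csum, magic, format, bset)
 *	next block:	struct btree_node_entry	(csum, bset)
 *	next block:	struct btree_node_entry	(csum, bset)
 *	...
 *
 * Every bset carries the node's seq, so stale data from a previous use of
 * the bucket is detected by its keys.seq not matching the header's.
 */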
1273 
1274 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1275 {
1276 	struct btree_node *bn = data;
1277 	struct btree_node_entry *bne;
1278 
1279 	if (!offset)
1280 		return false;
1281 
1282 	while (offset < btree_sectors(c)) {
1283 		bne = data + (offset << 9);
1284 		if (bne->keys.seq == bn->keys.seq)
1285 			return true;
1286 		offset++;
1287 	}
1288 
1289 	return false;
1291 }
1292 
1293 static void btree_node_read_all_replicas_done(struct closure *cl)
1294 {
1295 	struct btree_node_read_all *ra =
1296 		container_of(cl, struct btree_node_read_all, cl);
1297 	struct bch_fs *c = ra->c;
1298 	struct btree *b = ra->b;
1299 	struct printbuf buf = PRINTBUF;
1300 	bool dump_bset_maps = false;
1301 	bool have_retry = false;
1302 	int ret = 0, best = -1, write = READ;
1303 	unsigned i, written = 0, written2 = 0;
1304 	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1305 		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1306 	bool _saw_error = false, *saw_error = &_saw_error;
1307 
1308 	for (i = 0; i < ra->nr; i++) {
1309 		struct btree_node *bn = ra->buf[i];
1310 
1311 		if (ra->err[i])
1312 			continue;
1313 
1314 		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1315 		    (seq && seq != bn->keys.seq))
1316 			continue;
1317 
1318 		if (best < 0) {
1319 			best = i;
1320 			written = btree_node_sectors_written(c, bn);
1321 			continue;
1322 		}
1323 
1324 		written2 = btree_node_sectors_written(c, ra->buf[i]);
1325 		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
1326 				 "btree node sectors written mismatch: %u != %u",
1327 				 written, written2) ||
1328 		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1329 				 -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
1330 				 "found bset signature after last bset") ||
1331 		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1332 				 -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
1333 				 "btree node replicas content mismatch"))
1334 			dump_bset_maps = true;
1335 
1336 		if (written2 > written) {
1337 			written = written2;
1338 			best = i;
1339 		}
1340 	}
1341 fsck_err:
1342 	if (dump_bset_maps) {
1343 		for (i = 0; i < ra->nr; i++) {
1344 			struct btree_node *bn = ra->buf[i];
1345 			struct btree_node_entry *bne = NULL;
1346 			unsigned offset = 0, sectors;
1347 			bool gap = false;
1348 
1349 			if (ra->err[i])
1350 				continue;
1351 
1352 			printbuf_reset(&buf);
1353 
1354 			while (offset < btree_sectors(c)) {
1355 				if (!offset) {
1356 					sectors = vstruct_sectors(bn, c->block_bits);
1357 				} else {
1358 					bne = ra->buf[i] + (offset << 9);
1359 					if (bne->keys.seq != bn->keys.seq)
1360 						break;
1361 					sectors = vstruct_sectors(bne, c->block_bits);
1362 				}
1363 
1364 				prt_printf(&buf, " %u-%u", offset, offset + sectors);
1365 				if (bne && bch2_journal_seq_is_blacklisted(c,
1366 							le64_to_cpu(bne->keys.journal_seq), false))
1367 					prt_printf(&buf, "*");
1368 				offset += sectors;
1369 			}
1370 
1371 			while (offset < btree_sectors(c)) {
1372 				bne = ra->buf[i] + (offset << 9);
1373 				if (bne->keys.seq == bn->keys.seq) {
1374 					if (!gap)
1375 						prt_printf(&buf, " GAP");
1376 					gap = true;
1377 
1378 					sectors = vstruct_sectors(bne, c->block_bits);
1379 					prt_printf(&buf, " %u-%u", offset, offset + sectors);
1380 					if (bch2_journal_seq_is_blacklisted(c,
1381 							le64_to_cpu(bne->keys.journal_seq), false))
1382 						prt_printf(&buf, "*");
1383 				}
1384 				offset++;
1385 			}
1386 
1387 			bch_err(c, "replica %u:%s", i, buf.buf);
1388 		}
1389 	}
1390 
1391 	if (best >= 0) {
1392 		memcpy(b->data, ra->buf[best], btree_bytes(c));
1393 		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
1394 	} else {
1395 		ret = -1;
1396 	}
1397 
1398 	if (ret)
1399 		set_btree_node_read_error(b);
1400 	else if (*saw_error)
1401 		bch2_btree_node_rewrite_async(c, b);
1402 
1403 	for (i = 0; i < ra->nr; i++) {
1404 		mempool_free(ra->buf[i], &c->btree_bounce_pool);
1405 		bio_put(ra->bio[i]);
1406 	}
1407 
1408 	closure_debug_destroy(&ra->cl);
1409 	kfree(ra);
1410 	printbuf_exit(&buf);
1411 
1412 	clear_btree_node_read_in_flight(b);
1413 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1414 }
1415 
1416 static void btree_node_read_all_replicas_endio(struct bio *bio)
1417 {
1418 	struct btree_read_bio *rb =
1419 		container_of(bio, struct btree_read_bio, bio);
1420 	struct bch_fs *c	= rb->c;
1421 	struct btree_node_read_all *ra = rb->ra;
1422 
1423 	if (rb->have_ioref) {
1424 		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1425 
1426 		bch2_latency_acct(ca, rb->start_time, READ);
1427 	}
1428 
1429 	ra->err[rb->idx] = bio->bi_status;
1430 	closure_put(&ra->cl);
1431 }
1432 
1433 /*
1434  * XXX This allocates multiple times from the same mempools, and can deadlock
1435  * under sufficient memory pressure (but is only a debug path)
1436  */
1437 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1438 {
1439 	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1440 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1441 	const union bch_extent_entry *entry;
1442 	struct extent_ptr_decoded pick;
1443 	struct btree_node_read_all *ra;
1444 	unsigned i;
1445 
1446 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
1447 	if (!ra)
1448 		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1449 
1450 	closure_init(&ra->cl, NULL);
1451 	ra->c	= c;
1452 	ra->b	= b;
1453 	ra->nr	= bch2_bkey_nr_ptrs(k);
1454 
1455 	for (i = 0; i < ra->nr; i++) {
1456 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1457 		ra->bio[i] = bio_alloc_bioset(NULL,
1458 					      buf_pages(ra->buf[i], btree_bytes(c)),
1459 					      REQ_OP_READ|REQ_SYNC|REQ_META,
1460 					      GFP_NOFS,
1461 					      &c->btree_bio);
1462 	}
1463 
1464 	i = 0;
1465 	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1466 		struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1467 		struct btree_read_bio *rb =
1468 			container_of(ra->bio[i], struct btree_read_bio, bio);
1469 		rb->c			= c;
1470 		rb->b			= b;
1471 		rb->ra			= ra;
1472 		rb->start_time		= local_clock();
1473 		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
1474 		rb->idx			= i;
1475 		rb->pick		= pick;
1476 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1477 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
1478 		bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
1479 
1480 		if (rb->have_ioref) {
1481 			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1482 				     bio_sectors(&rb->bio));
1483 			bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1484 
1485 			closure_get(&ra->cl);
1486 			submit_bio(&rb->bio);
1487 		} else {
1488 			ra->err[i] = BLK_STS_REMOVED;
1489 		}
1490 
1491 		i++;
1492 	}
1493 
1494 	if (sync) {
1495 		closure_sync(&ra->cl);
1496 		btree_node_read_all_replicas_done(&ra->cl);
1497 	} else {
1498 		continue_at(&ra->cl, btree_node_read_all_replicas_done,
1499 			    c->io_complete_wq);
1500 	}
1501 
1502 	return 0;
1503 }
1504 
1505 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1506 			  bool sync)
1507 {
1508 	struct extent_ptr_decoded pick;
1509 	struct btree_read_bio *rb;
1510 	struct bch_dev *ca;
1511 	struct bio *bio;
1512 	int ret;
1513 
1514 	trace_and_count(c, btree_node_read, c, b);
1515 
1516 	if (bch2_verify_all_btree_replicas &&
1517 	    !btree_node_read_all_replicas(c, b, sync))
1518 		return;
1519 
1520 	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1521 					 NULL, &pick);
1522 
1523 	if (ret <= 0) {
1524 		struct printbuf buf = PRINTBUF;
1525 
1526 		prt_str(&buf, "btree node read error: no device to read from\n at ");
1527 		btree_pos_to_text(&buf, c, b);
1528 		bch_err(c, "%s", buf.buf);
1529 
1530 		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1531 		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
1532 			bch2_fatal_error(c);
1533 
1534 		set_btree_node_read_error(b);
1535 		clear_btree_node_read_in_flight(b);
1536 		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1537 		printbuf_exit(&buf);
1538 		return;
1539 	}
1540 
1541 	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1542 
1543 	bio = bio_alloc_bioset(NULL,
1544 			       buf_pages(b->data, btree_bytes(c)),
1545 			       REQ_OP_READ|REQ_SYNC|REQ_META,
1546 			       GFP_NOFS,
1547 			       &c->btree_bio);
1548 	rb = container_of(bio, struct btree_read_bio, bio);
1549 	rb->c			= c;
1550 	rb->b			= b;
1551 	rb->ra			= NULL;
1552 	rb->start_time		= local_clock();
1553 	rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
1554 	rb->pick		= pick;
1555 	INIT_WORK(&rb->work, btree_node_read_work);
1556 	bio->bi_iter.bi_sector	= pick.ptr.offset;
1557 	bio->bi_end_io		= btree_node_read_endio;
1558 	bch2_bio_map(bio, b->data, btree_bytes(c));
1559 
1560 	if (rb->have_ioref) {
1561 		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1562 			     bio_sectors(bio));
1563 		bio_set_dev(bio, ca->disk_sb.bdev);
1564 
1565 		if (sync) {
1566 			submit_bio_wait(bio);
1567 
1568 			btree_node_read_work(&rb->work);
1569 		} else {
1570 			submit_bio(bio);
1571 		}
1572 	} else {
1573 		bio->bi_status = BLK_STS_REMOVED;
1574 
1575 		if (sync)
1576 			btree_node_read_work(&rb->work);
1577 		else
1578 			queue_work(c->io_complete_wq, &rb->work);
1579 	}
1580 }
1581 
1582 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1583 				  const struct bkey_i *k, unsigned level)
1584 {
1585 	struct bch_fs *c = trans->c;
1586 	struct closure cl;
1587 	struct btree *b;
1588 	int ret;
1589 
1590 	closure_init_stack(&cl);
1591 
1592 	do {
1593 		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1594 		closure_sync(&cl);
1595 	} while (ret);
1596 
1597 	b = bch2_btree_node_mem_alloc(trans, level != 0);
1598 	bch2_btree_cache_cannibalize_unlock(c);
1599 
1600 	BUG_ON(IS_ERR(b));
1601 
1602 	bkey_copy(&b->key, k);
1603 	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1604 
1605 	set_btree_node_read_in_flight(b);
1606 
1607 	bch2_btree_node_read(c, b, true);
1608 
1609 	if (btree_node_read_error(b)) {
1610 		bch2_btree_node_hash_remove(&c->btree_cache, b);
1611 
1612 		mutex_lock(&c->btree_cache.lock);
1613 		list_move(&b->list, &c->btree_cache.freeable);
1614 		mutex_unlock(&c->btree_cache.lock);
1615 
1616 		ret = -EIO;
1617 		goto err;
1618 	}
1619 
1620 	bch2_btree_set_root_for_read(c, b);
1621 err:
1622 	six_unlock_write(&b->c.lock);
1623 	six_unlock_intent(&b->c.lock);
1624 
1625 	return ret;
1626 }
1627 
1628 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1629 			const struct bkey_i *k, unsigned level)
1630 {
1631 	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
1632 }
1633 
1634 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1635 			      struct btree_write *w)
1636 {
1637 	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1638 
1639 	do {
1640 		old = new = v;
1641 		if (!(old & 1))
1642 			break;
1643 
1644 		new &= ~1UL;
1645 	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1646 
1647 	if (old & 1)
1648 		closure_put(&((struct btree_update *) new)->cl);
1649 
1650 	bch2_journal_pin_drop(&c->journal, &w->journal);
1651 }
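
/*
 * b->will_make_reachable is a tagged pointer: it holds the address of the
 * btree_update that will make this node reachable, with bit 0 set while a
 * closure ref is still held on that update.  The cmpxchg loop above clears
 * bit 0, and only the caller that actually cleared it drops the ref -
 * roughly:
 *
 *	struct btree_update *as = (void *) (v & ~1UL);
 *	if (v & 1)
 *		closure_put(&as->cl);
 */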
1652 
1653 static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
1654 {
1655 	struct btree_write *w = btree_prev_write(b);
1656 	unsigned long old, new, v;
1657 	unsigned type = 0;
1658 
1659 	bch2_btree_complete_write(c, b, w);
1660 
1661 	v = READ_ONCE(b->flags);
1662 	do {
1663 		old = new = v;
1664 
1665 		if ((old & (1U << BTREE_NODE_dirty)) &&
1666 		    (old & (1U << BTREE_NODE_need_write)) &&
1667 		    !(old & (1U << BTREE_NODE_never_write)) &&
1668 		    !(old & (1U << BTREE_NODE_write_blocked)) &&
1669 		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
1670 			new &= ~(1U << BTREE_NODE_dirty);
1671 			new &= ~(1U << BTREE_NODE_need_write);
1672 			new |=  (1U << BTREE_NODE_write_in_flight);
1673 			new |=  (1U << BTREE_NODE_write_in_flight_inner);
1674 			new |=  (1U << BTREE_NODE_just_written);
1675 			new ^=  (1U << BTREE_NODE_write_idx);
1676 
1677 			type = new & BTREE_WRITE_TYPE_MASK;
1678 			new &= ~BTREE_WRITE_TYPE_MASK;
1679 		} else {
1680 			new &= ~(1U << BTREE_NODE_write_in_flight);
1681 			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
1682 		}
1683 	} while ((v = cmpxchg(&b->flags, old, new)) != old);
1684 
1685 	if (new & (1U << BTREE_NODE_write_in_flight))
1686 		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
1687 	else
1688 		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
1689 }
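
/*
 * I.e. the write completion doubles as a state machine step: if the node was
 * redirtied and marked need_write while this write was in flight (and isn't
 * blocked from writing or still unreachable), we immediately kick off the
 * next write, carrying over the recorded write type; otherwise we clear
 * write_in_flight and wake anyone waiting in bch2_btree_node_wait_on_write().
 */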
1690 
1691 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1692 {
1693 	struct btree_trans *trans = bch2_trans_get(c);
1694 
1695 	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
1696 	__btree_node_write_done(c, b);
1697 	six_unlock_read(&b->c.lock);
1698 
1699 	bch2_trans_put(trans);
1700 }
1701 
1702 static void btree_node_write_work(struct work_struct *work)
1703 {
1704 	struct btree_write_bio *wbio =
1705 		container_of(work, struct btree_write_bio, work);
1706 	struct bch_fs *c	= wbio->wbio.c;
1707 	struct btree *b		= wbio->wbio.bio.bi_private;
1708 	struct bch_extent_ptr *ptr;
1709 	int ret = 0;
1710 
1711 	btree_bounce_free(c,
1712 		wbio->data_bytes,
1713 		wbio->wbio.used_mempool,
1714 		wbio->data);
1715 
1716 	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
1717 		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1718 
1719 	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
1720 		goto err;
1721 
1722 	if (wbio->wbio.first_btree_write) {
1723 		if (wbio->wbio.failed.nr) {
1724 
1725 		}
1726 	} else {
1727 		ret = bch2_trans_do(c, NULL, NULL, 0,
1728 			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
1729 					BCH_WATERMARK_reclaim|
1730 					BTREE_INSERT_JOURNAL_RECLAIM|
1731 					BTREE_INSERT_NOFAIL|
1732 					BTREE_INSERT_NOCHECK_RW,
1733 					!wbio->wbio.failed.nr));
1734 		if (ret)
1735 			goto err;
1736 	}
1737 out:
1738 	bio_put(&wbio->wbio.bio);
1739 	btree_node_write_done(c, b);
1740 	return;
1741 err:
1742 	set_btree_node_noevict(b);
1743 	if (!bch2_err_matches(ret, EROFS))
1744 		bch2_fs_fatal_error(c, "fatal error writing btree node: %s", bch2_err_str(ret));
1745 	goto out;
1746 }
1747 
1748 static void btree_node_write_endio(struct bio *bio)
1749 {
1750 	struct bch_write_bio *wbio	= to_wbio(bio);
1751 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
1752 	struct bch_write_bio *orig	= parent ?: wbio;
1753 	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
1754 	struct bch_fs *c		= wbio->c;
1755 	struct btree *b			= wbio->bio.bi_private;
1756 	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);
1757 	unsigned long flags;
1758 
1759 	if (wbio->have_ioref)
1760 		bch2_latency_acct(ca, wbio->submit_time, WRITE);
1761 
1762 	if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
1763 			       bch2_blk_status_to_str(bio->bi_status)) ||
1764 	    bch2_meta_write_fault("btree")) {
1765 		spin_lock_irqsave(&c->btree_write_error_lock, flags);
1766 		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1767 		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1768 	}
1769 
1770 	if (wbio->have_ioref)
1771 		percpu_ref_put(&ca->io_ref);
1772 
1773 	if (parent) {
1774 		bio_put(bio);
1775 		bio_endio(&parent->bio);
1776 		return;
1777 	}
1778 
1779 	clear_btree_node_write_in_flight_inner(b);
1780 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
1781 	INIT_WORK(&wb->work, btree_node_write_work);
1782 	queue_work(c->btree_io_complete_wq, &wb->work);
1783 }
1784 
1785 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1786 				   struct bset *i, unsigned sectors)
1787 {
1788 	struct printbuf buf = PRINTBUF;
1789 	bool saw_error;
1790 	int ret;
1791 
1792 	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
1793 				BKEY_TYPE_btree, WRITE, &buf);
1794 
1795 	if (ret)
1796 		bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
1797 	printbuf_exit(&buf);
1798 	if (ret)
1799 		return ret;
1800 
1801 	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
1802 		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
1803 	if (ret) {
1804 		bch2_inconsistent_error(c);
1805 		dump_stack();
1806 	}
1807 
1808 	return ret;
1809 }
1810 
1811 static void btree_write_submit(struct work_struct *work)
1812 {
1813 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
1814 	struct bch_extent_ptr *ptr;
1815 	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1816 
1817 	bkey_copy(&tmp.k, &wbio->key);
1818 
1819 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
1820 		ptr->offset += wbio->sector_offset;
1821 
1822 	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
1823 				  &tmp.k, false);
1824 }
1825 
1826 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
1827 {
1828 	struct btree_write_bio *wbio;
1829 	struct bset_tree *t;
1830 	struct bset *i;
1831 	struct btree_node *bn = NULL;
1832 	struct btree_node_entry *bne = NULL;
1833 	struct sort_iter_stack sort_iter;
1834 	struct nonce nonce;
1835 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
1836 	u64 seq = 0;
1837 	bool used_mempool;
1838 	unsigned long old, new;
1839 	bool validate_before_checksum = false;
1840 	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
1841 	void *data;
1842 	int ret;
1843 
1844 	if (flags & BTREE_WRITE_ALREADY_STARTED)
1845 		goto do_write;
1846 
1847 	/*
1848 	 * We may only have a read lock on the btree node - the dirty bit is our
1849 	 * "lock" against racing with other threads that may be trying to start
1850 	 * a write; we do a write iff we clear the dirty bit. Since setting the
1851 	 * dirty bit requires a write lock, we can't race with other threads
1852 	 * redirtying it:
1853 	 */
1854 	do {
1855 		old = new = READ_ONCE(b->flags);
1856 
1857 		if (!(old & (1 << BTREE_NODE_dirty)))
1858 			return;
1859 
1860 		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
1861 		    !(old & (1 << BTREE_NODE_need_write)))
1862 			return;
1863 
1864 		if (old &
1865 		    ((1 << BTREE_NODE_never_write)|
1866 		     (1 << BTREE_NODE_write_blocked)))
1867 			return;
1868 
1869 		if (b->written &&
1870 		    (old & (1 << BTREE_NODE_will_make_reachable)))
1871 			return;
1872 
1873 		if (old & (1 << BTREE_NODE_write_in_flight))
1874 			return;
1875 
1876 		if (flags & BTREE_WRITE_ONLY_IF_NEED)
1877 			type = new & BTREE_WRITE_TYPE_MASK;
1878 		new &= ~BTREE_WRITE_TYPE_MASK;
1879 
1880 		new &= ~(1 << BTREE_NODE_dirty);
1881 		new &= ~(1 << BTREE_NODE_need_write);
1882 		new |=  (1 << BTREE_NODE_write_in_flight);
1883 		new |=  (1 << BTREE_NODE_write_in_flight_inner);
1884 		new |=  (1 << BTREE_NODE_just_written);
1885 		new ^=  (1 << BTREE_NODE_write_idx);
1886 	} while (cmpxchg_acquire(&b->flags, old, new) != old);
1887 
1888 	if (new & (1U << BTREE_NODE_need_write))
1889 		return;
1890 do_write:
1891 	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
1892 
1893 	atomic_dec(&c->btree_cache.dirty);
1894 
1895 	BUG_ON(btree_node_fake(b));
1896 	BUG_ON((b->will_make_reachable != 0) != !b->written);
1897 
1898 	BUG_ON(b->written >= btree_sectors(c));
1899 	BUG_ON(b->written & (block_sectors(c) - 1));
1900 	BUG_ON(bset_written(b, btree_bset_last(b)));
1901 	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1902 	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1903 
1904 	bch2_sort_whiteouts(c, b);
1905 
1906 	sort_iter_stack_init(&sort_iter, b);
1907 
1908 	bytes = !b->written
1909 		? sizeof(struct btree_node)
1910 		: sizeof(struct btree_node_entry);
1911 
1912 	bytes += b->whiteout_u64s * sizeof(u64);
1913 
1914 	for_each_bset(b, t) {
1915 		i = bset(b, t);
1916 
1917 		if (bset_written(b, i))
1918 			continue;
1919 
1920 		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1921 		sort_iter_add(&sort_iter.iter,
1922 			      btree_bkey_first(b, t),
1923 			      btree_bkey_last(b, t));
1924 		seq = max(seq, le64_to_cpu(i->journal_seq));
1925 	}
1926 
1927 	BUG_ON(b->written && !seq);
1928 
1929 	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
1930 	bytes += 8;
1931 
1932 	/* buffer must be a multiple of the block size */
1933 	bytes = round_up(bytes, block_bytes(c));
1934 
1935 	data = btree_bounce_alloc(c, bytes, &used_mempool);
1936 
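	/*
	 * The first write starts with a full btree_node header; subsequent
	 * writes start with a btree_node_entry:
	 */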
1937 	if (!b->written) {
1938 		bn = data;
1939 		*bn = *b->data;
1940 		i = &bn->keys;
1941 	} else {
1942 		bne = data;
1943 		bne->keys = b->data->keys;
1944 		i = &bne->keys;
1945 	}
1946 
1947 	i->journal_seq	= cpu_to_le64(seq);
1948 	i->u64s		= 0;
1949 
1950 	sort_iter_add(&sort_iter.iter,
1951 		      unwritten_whiteouts_start(c, b),
1952 		      unwritten_whiteouts_end(c, b));
1953 	SET_BSET_SEPARATE_WHITEOUTS(i, false);
1954 
1955 	b->whiteout_u64s = 0;
1956 
1957 	u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
1958 	le16_add_cpu(&i->u64s, u64s);
1959 
1960 	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
1961 
1962 	set_needs_whiteout(i, false);
1963 
1964 	/* do we have data to write? */
1965 	if (b->written && !i->u64s)
1966 		goto nowrite;
1967 
1968 	bytes_to_write = vstruct_end(i) - data;
1969 	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
1970 
1971 	if (!b->written &&
1972 	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
1973 		BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
1974 
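	/* zero pad up to a block boundary: */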
1975 	memset(data + bytes_to_write, 0,
1976 	       (sectors_to_write << 9) - bytes_to_write);
1977 
1978 	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
1979 	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
1980 	BUG_ON(i->seq != b->data->keys.seq);
1981 
1982 	i->version = cpu_to_le16(c->sb.version);
1983 	SET_BSET_OFFSET(i, b->written);
1984 	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
1985 
1986 	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
1987 		validate_before_checksum = true;
1988 
1989 	/* validate_bset will be modifying: */
1990 	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
1991 		validate_before_checksum = true;
1992 
1993 	/* if we're going to be encrypting, check metadata validity first: */
1994 	if (validate_before_checksum &&
1995 	    validate_bset_for_write(c, b, i, sectors_to_write))
1996 		goto err;
1997 
1998 	ret = bset_encrypt(c, i, b->written << 9);
1999 	if (bch2_fs_fatal_err_on(ret, c,
2000 			"error encrypting btree node: %i\n", ret))
2001 		goto err;
2002 
2003 	nonce = btree_nonce(i, b->written << 9);
2004 
2005 	if (bn)
2006 		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2007 	else
2008 		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2009 
2010 	/* if we're not encrypting, check metadata after checksumming: */
2011 	if (!validate_before_checksum &&
2012 	    validate_bset_for_write(c, b, i, sectors_to_write))
2013 		goto err;
2014 
2015 	/*
2016 	 * We handle btree write errors by immediately halting the journal -
2017 	 * after we've done that, we can't issue any subsequent btree writes
2018 	 * because they might have pointers to new nodes that failed to write.
2019 	 *
2020 	 * Furthermore, there's no point in doing any more btree writes because
2021 	 * with the journal stopped, we're never going to update the journal to
2022 	 * reflect that those writes were done and the data flushed from the
2023 	 * journal:
2024 	 *
2025 	 * Also on journal error, the pending write may have updates that were
2026 	 * never journalled (interior nodes, see btree_update_nodes_written()) -
2027 	 * it's critical that we don't do the write in that case; otherwise we
2028 	 * will have updates visible that weren't in the journal:
2029 	 *
2030 	 * Make sure to update b->written so bch2_btree_init_next() doesn't
2031 	 * break:
2032 	 */
2033 	if (bch2_journal_error(&c->journal) ||
2034 	    c->opts.nochanges)
2035 		goto err;
2036 
2037 	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2038 
2039 	wbio = container_of(bio_alloc_bioset(NULL,
2040 				buf_pages(data, sectors_to_write << 9),
2041 				REQ_OP_WRITE|REQ_META,
2042 				GFP_NOFS,
2043 				&c->btree_bio),
2044 			    struct btree_write_bio, wbio.bio);
2045 	wbio_init(&wbio->wbio.bio);
2046 	wbio->data			= data;
2047 	wbio->data_bytes		= bytes;
2048 	wbio->sector_offset		= b->written;
2049 	wbio->wbio.c			= c;
2050 	wbio->wbio.used_mempool		= used_mempool;
2051 	wbio->wbio.first_btree_write	= !b->written;
2052 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
2053 	wbio->wbio.bio.bi_private	= b;
2054 
2055 	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2056 
2057 	bkey_copy(&wbio->key, &b->key);
2058 
2059 	b->written += sectors_to_write;
2060 
2061 	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2062 		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2063 			cpu_to_le16(b->written);
2064 
2065 	atomic64_inc(&c->btree_write_stats[type].nr);
2066 	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2067 
2068 	INIT_WORK(&wbio->work, btree_write_submit);
2069 	queue_work(c->io_complete_wq, &wbio->work);
2070 	return;
2071 err:
2072 	set_btree_node_noevict(b);
2073 	b->written += sectors_to_write;
2074 nowrite:
2075 	btree_bounce_free(c, bytes, used_mempool, data);
2076 	__btree_node_write_done(c, b);
2077 }
2078 
2079 /*
2080  * Work that must be done with write lock held:
2081  */
2082 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2083 {
2084 	bool invalidated_iter = false;
2085 	struct btree_node_entry *bne;
2086 	struct bset_tree *t;
2087 
2088 	if (!btree_node_just_written(b))
2089 		return false;
2090 
2091 	BUG_ON(b->whiteout_u64s);
2092 
2093 	clear_btree_node_just_written(b);
2094 
2095 	/*
2096 	 * Note: immediately after write, bset_written() doesn't work - the
2097 	 * amount of data we had to write after compaction might have been
2098 	 * smaller than the offset of the last bset.
2099 	 *
2100 	 * However, we know that all bsets have been written here, as long as
2101 	 * we're still holding the write lock:
2102 	 */
2103 
2104 	/*
2105 	 * XXX: decide if we really want to unconditionally sort down to a
2106 	 * single bset:
2107 	 */
2108 	if (b->nsets > 1) {
2109 		btree_node_sort(c, b, 0, b->nsets, true);
2110 		invalidated_iter = true;
2111 	} else {
2112 		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2113 	}
2114 
2115 	for_each_bset(b, t)
2116 		set_needs_whiteout(bset(b, t), true);
2117 
2118 	bch2_btree_verify(c, b);
2119 
2120 	/*
2121 	 * If later we don't unconditionally sort down to a single bset, we have
2122 	 * to ensure this is still true:
2123 	 */
2124 	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2125 
2126 	bne = want_new_bset(c, b);
2127 	if (bne)
2128 		bch2_bset_init_next(c, b, bne);
2129 
2130 	bch2_btree_build_aux_trees(b);
2131 
2132 	return invalidated_iter;
2133 }
2134 
2135 /*
2136  * Use this one if the node is already locked (lock_type_held says which):
2137  */
2138 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2139 			   enum six_lock_type lock_type_held,
2140 			   unsigned flags)
2141 {
2142 	if (lock_type_held == SIX_LOCK_intent ||
2143 	    (lock_type_held == SIX_LOCK_read &&
2144 	     six_lock_tryupgrade(&b->c.lock))) {
2145 		__bch2_btree_node_write(c, b, flags);
2146 
2147 		/* don't cycle lock unnecessarily: */
2148 		if (btree_node_just_written(b) &&
2149 		    six_trylock_write(&b->c.lock)) {
2150 			bch2_btree_post_write_cleanup(c, b);
2151 			six_unlock_write(&b->c.lock);
2152 		}
2153 
2154 		if (lock_type_held == SIX_LOCK_read)
2155 			six_lock_downgrade(&b->c.lock);
2156 	} else {
2157 		__bch2_btree_node_write(c, b, flags);
2158 		if (lock_type_held == SIX_LOCK_write &&
2159 		    btree_node_just_written(b))
2160 			bch2_btree_post_write_cleanup(c, b);
2161 	}
2162 }
2163 
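/*
 * Wait for all in-flight reads or writes (selected by @flag) on cached btree
 * nodes to finish; returns true if we had to wait for anything. We restart
 * the walk after each wait because the RCU read lock is dropped to sleep.
 */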
2164 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2165 {
2166 	struct bucket_table *tbl;
2167 	struct rhash_head *pos;
2168 	struct btree *b;
2169 	unsigned i;
2170 	bool ret = false;
2171 restart:
2172 	rcu_read_lock();
2173 	for_each_cached_btree(b, c, tbl, i, pos)
2174 		if (test_bit(flag, &b->flags)) {
2175 			rcu_read_unlock();
2176 			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2177 			ret = true;
2178 			goto restart;
2179 		}
2180 	rcu_read_unlock();
2181 
2182 	return ret;
2183 }
2184 
2185 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2186 {
2187 	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2188 }
2189 
2190 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2191 {
2192 	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2193 }
2194 
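/*
 * Human readable names for each btree write type, generated from the
 * BCH_BTREE_WRITE_TYPES() x-macro:
 */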
2195 static const char * const bch2_btree_write_types[] = {
2196 #define x(t, n) [n] = #t,
2197 	BCH_BTREE_WRITE_TYPES()
2198 	NULL
2199 };
2200 
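/*
 * Print per-write-type statistics: the number of writes of each type and the
 * average size per write (total bytes / count), in human readable units.
 */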
2201 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2202 {
2203 	printbuf_tabstop_push(out, 20);
2204 	printbuf_tabstop_push(out, 10);
2205 
2206 	prt_tab(out);
2207 	prt_str(out, "nr");
2208 	prt_tab(out);
2209 	prt_str(out, "size");
2210 	prt_newline(out);
2211 
2212 	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2213 		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
2214 		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);
2215 
2216 		prt_printf(out, "%s:", bch2_btree_write_types[i]);
2217 		prt_tab(out);
2218 		prt_u64(out, nr);
2219 		prt_tab(out);
2220 		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2221 		prt_newline(out);
2222 	}
2223 }
2224