1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_sort.h"
6 #include "btree_cache.h"
7 #include "btree_io.h"
8 #include "btree_iter.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "debug.h"
15 #include "error.h"
16 #include "extents.h"
17 #include "io_write.h"
18 #include "journal_reclaim.h"
19 #include "journal_seq_blacklist.h"
20 #include "recovery.h"
21 #include "super-io.h"
22 #include "trace.h"
23 
24 #include <linux/sched/mm.h>
25 
26 static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
27 {
28 	prt_printf(out, "btree=%s l=%u seq %llx\n",
29 		   bch2_btree_id_str(BTREE_NODE_ID(bn)),
30 		   (unsigned) BTREE_NODE_LEVEL(bn), bn->keys.seq);
31 	prt_str(out, "min: ");
32 	bch2_bpos_to_text(out, bn->min_key);
33 	prt_newline(out);
34 	prt_str(out, "max: ");
35 	bch2_bpos_to_text(out, bn->max_key);
36 }
37 
38 void bch2_btree_node_io_unlock(struct btree *b)
39 {
40 	EBUG_ON(!btree_node_write_in_flight(b));
41 
42 	clear_btree_node_write_in_flight_inner(b);
43 	clear_btree_node_write_in_flight(b);
44 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
45 }
46 
47 void bch2_btree_node_io_lock(struct btree *b)
48 {
49 	bch2_assert_btree_nodes_not_locked();
50 
51 	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
52 			    TASK_UNINTERRUPTIBLE);
53 }
54 
55 void __bch2_btree_node_wait_on_read(struct btree *b)
56 {
57 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
58 		       TASK_UNINTERRUPTIBLE);
59 }
60 
61 void __bch2_btree_node_wait_on_write(struct btree *b)
62 {
63 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
64 		       TASK_UNINTERRUPTIBLE);
65 }
66 
67 void bch2_btree_node_wait_on_read(struct btree *b)
68 {
69 	bch2_assert_btree_nodes_not_locked();
70 
71 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
72 		       TASK_UNINTERRUPTIBLE);
73 }
74 
75 void bch2_btree_node_wait_on_write(struct btree *b)
76 {
77 	bch2_assert_btree_nodes_not_locked();
78 
79 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
80 		       TASK_UNINTERRUPTIBLE);
81 }
82 
83 static void verify_no_dups(struct btree *b,
84 			   struct bkey_packed *start,
85 			   struct bkey_packed *end)
86 {
87 #ifdef CONFIG_BCACHEFS_DEBUG
88 	struct bkey_packed *k, *p;
89 
90 	if (start == end)
91 		return;
92 
93 	for (p = start, k = bkey_p_next(start);
94 	     k != end;
95 	     p = k, k = bkey_p_next(k)) {
96 		struct bkey l = bkey_unpack_key(b, p);
97 		struct bkey r = bkey_unpack_key(b, k);
98 
99 		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
100 	}
101 #endif
102 }
103 
104 static void set_needs_whiteout(struct bset *i, int v)
105 {
106 	struct bkey_packed *k;
107 
108 	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
109 		k->needs_whiteout = v;
110 }
111 
112 static void btree_bounce_free(struct bch_fs *c, size_t size,
113 			      bool used_mempool, void *p)
114 {
115 	if (used_mempool)
116 		mempool_free(p, &c->btree_bounce_pool);
117 	else
118 		kvfree(p);
119 }
120 
121 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
122 				bool *used_mempool)
123 {
124 	unsigned flags = memalloc_nofs_save();
125 	void *p;
126 
127 	BUG_ON(size > c->opts.btree_node_size);
128 
129 	*used_mempool = false;
130 	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
131 	if (!p) {
132 		*used_mempool = true;
133 		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
134 	}
135 	memalloc_nofs_restore(flags);
136 	return p;
137 }
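
/*
 * Usage sketch: the used_mempool flag from btree_bounce_alloc() must be
 * threaded through to the matching btree_bounce_free(), since the buffer may
 * have come from either kvmalloc() or the bounce mempool (this is the pattern
 * bch2_sort_whiteouts() below follows):
 *
 *	bool used_mempool = false;
 *	void *buf = btree_bounce_alloc(c, bytes, &used_mempool);
 *	...
 *	btree_bounce_free(c, bytes, used_mempool, buf);
 */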
138 
139 static void sort_bkey_ptrs(const struct btree *bt,
140 			   struct bkey_packed **ptrs, unsigned nr)
141 {
142 	unsigned n = nr, a = nr / 2, b, c, d;
143 
144 	if (!a)
145 		return;
146 
147 	/* Heap sort: see lib/sort.c: */
148 	while (1) {
149 		if (a)
150 			a--;
151 		else if (--n)
152 			swap(ptrs[0], ptrs[n]);
153 		else
154 			break;
155 
156 		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
157 			b = bch2_bkey_cmp_packed(bt,
158 					    ptrs[c],
159 					    ptrs[d]) >= 0 ? c : d;
160 		if (d == n)
161 			b = c;
162 
163 		while (b != a &&
164 		       bch2_bkey_cmp_packed(bt,
165 				       ptrs[a],
166 				       ptrs[b]) >= 0)
167 			b = (b - 1) / 2;
168 		c = b;
169 		while (b != a) {
170 			b = (b - 1) / 2;
171 			swap(ptrs[b], ptrs[c]);
172 		}
173 	}
174 }
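
/*
 * A sketch of the index math above: with 0-based indexing, the children of
 * node b are 2*b+1 and 2*b+2, and its parent is (b-1)/2. The first inner
 * loop descends from a along the path of the larger child down to a leaf,
 * the second climbs back up to where ptrs[a] belongs, and the final loop
 * shifts the element into place - the bottom-up heapsort variant from
 * lib/sort.c, which does roughly one comparison per level instead of two.
 */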
175 
176 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
177 {
178 	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
179 	bool used_mempool = false;
180 	size_t bytes = b->whiteout_u64s * sizeof(u64);
181 
182 	if (!b->whiteout_u64s)
183 		return;
184 
185 	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
186 
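	/*
	 * The pointer array we sort lives at the tail of the bounce buffer;
	 * since every whiteout is at least one u64 (i.e. at least
	 * pointer-sized), copying the sorted whiteouts out from the front of
	 * the same buffer below never overwrites a pointer that hasn't been
	 * consumed yet:
	 */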
187 	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
188 
189 	for (k = unwritten_whiteouts_start(b);
190 	     k != unwritten_whiteouts_end(b);
191 	     k = bkey_p_next(k))
192 		*--ptrs = k;
193 
194 	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
195 
196 	k = new_whiteouts;
197 
198 	while (ptrs != ptrs_end) {
199 		bkey_p_copy(k, *ptrs);
200 		k = bkey_p_next(k);
201 		ptrs++;
202 	}
203 
204 	verify_no_dups(b, new_whiteouts,
205 		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
206 
207 	memcpy_u64s(unwritten_whiteouts_start(b),
208 		    new_whiteouts, b->whiteout_u64s);
209 
210 	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
211 }
212 
213 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
214 				bool compacting, enum compact_mode mode)
215 {
216 	if (!bset_dead_u64s(b, t))
217 		return false;
218 
219 	switch (mode) {
220 	case COMPACT_LAZY:
221 		return should_compact_bset_lazy(b, t) ||
222 			(compacting && !bset_written(b, bset(b, t)));
223 	case COMPACT_ALL:
224 		return true;
225 	default:
226 		BUG();
227 	}
228 }
229 
230 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
231 {
232 	bool ret = false;
233 
234 	for_each_bset(b, t) {
235 		struct bset *i = bset(b, t);
236 		struct bkey_packed *k, *n, *out, *start, *end;
237 		struct btree_node_entry *src = NULL, *dst = NULL;
238 
239 		if (t != b->set && !bset_written(b, i)) {
240 			src = container_of(i, struct btree_node_entry, keys);
241 			dst = max(write_block(b),
242 				  (void *) btree_bkey_last(b, t - 1));
243 		}
244 
245 		if (src != dst)
246 			ret = true;
247 
248 		if (!should_compact_bset(b, t, ret, mode)) {
249 			if (src != dst) {
250 				memmove(dst, src, sizeof(*src) +
251 					le16_to_cpu(src->keys.u64s) *
252 					sizeof(u64));
253 				i = &dst->keys;
254 				set_btree_bset(b, t, i);
255 			}
256 			continue;
257 		}
258 
259 		start	= btree_bkey_first(b, t);
260 		end	= btree_bkey_last(b, t);
261 
262 		if (src != dst) {
263 			memmove(dst, src, sizeof(*src));
264 			i = &dst->keys;
265 			set_btree_bset(b, t, i);
266 		}
267 
268 		out = i->start;
269 
270 		for (k = start; k != end; k = n) {
271 			n = bkey_p_next(k);
272 
273 			if (!bkey_deleted(k)) {
274 				bkey_p_copy(out, k);
275 				out = bkey_p_next(out);
276 			} else {
277 				BUG_ON(k->needs_whiteout);
278 			}
279 		}
280 
281 		i->u64s = cpu_to_le16((u64 *) out - i->_data);
282 		set_btree_bset_end(b, t);
283 		bch2_bset_set_no_aux_tree(b, t);
284 		ret = true;
285 	}
286 
287 	bch2_verify_btree_nr_keys(b);
288 
289 	bch2_btree_build_aux_trees(b);
290 
291 	return ret;
292 }
293 
294 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
295 			    enum compact_mode mode)
296 {
297 	return bch2_drop_whiteouts(b, mode);
298 }
299 
300 static void btree_node_sort(struct bch_fs *c, struct btree *b,
301 			    unsigned start_idx,
302 			    unsigned end_idx)
303 {
304 	struct btree_node *out;
305 	struct sort_iter_stack sort_iter;
306 	struct bset_tree *t;
307 	struct bset *start_bset = bset(b, &b->set[start_idx]);
308 	bool used_mempool = false;
309 	u64 start_time, seq = 0;
310 	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
311 	bool sorting_entire_node = start_idx == 0 &&
312 		end_idx == b->nsets;
313 
314 	sort_iter_stack_init(&sort_iter, b);
315 
316 	for (t = b->set + start_idx;
317 	     t < b->set + end_idx;
318 	     t++) {
319 		u64s += le16_to_cpu(bset(b, t)->u64s);
320 		sort_iter_add(&sort_iter.iter,
321 			      btree_bkey_first(b, t),
322 			      btree_bkey_last(b, t));
323 	}
324 
325 	bytes = sorting_entire_node
326 		? btree_buf_bytes(b)
327 		: __vstruct_bytes(struct btree_node, u64s);
328 
329 	out = btree_bounce_alloc(c, bytes, &used_mempool);
330 
331 	start_time = local_clock();
332 
333 	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);
334 
335 	out->keys.u64s = cpu_to_le16(u64s);
336 
337 	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
338 
339 	if (sorting_entire_node)
340 		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
341 				       start_time);
342 
343 	/* Make sure we preserve bset journal_seq: */
344 	for (t = b->set + start_idx; t < b->set + end_idx; t++)
345 		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
346 	start_bset->journal_seq = cpu_to_le64(seq);
347 
348 	if (sorting_entire_node) {
349 		u64s = le16_to_cpu(out->keys.u64s);
350 
351 		BUG_ON(bytes != btree_buf_bytes(b));
352 
353 		/*
354 		 * Our temporary buffer is the same size as the btree node's
355 		 * buffer, we can just swap buffers instead of doing a big
356 		 * memcpy()
357 		 */
358 		*out = *b->data;
359 		out->keys.u64s = cpu_to_le16(u64s);
360 		swap(out, b->data);
361 		set_btree_bset(b, b->set, &b->data->keys);
362 	} else {
363 		start_bset->u64s = out->keys.u64s;
364 		memcpy_u64s(start_bset->start,
365 			    out->keys.start,
366 			    le16_to_cpu(out->keys.u64s));
367 	}
368 
369 	for (i = start_idx + 1; i < end_idx; i++)
370 		b->nr.bset_u64s[start_idx] +=
371 			b->nr.bset_u64s[i];
372 
373 	b->nsets -= shift;
374 
375 	for (i = start_idx + 1; i < b->nsets; i++) {
376 		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
377 		b->set[i]		= b->set[i + shift];
378 	}
379 
380 	for (i = b->nsets; i < MAX_BSETS; i++)
381 		b->nr.bset_u64s[i] = 0;
382 
383 	set_btree_bset_end(b, &b->set[start_idx]);
384 	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
385 
386 	btree_bounce_free(c, bytes, used_mempool, out);
387 
388 	bch2_verify_btree_nr_keys(b);
389 }
390 
391 void bch2_btree_sort_into(struct bch_fs *c,
392 			 struct btree *dst,
393 			 struct btree *src)
394 {
395 	struct btree_nr_keys nr;
396 	struct btree_node_iter src_iter;
397 	u64 start_time = local_clock();
398 
399 	BUG_ON(dst->nsets != 1);
400 
401 	bch2_bset_set_no_aux_tree(dst, dst->set);
402 
403 	bch2_btree_node_iter_init_from_start(&src_iter, src);
404 
405 	nr = bch2_sort_repack(btree_bset_first(dst),
406 			src, &src_iter,
407 			&dst->format,
408 			true);
409 
410 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
411 			       start_time);
412 
413 	set_btree_bset_end(dst, dst->set);
414 
415 	dst->nr.live_u64s	+= nr.live_u64s;
416 	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
417 	dst->nr.packed_keys	+= nr.packed_keys;
418 	dst->nr.unpacked_keys	+= nr.unpacked_keys;
419 
420 	bch2_verify_btree_nr_keys(dst);
421 }
422 
423 /*
424  * We're about to add another bset to the btree node, so if there are currently
425  * too many bsets - sort some of them together:
426  */
427 static bool btree_node_compact(struct bch_fs *c, struct btree *b)
428 {
429 	unsigned unwritten_idx;
430 	bool ret = false;
431 
432 	for (unwritten_idx = 0;
433 	     unwritten_idx < b->nsets;
434 	     unwritten_idx++)
435 		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
436 			break;
437 
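	/*
	 * Compact the unwritten bsets and the already-written bsets
	 * separately: a sort can't span the written/unwritten boundary,
	 * since everything before b->written already exists on disk.
	 */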
438 	if (b->nsets - unwritten_idx > 1) {
439 		btree_node_sort(c, b, unwritten_idx, b->nsets);
440 		ret = true;
441 	}
442 
443 	if (unwritten_idx > 1) {
444 		btree_node_sort(c, b, 0, unwritten_idx);
445 		ret = true;
446 	}
447 
448 	return ret;
449 }
450 
451 void bch2_btree_build_aux_trees(struct btree *b)
452 {
453 	for_each_bset(b, t)
454 		bch2_bset_build_aux_tree(b, t,
455 				!bset_written(b, bset(b, t)) &&
456 				t == bset_tree_last(b));
457 }
458 
459 /*
460  * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
461  *
462  * The first bset is going to be of similar order to the size of the node, the
463  * last bset is bounded by btree_write_set_buffer(), which is set to keep the
464  * memmove on insert from being too expensive: the middle bset should, ideally,
465  * be the geometric mean of the first and the last.
466  *
467  * Returns true if the middle bset is greater than that geometric mean:
468  */
469 static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
470 {
471 	unsigned mid_u64s_bits =
472 		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
473 
474 	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
475 }
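
/*
 * Worked example, with made-up numbers: if btree_max_u64s(c) were 2^15 and
 * BTREE_WRITE_SET_U64s_BITS were 9, then mid_u64s_bits = (15 + 9) / 2 = 12,
 * so we'd compact once the middle bset exceeded 2^12 u64s - the geometric
 * mean, since sqrt(2^15 * 2^9) = 2^12.
 */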
476 
477 /*
478  * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
479  * inserted into
480  *
481  * Safe to call if there already is an unwritten bset - will only add a new bset
482  * if @b doesn't already have one.
483  *
484  * If we sorted (i.e. invalidated iterators), bch2_trans_node_reinit_iter() is called.
485  */
486 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
487 {
488 	struct bch_fs *c = trans->c;
489 	struct btree_node_entry *bne;
490 	bool reinit_iter = false;
491 
492 	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
493 	BUG_ON(bset_written(b, bset(b, &b->set[1])));
494 	BUG_ON(btree_node_just_written(b));
495 
496 	if (b->nsets == MAX_BSETS &&
497 	    !btree_node_write_in_flight(b) &&
498 	    should_compact_all(c, b)) {
499 		bch2_btree_node_write(c, b, SIX_LOCK_write,
500 				      BTREE_WRITE_init_next_bset);
501 		reinit_iter = true;
502 	}
503 
504 	if (b->nsets == MAX_BSETS &&
505 	    btree_node_compact(c, b))
506 		reinit_iter = true;
507 
508 	BUG_ON(b->nsets >= MAX_BSETS);
509 
510 	bne = want_new_bset(c, b);
511 	if (bne)
512 		bch2_bset_init_next(b, bne);
513 
514 	bch2_btree_build_aux_trees(b);
515 
516 	if (reinit_iter)
517 		bch2_trans_node_reinit_iter(trans, b);
518 }
519 
520 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
521 			  struct bch_dev *ca,
522 			  struct btree *b, struct bset *i, struct bkey_packed *k,
523 			  unsigned offset, int write)
524 {
525 	prt_printf(out, bch2_log_msg(c, "%s"),
526 		   write == READ
527 		   ? "error validating btree node "
528 		   : "corrupt btree node before write ");
529 	if (ca)
530 		prt_printf(out, "on %s ", ca->name);
531 	prt_printf(out, "at btree ");
532 	bch2_btree_pos_to_text(out, c, b);
533 
534 	printbuf_indent_add(out, 2);
535 
536 	prt_printf(out, "\nnode offset %u/%u",
537 		   b->written, btree_ptr_sectors_written(&b->key));
538 	if (i)
539 		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
540 	if (k)
541 		prt_printf(out, " bset byte offset %lu",
542 			   (unsigned long)(void *)k -
543 			   ((unsigned long)(void *)i & ~511UL));
544 	prt_str(out, ": ");
545 }
546 
547 __printf(10, 11)
548 static int __btree_err(int ret,
549 		       struct bch_fs *c,
550 		       struct bch_dev *ca,
551 		       struct btree *b,
552 		       struct bset *i,
553 		       struct bkey_packed *k,
554 		       int write,
555 		       bool have_retry,
556 		       enum bch_sb_error_id err_type,
557 		       const char *fmt, ...)
558 {
559 	struct printbuf out = PRINTBUF;
560 	bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
561 	va_list args;
562 
563 	btree_err_msg(&out, c, ca, b, i, k, b->written, write);
564 
565 	va_start(args, fmt);
566 	prt_vprintf(&out, fmt, args);
567 	va_end(args);
568 
569 	if (write == WRITE) {
570 		bch2_print_string_as_lines(KERN_ERR, out.buf);
571 		ret = c->opts.errors == BCH_ON_ERROR_continue
572 			? 0
573 			: -BCH_ERR_fsck_errors_not_fixed;
574 		goto out;
575 	}
576 
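	/*
	 * With no other replica to retry from, downgrade: a retryable error
	 * becomes either fixable (we repair what we can) or a topology error:
	 */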
577 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
578 		ret = -BCH_ERR_btree_node_read_err_fixable;
579 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
580 		ret = -BCH_ERR_btree_node_read_err_bad_node;
581 
582 	if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
583 		bch2_sb_error_count(c, err_type);
584 
585 	switch (ret) {
586 	case -BCH_ERR_btree_node_read_err_fixable:
587 		ret = !silent
588 			? bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf)
589 			: -BCH_ERR_fsck_fix;
590 		if (ret != -BCH_ERR_fsck_fix &&
591 		    ret != -BCH_ERR_fsck_ignore)
592 			goto fsck_err;
593 		ret = -BCH_ERR_fsck_fix;
594 		break;
595 	case -BCH_ERR_btree_node_read_err_want_retry:
596 	case -BCH_ERR_btree_node_read_err_must_retry:
597 		if (!silent)
598 			bch2_print_string_as_lines(KERN_ERR, out.buf);
599 		break;
600 	case -BCH_ERR_btree_node_read_err_bad_node:
601 		if (!silent)
602 			bch2_print_string_as_lines(KERN_ERR, out.buf);
603 		ret = bch2_topology_error(c);
604 		break;
605 	case -BCH_ERR_btree_node_read_err_incompatible:
606 		if (!silent)
607 			bch2_print_string_as_lines(KERN_ERR, out.buf);
608 		ret = -BCH_ERR_fsck_errors_not_fixed;
609 		break;
610 	default:
611 		BUG();
612 	}
613 out:
614 fsck_err:
615 	printbuf_exit(&out);
616 	return ret;
617 }
618 
619 #define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)		\
620 ({									\
621 	int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry,	\
622 			       BCH_FSCK_ERR_##_err_type,		\
623 			       msg, ##__VA_ARGS__);			\
624 									\
625 	if (_ret != -BCH_ERR_fsck_fix) {				\
626 		ret = _ret;						\
627 		goto fsck_err;						\
628 	}								\
629 									\
630 	*saw_error = true;						\
631 })
632 
633 #define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
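
/*
 * Note that btree_err()/btree_err_on() pick up 'write', 'have_retry', 'ret',
 * 'saw_error' and an fsck_err: label from the calling scope - a sketch of the
 * pattern, as used by validate_bset() below:
 *
 *	int ret = 0;
 *	...
 *	btree_err_on(offset && !i->u64s,
 *		     -BCH_ERR_btree_node_read_err_fixable,
 *		     c, ca, b, i, NULL,
 *		     bset_empty,
 *		     "empty bset");
 *	...
 * fsck_err:
 *	return ret;
 */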
634 
635 /*
636  * When btree topology repair changes the start or end of a node, that might
637  * mean we have to drop keys that are no longer inside the node:
638  */
639 __cold
640 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
641 {
642 	for_each_bset(b, t) {
643 		struct bset *i = bset(b, t);
644 		struct bkey_packed *k;
645 
646 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
647 			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
648 				break;
649 
650 		if (k != i->start) {
651 			unsigned shift = (u64 *) k - (u64 *) i->start;
652 
653 			memmove_u64s_down(i->start, k,
654 					  (u64 *) vstruct_end(i) - (u64 *) k);
655 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
656 			set_btree_bset_end(b, t);
657 		}
658 
659 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
660 			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
661 				break;
662 
663 		if (k != vstruct_last(i)) {
664 			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
665 			set_btree_bset_end(b, t);
666 		}
667 	}
668 
669 	/*
670 	 * Always rebuild search trees: eytzinger search tree nodes directly
671 	 * depend on the values of min/max key:
672 	 */
673 	bch2_bset_set_no_aux_tree(b, b->set);
674 	bch2_btree_build_aux_trees(b);
675 	b->nr = bch2_btree_node_count_keys(b);
676 
677 	struct bkey_s_c k;
678 	struct bkey unpacked;
679 	struct btree_node_iter iter;
680 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
681 		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
682 		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
683 	}
684 }
685 
686 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
687 			 struct btree *b, struct bset *i,
688 			 unsigned offset, unsigned sectors,
689 			 int write, bool have_retry, bool *saw_error)
690 {
691 	unsigned version = le16_to_cpu(i->version);
692 	struct printbuf buf1 = PRINTBUF;
693 	struct printbuf buf2 = PRINTBUF;
694 	int ret = 0;
695 
696 	btree_err_on(!bch2_version_compatible(version),
697 		     -BCH_ERR_btree_node_read_err_incompatible,
698 		     c, ca, b, i, NULL,
699 		     btree_node_unsupported_version,
700 		     "unsupported bset version %u.%u",
701 		     BCH_VERSION_MAJOR(version),
702 		     BCH_VERSION_MINOR(version));
703 
704 	if (btree_err_on(version < c->sb.version_min,
705 			 -BCH_ERR_btree_node_read_err_fixable,
706 			 c, NULL, b, i, NULL,
707 			 btree_node_bset_older_than_sb_min,
708 			 "bset version %u older than superblock version_min %u",
709 			 version, c->sb.version_min)) {
710 		mutex_lock(&c->sb_lock);
711 		c->disk_sb.sb->version_min = cpu_to_le16(version);
712 		bch2_write_super(c);
713 		mutex_unlock(&c->sb_lock);
714 	}
715 
716 	if (btree_err_on(BCH_VERSION_MAJOR(version) >
717 			 BCH_VERSION_MAJOR(c->sb.version),
718 			 -BCH_ERR_btree_node_read_err_fixable,
719 			 c, NULL, b, i, NULL,
720 			 btree_node_bset_newer_than_sb,
721 			 "bset version %u newer than superblock version %u",
722 			 version, c->sb.version)) {
723 		mutex_lock(&c->sb_lock);
724 		c->disk_sb.sb->version = cpu_to_le16(version);
725 		bch2_write_super(c);
726 		mutex_unlock(&c->sb_lock);
727 	}
728 
729 	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
730 		     -BCH_ERR_btree_node_read_err_incompatible,
731 		     c, ca, b, i, NULL,
732 		     btree_node_unsupported_version,
733 		     "BSET_SEPARATE_WHITEOUTS no longer supported");
734 
735 	if (btree_err_on(offset + sectors > btree_sectors(c),
736 			 -BCH_ERR_btree_node_read_err_fixable,
737 			 c, ca, b, i, NULL,
738 			 bset_past_end_of_btree_node,
739 			 "bset past end of btree node")) {
740 		i->u64s = 0;
741 		ret = 0;
742 		goto out;
743 	}
744 
745 	btree_err_on(offset && !i->u64s,
746 		     -BCH_ERR_btree_node_read_err_fixable,
747 		     c, ca, b, i, NULL,
748 		     bset_empty,
749 		     "empty bset");
750 
751 	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
752 		     -BCH_ERR_btree_node_read_err_want_retry,
753 		     c, ca, b, i, NULL,
754 		     bset_wrong_sector_offset,
755 		     "bset at wrong sector offset");
756 
757 	if (!offset) {
758 		struct btree_node *bn =
759 			container_of(i, struct btree_node, keys);
760 		/* These indicate that we read the wrong btree node: */
761 
762 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
763 			struct bch_btree_ptr_v2 *bp =
764 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
765 
766 			/* XXX endianness */
767 			btree_err_on(bp->seq != bn->keys.seq,
768 				     -BCH_ERR_btree_node_read_err_must_retry,
769 				     c, ca, b, NULL, NULL,
770 				     bset_bad_seq,
771 				     "incorrect sequence number (wrong btree node)");
772 		}
773 
774 		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
775 			     -BCH_ERR_btree_node_read_err_must_retry,
776 			     c, ca, b, i, NULL,
777 			     btree_node_bad_btree,
778 			     "incorrect btree id");
779 
780 		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
781 			     -BCH_ERR_btree_node_read_err_must_retry,
782 			     c, ca, b, i, NULL,
783 			     btree_node_bad_level,
784 			     "incorrect level");
785 
786 		if (!write)
787 			compat_btree_node(b->c.level, b->c.btree_id, version,
788 					  BSET_BIG_ENDIAN(i), write, bn);
789 
790 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
791 			struct bch_btree_ptr_v2 *bp =
792 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
793 
794 			if (BTREE_PTR_RANGE_UPDATED(bp)) {
795 				b->data->min_key = bp->min_key;
796 				b->data->max_key = b->key.k.p;
797 			}
798 
799 			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
800 				     -BCH_ERR_btree_node_read_err_must_retry,
801 				     c, ca, b, NULL, NULL,
802 				     btree_node_bad_min_key,
803 				     "incorrect min_key: got %s should be %s",
804 				     (printbuf_reset(&buf1),
805 				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
806 				     (printbuf_reset(&buf2),
807 				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
808 		}
809 
810 		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
811 			     -BCH_ERR_btree_node_read_err_must_retry,
812 			     c, ca, b, i, NULL,
813 			     btree_node_bad_max_key,
814 			     "incorrect max key %s",
815 			     (printbuf_reset(&buf1),
816 			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
817 
818 		if (write)
819 			compat_btree_node(b->c.level, b->c.btree_id, version,
820 					  BSET_BIG_ENDIAN(i), write, bn);
821 
822 		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
823 			     -BCH_ERR_btree_node_read_err_bad_node,
824 			     c, ca, b, i, NULL,
825 			     btree_node_bad_format,
826 			     "invalid bkey format: %s\n  %s", buf1.buf,
827 			     (printbuf_reset(&buf2),
828 			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
829 		printbuf_reset(&buf1);
830 
831 		compat_bformat(b->c.level, b->c.btree_id, version,
832 			       BSET_BIG_ENDIAN(i), write,
833 			       &bn->format);
834 	}
835 out:
836 fsck_err:
837 	printbuf_exit(&buf2);
838 	printbuf_exit(&buf1);
839 	return ret;
840 }
841 
842 static int bset_key_invalid(struct bch_fs *c, struct btree *b,
843 			    struct bkey_s_c k,
844 			    bool updated_range, int rw,
845 			    struct printbuf *err)
846 {
847 	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
848 		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
849 		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
850 }
851 
852 static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
853 			 struct bset *i, struct bkey_packed *k)
854 {
855 	if (bkey_p_next(k) > vstruct_last(i))
856 		return false;
857 
858 	if (k->format > KEY_FORMAT_CURRENT)
859 		return false;
860 
861 	if (!bkeyp_u64s_valid(&b->format, k))
862 		return false;
863 
864 	struct printbuf buf = PRINTBUF;
865 	struct bkey tmp;
866 	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
867 	bool ret = !__bch2_bkey_invalid(c, u.s_c, btree_node_type(b), READ, &buf);
868 	printbuf_exit(&buf);
869 	return ret;
870 }
871 
872 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
873 			 struct bset *i, int write,
874 			 bool have_retry, bool *saw_error)
875 {
876 	unsigned version = le16_to_cpu(i->version);
877 	struct bkey_packed *k, *prev = NULL;
878 	struct printbuf buf = PRINTBUF;
879 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
880 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
881 	int ret = 0;
882 
883 	for (k = i->start;
884 	     k != vstruct_last(i);) {
885 		struct bkey_s u;
886 		struct bkey tmp;
887 		unsigned next_good_key;
888 
889 		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
890 				 -BCH_ERR_btree_node_read_err_fixable,
891 				 c, NULL, b, i, k,
892 				 btree_node_bkey_past_bset_end,
893 				 "key extends past end of bset")) {
894 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
895 			break;
896 		}
897 
898 		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
899 				 -BCH_ERR_btree_node_read_err_fixable,
900 				 c, NULL, b, i, k,
901 				 btree_node_bkey_bad_format,
902 				 "invalid bkey format %u", k->format))
903 			goto drop_this_key;
904 
905 		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
906 				 -BCH_ERR_btree_node_read_err_fixable,
907 				 c, NULL, b, i, k,
908 				 btree_node_bkey_bad_u64s,
909 				 "bad k->u64s %u (min %u max %zu)", k->u64s,
910 				 bkeyp_key_u64s(&b->format, k),
911 				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
912 			goto drop_this_key;
913 
914 		if (!write)
915 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
916 				    BSET_BIG_ENDIAN(i), write,
917 				    &b->format, k);
918 
919 		u = __bkey_disassemble(b, k, &tmp);
920 
921 		printbuf_reset(&buf);
922 		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
923 			printbuf_reset(&buf);
924 			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
925 			prt_printf(&buf, "\n  ");
926 			bch2_bkey_val_to_text(&buf, c, u.s_c);
927 
928 			btree_err(-BCH_ERR_btree_node_read_err_fixable,
929 				  c, NULL, b, i, k,
930 				  btree_node_bad_bkey,
931 				  "invalid bkey: %s", buf.buf);
932 			goto drop_this_key;
933 		}
934 
935 		if (write)
936 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
937 				    BSET_BIG_ENDIAN(i), write,
938 				    &b->format, k);
939 
940 		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
941 			struct bkey up = bkey_unpack_key(b, prev);
942 
943 			printbuf_reset(&buf);
944 			prt_printf(&buf, "keys out of order: ");
945 			bch2_bkey_to_text(&buf, &up);
946 			prt_printf(&buf, " > ");
947 			bch2_bkey_to_text(&buf, u.k);
948 
949 			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
950 				      c, NULL, b, i, k,
951 				      btree_node_bkey_out_of_order,
952 				      "%s", buf.buf))
953 				goto drop_this_key;
954 		}
955 
956 		prev = k;
957 		k = bkey_p_next(k);
958 		continue;
959 drop_this_key:
960 		next_good_key = k->u64s;
961 
962 		if (!next_good_key ||
963 		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
964 		     version >= bcachefs_metadata_version_snapshot)) {
965 			/*
966 			 * only do scanning if bch2_bkey_compat() has nothing to
967 			 * do
968 			 */
969 
970 			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
971 				for (next_good_key = 1;
972 				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
973 				     next_good_key++)
974 					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
975 						goto got_good_key;
976 			}
977 
978 			/*
979 			 * didn't find a good key, have to truncate the rest of
980 			 * the bset
981 			 */
982 			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
983 		}
984 got_good_key:
985 		le16_add_cpu(&i->u64s, -next_good_key);
986 		memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k);
987 	}
988 fsck_err:
989 	printbuf_exit(&buf);
990 	return ret;
991 }
992 
993 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
994 			      struct btree *b, bool have_retry, bool *saw_error)
995 {
996 	struct btree_node_entry *bne;
997 	struct sort_iter *iter;
998 	struct btree_node *sorted;
999 	struct bkey_packed *k;
1000 	struct bset *i;
1001 	bool used_mempool, blacklisted;
1002 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
1003 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
1004 	unsigned u64s;
1005 	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
1006 	struct printbuf buf = PRINTBUF;
1007 	int ret = 0, retry_read = 0, write = READ;
1008 	u64 start_time = local_clock();
1009 
1010 	b->version_ondisk = U16_MAX;
1011 	/* We might get called multiple times on read retry: */
1012 	b->written = 0;
1013 
1014 	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
1015 	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
1016 
1017 	if (bch2_meta_read_fault("btree"))
1018 		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
1019 			  c, ca, b, NULL, NULL,
1020 			  btree_node_fault_injected,
1021 			  "dynamic fault");
1022 
1023 	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
1024 		     -BCH_ERR_btree_node_read_err_must_retry,
1025 		     c, ca, b, NULL, NULL,
1026 		     btree_node_bad_magic,
1027 		     "bad magic: want %llx, got %llx",
1028 		     bset_magic(c), le64_to_cpu(b->data->magic));
1029 
1030 	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
1031 		struct bch_btree_ptr_v2 *bp =
1032 			&bkey_i_to_btree_ptr_v2(&b->key)->v;
1037 
1038 		btree_err_on(b->data->keys.seq != bp->seq,
1039 			     -BCH_ERR_btree_node_read_err_must_retry,
1040 			     c, ca, b, NULL, NULL,
1041 			     btree_node_bad_seq,
1042 			     "got wrong btree node: got\n%s",
1043 			     (printbuf_reset(&buf),
1044 			      bch2_btree_node_header_to_text(&buf, b->data),
1045 			      buf.buf));
1046 	} else {
1047 		btree_err_on(!b->data->keys.seq,
1048 			     -BCH_ERR_btree_node_read_err_must_retry,
1049 			     c, ca, b, NULL, NULL,
1050 			     btree_node_bad_seq,
1051 			     "bad btree header: seq 0\n%s",
1052 			     (printbuf_reset(&buf),
1053 			      bch2_btree_node_header_to_text(&buf, b->data),
1054 			      buf.buf));
1055 	}
1056 
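	/*
	 * btree_ptr_v2 keys record exactly how many sectors were written
	 * (ptr_written); with older pointer types we scan the whole node,
	 * stopping at the first bset whose seq doesn't match the header:
	 */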
1057 	while (b->written < (ptr_written ?: btree_sectors(c))) {
1058 		unsigned sectors;
1059 		struct nonce nonce;
1060 		bool first = !b->written;
1061 		bool csum_bad;
1062 
1063 		if (!b->written) {
1064 			i = &b->data->keys;
1065 
1066 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
1067 				     -BCH_ERR_btree_node_read_err_want_retry,
1068 				     c, ca, b, i, NULL,
1069 				     bset_unknown_csum,
1070 				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
1071 
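			/*
			 * The nonce includes this bset's byte offset within
			 * the node; b->written is in 512-byte sectors:
			 */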
1072 			nonce = btree_nonce(i, b->written << 9);
1073 
1074 			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
1075 			csum_bad = bch2_crc_cmp(b->data->csum, csum);
1076 			if (csum_bad)
1077 				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1078 
1079 			btree_err_on(csum_bad,
1080 				     -BCH_ERR_btree_node_read_err_want_retry,
1081 				     c, ca, b, i, NULL,
1082 				     bset_bad_csum,
1083 				     "%s",
1084 				     (printbuf_reset(&buf),
1085 				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
1086 				      buf.buf));
1087 
1088 			ret = bset_encrypt(c, i, b->written << 9);
1089 			if (bch2_fs_fatal_err_on(ret, c,
1090 					"decrypting btree node: %s", bch2_err_str(ret)))
1091 				goto fsck_err;
1092 
1093 			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
1094 				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
1095 				     -BCH_ERR_btree_node_read_err_incompatible,
1096 				     c, NULL, b, NULL, NULL,
1097 				     btree_node_unsupported_version,
1098 				     "btree node does not have NEW_EXTENT_OVERWRITE set");
1099 
1100 			sectors = vstruct_sectors(b->data, c->block_bits);
1101 		} else {
1102 			bne = write_block(b);
1103 			i = &bne->keys;
1104 
1105 			if (i->seq != b->data->keys.seq)
1106 				break;
1107 
1108 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
1109 				     -BCH_ERR_btree_node_read_err_want_retry,
1110 				     c, ca, b, i, NULL,
1111 				     bset_unknown_csum,
1112 				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
1113 
1114 			nonce = btree_nonce(i, b->written << 9);
1115 			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1116 			csum_bad = bch2_crc_cmp(bne->csum, csum);
1117 			if (ca && csum_bad)
1118 				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1119 
1120 			btree_err_on(csum_bad,
1121 				     -BCH_ERR_btree_node_read_err_want_retry,
1122 				     c, ca, b, i, NULL,
1123 				     bset_bad_csum,
1124 				     "%s",
1125 				     (printbuf_reset(&buf),
1126 				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
1127 				      buf.buf));
1128 
1129 			ret = bset_encrypt(c, i, b->written << 9);
1130 			if (bch2_fs_fatal_err_on(ret, c,
1131 					"decrypting btree node: %s", bch2_err_str(ret)))
1132 				goto fsck_err;
1133 
1134 			sectors = vstruct_sectors(bne, c->block_bits);
1135 		}
1136 
1137 		b->version_ondisk = min(b->version_ondisk,
1138 					le16_to_cpu(i->version));
1139 
1140 		ret = validate_bset(c, ca, b, i, b->written, sectors,
1141 				    READ, have_retry, saw_error);
1142 		if (ret)
1143 			goto fsck_err;
1144 
1145 		if (!b->written)
1146 			btree_node_set_format(b, b->data->format);
1147 
1148 		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
1149 		if (ret)
1150 			goto fsck_err;
1151 
1152 		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
1153 
1154 		blacklisted = bch2_journal_seq_is_blacklisted(c,
1155 					le64_to_cpu(i->journal_seq),
1156 					true);
1157 
1158 		btree_err_on(blacklisted && first,
1159 			     -BCH_ERR_btree_node_read_err_fixable,
1160 			     c, ca, b, i, NULL,
1161 			     bset_blacklisted_journal_seq,
1162 			     "first btree node bset has blacklisted journal seq (%llu)",
1163 			     le64_to_cpu(i->journal_seq));
1164 
1165 		btree_err_on(blacklisted && ptr_written,
1166 			     -BCH_ERR_btree_node_read_err_fixable,
1167 			     c, ca, b, i, NULL,
1168 			     first_bset_blacklisted_journal_seq,
1169 			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
1170 			     le64_to_cpu(i->journal_seq),
1171 			     b->written, b->written + sectors, ptr_written);
1172 
1173 		b->written += sectors;
1174 
1175 		if (blacklisted && !first)
1176 			continue;
1177 
1178 		sort_iter_add(iter,
1179 			      vstruct_idx(i, 0),
1180 			      vstruct_last(i));
1181 	}
1182 
1183 	if (ptr_written) {
1184 		btree_err_on(b->written < ptr_written,
1185 			     -BCH_ERR_btree_node_read_err_want_retry,
1186 			     c, ca, b, NULL, NULL,
1187 			     btree_node_data_missing,
1188 			     "btree node data missing: expected %u sectors, found %u",
1189 			     ptr_written, b->written);
1190 	} else {
1191 		for (bne = write_block(b);
1192 		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
1193 		     bne = (void *) bne + block_bytes(c))
1194 			btree_err_on(bne->keys.seq == b->data->keys.seq &&
1195 				     !bch2_journal_seq_is_blacklisted(c,
1196 								      le64_to_cpu(bne->keys.journal_seq),
1197 								      true),
1198 				     -BCH_ERR_btree_node_read_err_want_retry,
1199 				     c, ca, b, NULL, NULL,
1200 				     btree_node_bset_after_end,
1201 				     "found bset signature after last bset");
1202 	}
1203 
1204 	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
1205 	sorted->keys.u64s = 0;
1206 
1207 	set_btree_bset(b, b->set, &b->data->keys);
1208 
1209 	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
1210 
1211 	u64s = le16_to_cpu(sorted->keys.u64s);
1212 	*sorted = *b->data;
1213 	sorted->keys.u64s = cpu_to_le16(u64s);
1214 	swap(sorted, b->data);
1215 	set_btree_bset(b, b->set, &b->data->keys);
1216 	b->nsets = 1;
1217 
1218 	BUG_ON(b->nr.live_u64s != u64s);
1219 
1220 	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
1221 
1222 	if (updated_range)
1223 		bch2_btree_node_drop_keys_outside_node(b);
1224 
1225 	i = &b->data->keys;
1226 	for (k = i->start; k != vstruct_last(i);) {
1227 		struct bkey tmp;
1228 		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
1229 
1230 		printbuf_reset(&buf);
1231 
1232 		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
1233 		    (bch2_inject_invalid_keys &&
1234 		     !bversion_cmp(u.k->version, MAX_VERSION))) {
1235 			printbuf_reset(&buf);
1236 
1237 			prt_printf(&buf, "invalid bkey: ");
1238 			bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
1239 			prt_printf(&buf, "\n  ");
1240 			bch2_bkey_val_to_text(&buf, c, u.s_c);
1241 
1242 			btree_err(-BCH_ERR_btree_node_read_err_fixable,
1243 				  c, NULL, b, i, k,
1244 				  btree_node_bad_bkey,
1245 				  "%s", buf.buf);
1246 
1247 			btree_keys_account_key_drop(&b->nr, 0, k);
1248 
1249 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1250 			memmove_u64s_down(k, bkey_p_next(k),
1251 					  (u64 *) vstruct_end(i) - (u64 *) k);
1252 			set_btree_bset_end(b, b->set);
1253 			continue;
1254 		}
1255 
1256 		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
1257 			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
1258 
1259 			bp.v->mem_ptr = 0;
1260 		}
1261 
1262 		k = bkey_p_next(k);
1263 	}
1264 
1265 	bch2_bset_build_aux_tree(b, b->set, false);
1266 
1267 	set_needs_whiteout(btree_bset_first(b), true);
1268 
1269 	btree_node_reset_sib_u64s(b);
1270 
1271 	rcu_read_lock();
1272 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
1273 		struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
1274 
1275 		if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
1276 			set_btree_node_need_rewrite(b);
1277 	}
1278 	rcu_read_unlock();
1279 
1280 	if (!ptr_written)
1281 		set_btree_node_need_rewrite(b);
1282 out:
1283 	mempool_free(iter, &c->fill_iter);
1284 	printbuf_exit(&buf);
1285 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
1286 	return retry_read;
1287 fsck_err:
1288 	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
1289 	    ret == -BCH_ERR_btree_node_read_err_must_retry) {
1290 		retry_read = 1;
1291 	} else {
1292 		set_btree_node_read_error(b);
1293 		bch2_btree_lost_data(c, b->c.btree_id);
1294 	}
1295 	goto out;
1296 }
1297 
1298 static void btree_node_read_work(struct work_struct *work)
1299 {
1300 	struct btree_read_bio *rb =
1301 		container_of(work, struct btree_read_bio, work);
1302 	struct bch_fs *c	= rb->c;
1303 	struct bch_dev *ca	= rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
1304 	struct btree *b		= rb->b;
1305 	struct bio *bio		= &rb->bio;
1306 	struct bch_io_failures failed = { .nr = 0 };
1307 	struct printbuf buf = PRINTBUF;
1308 	bool saw_error = false;
1309 	bool retry = false;
1310 	bool can_retry;
1311 
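	/* the first read was submitted by our caller; start by checking its result: */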
1312 	goto start;
1313 	while (1) {
1314 		retry = true;
1315 		bch_info(c, "retrying read");
1316 		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
1317 		rb->have_ioref		= ca != NULL;
1318 		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1319 		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
1320 		bio->bi_iter.bi_size	= btree_buf_bytes(b);
1321 
1322 		if (rb->have_ioref) {
1323 			bio_set_dev(bio, ca->disk_sb.bdev);
1324 			submit_bio_wait(bio);
1325 		} else {
1326 			bio->bi_status = BLK_STS_REMOVED;
1327 		}
1328 start:
1329 		printbuf_reset(&buf);
1330 		bch2_btree_pos_to_text(&buf, c, b);
1331 		bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read,
1332 				   "btree read error %s for %s",
1333 				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
1334 		if (rb->have_ioref)
1335 			percpu_ref_put(&ca->io_ref);
1336 		rb->have_ioref = false;
1337 
1338 		bch2_mark_io_failure(&failed, &rb->pick);
1339 
1340 		can_retry = bch2_bkey_pick_read_device(c,
1341 				bkey_i_to_s_c(&b->key),
1342 				&failed, &rb->pick) > 0;
1343 
1344 		if (!bio->bi_status &&
1345 		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
1346 			if (retry)
1347 				bch_info(c, "retry success");
1348 			break;
1349 		}
1350 
1351 		saw_error = true;
1352 
1353 		if (!can_retry) {
1354 			set_btree_node_read_error(b);
1355 			bch2_btree_lost_data(c, b->c.btree_id);
1356 			break;
1357 		}
1358 	}
1359 
1360 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1361 			       rb->start_time);
1362 	bio_put(&rb->bio);
1363 
1364 	if (saw_error &&
1365 	    !btree_node_read_error(b) &&
1366 	    c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
1367 		printbuf_reset(&buf);
1368 		bch2_bpos_to_text(&buf, b->key.k.p);
1369 		bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
1370 			 __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
1371 
1372 		bch2_btree_node_rewrite_async(c, b);
1373 	}
1374 
1375 	printbuf_exit(&buf);
1376 	clear_btree_node_read_in_flight(b);
1377 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1378 }
1379 
1380 static void btree_node_read_endio(struct bio *bio)
1381 {
1382 	struct btree_read_bio *rb =
1383 		container_of(bio, struct btree_read_bio, bio);
1384 	struct bch_fs *c	= rb->c;
1385 
1386 	if (rb->have_ioref) {
1387 		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
1388 
1389 		bch2_latency_acct(ca, rb->start_time, READ);
1390 	}
1391 
1392 	queue_work(c->btree_read_complete_wq, &rb->work);
1393 }
1394 
1395 struct btree_node_read_all {
1396 	struct closure		cl;
1397 	struct bch_fs		*c;
1398 	struct btree		*b;
1399 	unsigned		nr;
1400 	void			*buf[BCH_REPLICAS_MAX];
1401 	struct bio		*bio[BCH_REPLICAS_MAX];
1402 	blk_status_t		err[BCH_REPLICAS_MAX];
1403 };
1404 
1405 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1406 {
1407 	struct btree_node *bn = data;
1408 	struct btree_node_entry *bne;
1409 	unsigned offset = 0;
1410 
1411 	if (le64_to_cpu(bn->magic) != bset_magic(c))
1412 		return 0;
1413 
1414 	while (offset < btree_sectors(c)) {
1415 		if (!offset) {
1416 			offset += vstruct_sectors(bn, c->block_bits);
1417 		} else {
1418 			bne = data + (offset << 9);
1419 			if (bne->keys.seq != bn->keys.seq)
1420 				break;
1421 			offset += vstruct_sectors(bne, c->block_bits);
1422 		}
1423 	}
1424 
1425 	return offset;
1426 }
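
/*
 * On-disk layout sketch: a node is a struct btree_node (header plus first
 * bset) followed by zero or more struct btree_node_entry (checksum plus
 * bset), each padded out to the block size:
 *
 *	sector 0:	struct btree_node	{ ..., struct bset keys }
 *	sector X:	struct btree_node_entry	{ csum, struct bset keys }
 *	...
 *
 * Every entry belonging to the node shares keys.seq with the header, so a
 * mismatched seq marks the end of the written portion.
 */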
1427 
1428 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1429 {
1430 	struct btree_node *bn = data;
1431 	struct btree_node_entry *bne;
1432 
1433 	if (!offset)
1434 		return false;
1435 
1436 	while (offset < btree_sectors(c)) {
1437 		bne = data + (offset << 9);
1438 		if (bne->keys.seq == bn->keys.seq)
1439 			return true;
1440 		offset++;
1441 	}
1442 
1443 	return false;
1445 }
1446 
1447 static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
1448 {
1449 	closure_type(ra, struct btree_node_read_all, cl);
1450 	struct bch_fs *c = ra->c;
1451 	struct btree *b = ra->b;
1452 	struct printbuf buf = PRINTBUF;
1453 	bool dump_bset_maps = false;
1454 	bool have_retry = false;
1455 	int ret = 0, best = -1, write = READ;
1456 	unsigned i, written = 0, written2 = 0;
1457 	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1458 		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1459 	bool _saw_error = false, *saw_error = &_saw_error;
1460 
1461 	for (i = 0; i < ra->nr; i++) {
1462 		struct btree_node *bn = ra->buf[i];
1463 
1464 		if (ra->err[i])
1465 			continue;
1466 
1467 		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1468 		    (seq && seq != bn->keys.seq))
1469 			continue;
1470 
1471 		if (best < 0) {
1472 			best = i;
1473 			written = btree_node_sectors_written(c, bn);
1474 			continue;
1475 		}
1476 
1477 		written2 = btree_node_sectors_written(c, ra->buf[i]);
1478 		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
1479 				 c, NULL, b, NULL, NULL,
1480 				 btree_node_replicas_sectors_written_mismatch,
1481 				 "btree node sectors written mismatch: %u != %u",
1482 				 written, written2) ||
1483 		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1484 				 -BCH_ERR_btree_node_read_err_fixable,
1485 				 c, NULL, b, NULL, NULL,
1486 				 btree_node_bset_after_end,
1487 				 "found bset signature after last bset") ||
1488 		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1489 				 -BCH_ERR_btree_node_read_err_fixable,
1490 				 c, NULL, b, NULL, NULL,
1491 				 btree_node_replicas_data_mismatch,
1492 				 "btree node replicas content mismatch"))
1493 			dump_bset_maps = true;
1494 
1495 		if (written2 > written) {
1496 			written = written2;
1497 			best = i;
1498 		}
1499 	}
1500 fsck_err:
1501 	if (dump_bset_maps) {
1502 		for (i = 0; i < ra->nr; i++) {
1503 			struct btree_node *bn = ra->buf[i];
1504 			struct btree_node_entry *bne = NULL;
1505 			unsigned offset = 0, sectors;
1506 			bool gap = false;
1507 
1508 			if (ra->err[i])
1509 				continue;
1510 
1511 			printbuf_reset(&buf);
1512 
1513 			while (offset < btree_sectors(c)) {
1514 				if (!offset) {
1515 					sectors = vstruct_sectors(bn, c->block_bits);
1516 				} else {
1517 					bne = ra->buf[i] + (offset << 9);
1518 					if (bne->keys.seq != bn->keys.seq)
1519 						break;
1520 					sectors = vstruct_sectors(bne, c->block_bits);
1521 				}
1522 
1523 				prt_printf(&buf, " %u-%u", offset, offset + sectors);
1524 				if (bne && bch2_journal_seq_is_blacklisted(c,
1525 							le64_to_cpu(bne->keys.journal_seq), false))
1526 					prt_printf(&buf, "*");
1527 				offset += sectors;
1528 			}
1529 
1530 			while (offset < btree_sectors(c)) {
1531 				bne = ra->buf[i] + (offset << 9);
1532 				if (bne->keys.seq == bn->keys.seq) {
1533 					if (!gap)
1534 						prt_printf(&buf, " GAP");
1535 					gap = true;
1536 
1537 					sectors = vstruct_sectors(bne, c->block_bits);
1538 					prt_printf(&buf, " %u-%u", offset, offset + sectors);
1539 					if (bch2_journal_seq_is_blacklisted(c,
1540 							le64_to_cpu(bne->keys.journal_seq), false))
1541 						prt_printf(&buf, "*");
1542 				}
1543 				offset++;
1544 			}
1545 
1546 			bch_err(c, "replica %u:%s", i, buf.buf);
1547 		}
1548 	}
1549 
1550 	if (best >= 0) {
1551 		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
1552 		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
1553 	} else {
1554 		ret = -1;
1555 	}
1556 
1557 	if (ret) {
1558 		set_btree_node_read_error(b);
1559 		bch2_btree_lost_data(c, b->c.btree_id);
1560 	} else if (*saw_error)
1561 		bch2_btree_node_rewrite_async(c, b);
1562 
1563 	for (i = 0; i < ra->nr; i++) {
1564 		mempool_free(ra->buf[i], &c->btree_bounce_pool);
1565 		bio_put(ra->bio[i]);
1566 	}
1567 
1568 	closure_debug_destroy(&ra->cl);
1569 	kfree(ra);
1570 	printbuf_exit(&buf);
1571 
1572 	clear_btree_node_read_in_flight(b);
1573 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1574 }
1575 
1576 static void btree_node_read_all_replicas_endio(struct bio *bio)
1577 {
1578 	struct btree_read_bio *rb =
1579 		container_of(bio, struct btree_read_bio, bio);
1580 	struct bch_fs *c	= rb->c;
1581 	struct btree_node_read_all *ra = rb->ra;
1582 
1583 	if (rb->have_ioref) {
1584 		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
1585 
1586 		bch2_latency_acct(ca, rb->start_time, READ);
1587 	}
1588 
1589 	ra->err[rb->idx] = bio->bi_status;
1590 	closure_put(&ra->cl);
1591 }
1592 
1593 /*
1594  * XXX This allocates multiple times from the same mempools, and can deadlock
1595  * under sufficient memory pressure (but is only a debug path)
1596  */
1597 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1598 {
1599 	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1600 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1601 	const union bch_extent_entry *entry;
1602 	struct extent_ptr_decoded pick;
1603 	struct btree_node_read_all *ra;
1604 	unsigned i;
1605 
1606 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
1607 	if (!ra)
1608 		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1609 
1610 	closure_init(&ra->cl, NULL);
1611 	ra->c	= c;
1612 	ra->b	= b;
1613 	ra->nr	= bch2_bkey_nr_ptrs(k);
1614 
1615 	for (i = 0; i < ra->nr; i++) {
1616 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1617 		ra->bio[i] = bio_alloc_bioset(NULL,
1618 					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
1619 					      REQ_OP_READ|REQ_SYNC|REQ_META,
1620 					      GFP_NOFS,
1621 					      &c->btree_bio);
1622 	}
1623 
1624 	i = 0;
1625 	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1626 		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1627 		struct btree_read_bio *rb =
1628 			container_of(ra->bio[i], struct btree_read_bio, bio);
1629 		rb->c			= c;
1630 		rb->b			= b;
1631 		rb->ra			= ra;
1632 		rb->start_time		= local_clock();
1633 		rb->have_ioref		= ca != NULL;
1634 		rb->idx			= i;
1635 		rb->pick		= pick;
1636 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1637 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
1638 		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
1639 
1640 		if (rb->have_ioref) {
1641 			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1642 				     bio_sectors(&rb->bio));
1643 			bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1644 
1645 			closure_get(&ra->cl);
1646 			submit_bio(&rb->bio);
1647 		} else {
1648 			ra->err[i] = BLK_STS_REMOVED;
1649 		}
1650 
1651 		i++;
1652 	}
1653 
1654 	if (sync) {
1655 		closure_sync(&ra->cl);
1656 		btree_node_read_all_replicas_done(&ra->cl.work);
1657 	} else {
1658 		continue_at(&ra->cl, btree_node_read_all_replicas_done,
1659 			    c->btree_read_complete_wq);
1660 	}
1661 
1662 	return 0;
1663 }
1664 
1665 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
1666 			  bool sync)
1667 {
1668 	struct bch_fs *c = trans->c;
1669 	struct extent_ptr_decoded pick;
1670 	struct btree_read_bio *rb;
1671 	struct bch_dev *ca;
1672 	struct bio *bio;
1673 	int ret;
1674 
1675 	trace_and_count(c, btree_node_read, trans, b);
1676 
1677 	if (bch2_verify_all_btree_replicas &&
1678 	    !btree_node_read_all_replicas(c, b, sync))
1679 		return;
1680 
1681 	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1682 					 NULL, &pick);
1683 
1684 	if (ret <= 0) {
1685 		struct printbuf buf = PRINTBUF;
1686 
1687 		prt_str(&buf, "btree node read error: no device to read from\n at ");
1688 		bch2_btree_pos_to_text(&buf, c, b);
1689 		bch_err_ratelimited(c, "%s", buf.buf);
1690 
1691 		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1692 		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
1693 			bch2_fatal_error(c);
1694 
1695 		set_btree_node_read_error(b);
1696 		bch2_btree_lost_data(c, b->c.btree_id);
1697 		clear_btree_node_read_in_flight(b);
1698 		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1699 		printbuf_exit(&buf);
1700 		return;
1701 	}
1702 
1703 	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1704 
1705 	bio = bio_alloc_bioset(NULL,
1706 			       buf_pages(b->data, btree_buf_bytes(b)),
1707 			       REQ_OP_READ|REQ_SYNC|REQ_META,
1708 			       GFP_NOFS,
1709 			       &c->btree_bio);
1710 	rb = container_of(bio, struct btree_read_bio, bio);
1711 	rb->c			= c;
1712 	rb->b			= b;
1713 	rb->ra			= NULL;
1714 	rb->start_time		= local_clock();
1715 	rb->have_ioref		= ca != NULL;
1716 	rb->pick		= pick;
1717 	INIT_WORK(&rb->work, btree_node_read_work);
1718 	bio->bi_iter.bi_sector	= pick.ptr.offset;
1719 	bio->bi_end_io		= btree_node_read_endio;
1720 	bch2_bio_map(bio, b->data, btree_buf_bytes(b));
1721 
1722 	if (rb->have_ioref) {
1723 		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1724 			     bio_sectors(bio));
1725 		bio_set_dev(bio, ca->disk_sb.bdev);
1726 
1727 		if (sync) {
1728 			submit_bio_wait(bio);
1729 			bch2_latency_acct(ca, rb->start_time, READ);
1730 			btree_node_read_work(&rb->work);
1731 		} else {
1732 			submit_bio(bio);
1733 		}
1734 	} else {
1735 		bio->bi_status = BLK_STS_REMOVED;
1736 
1737 		if (sync)
1738 			btree_node_read_work(&rb->work);
1739 		else
1740 			queue_work(c->btree_read_complete_wq, &rb->work);
1741 	}
1742 }
1743 
1744 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1745 				  const struct bkey_i *k, unsigned level)
1746 {
1747 	struct bch_fs *c = trans->c;
1748 	struct closure cl;
1749 	struct btree *b;
1750 	int ret;
1751 
1752 	closure_init_stack(&cl);
1753 
1754 	do {
1755 		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1756 		closure_sync(&cl);
1757 	} while (ret);
1758 
1759 	b = bch2_btree_node_mem_alloc(trans, level != 0);
1760 	bch2_btree_cache_cannibalize_unlock(trans);
1761 
1762 	BUG_ON(IS_ERR(b));
1763 
1764 	bkey_copy(&b->key, k);
1765 	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1766 
1767 	set_btree_node_read_in_flight(b);
1768 
1769 	bch2_btree_node_read(trans, b, true);
1770 
1771 	if (btree_node_read_error(b)) {
1772 		bch2_btree_node_hash_remove(&c->btree_cache, b);
1773 
1774 		mutex_lock(&c->btree_cache.lock);
1775 		list_move(&b->list, &c->btree_cache.freeable);
1776 		mutex_unlock(&c->btree_cache.lock);
1777 
1778 		ret = -BCH_ERR_btree_node_read_error;
1779 		goto err;
1780 	}
1781 
1782 	bch2_btree_set_root_for_read(c, b);
1783 err:
1784 	six_unlock_write(&b->c.lock);
1785 	six_unlock_intent(&b->c.lock);
1786 
1787 	return ret;
1788 }
1789 
1790 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1791 			const struct bkey_i *k, unsigned level)
1792 {
1793 	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
1794 }
1795 
1796 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1797 				      struct btree_write *w)
1798 {
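	/*
	 * b->will_make_reachable is a tagged pointer: bit 0 set means we still
	 * hold a ref on the btree_update that will insert this node's key, so
	 * clear the tag and drop that ref exactly once:
	 */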
1799 	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1800 
1801 	do {
1802 		old = new = v;
1803 		if (!(old & 1))
1804 			break;
1805 
1806 		new &= ~1UL;
1807 	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1808 
1809 	if (old & 1)
1810 		closure_put(&((struct btree_update *) new)->cl);
1811 
1812 	bch2_journal_pin_drop(&c->journal, &w->journal);
1813 }
1814 
1815 static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
1816 {
1817 	struct btree_write *w = btree_prev_write(b);
1818 	unsigned long old, new, v;
1819 	unsigned type = 0;
1820 
1821 	bch2_btree_complete_write(c, b, w);
1822 
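	/*
	 * If the node was redirtied and needs another write (and nothing
	 * blocks it), atomically claim the next write - flipping write_idx -
	 * and restart it below; otherwise just clear the in-flight bits:
	 */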
1823 	v = READ_ONCE(b->flags);
1824 	do {
1825 		old = new = v;
1826 
1827 		if ((old & (1U << BTREE_NODE_dirty)) &&
1828 		    (old & (1U << BTREE_NODE_need_write)) &&
1829 		    !(old & (1U << BTREE_NODE_never_write)) &&
1830 		    !(old & (1U << BTREE_NODE_write_blocked)) &&
1831 		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
1832 			new &= ~(1U << BTREE_NODE_dirty);
1833 			new &= ~(1U << BTREE_NODE_need_write);
1834 			new |=  (1U << BTREE_NODE_write_in_flight);
1835 			new |=  (1U << BTREE_NODE_write_in_flight_inner);
1836 			new |=  (1U << BTREE_NODE_just_written);
1837 			new ^=  (1U << BTREE_NODE_write_idx);
1838 
1839 			type = new & BTREE_WRITE_TYPE_MASK;
1840 			new &= ~BTREE_WRITE_TYPE_MASK;
1841 		} else {
1842 			new &= ~(1U << BTREE_NODE_write_in_flight);
1843 			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
1844 		}
1845 	} while ((v = cmpxchg(&b->flags, old, new)) != old);
1846 
1847 	if (new & (1U << BTREE_NODE_write_in_flight))
1848 		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
1849 	else
1850 		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
1851 }
1852 
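/*
 * Completion runs from a workqueue with no locks held: take a read lock on
 * the node (via a throwaway transaction, as the locking API requires one)
 * before finishing off the write:
 */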
1853 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1854 {
1855 	struct btree_trans *trans = bch2_trans_get(c);
1856 
1857 	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
1858 	__btree_node_write_done(c, b);
1859 	six_unlock_read(&b->c.lock);
1860 
1861 	bch2_trans_put(trans);
1862 }
1863 
1864 static void btree_node_write_work(struct work_struct *work)
1865 {
1866 	struct btree_write_bio *wbio =
1867 		container_of(work, struct btree_write_bio, work);
1868 	struct bch_fs *c	= wbio->wbio.c;
1869 	struct btree *b		= wbio->wbio.bio.bi_private;
1870 	int ret = 0;
1871 
1872 	btree_bounce_free(c,
1873 		wbio->data_bytes,
1874 		wbio->wbio.used_mempool,
1875 		wbio->data);
1876 
1877 	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
1878 		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1879 
1880 	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
1881 		ret = -BCH_ERR_btree_node_write_all_failed;
1882 		goto err;
1883 	}
1884 
1885 	if (wbio->wbio.first_btree_write) {
1886 		if (wbio->wbio.failed.nr) {
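			/*
			 * Nothing happens here yet: on a node's first write
			 * there's no existing key in the btree to update, so
			 * this empty branch looks like a placeholder for
			 * handling replicas that failed the initial write.
			 */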
1887 
1888 		}
1889 	} else {
1890 		ret = bch2_trans_do(c, NULL, NULL, 0,
1891 			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
1892 					BCH_WATERMARK_interior_updates|
1893 					BCH_TRANS_COMMIT_journal_reclaim|
1894 					BCH_TRANS_COMMIT_no_enospc|
1895 					BCH_TRANS_COMMIT_no_check_rw,
1896 					!wbio->wbio.failed.nr));
1897 		if (ret)
1898 			goto err;
1899 	}
1900 out:
1901 	bio_put(&wbio->wbio.bio);
1902 	btree_node_write_done(c, b);
1903 	return;
1904 err:
1905 	set_btree_node_noevict(b);
1906 	bch2_fs_fatal_err_on(!bch2_err_matches(ret, EROFS), c,
1907 			     "writing btree node: %s", bch2_err_str(ret));
1908 	goto out;
1909 }
1910 
1911 static void btree_node_write_endio(struct bio *bio)
1912 {
1913 	struct bch_write_bio *wbio	= to_wbio(bio);
1914 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
1915 	struct bch_write_bio *orig	= parent ?: wbio;
1916 	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
1917 	struct bch_fs *c		= wbio->c;
1918 	struct btree *b			= wbio->bio.bi_private;
1919 	struct bch_dev *ca		= wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
1920 	unsigned long flags;
1921 
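	/*
	 * The write may have been split into one bio per device; per-device
	 * errors are recorded against the original bio's failed list, and the
	 * completion work item is only queued once the original bio completes:
	 */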
1922 	if (wbio->have_ioref)
1923 		bch2_latency_acct(ca, wbio->submit_time, WRITE);
1924 
1925 	if (!ca ||
1926 	    bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
1927 			       "btree write error: %s",
1928 			       bch2_blk_status_to_str(bio->bi_status)) ||
1929 	    bch2_meta_write_fault("btree")) {
1930 		spin_lock_irqsave(&c->btree_write_error_lock, flags);
1931 		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1932 		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1933 	}
1934 
1935 	if (wbio->have_ioref)
1936 		percpu_ref_put(&ca->io_ref);
1937 
1938 	if (parent) {
1939 		bio_put(bio);
1940 		bio_endio(&parent->bio);
1941 		return;
1942 	}
1943 
1944 	clear_btree_node_write_in_flight_inner(b);
1945 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
1946 	INIT_WORK(&wb->work, btree_node_write_work);
1947 	queue_work(c->btree_io_complete_wq, &wb->work);
1948 }
1949 
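/*
 * Validate what we're about to write: a failure here indicates in-memory
 * corruption, so we flag the filesystem inconsistent rather than write it
 * out:
 */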
1950 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1951 				   struct bset *i, unsigned sectors)
1952 {
1953 	struct printbuf buf = PRINTBUF;
1954 	bool saw_error;
1955 	int ret;
1956 
1957 	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
1958 				BKEY_TYPE_btree, WRITE, &buf);
1959 
1960 	if (ret)
1961 		bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
1962 	printbuf_exit(&buf);
1963 	if (ret)
1964 		return ret;
1965 
1966 	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
1967 		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
1968 	if (ret) {
1969 		bch2_inconsistent_error(c);
1970 		dump_stack();
1971 	}
1972 
1973 	return ret;
1974 }
1975 
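/*
 * Runs from a workqueue: btree nodes are written incrementally, so each
 * pointer gets offset by how many sectors of this node were already written:
 */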
1976 static void btree_write_submit(struct work_struct *work)
1977 {
1978 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
1979 	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1980 
1981 	bkey_copy(&tmp.k, &wbio->key);
1982 
1983 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
1984 		ptr->offset += wbio->sector_offset;
1985 
1986 	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
1987 				  &tmp.k, false);
1988 }
1989 
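/*
 * Write out a btree node: claim the write by atomically clearing the dirty
 * bit, sort the unwritten bsets into a bounce buffer, checksum (and possibly
 * encrypt), then hand submission off to a workqueue:
 */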
1990 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
1991 {
1992 	struct btree_write_bio *wbio;
1993 	struct bset *i;
1994 	struct btree_node *bn = NULL;
1995 	struct btree_node_entry *bne = NULL;
1996 	struct sort_iter_stack sort_iter;
1997 	struct nonce nonce;
1998 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
1999 	u64 seq = 0;
2000 	bool used_mempool;
2001 	unsigned long old, new;
2002 	bool validate_before_checksum = false;
2003 	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
2004 	void *data;
2005 	int ret;
2006 
2007 	if (flags & BTREE_WRITE_ALREADY_STARTED)
2008 		goto do_write;
2009 
2010 	/*
2011 	 * We may only have a read lock on the btree node - the dirty bit is our
2012 	 * "lock" against racing with other threads that may be trying to start
2013 	 * a write: we do the write iff we're the one to clear the dirty bit.
2014 	 * Since setting the dirty bit requires a write lock, we can't race with
2015 	 * other threads redirtying it:
2016 	 */
2017 	do {
2018 		old = new = READ_ONCE(b->flags);
2019 
2020 		if (!(old & (1 << BTREE_NODE_dirty)))
2021 			return;
2022 
2023 		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
2024 		    !(old & (1 << BTREE_NODE_need_write)))
2025 			return;
2026 
2027 		if (old &
2028 		    ((1 << BTREE_NODE_never_write)|
2029 		     (1 << BTREE_NODE_write_blocked)))
2030 			return;
2031 
2032 		if (b->written &&
2033 		    (old & (1 << BTREE_NODE_will_make_reachable)))
2034 			return;
2035 
2036 		if (old & (1 << BTREE_NODE_write_in_flight))
2037 			return;
2038 
2039 		if (flags & BTREE_WRITE_ONLY_IF_NEED)
2040 			type = new & BTREE_WRITE_TYPE_MASK;
2041 		new &= ~BTREE_WRITE_TYPE_MASK;
2042 
2043 		new &= ~(1 << BTREE_NODE_dirty);
2044 		new &= ~(1 << BTREE_NODE_need_write);
2045 		new |=  (1 << BTREE_NODE_write_in_flight);
2046 		new |=  (1 << BTREE_NODE_write_in_flight_inner);
2047 		new |=  (1 << BTREE_NODE_just_written);
2048 		new ^=  (1 << BTREE_NODE_write_idx);
2049 	} while (cmpxchg_acquire(&b->flags, old, new) != old);
2050 
2051 	if (new & (1U << BTREE_NODE_need_write))
2052 		return;
2053 do_write:
2054 	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
2055 
2056 	atomic_dec(&c->btree_cache.dirty);
2057 
2058 	BUG_ON(btree_node_fake(b));
2059 	BUG_ON((b->will_make_reachable != 0) != !b->written);
2060 
2061 	BUG_ON(b->written >= btree_sectors(c));
2062 	BUG_ON(b->written & (block_sectors(c) - 1));
2063 	BUG_ON(bset_written(b, btree_bset_last(b)));
2064 	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
2065 	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
2066 
2067 	bch2_sort_whiteouts(c, b);
2068 
2069 	sort_iter_stack_init(&sort_iter, b);
2070 
2071 	bytes = !b->written
2072 		? sizeof(struct btree_node)
2073 		: sizeof(struct btree_node_entry);
2074 
2075 	bytes += b->whiteout_u64s * sizeof(u64);
2076 
2077 	for_each_bset(b, t) {
2078 		i = bset(b, t);
2079 
2080 		if (bset_written(b, i))
2081 			continue;
2082 
2083 		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
2084 		sort_iter_add(&sort_iter.iter,
2085 			      btree_bkey_first(b, t),
2086 			      btree_bkey_last(b, t));
2087 		seq = max(seq, le64_to_cpu(i->journal_seq));
2088 	}
2089 
2090 	BUG_ON(b->written && !seq);
2091 
2092 	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
2093 	bytes += 8;
2094 
2095 	/* buffer must be a multiple of the block size */
2096 	bytes = round_up(bytes, block_bytes(c));
2097 
2098 	data = btree_bounce_alloc(c, bytes, &used_mempool);
2099 
2100 	if (!b->written) {
2101 		bn = data;
2102 		*bn = *b->data;
2103 		i = &bn->keys;
2104 	} else {
2105 		bne = data;
2106 		bne->keys = b->data->keys;
2107 		i = &bne->keys;
2108 	}
2109 
2110 	i->journal_seq	= cpu_to_le64(seq);
2111 	i->u64s		= 0;
2112 
2113 	sort_iter_add(&sort_iter.iter,
2114 		      unwritten_whiteouts_start(b),
2115 		      unwritten_whiteouts_end(b));
2116 	SET_BSET_SEPARATE_WHITEOUTS(i, false);
2117 
2118 	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
2119 	le16_add_cpu(&i->u64s, u64s);
2120 
2121 	b->whiteout_u64s = 0;
2122 
2123 	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2124 
2125 	set_needs_whiteout(i, false);
2126 
2127 	/* do we have data to write? */
2128 	if (b->written && !i->u64s)
2129 		goto nowrite;
2130 
2131 	bytes_to_write = vstruct_end(i) - data;
2132 	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2133 
2134 	if (!b->written &&
2135 	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
2136 		BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
2137 
2138 	memset(data + bytes_to_write, 0,
2139 	       (sectors_to_write << 9) - bytes_to_write);
2140 
2141 	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2142 	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2143 	BUG_ON(i->seq != b->data->keys.seq);
2144 
2145 	i->version = cpu_to_le16(c->sb.version);
2146 	SET_BSET_OFFSET(i, b->written);
2147 	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2148 
2149 	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2150 		validate_before_checksum = true;
2151 
2152 	/* validate_bset() may modify bsets with an older version, so validate before checksumming: */
2153 	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2154 		validate_before_checksum = true;
2155 
2156 	/* if we're going to be encrypting, check metadata validity first: */
2157 	if (validate_before_checksum &&
2158 	    validate_bset_for_write(c, b, i, sectors_to_write))
2159 		goto err;
2160 
2161 	ret = bset_encrypt(c, i, b->written << 9);
2162 	if (bch2_fs_fatal_err_on(ret, c,
2163 			"encrypting btree node: %s", bch2_err_str(ret)))
2164 		goto err;
2165 
2166 	nonce = btree_nonce(i, b->written << 9);
2167 
2168 	if (bn)
2169 		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2170 	else
2171 		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2172 
2173 	/* if we're not encrypting, check metadata after checksumming: */
2174 	if (!validate_before_checksum &&
2175 	    validate_bset_for_write(c, b, i, sectors_to_write))
2176 		goto err;
2177 
2178 	/*
2179 	 * We handle btree write errors by immediately halting the journal -
2180 	 * after we've done that, we can't issue any subsequent btree writes
2181 	 * because they might have pointers to new nodes that failed to write.
2182 	 *
2183 	 * Furthermore, there's no point in doing any more btree writes because
2184 	 * with the journal stopped, we're never going to update the journal to
2185 	 * reflect that those writes were done and the data flushed from the
2186 	 * journal:
2187 	 *
2188 	 * Also on journal error, the pending write may have updates that were
2189 	 * never journalled (interior nodes, see btree_update_nodes_written()) -
2190 	 * it's critical that we don't do the write in that case; otherwise we
2191 	 * will have updates visible that weren't in the journal:
2192 	 *
2193 	 * Make sure to update b->written so bch2_btree_init_next() doesn't
2194 	 * break:
2195 	 */
2196 	if (bch2_journal_error(&c->journal) ||
2197 	    c->opts.nochanges)
2198 		goto err;
2199 
2200 	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2201 
2202 	wbio = container_of(bio_alloc_bioset(NULL,
2203 				buf_pages(data, sectors_to_write << 9),
2204 				REQ_OP_WRITE|REQ_META,
2205 				GFP_NOFS,
2206 				&c->btree_bio),
2207 			    struct btree_write_bio, wbio.bio);
2208 	wbio_init(&wbio->wbio.bio);
2209 	wbio->data			= data;
2210 	wbio->data_bytes		= bytes;
2211 	wbio->sector_offset		= b->written;
2212 	wbio->wbio.c			= c;
2213 	wbio->wbio.used_mempool		= used_mempool;
2214 	wbio->wbio.first_btree_write	= !b->written;
2215 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
2216 	wbio->wbio.bio.bi_private	= b;
2217 
2218 	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2219 
2220 	bkey_copy(&wbio->key, &b->key);
2221 
2222 	b->written += sectors_to_write;
2223 
2224 	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2225 		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2226 			cpu_to_le16(b->written);
2227 
2228 	atomic64_inc(&c->btree_write_stats[type].nr);
2229 	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2230 
2231 	INIT_WORK(&wbio->work, btree_write_submit);
2232 	queue_work(c->btree_write_submit_wq, &wbio->work);
2233 	return;
2234 err:
2235 	set_btree_node_noevict(b);
2236 	b->written += sectors_to_write;
2237 nowrite:
2238 	btree_bounce_free(c, bytes, used_mempool, data);
2239 	__btree_node_write_done(c, b);
2240 }
2241 
2242 /*
2243  * Work that must be done with write lock held:
2244  */
2245 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2246 {
2247 	bool invalidated_iter = false;
2248 	struct btree_node_entry *bne;
2249 
2250 	if (!btree_node_just_written(b))
2251 		return false;
2252 
2253 	BUG_ON(b->whiteout_u64s);
2254 
2255 	clear_btree_node_just_written(b);
2256 
2257 	/*
2258 	 * Note: immediately after write, bset_written() doesn't work - the
2259 	 * amount of data we had to write after compaction might have been
2260 	 * smaller than the offset of the last bset.
2261 	 *
2262 	 * However, we know that all bsets have been written here, as long as
2263 	 * we're still holding the write lock:
2264 	 */
2265 
2266 	/*
2267 	 * XXX: decide if we really want to unconditionally sort down to a
2268 	 * single bset:
2269 	 */
2270 	if (b->nsets > 1) {
2271 		btree_node_sort(c, b, 0, b->nsets);
2272 		invalidated_iter = true;
2273 	} else {
2274 		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2275 	}
2276 
2277 	for_each_bset(b, t)
2278 		set_needs_whiteout(bset(b, t), true);
2279 
2280 	bch2_btree_verify(c, b);
2281 
2282 	/*
2283 	 * If later we don't unconditionally sort down to a single bset, we have
2284 	 * to ensure this is still true:
2285 	 */
2286 	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2287 
2288 	bne = want_new_bset(c, b);
2289 	if (bne)
2290 		bch2_bset_init_next(b, bne);
2291 
2292 	bch2_btree_build_aux_trees(b);
2293 
2294 	return invalidated_iter;
2295 }
2296 
2297 /*
2298  * Use this one if the caller holds a lock on the node (see lock_type_held):
2299  */
2300 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2301 			   enum six_lock_type lock_type_held,
2302 			   unsigned flags)
2303 {
2304 	if (lock_type_held == SIX_LOCK_intent ||
2305 	    (lock_type_held == SIX_LOCK_read &&
2306 	     six_lock_tryupgrade(&b->c.lock))) {
2307 		__bch2_btree_node_write(c, b, flags);
2308 
2309 		/* don't cycle lock unnecessarily: */
2310 		if (btree_node_just_written(b) &&
2311 		    six_trylock_write(&b->c.lock)) {
2312 			bch2_btree_post_write_cleanup(c, b);
2313 			six_unlock_write(&b->c.lock);
2314 		}
2315 
2316 		if (lock_type_held == SIX_LOCK_read)
2317 			six_lock_downgrade(&b->c.lock);
2318 	} else {
2319 		__bch2_btree_node_write(c, b, flags);
2320 		if (lock_type_held == SIX_LOCK_write &&
2321 		    btree_node_just_written(b))
2322 			bch2_btree_post_write_cleanup(c, b);
2323 	}
2324 }
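
/*
 * Example (a sketch, not a caller in this file): a caller holding an intent
 * lock that only wants to write the node if it's marked as needing it might
 * do:
 *
 *	bch2_btree_node_write(c, b, SIX_LOCK_intent, BTREE_WRITE_ONLY_IF_NEED);
 */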
2325 
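/*
 * Wait for all in-flight reads or writes (per @flag) on cached btree nodes:
 * we can't sleep inside rcu_read_lock(), so when we find a node with the bit
 * set we drop RCU, wait on the bit, and rescan from the start:
 */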
2326 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2327 {
2328 	struct bucket_table *tbl;
2329 	struct rhash_head *pos;
2330 	struct btree *b;
2331 	unsigned i;
2332 	bool ret = false;
2333 restart:
2334 	rcu_read_lock();
2335 	for_each_cached_btree(b, c, tbl, i, pos)
2336 		if (test_bit(flag, &b->flags)) {
2337 			rcu_read_unlock();
2338 			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2339 			ret = true;
2340 			goto restart;
2341 		}
2342 	rcu_read_unlock();
2343 
2344 	return ret;
2345 }
2346 
2347 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2348 {
2349 	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2350 }
2351 
2352 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2353 {
2354 	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2355 }
2356 
2357 static const char * const bch2_btree_write_types[] = {
2358 #define x(t, n) [n] = #t,
2359 	BCH_BTREE_WRITE_TYPES()
2360 	NULL
2361 };
2362 
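/*
 * Print, per write type, the number of writes and the mean write size:
 */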
2363 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2364 {
2365 	printbuf_tabstop_push(out, 20);
2366 	printbuf_tabstop_push(out, 10);
2367 
2368 	prt_printf(out, "\tnr\tavg size\n");
2369 
2370 	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2371 		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
2372 		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);
2373 
2374 		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
2375 		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2376 		prt_newline(out);
2377 	}
2378 }
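
/*
 * Example (a sketch, assuming a caller with a struct bch_fs *c, e.g. a sysfs
 * show method): dumping the write stats to the log might look like:
 *
 *	struct printbuf buf = PRINTBUF;
 *
 *	bch2_btree_write_stats_to_text(&buf, c);
 *	pr_info("%s", buf.buf);
 *	printbuf_exit(&buf);
 */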
2379