xref: /linux/fs/bcachefs/btree_io.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_sort.h"
6 #include "btree_cache.h"
7 #include "btree_io.h"
8 #include "btree_iter.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "debug.h"
15 #include "error.h"
16 #include "extents.h"
17 #include "io_write.h"
18 #include "journal_reclaim.h"
19 #include "journal_seq_blacklist.h"
20 #include "recovery.h"
21 #include "super-io.h"
22 #include "trace.h"
23 
24 #include <linux/sched/mm.h>
25 
26 static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
27 {
28 	prt_printf(out, "btree=%s l=%u seq %llx\n",
29 		   bch2_btree_id_str(BTREE_NODE_ID(bn)),
30 		   (unsigned) BTREE_NODE_LEVEL(bn), le64_to_cpu(bn->keys.seq));
31 	prt_str(out, "min: ");
32 	bch2_bpos_to_text(out, bn->min_key);
33 	prt_newline(out);
34 	prt_str(out, "max: ");
35 	bch2_bpos_to_text(out, bn->max_key);
36 }
37 
38 void bch2_btree_node_io_unlock(struct btree *b)
39 {
40 	EBUG_ON(!btree_node_write_in_flight(b));
41 
42 	clear_btree_node_write_in_flight_inner(b);
43 	clear_btree_node_write_in_flight(b);
44 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
45 }
46 
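/*
 * BTREE_NODE_write_in_flight doubles as a bit lock: taken here, released by
 * bch2_btree_node_io_unlock(). This can sleep, hence the assertion that no
 * btree node locks are held:
 */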
47 void bch2_btree_node_io_lock(struct btree *b)
48 {
49 	bch2_assert_btree_nodes_not_locked();
50 
51 	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
52 			    TASK_UNINTERRUPTIBLE);
53 }
54 
55 void __bch2_btree_node_wait_on_read(struct btree *b)
56 {
57 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
58 		       TASK_UNINTERRUPTIBLE);
59 }
60 
61 void __bch2_btree_node_wait_on_write(struct btree *b)
62 {
63 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
64 		       TASK_UNINTERRUPTIBLE);
65 }
66 
67 void bch2_btree_node_wait_on_read(struct btree *b)
68 {
69 	bch2_assert_btree_nodes_not_locked();
70 
71 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
72 		       TASK_UNINTERRUPTIBLE);
73 }
74 
75 void bch2_btree_node_wait_on_write(struct btree *b)
76 {
77 	bch2_assert_btree_nodes_not_locked();
78 
79 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
80 		       TASK_UNINTERRUPTIBLE);
81 }
82 
83 static void verify_no_dups(struct btree *b,
84 			   struct bkey_packed *start,
85 			   struct bkey_packed *end)
86 {
87 #ifdef CONFIG_BCACHEFS_DEBUG
88 	struct bkey_packed *k, *p;
89 
90 	if (start == end)
91 		return;
92 
93 	for (p = start, k = bkey_p_next(start);
94 	     k != end;
95 	     p = k, k = bkey_p_next(k)) {
96 		struct bkey l = bkey_unpack_key(b, p);
97 		struct bkey r = bkey_unpack_key(b, k);
98 
99 		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
100 	}
101 #endif
102 }
103 
104 static void set_needs_whiteout(struct bset *i, int v)
105 {
106 	struct bkey_packed *k;
107 
108 	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
109 		k->needs_whiteout = v;
110 }
111 
112 static void btree_bounce_free(struct bch_fs *c, size_t size,
113 			      bool used_mempool, void *p)
114 {
115 	if (used_mempool)
116 		mempool_free(p, &c->btree_bounce_pool);
117 	else
118 		kvfree(p);
119 }
120 
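/*
 * Bounce buffers for sorting/compacting: try a cheap GFP_NOWAIT allocation
 * first, falling back to the bounce mempool (which can block, but not fail)
 * under memory pressure. memalloc_nofs_save() prevents reclaim from recursing
 * back into the filesystem:
 */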
121 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
122 				bool *used_mempool)
123 {
124 	unsigned flags = memalloc_nofs_save();
125 	void *p;
126 
127 	BUG_ON(size > c->opts.btree_node_size);
128 
129 	*used_mempool = false;
130 	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
131 	if (!p) {
132 		*used_mempool = true;
133 		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
134 	}
135 	memalloc_nofs_restore(flags);
136 	return p;
137 }
138 
139 static void sort_bkey_ptrs(const struct btree *bt,
140 			   struct bkey_packed **ptrs, unsigned nr)
141 {
142 	unsigned n = nr, a = nr / 2, b, c, d;
143 
144 	if (!a)
145 		return;
146 
147 	/* Heap sort: see lib/sort.c: */
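	/*
	 * 0-based heap indexing: the children of element b live at 2*b+1 and
	 * 2*b+2, its parent at (b - 1) / 2; 'a' counts down through the
	 * heapify phase, then 'n' counts down through extraction:
	 */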
148 	while (1) {
149 		if (a)
150 			a--;
151 		else if (--n)
152 			swap(ptrs[0], ptrs[n]);
153 		else
154 			break;
155 
156 		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
157 			b = bch2_bkey_cmp_packed(bt,
158 					    ptrs[c],
159 					    ptrs[d]) >= 0 ? c : d;
160 		if (d == n)
161 			b = c;
162 
163 		while (b != a &&
164 		       bch2_bkey_cmp_packed(bt,
165 				       ptrs[a],
166 				       ptrs[b]) >= 0)
167 			b = (b - 1) / 2;
168 		c = b;
169 		while (b != a) {
170 			b = (b - 1) / 2;
171 			swap(ptrs[b], ptrs[c]);
172 		}
173 	}
174 }
175 
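/*
 * Sort the unwritten whiteouts: build an array of pointers to the keys (at the
 * end of a bounce buffer), sort the pointers, then copy the keys out in order
 * and memcpy them back over the originals:
 */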
176 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
177 {
178 	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
179 	bool used_mempool = false;
180 	size_t bytes = b->whiteout_u64s * sizeof(u64);
181 
182 	if (!b->whiteout_u64s)
183 		return;
184 
185 	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
186 
187 	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
188 
189 	for (k = unwritten_whiteouts_start(b);
190 	     k != unwritten_whiteouts_end(b);
191 	     k = bkey_p_next(k))
192 		*--ptrs = k;
193 
194 	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
195 
196 	k = new_whiteouts;
197 
198 	while (ptrs != ptrs_end) {
199 		bkey_p_copy(k, *ptrs);
200 		k = bkey_p_next(k);
201 		ptrs++;
202 	}
203 
204 	verify_no_dups(b, new_whiteouts,
205 		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
206 
207 	memcpy_u64s(unwritten_whiteouts_start(b),
208 		    new_whiteouts, b->whiteout_u64s);
209 
210 	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
211 }
212 
213 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
214 				bool compacting, enum compact_mode mode)
215 {
216 	if (!bset_dead_u64s(b, t))
217 		return false;
218 
219 	switch (mode) {
220 	case COMPACT_LAZY:
221 		return should_compact_bset_lazy(b, t) ||
222 			(compacting && !bset_written(b, bset(b, t)));
223 	case COMPACT_ALL:
224 		return true;
225 	default:
226 		BUG();
227 	}
228 }
229 
230 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
231 {
232 	bool ret = false;
233 
234 	for_each_bset(b, t) {
235 		struct bset *i = bset(b, t);
236 		struct bkey_packed *k, *n, *out, *start, *end;
237 		struct btree_node_entry *src = NULL, *dst = NULL;
238 
239 		if (t != b->set && !bset_written(b, i)) {
240 			src = container_of(i, struct btree_node_entry, keys);
241 			dst = max(write_block(b),
242 				  (void *) btree_bkey_last(b, t - 1));
243 		}
244 
245 		if (src != dst)
246 			ret = true;
247 
248 		if (!should_compact_bset(b, t, ret, mode)) {
249 			if (src != dst) {
250 				memmove(dst, src, sizeof(*src) +
251 					le16_to_cpu(src->keys.u64s) *
252 					sizeof(u64));
253 				i = &dst->keys;
254 				set_btree_bset(b, t, i);
255 			}
256 			continue;
257 		}
258 
259 		start	= btree_bkey_first(b, t);
260 		end	= btree_bkey_last(b, t);
261 
262 		if (src != dst) {
263 			memmove(dst, src, sizeof(*src));
264 			i = &dst->keys;
265 			set_btree_bset(b, t, i);
266 		}
267 
268 		out = i->start;
269 
270 		for (k = start; k != end; k = n) {
271 			n = bkey_p_next(k);
272 
273 			if (!bkey_deleted(k)) {
274 				bkey_p_copy(out, k);
275 				out = bkey_p_next(out);
276 			} else {
277 				BUG_ON(k->needs_whiteout);
278 			}
279 		}
280 
281 		i->u64s = cpu_to_le16((u64 *) out - i->_data);
282 		set_btree_bset_end(b, t);
283 		bch2_bset_set_no_aux_tree(b, t);
284 		ret = true;
285 	}
286 
287 	bch2_verify_btree_nr_keys(b);
288 
289 	bch2_btree_build_aux_trees(b);
290 
291 	return ret;
292 }
293 
294 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
295 			    enum compact_mode mode)
296 {
297 	return bch2_drop_whiteouts(b, mode);
298 }
299 
300 static void btree_node_sort(struct bch_fs *c, struct btree *b,
301 			    unsigned start_idx,
302 			    unsigned end_idx)
303 {
304 	struct btree_node *out;
305 	struct sort_iter_stack sort_iter;
306 	struct bset_tree *t;
307 	struct bset *start_bset = bset(b, &b->set[start_idx]);
308 	bool used_mempool = false;
309 	u64 start_time, seq = 0;
310 	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
311 	bool sorting_entire_node = start_idx == 0 &&
312 		end_idx == b->nsets;
313 
314 	sort_iter_stack_init(&sort_iter, b);
315 
316 	for (t = b->set + start_idx;
317 	     t < b->set + end_idx;
318 	     t++) {
319 		u64s += le16_to_cpu(bset(b, t)->u64s);
320 		sort_iter_add(&sort_iter.iter,
321 			      btree_bkey_first(b, t),
322 			      btree_bkey_last(b, t));
323 	}
324 
325 	bytes = sorting_entire_node
326 		? btree_buf_bytes(b)
327 		: __vstruct_bytes(struct btree_node, u64s);
328 
329 	out = btree_bounce_alloc(c, bytes, &used_mempool);
330 
331 	start_time = local_clock();
332 
333 	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);
334 
335 	out->keys.u64s = cpu_to_le16(u64s);
336 
337 	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
338 
339 	if (sorting_entire_node)
340 		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
341 				       start_time);
342 
343 	/* Make sure we preserve bset journal_seq: */
344 	for (t = b->set + start_idx; t < b->set + end_idx; t++)
345 		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
346 	start_bset->journal_seq = cpu_to_le64(seq);
347 
348 	if (sorting_entire_node) {
349 		u64s = le16_to_cpu(out->keys.u64s);
350 
351 		BUG_ON(bytes != btree_buf_bytes(b));
352 
353 		/*
354 		 * Our temporary buffer is the same size as the btree node's
355 		 * buffer, we can just swap buffers instead of doing a big
356 		 * memcpy()
357 		 */
358 		*out = *b->data;
359 		out->keys.u64s = cpu_to_le16(u64s);
360 		swap(out, b->data);
361 		set_btree_bset(b, b->set, &b->data->keys);
362 	} else {
363 		start_bset->u64s = out->keys.u64s;
364 		memcpy_u64s(start_bset->start,
365 			    out->keys.start,
366 			    le16_to_cpu(out->keys.u64s));
367 	}
368 
369 	for (i = start_idx + 1; i < end_idx; i++)
370 		b->nr.bset_u64s[start_idx] +=
371 			b->nr.bset_u64s[i];
372 
373 	b->nsets -= shift;
374 
375 	for (i = start_idx + 1; i < b->nsets; i++) {
376 		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
377 		b->set[i]		= b->set[i + shift];
378 	}
379 
380 	for (i = b->nsets; i < MAX_BSETS; i++)
381 		b->nr.bset_u64s[i] = 0;
382 
383 	set_btree_bset_end(b, &b->set[start_idx]);
384 	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
385 
386 	btree_bounce_free(c, bytes, used_mempool, out);
387 
388 	bch2_verify_btree_nr_keys(b);
389 }
390 
391 void bch2_btree_sort_into(struct bch_fs *c,
392 			 struct btree *dst,
393 			 struct btree *src)
394 {
395 	struct btree_nr_keys nr;
396 	struct btree_node_iter src_iter;
397 	u64 start_time = local_clock();
398 
399 	BUG_ON(dst->nsets != 1);
400 
401 	bch2_bset_set_no_aux_tree(dst, dst->set);
402 
403 	bch2_btree_node_iter_init_from_start(&src_iter, src);
404 
405 	nr = bch2_sort_repack(btree_bset_first(dst),
406 			src, &src_iter,
407 			&dst->format,
408 			true);
409 
410 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
411 			       start_time);
412 
413 	set_btree_bset_end(dst, dst->set);
414 
415 	dst->nr.live_u64s	+= nr.live_u64s;
416 	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
417 	dst->nr.packed_keys	+= nr.packed_keys;
418 	dst->nr.unpacked_keys	+= nr.unpacked_keys;
419 
420 	bch2_verify_btree_nr_keys(dst);
421 }
422 
423 /*
424  * We're about to add another bset to the btree node, so if there's currently
425  * too many bsets - sort some of them together:
426  */
427 static bool btree_node_compact(struct bch_fs *c, struct btree *b)
428 {
429 	unsigned unwritten_idx;
430 	bool ret = false;
431 
432 	for (unwritten_idx = 0;
433 	     unwritten_idx < b->nsets;
434 	     unwritten_idx++)
435 		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
436 			break;
437 
438 	if (b->nsets - unwritten_idx > 1) {
439 		btree_node_sort(c, b, unwritten_idx, b->nsets);
440 		ret = true;
441 	}
442 
443 	if (unwritten_idx > 1) {
444 		btree_node_sort(c, b, 0, unwritten_idx);
445 		ret = true;
446 	}
447 
448 	return ret;
449 }
450 
451 void bch2_btree_build_aux_trees(struct btree *b)
452 {
453 	for_each_bset(b, t)
454 		bch2_bset_build_aux_tree(b, t,
455 				!bset_written(b, bset(b, t)) &&
456 				t == bset_tree_last(b));
457 }
458 
459 /*
460  * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
461  *
462  * The first bset is going to be of similar order to the size of the node, the
463  * last bset is bounded by btree_write_set_buffer(), which is set to keep the
464  * memmove on insert from being too expensive: the middle bset should, ideally,
465  * be the geometric mean of the first and the last.
466  *
467  * Returns true if the middle bset is greater than that geometric mean:
468  */
469 static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
470 {
471 	unsigned mid_u64s_bits =
472 		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
473 
474 	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
475 }
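
/*
 * Worked example with made-up numbers (actual values depend on
 * btree_node_size and the key format): if btree_max_u64s(c) == 2^15 and
 * BTREE_WRITE_SET_U64s_BITS == 9, then mid_u64s_bits = (15 + 9) / 2 = 12,
 * so we compact once the middle bset exceeds 2^12 u64s.
 */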
476 
477 /*
478  * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
479  * inserted into
480  *
481  * Safe to call if there already is an unwritten bset - will only add a new bset
482  * if @b doesn't already have one.
483  *
484  * If we sorted (i.e. invalidated iterators), iterators are reinitialized via bch2_trans_node_reinit_iter().
485  */
486 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
487 {
488 	struct bch_fs *c = trans->c;
489 	struct btree_node_entry *bne;
490 	bool reinit_iter = false;
491 
492 	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
493 	BUG_ON(bset_written(b, bset(b, &b->set[1])));
494 	BUG_ON(btree_node_just_written(b));
495 
496 	if (b->nsets == MAX_BSETS &&
497 	    !btree_node_write_in_flight(b) &&
498 	    should_compact_all(c, b)) {
499 		bch2_btree_node_write(c, b, SIX_LOCK_write,
500 				      BTREE_WRITE_init_next_bset);
501 		reinit_iter = true;
502 	}
503 
504 	if (b->nsets == MAX_BSETS &&
505 	    btree_node_compact(c, b))
506 		reinit_iter = true;
507 
508 	BUG_ON(b->nsets >= MAX_BSETS);
509 
510 	bne = want_new_bset(c, b);
511 	if (bne)
512 		bch2_bset_init_next(b, bne);
513 
514 	bch2_btree_build_aux_trees(b);
515 
516 	if (reinit_iter)
517 		bch2_trans_node_reinit_iter(trans, b);
518 }
519 
520 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
521 			  struct bch_dev *ca,
522 			  struct btree *b, struct bset *i,
523 			  unsigned offset, int write)
524 {
525 	prt_printf(out, bch2_log_msg(c, "%s"),
526 		   write == READ
527 		   ? "error validating btree node "
528 		   : "corrupt btree node before write ");
529 	if (ca)
530 		prt_printf(out, "on %s ", ca->name);
531 	prt_printf(out, "at btree ");
532 	bch2_btree_pos_to_text(out, c, b);
533 
534 	printbuf_indent_add(out, 2);
535 
536 	prt_printf(out, "\nnode offset %u/%u",
537 		   b->written, btree_ptr_sectors_written(&b->key));
538 	if (i)
539 		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
540 	prt_str(out, ": ");
541 }
542 
543 __printf(9, 10)
544 static int __btree_err(int ret,
545 		       struct bch_fs *c,
546 		       struct bch_dev *ca,
547 		       struct btree *b,
548 		       struct bset *i,
549 		       int write,
550 		       bool have_retry,
551 		       enum bch_sb_error_id err_type,
552 		       const char *fmt, ...)
553 {
554 	struct printbuf out = PRINTBUF;
555 	bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
556 	va_list args;
557 
558 	btree_err_msg(&out, c, ca, b, i, b->written, write);
559 
560 	va_start(args, fmt);
561 	prt_vprintf(&out, fmt, args);
562 	va_end(args);
563 
564 	if (write == WRITE) {
565 		bch2_print_string_as_lines(KERN_ERR, out.buf);
566 		ret = c->opts.errors == BCH_ON_ERROR_continue
567 			? 0
568 			: -BCH_ERR_fsck_errors_not_fixed;
569 		goto out;
570 	}
571 
572 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
573 		ret = -BCH_ERR_btree_node_read_err_fixable;
574 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
575 		ret = -BCH_ERR_btree_node_read_err_bad_node;
576 
577 	if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
578 		bch2_sb_error_count(c, err_type);
579 
580 	switch (ret) {
581 	case -BCH_ERR_btree_node_read_err_fixable:
582 		ret = !silent
583 			? bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf)
584 			: -BCH_ERR_fsck_fix;
585 		if (ret != -BCH_ERR_fsck_fix &&
586 		    ret != -BCH_ERR_fsck_ignore)
587 			goto fsck_err;
588 		ret = -BCH_ERR_fsck_fix;
589 		break;
590 	case -BCH_ERR_btree_node_read_err_want_retry:
591 	case -BCH_ERR_btree_node_read_err_must_retry:
592 		if (!silent)
593 			bch2_print_string_as_lines(KERN_ERR, out.buf);
594 		break;
595 	case -BCH_ERR_btree_node_read_err_bad_node:
596 		if (!silent)
597 			bch2_print_string_as_lines(KERN_ERR, out.buf);
598 		ret = bch2_topology_error(c);
599 		break;
600 	case -BCH_ERR_btree_node_read_err_incompatible:
601 		if (!silent)
602 			bch2_print_string_as_lines(KERN_ERR, out.buf);
603 		ret = -BCH_ERR_fsck_errors_not_fixed;
604 		break;
605 	default:
606 		BUG();
607 	}
608 out:
609 fsck_err:
610 	printbuf_exit(&out);
611 	return ret;
612 }
613 
614 #define btree_err(type, c, ca, b, i, _err_type, msg, ...)		\
615 ({									\
616 	int _ret = __btree_err(type, c, ca, b, i, write, have_retry,	\
617 			       BCH_FSCK_ERR_##_err_type,		\
618 			       msg, ##__VA_ARGS__);			\
619 									\
620 	if (_ret != -BCH_ERR_fsck_fix) {				\
621 		ret = _ret;						\
622 		goto fsck_err;						\
623 	}								\
624 									\
625 	*saw_error = true;						\
626 })
627 
628 #define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
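
/*
 * btree_err()/btree_err_on() expand to true if the error fired and was fixed
 * (so callers can repair in line); any other verdict from __btree_err() jumps
 * to the enclosing function's fsck_err label with ret set. They require write,
 * have_retry, ret and saw_error in scope, plus a fsck_err: label, e.g.:
 *
 *	if (btree_err_on(cond, -BCH_ERR_btree_node_read_err_fixable,
 *			 c, ca, b, i, bset_empty, "empty bset"))
 *		... repair in line ...
 */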
629 
630 /*
631  * When btree topology repair changes the start or end of a node, that might
632  * mean we have to drop keys that are no longer inside the node:
633  */
634 __cold
635 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
636 {
637 	for_each_bset(b, t) {
638 		struct bset *i = bset(b, t);
639 		struct bkey_packed *k;
640 
641 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
642 			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
643 				break;
644 
645 		if (k != i->start) {
646 			unsigned shift = (u64 *) k - (u64 *) i->start;
647 
648 			memmove_u64s_down(i->start, k,
649 					  (u64 *) vstruct_end(i) - (u64 *) k);
650 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
651 			set_btree_bset_end(b, t);
652 		}
653 
654 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
655 			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
656 				break;
657 
658 		if (k != vstruct_last(i)) {
659 			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
660 			set_btree_bset_end(b, t);
661 		}
662 	}
663 
664 	/*
665 	 * Always rebuild search trees: eytzinger search tree nodes directly
666 	 * depend on the values of min/max key:
667 	 */
668 	bch2_bset_set_no_aux_tree(b, b->set);
669 	bch2_btree_build_aux_trees(b);
670 	b->nr = bch2_btree_node_count_keys(b);
671 
672 	struct bkey_s_c k;
673 	struct bkey unpacked;
674 	struct btree_node_iter iter;
675 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
676 		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
677 		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
678 	}
679 }
680 
681 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
682 			 struct btree *b, struct bset *i,
683 			 unsigned offset, unsigned sectors,
684 			 int write, bool have_retry, bool *saw_error)
685 {
686 	unsigned version = le16_to_cpu(i->version);
687 	struct printbuf buf1 = PRINTBUF;
688 	struct printbuf buf2 = PRINTBUF;
689 	int ret = 0;
690 
691 	btree_err_on(!bch2_version_compatible(version),
692 		     -BCH_ERR_btree_node_read_err_incompatible,
693 		     c, ca, b, i,
694 		     btree_node_unsupported_version,
695 		     "unsupported bset version %u.%u",
696 		     BCH_VERSION_MAJOR(version),
697 		     BCH_VERSION_MINOR(version));
698 
699 	if (btree_err_on(version < c->sb.version_min,
700 			 -BCH_ERR_btree_node_read_err_fixable,
701 			 c, NULL, b, i,
702 			 btree_node_bset_older_than_sb_min,
703 			 "bset version %u older than superblock version_min %u",
704 			 version, c->sb.version_min)) {
705 		mutex_lock(&c->sb_lock);
706 		c->disk_sb.sb->version_min = cpu_to_le16(version);
707 		bch2_write_super(c);
708 		mutex_unlock(&c->sb_lock);
709 	}
710 
711 	if (btree_err_on(BCH_VERSION_MAJOR(version) >
712 			 BCH_VERSION_MAJOR(c->sb.version),
713 			 -BCH_ERR_btree_node_read_err_fixable,
714 			 c, NULL, b, i,
715 			 btree_node_bset_newer_than_sb,
716 			 "bset version %u newer than superblock version %u",
717 			 version, c->sb.version)) {
718 		mutex_lock(&c->sb_lock);
719 		c->disk_sb.sb->version = cpu_to_le16(version);
720 		bch2_write_super(c);
721 		mutex_unlock(&c->sb_lock);
722 	}
723 
724 	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
725 		     -BCH_ERR_btree_node_read_err_incompatible,
726 		     c, ca, b, i,
727 		     btree_node_unsupported_version,
728 		     "BSET_SEPARATE_WHITEOUTS no longer supported");
729 
730 	if (btree_err_on(offset + sectors > btree_sectors(c),
731 			 -BCH_ERR_btree_node_read_err_fixable,
732 			 c, ca, b, i,
733 			 bset_past_end_of_btree_node,
734 			 "bset past end of btree node")) {
735 		i->u64s = 0;
736 		ret = 0;
737 		goto out;
738 	}
739 
740 	btree_err_on(offset && !i->u64s,
741 		     -BCH_ERR_btree_node_read_err_fixable,
742 		     c, ca, b, i,
743 		     bset_empty,
744 		     "empty bset");
745 
746 	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
747 		     -BCH_ERR_btree_node_read_err_want_retry,
748 		     c, ca, b, i,
749 		     bset_wrong_sector_offset,
750 		     "bset at wrong sector offset");
751 
752 	if (!offset) {
753 		struct btree_node *bn =
754 			container_of(i, struct btree_node, keys);
755 		/* These indicate that we read the wrong btree node: */
756 
757 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
758 			struct bch_btree_ptr_v2 *bp =
759 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
760 
761 			/* XXX endianness */
762 			btree_err_on(bp->seq != bn->keys.seq,
763 				     -BCH_ERR_btree_node_read_err_must_retry,
764 				     c, ca, b, NULL,
765 				     bset_bad_seq,
766 				     "incorrect sequence number (wrong btree node)");
767 		}
768 
769 		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
770 			     -BCH_ERR_btree_node_read_err_must_retry,
771 			     c, ca, b, i,
772 			     btree_node_bad_btree,
773 			     "incorrect btree id");
774 
775 		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
776 			     -BCH_ERR_btree_node_read_err_must_retry,
777 			     c, ca, b, i,
778 			     btree_node_bad_level,
779 			     "incorrect level");
780 
781 		if (!write)
782 			compat_btree_node(b->c.level, b->c.btree_id, version,
783 					  BSET_BIG_ENDIAN(i), write, bn);
784 
785 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
786 			struct bch_btree_ptr_v2 *bp =
787 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
788 
789 			if (BTREE_PTR_RANGE_UPDATED(bp)) {
790 				b->data->min_key = bp->min_key;
791 				b->data->max_key = b->key.k.p;
792 			}
793 
794 			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
795 				     -BCH_ERR_btree_node_read_err_must_retry,
796 				     c, ca, b, NULL,
797 				     btree_node_bad_min_key,
798 				     "incorrect min_key: got %s should be %s",
799 				     (printbuf_reset(&buf1),
800 				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
801 				     (printbuf_reset(&buf2),
802 				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
803 		}
804 
805 		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
806 			     -BCH_ERR_btree_node_read_err_must_retry,
807 			     c, ca, b, i,
808 			     btree_node_bad_max_key,
809 			     "incorrect max key %s",
810 			     (printbuf_reset(&buf1),
811 			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
812 
813 		if (write)
814 			compat_btree_node(b->c.level, b->c.btree_id, version,
815 					  BSET_BIG_ENDIAN(i), write, bn);
816 
817 		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
818 			     -BCH_ERR_btree_node_read_err_bad_node,
819 			     c, ca, b, i,
820 			     btree_node_bad_format,
821 			     "invalid bkey format: %s\n  %s", buf1.buf,
822 			     (printbuf_reset(&buf2),
823 			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
824 		printbuf_reset(&buf1);
825 
826 		compat_bformat(b->c.level, b->c.btree_id, version,
827 			       BSET_BIG_ENDIAN(i), write,
828 			       &bn->format);
829 	}
830 out:
831 fsck_err:
832 	printbuf_exit(&buf2);
833 	printbuf_exit(&buf1);
834 	return ret;
835 }
836 
837 static int bset_key_invalid(struct bch_fs *c, struct btree *b,
838 			    struct bkey_s_c k,
839 			    bool updated_range, int rw,
840 			    struct printbuf *err)
841 {
842 	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
843 		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
844 		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
845 }
846 
847 static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
848 			 struct bset *i, struct bkey_packed *k)
849 {
850 	if (bkey_p_next(k) > vstruct_last(i))
851 		return false;
852 
853 	if (k->format > KEY_FORMAT_CURRENT)
854 		return false;
855 
856 	if (!bkeyp_u64s_valid(&b->format, k))
857 		return false;
858 
859 	struct printbuf buf = PRINTBUF;
860 	struct bkey tmp;
861 	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
862 	bool invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b), READ, &buf);
863 	printbuf_exit(&buf);
864 	return !invalid;
865 }
866 
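/*
 * Validate each key in the bset in order: keys failing basic sanity checks are
 * dropped (scanning forward for the next plausible key where possible,
 * truncating the bset otherwise), and bch2_bkey_compat() handles
 * endianness/version fixups in place:
 */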
867 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
868 			 struct bset *i, int write,
869 			 bool have_retry, bool *saw_error)
870 {
871 	unsigned version = le16_to_cpu(i->version);
872 	struct bkey_packed *k, *prev = NULL;
873 	struct printbuf buf = PRINTBUF;
874 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
875 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
876 	int ret = 0;
877 
878 	for (k = i->start;
879 	     k != vstruct_last(i);) {
880 		struct bkey_s u;
881 		struct bkey tmp;
882 		unsigned next_good_key;
883 
884 		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
885 				 -BCH_ERR_btree_node_read_err_fixable,
886 				 c, NULL, b, i,
887 				 btree_node_bkey_past_bset_end,
888 				 "key extends past end of bset")) {
889 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
890 			break;
891 		}
892 
893 		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
894 				 -BCH_ERR_btree_node_read_err_fixable,
895 				 c, NULL, b, i,
896 				 btree_node_bkey_bad_format,
897 				 "invalid bkey format %u", k->format))
898 			goto drop_this_key;
899 
900 		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
901 				 -BCH_ERR_btree_node_read_err_fixable,
902 				 c, NULL, b, i,
903 				 btree_node_bkey_bad_u64s,
904 				 "bad k->u64s %u (min %u max %zu)", k->u64s,
905 				 bkeyp_key_u64s(&b->format, k),
906 				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
907 			goto drop_this_key;
908 
909 		if (!write)
910 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
911 				    BSET_BIG_ENDIAN(i), write,
912 				    &b->format, k);
913 
914 		u = __bkey_disassemble(b, k, &tmp);
915 
916 		printbuf_reset(&buf);
917 		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
918 			printbuf_reset(&buf);
919 			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
920 			prt_printf(&buf, "\n  ");
921 			bch2_bkey_val_to_text(&buf, c, u.s_c);
922 
923 			btree_err(-BCH_ERR_btree_node_read_err_fixable,
924 				  c, NULL, b, i,
925 				  btree_node_bad_bkey,
926 				  "invalid bkey: %s", buf.buf);
927 			goto drop_this_key;
928 		}
929 
930 		if (write)
931 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
932 				    BSET_BIG_ENDIAN(i), write,
933 				    &b->format, k);
934 
935 		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
936 			struct bkey up = bkey_unpack_key(b, prev);
937 
938 			printbuf_reset(&buf);
939 			prt_printf(&buf, "keys out of order: ");
940 			bch2_bkey_to_text(&buf, &up);
941 			prt_printf(&buf, " > ");
942 			bch2_bkey_to_text(&buf, u.k);
943 
944 			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
945 				      c, NULL, b, i,
946 				      btree_node_bkey_out_of_order,
947 				      "%s", buf.buf))
948 				goto drop_this_key;
949 		}
950 
951 		prev = k;
952 		k = bkey_p_next(k);
953 		continue;
954 drop_this_key:
955 		next_good_key = k->u64s;
956 
957 		if (!next_good_key ||
958 		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
959 		     version >= bcachefs_metadata_version_snapshot)) {
960 			/*
961 			 * only do scanning if bch2_bkey_compat() has nothing to
962 			 * do
963 			 */
964 
965 			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
966 				for (next_good_key = 1;
967 				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
968 				     next_good_key++)
969 					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
970 						goto got_good_key;
971 			}
972 
973 			/*
974 			 * didn't find a good key, have to truncate the rest of
975 			 * the bset
976 			 */
977 			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
978 		}
979 got_good_key:
980 		le16_add_cpu(&i->u64s, -next_good_key);
981 		memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k);
982 	}
983 fsck_err:
984 	printbuf_exit(&buf);
985 	return ret;
986 }
987 
988 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
989 			      struct btree *b, bool have_retry, bool *saw_error)
990 {
991 	struct btree_node_entry *bne;
992 	struct sort_iter *iter;
993 	struct btree_node *sorted;
994 	struct bkey_packed *k;
995 	struct bset *i;
996 	bool used_mempool, blacklisted;
997 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
998 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
999 	unsigned u64s;
1000 	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
1001 	struct printbuf buf = PRINTBUF;
1002 	int ret = 0, retry_read = 0, write = READ;
1003 	u64 start_time = local_clock();
1004 
1005 	b->version_ondisk = U16_MAX;
1006 	/* We might get called multiple times on read retry: */
1007 	b->written = 0;
1008 
1009 	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
1010 	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
1011 
1012 	if (bch2_meta_read_fault("btree"))
1013 		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
1014 			  c, ca, b, NULL,
1015 			  btree_node_fault_injected,
1016 			  "dynamic fault");
1017 
1018 	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
1019 		     -BCH_ERR_btree_node_read_err_must_retry,
1020 		     c, ca, b, NULL,
1021 		     btree_node_bad_magic,
1022 		     "bad magic: want %llx, got %llx",
1023 		     bset_magic(c), le64_to_cpu(b->data->magic));
1024 
1025 	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
1026 		struct bch_btree_ptr_v2 *bp =
1027 			&bkey_i_to_btree_ptr_v2(&b->key)->v;
1028 
1029 		bch2_bpos_to_text(&buf, b->data->min_key);
1030 		prt_str(&buf, "-");
1031 		bch2_bpos_to_text(&buf, b->data->max_key);
1032 
1033 		btree_err_on(b->data->keys.seq != bp->seq,
1034 			     -BCH_ERR_btree_node_read_err_must_retry,
1035 			     c, ca, b, NULL,
1036 			     btree_node_bad_seq,
1037 			     "got wrong btree node: got\n%s",
1038 			     (printbuf_reset(&buf),
1039 			      bch2_btree_node_header_to_text(&buf, b->data),
1040 			      buf.buf));
1041 	} else {
1042 		btree_err_on(!b->data->keys.seq,
1043 			     -BCH_ERR_btree_node_read_err_must_retry,
1044 			     c, ca, b, NULL,
1045 			     btree_node_bad_seq,
1046 			     "bad btree header: seq 0\n%s",
1047 			     (printbuf_reset(&buf),
1048 			      bch2_btree_node_header_to_text(&buf, b->data),
1049 			      buf.buf));
1050 	}
1051 
1052 	while (b->written < (ptr_written ?: btree_sectors(c))) {
1053 		unsigned sectors;
1054 		struct nonce nonce;
1055 		bool first = !b->written;
1056 		bool csum_bad;
1057 
1058 		if (!b->written) {
1059 			i = &b->data->keys;
1060 
1061 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
1062 				     -BCH_ERR_btree_node_read_err_want_retry,
1063 				     c, ca, b, i,
1064 				     bset_unknown_csum,
1065 				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
1066 
1067 			nonce = btree_nonce(i, b->written << 9);
1068 
1069 			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
1070 			csum_bad = bch2_crc_cmp(b->data->csum, csum);
1071 			if (csum_bad)
1072 			if (ca && csum_bad)
1073 
1074 			btree_err_on(csum_bad,
1075 				     -BCH_ERR_btree_node_read_err_want_retry,
1076 				     c, ca, b, i,
1077 				     bset_bad_csum,
1078 				     "%s",
1079 				     (printbuf_reset(&buf),
1080 				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
1081 				      buf.buf));
1082 
1083 			ret = bset_encrypt(c, i, b->written << 9);
1084 			if (bch2_fs_fatal_err_on(ret, c,
1085 					"decrypting btree node: %s", bch2_err_str(ret)))
1086 				goto fsck_err;
1087 
1088 			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
1089 				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
1090 				     -BCH_ERR_btree_node_read_err_incompatible,
1091 				     c, NULL, b, NULL,
1092 				     btree_node_unsupported_version,
1093 				     "btree node does not have NEW_EXTENT_OVERWRITE set");
1094 
1095 			sectors = vstruct_sectors(b->data, c->block_bits);
1096 		} else {
1097 			bne = write_block(b);
1098 			i = &bne->keys;
1099 
1100 			if (i->seq != b->data->keys.seq)
1101 				break;
1102 
1103 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
1104 				     -BCH_ERR_btree_node_read_err_want_retry,
1105 				     c, ca, b, i,
1106 				     bset_unknown_csum,
1107 				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
1108 
1109 			nonce = btree_nonce(i, b->written << 9);
1110 			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1111 			csum_bad = bch2_crc_cmp(bne->csum, csum);
1112 			if (ca && csum_bad)
1113 				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1114 
1115 			btree_err_on(csum_bad,
1116 				     -BCH_ERR_btree_node_read_err_want_retry,
1117 				     c, ca, b, i,
1118 				     bset_bad_csum,
1119 				     "%s",
1120 				     (printbuf_reset(&buf),
1121 				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
1122 				      buf.buf));
1123 
1124 			ret = bset_encrypt(c, i, b->written << 9);
1125 			if (bch2_fs_fatal_err_on(ret, c,
1126 					"decrypting btree node: %s", bch2_err_str(ret)))
1127 				goto fsck_err;
1128 
1129 			sectors = vstruct_sectors(bne, c->block_bits);
1130 		}
1131 
1132 		b->version_ondisk = min(b->version_ondisk,
1133 					le16_to_cpu(i->version));
1134 
1135 		ret = validate_bset(c, ca, b, i, b->written, sectors,
1136 				    READ, have_retry, saw_error);
1137 		if (ret)
1138 			goto fsck_err;
1139 
1140 		if (!b->written)
1141 			btree_node_set_format(b, b->data->format);
1142 
1143 		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
1144 		if (ret)
1145 			goto fsck_err;
1146 
1147 		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
1148 
1149 		blacklisted = bch2_journal_seq_is_blacklisted(c,
1150 					le64_to_cpu(i->journal_seq),
1151 					true);
1152 
1153 		btree_err_on(blacklisted && first,
1154 			     -BCH_ERR_btree_node_read_err_fixable,
1155 			     c, ca, b, i,
1156 			     bset_blacklisted_journal_seq,
1157 			     "first btree node bset has blacklisted journal seq (%llu)",
1158 			     le64_to_cpu(i->journal_seq));
1159 
1160 		btree_err_on(blacklisted && ptr_written,
1161 			     -BCH_ERR_btree_node_read_err_fixable,
1162 			     c, ca, b, i,
1163 			     first_bset_blacklisted_journal_seq,
1164 			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
1165 			     le64_to_cpu(i->journal_seq),
1166 			     b->written, b->written + sectors, ptr_written);
1167 
1168 		b->written += sectors;
1169 
1170 		if (blacklisted && !first)
1171 			continue;
1172 
1173 		sort_iter_add(iter,
1174 			      vstruct_idx(i, 0),
1175 			      vstruct_last(i));
1176 	}
1177 
1178 	if (ptr_written) {
1179 		btree_err_on(b->written < ptr_written,
1180 			     -BCH_ERR_btree_node_read_err_want_retry,
1181 			     c, ca, b, NULL,
1182 			     btree_node_data_missing,
1183 			     "btree node data missing: expected %u sectors, found %u",
1184 			     ptr_written, b->written);
1185 	} else {
1186 		for (bne = write_block(b);
1187 		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
1188 		     bne = (void *) bne + block_bytes(c))
1189 			btree_err_on(bne->keys.seq == b->data->keys.seq &&
1190 				     !bch2_journal_seq_is_blacklisted(c,
1191 								      le64_to_cpu(bne->keys.journal_seq),
1192 								      true),
1193 				     -BCH_ERR_btree_node_read_err_want_retry,
1194 				     c, ca, b, NULL,
1195 				     btree_node_bset_after_end,
1196 				     "found bset signature after last bset");
1197 	}
1198 
1199 	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
1200 	sorted->keys.u64s = 0;
1201 
1202 	set_btree_bset(b, b->set, &b->data->keys);
1203 
1204 	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
1205 
1206 	u64s = le16_to_cpu(sorted->keys.u64s);
1207 	*sorted = *b->data;
1208 	sorted->keys.u64s = cpu_to_le16(u64s);
1209 	swap(sorted, b->data);
1210 	set_btree_bset(b, b->set, &b->data->keys);
1211 	b->nsets = 1;
1212 
1213 	BUG_ON(b->nr.live_u64s != u64s);
1214 
1215 	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
1216 
1217 	if (updated_range)
1218 		bch2_btree_node_drop_keys_outside_node(b);
1219 
1220 	i = &b->data->keys;
1221 	for (k = i->start; k != vstruct_last(i);) {
1222 		struct bkey tmp;
1223 		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
1224 
1225 		printbuf_reset(&buf);
1226 
1227 		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
1228 		    (bch2_inject_invalid_keys &&
1229 		     !bversion_cmp(u.k->version, MAX_VERSION))) {
1230 			printbuf_reset(&buf);
1231 
1232 			prt_printf(&buf, "invalid bkey: ");
1233 			bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
1234 			prt_printf(&buf, "\n  ");
1235 			bch2_bkey_val_to_text(&buf, c, u.s_c);
1236 
1237 			btree_err(-BCH_ERR_btree_node_read_err_fixable,
1238 				  c, NULL, b, i,
1239 				  btree_node_bad_bkey,
1240 				  "%s", buf.buf);
1241 
1242 			btree_keys_account_key_drop(&b->nr, 0, k);
1243 
1244 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1245 			memmove_u64s_down(k, bkey_p_next(k),
1246 					  (u64 *) vstruct_end(i) - (u64 *) k);
1247 			set_btree_bset_end(b, b->set);
1248 			continue;
1249 		}
1250 
1251 		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
1252 			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
1253 
1254 			bp.v->mem_ptr = 0;
1255 		}
1256 
1257 		k = bkey_p_next(k);
1258 	}
1259 
1260 	bch2_bset_build_aux_tree(b, b->set, false);
1261 
1262 	set_needs_whiteout(btree_bset_first(b), true);
1263 
1264 	btree_node_reset_sib_u64s(b);
1265 
1266 	rcu_read_lock();
1267 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
1268 		struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
1269 
1270 		if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
1271 			set_btree_node_need_rewrite(b);
1272 	}
1273 	rcu_read_unlock();
1274 
1275 	if (!ptr_written)
1276 		set_btree_node_need_rewrite(b);
1277 out:
1278 	mempool_free(iter, &c->fill_iter);
1279 	printbuf_exit(&buf);
1280 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
1281 	return retry_read;
1282 fsck_err:
1283 	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
1284 	    ret == -BCH_ERR_btree_node_read_err_must_retry) {
1285 		retry_read = 1;
1286 	} else {
1287 		set_btree_node_read_error(b);
1288 		bch2_btree_lost_data(c, b->c.btree_id);
1289 	}
1290 	goto out;
1291 }
1292 
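/*
 * Read completion work: validate the node, retrying from other replicas on
 * failure. Note the goto into the middle of the loop - the first iteration
 * handles the completion of the read that was already submitted:
 */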
1293 static void btree_node_read_work(struct work_struct *work)
1294 {
1295 	struct btree_read_bio *rb =
1296 		container_of(work, struct btree_read_bio, work);
1297 	struct bch_fs *c	= rb->c;
1298 	struct bch_dev *ca	= rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
1299 	struct btree *b		= rb->b;
1300 	struct bio *bio		= &rb->bio;
1301 	struct bch_io_failures failed = { .nr = 0 };
1302 	struct printbuf buf = PRINTBUF;
1303 	bool saw_error = false;
1304 	bool retry = false;
1305 	bool can_retry;
1306 
1307 	goto start;
1308 	while (1) {
1309 		retry = true;
1310 		bch_info(c, "retrying read");
1311 		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
1312 		rb->have_ioref		= ca != NULL;
1313 		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1314 		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
1315 		bio->bi_iter.bi_size	= btree_buf_bytes(b);
1316 
1317 		if (rb->have_ioref) {
1318 			bio_set_dev(bio, ca->disk_sb.bdev);
1319 			submit_bio_wait(bio);
1320 		} else {
1321 			bio->bi_status = BLK_STS_REMOVED;
1322 		}
1323 start:
1324 		printbuf_reset(&buf);
1325 		bch2_btree_pos_to_text(&buf, c, b);
1326 		bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read,
1327 				   "btree read error %s for %s",
1328 				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
1329 		if (rb->have_ioref)
1330 			percpu_ref_put(&ca->io_ref);
1331 		rb->have_ioref = false;
1332 
1333 		bch2_mark_io_failure(&failed, &rb->pick);
1334 
1335 		can_retry = bch2_bkey_pick_read_device(c,
1336 				bkey_i_to_s_c(&b->key),
1337 				&failed, &rb->pick) > 0;
1338 
1339 		if (!bio->bi_status &&
1340 		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
1341 			if (retry)
1342 				bch_info(c, "retry success");
1343 			break;
1344 		}
1345 
1346 		saw_error = true;
1347 
1348 		if (!can_retry) {
1349 			set_btree_node_read_error(b);
1350 			bch2_btree_lost_data(c, b->c.btree_id);
1351 			break;
1352 		}
1353 	}
1354 
1355 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1356 			       rb->start_time);
1357 	bio_put(&rb->bio);
1358 
1359 	if (saw_error &&
1360 	    !btree_node_read_error(b) &&
1361 	    c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
1362 		printbuf_reset(&buf);
1363 		bch2_bpos_to_text(&buf, b->key.k.p);
1364 		bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
1365 			 __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
1366 
1367 		bch2_btree_node_rewrite_async(c, b);
1368 	}
1369 
1370 	printbuf_exit(&buf);
1371 	clear_btree_node_read_in_flight(b);
1372 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1373 }
1374 
1375 static void btree_node_read_endio(struct bio *bio)
1376 {
1377 	struct btree_read_bio *rb =
1378 		container_of(bio, struct btree_read_bio, bio);
1379 	struct bch_fs *c	= rb->c;
1380 
1381 	if (rb->have_ioref) {
1382 		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
1383 
1384 		bch2_latency_acct(ca, rb->start_time, READ);
1385 	}
1386 
1387 	queue_work(c->io_complete_wq, &rb->work);
1388 }
1389 
1390 struct btree_node_read_all {
1391 	struct closure		cl;
1392 	struct bch_fs		*c;
1393 	struct btree		*b;
1394 	unsigned		nr;
1395 	void			*buf[BCH_REPLICAS_MAX];
1396 	struct bio		*bio[BCH_REPLICAS_MAX];
1397 	blk_status_t		err[BCH_REPLICAS_MAX];
1398 };
1399 
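/*
 * Walk the bsets in a raw node buffer, summing their sizes, to determine how
 * many sectors of the node were actually written:
 */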
1400 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1401 {
1402 	struct btree_node *bn = data;
1403 	struct btree_node_entry *bne;
1404 	unsigned offset = 0;
1405 
1406 	if (le64_to_cpu(bn->magic) != bset_magic(c))
1407 		return 0;
1408 
1409 	while (offset < btree_sectors(c)) {
1410 		if (!offset) {
1411 			offset += vstruct_sectors(bn, c->block_bits);
1412 		} else {
1413 			bne = data + (offset << 9);
1414 			if (bne->keys.seq != bn->keys.seq)
1415 				break;
1416 			offset += vstruct_sectors(bne, c->block_bits);
1417 		}
1418 	}
1419 
1420 	return offset;
1421 }
1422 
1423 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1424 {
1425 	struct btree_node *bn = data;
1426 	struct btree_node_entry *bne;
1427 
1428 	if (!offset)
1429 		return false;
1430 
1431 	while (offset < btree_sectors(c)) {
1432 		bne = data + (offset << 9);
1433 		if (bne->keys.seq == bn->keys.seq)
1434 			return true;
1435 		offset++;
1436 	}
1437 
1438 	return false;
1440 }
1441 
1442 static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
1443 {
1444 	closure_type(ra, struct btree_node_read_all, cl);
1445 	struct bch_fs *c = ra->c;
1446 	struct btree *b = ra->b;
1447 	struct printbuf buf = PRINTBUF;
1448 	bool dump_bset_maps = false;
1449 	bool have_retry = false;
1450 	int ret = 0, best = -1, write = READ;
1451 	unsigned i, written = 0, written2 = 0;
1452 	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1453 		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1454 	bool _saw_error = false, *saw_error = &_saw_error;
1455 
1456 	for (i = 0; i < ra->nr; i++) {
1457 		struct btree_node *bn = ra->buf[i];
1458 
1459 		if (ra->err[i])
1460 			continue;
1461 
1462 		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1463 		    (seq && seq != bn->keys.seq))
1464 			continue;
1465 
1466 		if (best < 0) {
1467 			best = i;
1468 			written = btree_node_sectors_written(c, bn);
1469 			continue;
1470 		}
1471 
1472 		written2 = btree_node_sectors_written(c, ra->buf[i]);
1473 		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
1474 				 c, NULL, b, NULL,
1475 				 btree_node_replicas_sectors_written_mismatch,
1476 				 "btree node sectors written mismatch: %u != %u",
1477 				 written, written2) ||
1478 		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1479 				 -BCH_ERR_btree_node_read_err_fixable,
1480 				 c, NULL, b, NULL,
1481 				 btree_node_bset_after_end,
1482 				 "found bset signature after last bset") ||
1483 		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1484 				 -BCH_ERR_btree_node_read_err_fixable,
1485 				 c, NULL, b, NULL,
1486 				 btree_node_replicas_data_mismatch,
1487 				 "btree node replicas content mismatch"))
1488 			dump_bset_maps = true;
1489 
1490 		if (written2 > written) {
1491 			written = written2;
1492 			best = i;
1493 		}
1494 	}
1495 fsck_err:
1496 	if (dump_bset_maps) {
1497 		for (i = 0; i < ra->nr; i++) {
1498 			struct btree_node *bn = ra->buf[i];
1499 			struct btree_node_entry *bne = NULL;
1500 			unsigned offset = 0, sectors;
1501 			bool gap = false;
1502 
1503 			if (ra->err[i])
1504 				continue;
1505 
1506 			printbuf_reset(&buf);
1507 
1508 			while (offset < btree_sectors(c)) {
1509 				if (!offset) {
1510 					sectors = vstruct_sectors(bn, c->block_bits);
1511 				} else {
1512 					bne = ra->buf[i] + (offset << 9);
1513 					if (bne->keys.seq != bn->keys.seq)
1514 						break;
1515 					sectors = vstruct_sectors(bne, c->block_bits);
1516 				}
1517 
1518 				prt_printf(&buf, " %u-%u", offset, offset + sectors);
1519 				if (bne && bch2_journal_seq_is_blacklisted(c,
1520 							le64_to_cpu(bne->keys.journal_seq), false))
1521 					prt_printf(&buf, "*");
1522 				offset += sectors;
1523 			}
1524 
1525 			while (offset < btree_sectors(c)) {
1526 				bne = ra->buf[i] + (offset << 9);
1527 				if (bne->keys.seq == bn->keys.seq) {
1528 					if (!gap)
1529 						prt_printf(&buf, " GAP");
1530 					gap = true;
1531 
1532 					sectors = vstruct_sectors(bne, c->block_bits);
1533 					prt_printf(&buf, " %u-%u", offset, offset + sectors);
1534 					if (bch2_journal_seq_is_blacklisted(c,
1535 							le64_to_cpu(bne->keys.journal_seq), false))
1536 						prt_printf(&buf, "*");
1537 				}
1538 				offset++;
1539 			}
1540 
1541 			bch_err(c, "replica %u:%s", i, buf.buf);
1542 		}
1543 	}
1544 
1545 	if (best >= 0) {
1546 		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
1547 		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
1548 	} else {
1549 		ret = -1;
1550 	}
1551 
1552 	if (ret) {
1553 		set_btree_node_read_error(b);
1554 		bch2_btree_lost_data(c, b->c.btree_id);
1555 	} else if (*saw_error)
1556 		bch2_btree_node_rewrite_async(c, b);
1557 
1558 	for (i = 0; i < ra->nr; i++) {
1559 		mempool_free(ra->buf[i], &c->btree_bounce_pool);
1560 		bio_put(ra->bio[i]);
1561 	}
1562 
1563 	closure_debug_destroy(&ra->cl);
1564 	kfree(ra);
1565 	printbuf_exit(&buf);
1566 
1567 	clear_btree_node_read_in_flight(b);
1568 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1569 }
1570 
1571 static void btree_node_read_all_replicas_endio(struct bio *bio)
1572 {
1573 	struct btree_read_bio *rb =
1574 		container_of(bio, struct btree_read_bio, bio);
1575 	struct bch_fs *c	= rb->c;
1576 	struct btree_node_read_all *ra = rb->ra;
1577 
1578 	if (rb->have_ioref) {
1579 		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
1580 
1581 		bch2_latency_acct(ca, rb->start_time, READ);
1582 	}
1583 
1584 	ra->err[rb->idx] = bio->bi_status;
1585 	closure_put(&ra->cl);
1586 }
1587 
1588 /*
1589  * XXX This allocates multiple times from the same mempools, and can deadlock
1590  * under sufficient memory pressure (but is only a debug path)
1591  */
1592 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1593 {
1594 	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1595 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1596 	const union bch_extent_entry *entry;
1597 	struct extent_ptr_decoded pick;
1598 	struct btree_node_read_all *ra;
1599 	unsigned i;
1600 
1601 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
1602 	if (!ra)
1603 		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1604 
1605 	closure_init(&ra->cl, NULL);
1606 	ra->c	= c;
1607 	ra->b	= b;
1608 	ra->nr	= bch2_bkey_nr_ptrs(k);
1609 
1610 	for (i = 0; i < ra->nr; i++) {
1611 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1612 		ra->bio[i] = bio_alloc_bioset(NULL,
1613 					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
1614 					      REQ_OP_READ|REQ_SYNC|REQ_META,
1615 					      GFP_NOFS,
1616 					      &c->btree_bio);
1617 	}
1618 
1619 	i = 0;
1620 	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1621 		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1622 		struct btree_read_bio *rb =
1623 			container_of(ra->bio[i], struct btree_read_bio, bio);
1624 		rb->c			= c;
1625 		rb->b			= b;
1626 		rb->ra			= ra;
1627 		rb->start_time		= local_clock();
1628 		rb->have_ioref		= ca != NULL;
1629 		rb->idx			= i;
1630 		rb->pick		= pick;
1631 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1632 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
1633 		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
1634 
1635 		if (rb->have_ioref) {
1636 			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1637 				     bio_sectors(&rb->bio));
1638 			bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1639 
1640 			closure_get(&ra->cl);
1641 			submit_bio(&rb->bio);
1642 		} else {
1643 			ra->err[i] = BLK_STS_REMOVED;
1644 		}
1645 
1646 		i++;
1647 	}
1648 
1649 	if (sync) {
1650 		closure_sync(&ra->cl);
1651 		btree_node_read_all_replicas_done(&ra->cl.work);
1652 	} else {
1653 		continue_at(&ra->cl, btree_node_read_all_replicas_done,
1654 			    c->io_complete_wq);
1655 	}
1656 
1657 	return 0;
1658 }
1659 
1660 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
1661 			  bool sync)
1662 {
1663 	struct bch_fs *c = trans->c;
1664 	struct extent_ptr_decoded pick;
1665 	struct btree_read_bio *rb;
1666 	struct bch_dev *ca;
1667 	struct bio *bio;
1668 	int ret;
1669 
1670 	trace_and_count(c, btree_node_read, trans, b);
1671 
1672 	if (bch2_verify_all_btree_replicas &&
1673 	    !btree_node_read_all_replicas(c, b, sync))
1674 		return;
1675 
1676 	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1677 					 NULL, &pick);
1678 
1679 	if (ret <= 0) {
1680 		struct printbuf buf = PRINTBUF;
1681 
1682 		prt_str(&buf, "btree node read error: no device to read from\n at ");
1683 		bch2_btree_pos_to_text(&buf, c, b);
1684 		bch_err_ratelimited(c, "%s", buf.buf);
1685 
1686 		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1687 		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
1688 			bch2_fatal_error(c);
1689 
1690 		set_btree_node_read_error(b);
1691 		bch2_btree_lost_data(c, b->c.btree_id);
1692 		clear_btree_node_read_in_flight(b);
1693 		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1694 		printbuf_exit(&buf);
1695 		return;
1696 	}
1697 
1698 	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1699 
1700 	bio = bio_alloc_bioset(NULL,
1701 			       buf_pages(b->data, btree_buf_bytes(b)),
1702 			       REQ_OP_READ|REQ_SYNC|REQ_META,
1703 			       GFP_NOFS,
1704 			       &c->btree_bio);
1705 	rb = container_of(bio, struct btree_read_bio, bio);
1706 	rb->c			= c;
1707 	rb->b			= b;
1708 	rb->ra			= NULL;
1709 	rb->start_time		= local_clock();
1710 	rb->have_ioref		= ca != NULL;
1711 	rb->pick		= pick;
1712 	INIT_WORK(&rb->work, btree_node_read_work);
1713 	bio->bi_iter.bi_sector	= pick.ptr.offset;
1714 	bio->bi_end_io		= btree_node_read_endio;
1715 	bch2_bio_map(bio, b->data, btree_buf_bytes(b));
1716 
1717 	if (rb->have_ioref) {
1718 		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1719 			     bio_sectors(bio));
1720 		bio_set_dev(bio, ca->disk_sb.bdev);
1721 
1722 		if (sync) {
1723 			submit_bio_wait(bio);
1724 			bch2_latency_acct(ca, rb->start_time, READ);
1725 			btree_node_read_work(&rb->work);
1726 		} else {
1727 			submit_bio(bio);
1728 		}
1729 	} else {
1730 		bio->bi_status = BLK_STS_REMOVED;
1731 
1732 		if (sync)
1733 			btree_node_read_work(&rb->work);
1734 		else
1735 			queue_work(c->io_complete_wq, &rb->work);
1736 	}
1737 }
1738 
1739 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1740 				  const struct bkey_i *k, unsigned level)
1741 {
1742 	struct bch_fs *c = trans->c;
1743 	struct closure cl;
1744 	struct btree *b;
1745 	int ret;
1746 
1747 	closure_init_stack(&cl);
1748 
1749 	do {
1750 		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1751 		closure_sync(&cl);
1752 	} while (ret);
1753 
1754 	b = bch2_btree_node_mem_alloc(trans, level != 0);
1755 	bch2_btree_cache_cannibalize_unlock(trans);
1756 
1757 	BUG_ON(IS_ERR(b));
1758 
1759 	bkey_copy(&b->key, k);
1760 	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1761 
1762 	set_btree_node_read_in_flight(b);
1763 
1764 	bch2_btree_node_read(trans, b, true);
1765 
1766 	if (btree_node_read_error(b)) {
1767 		bch2_btree_node_hash_remove(&c->btree_cache, b);
1768 
1769 		mutex_lock(&c->btree_cache.lock);
1770 		list_move(&b->list, &c->btree_cache.freeable);
1771 		mutex_unlock(&c->btree_cache.lock);
1772 
1773 		ret = -BCH_ERR_btree_node_read_error;
1774 		goto err;
1775 	}
1776 
1777 	bch2_btree_set_root_for_read(c, b);
1778 err:
1779 	six_unlock_write(&b->c.lock);
1780 	six_unlock_intent(&b->c.lock);
1781 
1782 	return ret;
1783 }
1784 
1785 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1786 			const struct bkey_i *k, unsigned level)
1787 {
1788 	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
1789 }
1790 
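/*
 * The low bit of ->will_make_reachable indicates that the btree_update making
 * this node reachable still holds a ref on the node's closure; clear the bit,
 * drop the ref, and release this write's journal pin:
 */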
1791 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1792 				      struct btree_write *w)
1793 {
1794 	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1795 
1796 	do {
1797 		old = new = v;
1798 		if (!(old & 1))
1799 			break;
1800 
1801 		new &= ~1UL;
1802 	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1803 
1804 	if (old & 1)
1805 		closure_put(&((struct btree_update *) new)->cl);
1806 
1807 	bch2_journal_pin_drop(&c->journal, &w->journal);
1808 }
1809 
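/*
 * Atomically (via cmpxchg on ->flags) decide what happens after a write
 * completes: if the node was redirtied and needs another write, start it
 * immediately; otherwise clear the write_in_flight bits and wake waiters:
 */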
1810 static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
1811 {
1812 	struct btree_write *w = btree_prev_write(b);
1813 	unsigned long old, new, v;
1814 	unsigned type = 0;
1815 
1816 	bch2_btree_complete_write(c, b, w);
1817 
1818 	v = READ_ONCE(b->flags);
1819 	do {
1820 		old = new = v;
1821 
1822 		if ((old & (1U << BTREE_NODE_dirty)) &&
1823 		    (old & (1U << BTREE_NODE_need_write)) &&
1824 		    !(old & (1U << BTREE_NODE_never_write)) &&
1825 		    !(old & (1U << BTREE_NODE_write_blocked)) &&
1826 		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
1827 			new &= ~(1U << BTREE_NODE_dirty);
1828 			new &= ~(1U << BTREE_NODE_need_write);
1829 			new |=  (1U << BTREE_NODE_write_in_flight);
1830 			new |=  (1U << BTREE_NODE_write_in_flight_inner);
1831 			new |=  (1U << BTREE_NODE_just_written);
1832 			new ^=  (1U << BTREE_NODE_write_idx);
1833 
1834 			type = new & BTREE_WRITE_TYPE_MASK;
1835 			new &= ~BTREE_WRITE_TYPE_MASK;
1836 		} else {
1837 			new &= ~(1U << BTREE_NODE_write_in_flight);
1838 			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
1839 		}
1840 	} while ((v = cmpxchg(&b->flags, old, new)) != old);
1841 
1842 	if (new & (1U << BTREE_NODE_write_in_flight))
1843 		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
1844 	else
1845 		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
1846 }
1847 
1848 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1849 {
1850 	struct btree_trans *trans = bch2_trans_get(c);
1851 
1852 	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
1853 	__btree_node_write_done(c, b);
1854 	six_unlock_read(&b->c.lock);
1855 
1856 	bch2_trans_put(trans);
1857 }
1858 
1859 static void btree_node_write_work(struct work_struct *work)
1860 {
1861 	struct btree_write_bio *wbio =
1862 		container_of(work, struct btree_write_bio, work);
1863 	struct bch_fs *c	= wbio->wbio.c;
1864 	struct btree *b		= wbio->wbio.bio.bi_private;
1865 	int ret = 0;
1866 
1867 	btree_bounce_free(c,
1868 		wbio->data_bytes,
1869 		wbio->wbio.used_mempool,
1870 		wbio->data);
1871 
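	/*
	 * Drop pointers to devices the write failed on; if none remain, the
	 * write failed on all replicas:
	 */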
1872 	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
1873 		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1874 
1875 	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
1876 		ret = -BCH_ERR_btree_node_write_all_failed;
1877 		goto err;
1878 	}
1879 
1880 	if (wbio->wbio.first_btree_write) {
1881 		if (wbio->wbio.failed.nr) {
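			/*
			 * XXX: failed replicas on a node's first write are
			 * not handled here
			 */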
1882 
1883 		}
1884 	} else {
1885 		ret = bch2_trans_do(c, NULL, NULL, 0,
1886 			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
1887 					BCH_WATERMARK_interior_updates|
1888 					BCH_TRANS_COMMIT_journal_reclaim|
1889 					BCH_TRANS_COMMIT_no_enospc|
1890 					BCH_TRANS_COMMIT_no_check_rw,
1891 					!wbio->wbio.failed.nr));
1892 		if (ret)
1893 			goto err;
1894 	}
1895 out:
1896 	bio_put(&wbio->wbio.bio);
1897 	btree_node_write_done(c, b);
1898 	return;
1899 err:
1900 	set_btree_node_noevict(b);
1901 	bch2_fs_fatal_err_on(!bch2_err_matches(ret, EROFS), c,
1902 			     "writing btree node: %s", bch2_err_str(ret));
1903 	goto out;
1904 }
1905 
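/*
 * May be called from interrupt context: completions of split (per-replica)
 * bios are funnelled into the parent bio, and the final completion punts the
 * rest of the work to btree_node_write_work(), since we can't block here.
 */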
1906 static void btree_node_write_endio(struct bio *bio)
1907 {
1908 	struct bch_write_bio *wbio	= to_wbio(bio);
1909 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
1910 	struct bch_write_bio *orig	= parent ?: wbio;
1911 	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
1912 	struct bch_fs *c		= wbio->c;
1913 	struct btree *b			= wbio->bio.bi_private;
1914 	struct bch_dev *ca		= wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
1915 	unsigned long flags;
1916 
1917 	if (wbio->have_ioref)
1918 		bch2_latency_acct(ca, wbio->submit_time, WRITE);
1919 
1920 	if (!ca ||
1921 	    bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
1922 			       "btree write error: %s",
1923 			       bch2_blk_status_to_str(bio->bi_status)) ||
1924 	    bch2_meta_write_fault("btree")) {
1925 		spin_lock_irqsave(&c->btree_write_error_lock, flags);
1926 		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1927 		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1928 	}
1929 
1930 	if (wbio->have_ioref)
1931 		percpu_ref_put(&ca->io_ref);
1932 
1933 	if (parent) {
1934 		bio_put(bio);
1935 		bio_endio(&parent->bio);
1936 		return;
1937 	}
1938 
1939 	clear_btree_node_write_in_flight_inner(b);
1940 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
1941 	INIT_WORK(&wb->work, btree_node_write_work);
1942 	queue_work(c->btree_io_complete_wq, &wb->work);
1943 }
1944 
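/*
 * Validate the node's key and the bset before it goes to disk; on failure the
 * filesystem is marked inconsistent and the write is aborted.
 */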
1945 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1946 				   struct bset *i, unsigned sectors)
1947 {
1948 	struct printbuf buf = PRINTBUF;
1949 	bool saw_error;
1950 	int ret;
1951 
1952 	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
1953 				BKEY_TYPE_btree, WRITE, &buf);
1954 
1955 	if (ret)
1956 		bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
1957 	printbuf_exit(&buf);
1958 	if (ret)
1959 		return ret;
1960 
1961 	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
1962 		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
1963 	if (ret) {
1964 		bch2_inconsistent_error(c);
1965 		dump_stack();
1966 	}
1967 
1968 	return ret;
1969 }
1970 
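/*
 * Runs from a workqueue: the pointers in the key give the start of the node on
 * disk, so add this write's offset within the node to each pointer on a stack
 * copy of the key before submitting to all replicas.
 */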
1971 static void btree_write_submit(struct work_struct *work)
1972 {
1973 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
1974 	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1975 
1976 	bkey_copy(&tmp.k, &wbio->key);
1977 
1978 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
1979 		ptr->offset += wbio->sector_offset;
1980 
1981 	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
1982 				  &tmp.k, false);
1983 }
1984 
1985 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
1986 {
1987 	struct btree_write_bio *wbio;
1988 	struct bset *i;
1989 	struct btree_node *bn = NULL;
1990 	struct btree_node_entry *bne = NULL;
1991 	struct sort_iter_stack sort_iter;
1992 	struct nonce nonce;
1993 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
1994 	u64 seq = 0;
1995 	bool used_mempool;
1996 	unsigned long old, new;
1997 	bool validate_before_checksum = false;
1998 	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
1999 	void *data;
2000 	int ret;
2001 
2002 	if (flags & BTREE_WRITE_ALREADY_STARTED)
2003 		goto do_write;
2004 
2005 	/*
2006 	 * We may only have a read lock on the btree node - the dirty bit is our
2007 	 * "lock" against racing with other threads that may be trying to start
2008 	 * a write: we do a write iff we clear the dirty bit. Since setting the
2009 	 * dirty bit requires a write lock, we can't race with other threads
2010 	 * redirtying it:
2011 	 */
2012 	do {
2013 		old = new = READ_ONCE(b->flags);
2014 
2015 		if (!(old & (1 << BTREE_NODE_dirty)))
2016 			return;
2017 
2018 		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
2019 		    !(old & (1 << BTREE_NODE_need_write)))
2020 			return;
2021 
2022 		if (old &
2023 		    ((1 << BTREE_NODE_never_write)|
2024 		     (1 << BTREE_NODE_write_blocked)))
2025 			return;
2026 
2027 		if (b->written &&
2028 		    (old & (1 << BTREE_NODE_will_make_reachable)))
2029 			return;
2030 
2031 		if (old & (1 << BTREE_NODE_write_in_flight))
2032 			return;
2033 
2034 		if (flags & BTREE_WRITE_ONLY_IF_NEED)
2035 			type = new & BTREE_WRITE_TYPE_MASK;
2036 		new &= ~BTREE_WRITE_TYPE_MASK;
2037 
2038 		new &= ~(1 << BTREE_NODE_dirty);
2039 		new &= ~(1 << BTREE_NODE_need_write);
2040 		new |=  (1 << BTREE_NODE_write_in_flight);
2041 		new |=  (1 << BTREE_NODE_write_in_flight_inner);
2042 		new |=  (1 << BTREE_NODE_just_written);
2043 		new ^=  (1 << BTREE_NODE_write_idx);
2044 	} while (cmpxchg_acquire(&b->flags, old, new) != old);
2045 
2046 	if (new & (1U << BTREE_NODE_need_write))
2047 		return;
2048 do_write:
2049 	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
2050 
2051 	atomic_dec(&c->btree_cache.dirty);
2052 
2053 	BUG_ON(btree_node_fake(b));
2054 	BUG_ON((b->will_make_reachable != 0) != !b->written);
2055 
2056 	BUG_ON(b->written >= btree_sectors(c));
2057 	BUG_ON(b->written & (block_sectors(c) - 1));
2058 	BUG_ON(bset_written(b, btree_bset_last(b)));
2059 	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
2060 	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
2061 
2062 	bch2_sort_whiteouts(c, b);
2063 
2064 	sort_iter_stack_init(&sort_iter, b);
2065 
2066 	bytes = !b->written
2067 		? sizeof(struct btree_node)
2068 		: sizeof(struct btree_node_entry);
2069 
2070 	bytes += b->whiteout_u64s * sizeof(u64);
2071 
2072 	for_each_bset(b, t) {
2073 		i = bset(b, t);
2074 
2075 		if (bset_written(b, i))
2076 			continue;
2077 
2078 		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
2079 		sort_iter_add(&sort_iter.iter,
2080 			      btree_bkey_first(b, t),
2081 			      btree_bkey_last(b, t));
2082 		seq = max(seq, le64_to_cpu(i->journal_seq));
2083 	}
2084 
2085 	BUG_ON(b->written && !seq);
2086 
2087 	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
2088 	bytes += 8;
2089 
2090 	/* buffer must be a multiple of the block size */
2091 	bytes = round_up(bytes, block_bytes(c));
2092 
2093 	data = btree_bounce_alloc(c, bytes, &used_mempool);
2094 
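	/*
	 * A node's first write includes the full btree_node header; subsequent
	 * writes append a btree_node_entry at the current write offset:
	 */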
2095 	if (!b->written) {
2096 		bn = data;
2097 		*bn = *b->data;
2098 		i = &bn->keys;
2099 	} else {
2100 		bne = data;
2101 		bne->keys = b->data->keys;
2102 		i = &bne->keys;
2103 	}
2104 
2105 	i->journal_seq	= cpu_to_le64(seq);
2106 	i->u64s		= 0;
2107 
2108 	sort_iter_add(&sort_iter.iter,
2109 		      unwritten_whiteouts_start(b),
2110 		      unwritten_whiteouts_end(b));
2111 	SET_BSET_SEPARATE_WHITEOUTS(i, false);
2112 
2113 	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
2114 	le16_add_cpu(&i->u64s, u64s);
2115 
2116 	b->whiteout_u64s = 0;
2117 
2118 	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2119 
2120 	set_needs_whiteout(i, false);
2121 
2122 	/* do we have data to write? */
2123 	if (b->written && !i->u64s)
2124 		goto nowrite;
2125 
2126 	bytes_to_write = vstruct_end(i) - data;
2127 	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2128 
2129 	if (!b->written &&
2130 	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
2131 		BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
2132 
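	/* zero the padding, so we don't write uninitialized memory to disk: */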
2133 	memset(data + bytes_to_write, 0,
2134 	       (sectors_to_write << 9) - bytes_to_write);
2135 
2136 	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2137 	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2138 	BUG_ON(i->seq != b->data->keys.seq);
2139 
2140 	i->version = cpu_to_le16(c->sb.version);
2141 	SET_BSET_OFFSET(i, b->written);
2142 	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2143 
2144 	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2145 		validate_before_checksum = true;
2146 
2147 	/* validate_bset will be modifying: */
2148 	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2149 		validate_before_checksum = true;
2150 
2151 	/* if we're going to be encrypting, check metadata validity first: */
2152 	if (validate_before_checksum &&
2153 	    validate_bset_for_write(c, b, i, sectors_to_write))
2154 		goto err;
2155 
2156 	ret = bset_encrypt(c, i, b->written << 9);
2157 	if (bch2_fs_fatal_err_on(ret, c,
2158 			"encrypting btree node: %s", bch2_err_str(ret)))
2159 		goto err;
2160 
2161 	nonce = btree_nonce(i, b->written << 9);
2162 
2163 	if (bn)
2164 		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2165 	else
2166 		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2167 
2168 	/* if we're not encrypting, check metadata after checksumming: */
2169 	if (!validate_before_checksum &&
2170 	    validate_bset_for_write(c, b, i, sectors_to_write))
2171 		goto err;
2172 
2173 	/*
2174 	 * We handle btree write errors by immediately halting the journal -
2175 	 * after we've done that, we can't issue any subsequent btree writes
2176 	 * because they might have pointers to new nodes that failed to write.
2177 	 *
2178 	 * Furthermore, there's no point in doing any more btree writes because
2179 	 * with the journal stopped, we're never going to update the journal to
2180 	 * reflect that those writes were done and the data flushed from the
2181 	 * journal:
2182 	 *
2183 	 * Also on journal error, the pending write may have updates that were
2184 	 * never journalled (interior nodes, see btree_update_nodes_written()) -
2185 	 * it's critical that we don't do the write in that case, as otherwise we
2186 	 * will have updates visible that weren't in the journal:
2187 	 *
2188 	 * Make sure to update b->written so bch2_btree_init_next() doesn't
2189 	 * break:
2190 	 */
2191 	if (bch2_journal_error(&c->journal) ||
2192 	    c->opts.nochanges)
2193 		goto err;
2194 
2195 	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2196 
2197 	wbio = container_of(bio_alloc_bioset(NULL,
2198 				buf_pages(data, sectors_to_write << 9),
2199 				REQ_OP_WRITE|REQ_META,
2200 				GFP_NOFS,
2201 				&c->btree_bio),
2202 			    struct btree_write_bio, wbio.bio);
2203 	wbio_init(&wbio->wbio.bio);
2204 	wbio->data			= data;
2205 	wbio->data_bytes		= bytes;
2206 	wbio->sector_offset		= b->written;
2207 	wbio->wbio.c			= c;
2208 	wbio->wbio.used_mempool		= used_mempool;
2209 	wbio->wbio.first_btree_write	= !b->written;
2210 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
2211 	wbio->wbio.bio.bi_private	= b;
2212 
2213 	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2214 
2215 	bkey_copy(&wbio->key, &b->key);
2216 
2217 	b->written += sectors_to_write;
2218 
2219 	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2220 		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2221 			cpu_to_le16(b->written);
2222 
2223 	atomic64_inc(&c->btree_write_stats[type].nr);
2224 	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2225 
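	/*
	 * Actual submission is punted to a workqueue - presumably so that IO
	 * isn't issued from contexts that may be holding btree locks:
	 */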
2226 	INIT_WORK(&wbio->work, btree_write_submit);
2227 	queue_work(c->io_complete_wq, &wbio->work);
2228 	return;
2229 err:
2230 	set_btree_node_noevict(b);
2231 	b->written += sectors_to_write;
2232 nowrite:
2233 	btree_bounce_free(c, bytes, used_mempool, data);
2234 	__btree_node_write_done(c, b);
2235 }
2236 
2237 /*
2238  * Work that must be done with write lock held:
2239  */
2240 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2241 {
2242 	bool invalidated_iter = false;
2243 	struct btree_node_entry *bne;
2244 
2245 	if (!btree_node_just_written(b))
2246 		return false;
2247 
2248 	BUG_ON(b->whiteout_u64s);
2249 
2250 	clear_btree_node_just_written(b);
2251 
2252 	/*
2253 	 * Note: immediately after write, bset_written() doesn't work - the
2254 	 * amount of data we had to write after compaction might have been
2255 	 * smaller than the offset of the last bset.
2256 	 *
2257 	 * However, we know that all bsets have been written here, as long as
2258 	 * we're still holding the write lock:
2259 	 */
2260 
2261 	/*
2262 	 * XXX: decide if we really want to unconditionally sort down to a
2263 	 * single bset:
2264 	 */
2265 	if (b->nsets > 1) {
2266 		btree_node_sort(c, b, 0, b->nsets);
2267 		invalidated_iter = true;
2268 	} else {
2269 		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2270 	}
2271 
2272 	for_each_bset(b, t)
2273 		set_needs_whiteout(bset(b, t), true);
2274 
2275 	bch2_btree_verify(c, b);
2276 
2277 	/*
2278 	 * If later we don't unconditionally sort down to a single bset, we have
2279 	 * to ensure this is still true:
2280 	 */
2281 	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2282 
2283 	bne = want_new_bset(c, b);
2284 	if (bne)
2285 		bch2_bset_init_next(b, bne);
2286 
2287 	bch2_btree_build_aux_trees(b);
2288 
2289 	return invalidated_iter;
2290 }
2291 
2292 /*
2293  * Use this one if the caller has the node locked; read locks are upgraded to intent when possible:
2294  */
2295 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2296 			   enum six_lock_type lock_type_held,
2297 			   unsigned flags)
2298 {
2299 	if (lock_type_held == SIX_LOCK_intent ||
2300 	    (lock_type_held == SIX_LOCK_read &&
2301 	     six_lock_tryupgrade(&b->c.lock))) {
2302 		__bch2_btree_node_write(c, b, flags);
2303 
2304 		/* don't cycle lock unnecessarily: */
2305 		if (btree_node_just_written(b) &&
2306 		    six_trylock_write(&b->c.lock)) {
2307 			bch2_btree_post_write_cleanup(c, b);
2308 			six_unlock_write(&b->c.lock);
2309 		}
2310 
2311 		if (lock_type_held == SIX_LOCK_read)
2312 			six_lock_downgrade(&b->c.lock);
2313 	} else {
2314 		__bch2_btree_node_write(c, b, flags);
2315 		if (lock_type_held == SIX_LOCK_write &&
2316 		    btree_node_just_written(b))
2317 			bch2_btree_post_write_cleanup(c, b);
2318 	}
2319 }
2320 
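/*
 * Walk the btree node hash table under RCU; whenever we find a node with the
 * given flag set, drop RCU, sleep until the bit clears, and restart the walk,
 * since the table may have changed while we slept.
 */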
2321 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2322 {
2323 	struct bucket_table *tbl;
2324 	struct rhash_head *pos;
2325 	struct btree *b;
2326 	unsigned i;
2327 	bool ret = false;
2328 restart:
2329 	rcu_read_lock();
2330 	for_each_cached_btree(b, c, tbl, i, pos)
2331 		if (test_bit(flag, &b->flags)) {
2332 			rcu_read_unlock();
2333 			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2334 			ret = true;
2335 			goto restart;
2336 		}
2337 	rcu_read_unlock();
2338 
2339 	return ret;
2340 }
2341 
2342 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2343 {
2344 	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2345 }
2346 
2347 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2348 {
2349 	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2350 }
2351 
2352 static const char * const bch2_btree_write_types[] = {
2353 #define x(t, n) [n] = #t,
2354 	BCH_BTREE_WRITE_TYPES()
2355 	NULL
2356 };
2357 
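/* For each write type: number of writes, and mean bytes written per write. */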
2358 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2359 {
2360 	printbuf_tabstop_push(out, 20);
2361 	printbuf_tabstop_push(out, 10);
2362 
2363 	prt_printf(out, "\tnr\tsize\n");
2364 
2365 	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2366 		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
2367 		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);
2368 
2369 		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
2370 		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2371 		prt_newline(out);
2372 	}
2373 }
2374