xref: /linux/fs/bcachefs/btree_io.c (revision c2a96b7f187fb6a455836d4a6e113947ff11de97)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{
	prt_printf(out, "btree=%s l=%u seq %llx\n",
		   bch2_btree_id_str(BTREE_NODE_ID(bn)),
		   (unsigned) BTREE_NODE_LEVEL(bn), bn->keys.seq);
	prt_str(out, "min: ");
	bch2_bpos_to_text(out, bn->min_key);
	prt_newline(out);
	prt_str(out, "max: ");
	bch2_bpos_to_text(out, bn->max_key);
}

void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight_inner(b);
	clear_btree_node_write_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_p_next(start);
	     k != end;
	     p = k, k = bkey_p_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
	}
#endif
}
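
/*
 * Note: comparing l.p against bkey_start_pos(&r) rather than r.p means the
 * assertion above also catches overlapping extents, not just out-of-order
 * or duplicate keys.
 */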

static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
		k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		kvfree(p);
}

static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > c->opts.btree_node_size);

	*used_mempool = false;
	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
	}
	memalloc_nofs_restore(flags);
	return p;
}
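
/*
 * Usage sketch (illustrative, mirroring the callers below): the used_mempool
 * flag from btree_bounce_alloc() must be threaded through to
 * btree_bounce_free() so the buffer is returned to the right place:
 *
 *	bool used_mempool = false;
 *	void *buf = btree_bounce_alloc(c, bytes, &used_mempool);
 *	... use buf as scratch space for sorting/compacting ...
 *	btree_bounce_free(c, bytes, used_mempool, buf);
 *
 * The GFP_NOWAIT attempt may fail without sleeping; the mempool fallback can
 * sleep but will eventually succeed, and memalloc_nofs_save() keeps reclaim
 * from recursing back into the filesystem while we allocate.
 */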

static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

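		/*
		 * Sift down: first descend along the path of larger children,
		 * then walk back up to where ptrs[a] actually belongs:
		 */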
		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
					    ptrs[c],
					    ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
				       ptrs[a],
				       ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
		}
	}
}

static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(b);
	     k != unwritten_whiteouts_end(b);
	     k = bkey_p_next(k))
		*--ptrs = k;

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_p_copy(k, *ptrs);
		k = bkey_p_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

	memcpy_u64s(unwritten_whiteouts_start(b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}

static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (src != dst)
			ret = true;

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start	= btree_bkey_first(b, t);
		end	= btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_p_next(k);

			if (!bkey_deleted(k)) {
				bkey_p_copy(out, k);
				out = bkey_p_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return bch2_drop_whiteouts(b, mode);
}

static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx)
{
	struct btree_node *out;
	struct sort_iter_stack sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_stack_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_buf_bytes(b)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_buf_bytes(b));
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, so we can just swap buffers instead of doing a big
		 * memcpy():
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
			 struct btree *dst,
			 struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	nr = bch2_sort_repack(btree_bset_first(dst),
			src, &src_iter,
			&dst->format,
			true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}

/*
 * We're about to add another bset to the btree node, so if there are currently
 * too many bsets, sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx, b->nsets);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx);
		ret = true;
	}

	return ret;
}
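
/*
 * Example (illustrative): with nsets == 3 and only set 2 unwritten,
 * unwritten_idx == 2, so the two written bsets 0..1 are sorted together and
 * the unwritten one is left alone; with all three bsets unwritten,
 * unwritten_idx == 0 and all three are sorted down into one.
 */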

void bch2_btree_build_aux_trees(struct btree *b)
{
	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
	unsigned mid_u64s_bits =
		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}
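
/*
 * Worked example (illustrative numbers, not the actual constants): if
 * btree_max_u64s(c) were ~2^15 u64s and BTREE_WRITE_SET_U64s_BITS were 9,
 * then mid_u64s_bits = (15 + 9) / 2 = 12, i.e. we compact once the middle
 * bset exceeds 2^12 u64s - exactly the geometric mean sqrt(2^15 * 2^9).
 */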

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * If sorting was required (which invalidates iterators), affected iterators
 * are reinitialized via bch2_trans_node_reinit_iter().
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
	BUG_ON(bset_written(b, bset(b, &b->set[1])));
	BUG_ON(btree_node_just_written(b));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b) &&
	    should_compact_all(c, b)) {
		bch2_btree_node_write(c, b, SIX_LOCK_write,
				      BTREE_WRITE_init_next_bset);
		reinit_iter = true;
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	if (reinit_iter)
		bch2_trans_node_reinit_iter(trans, b);
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  struct btree *b, struct bset *i, struct bkey_packed *k,
			  unsigned offset, int write)
{
	prt_printf(out, bch2_log_msg(c, "%s"),
		   write == READ
		   ? "error validating btree node "
		   : "corrupt btree node before write ");
	if (ca)
		prt_printf(out, "on %s ", ca->name);
	prt_printf(out, "at btree ");
	bch2_btree_pos_to_text(out, c, b);

	printbuf_indent_add(out, 2);

	prt_printf(out, "\nnode offset %u/%u",
		   b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
	if (i)
		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
	if (k)
		prt_printf(out, " bset byte offset %lu",
			   (unsigned long)(void *)k -
			   ((unsigned long)(void *)i & ~511UL));
	prt_str(out, ": ");
}

__printf(10, 11)
static int __btree_err(int ret,
		       struct bch_fs *c,
		       struct bch_dev *ca,
		       struct btree *b,
		       struct bset *i,
		       struct bkey_packed *k,
		       int write,
		       bool have_retry,
		       enum bch_sb_error_id err_type,
		       const char *fmt, ...)
{
	struct printbuf out = PRINTBUF;
	bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
	va_list args;

	btree_err_msg(&out, c, ca, b, i, k, b->written, write);

	va_start(args, fmt);
	prt_vprintf(&out, fmt, args);
	va_end(args);

	if (write == WRITE) {
		bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = c->opts.errors == BCH_ON_ERROR_continue
			? 0
			: -BCH_ERR_fsck_errors_not_fixed;
		goto out;
	}

	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
		ret = -BCH_ERR_btree_node_read_err_fixable;
	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
		ret = -BCH_ERR_btree_node_read_err_bad_node;

	if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
		bch2_sb_error_count(c, err_type);

	switch (ret) {
	case -BCH_ERR_btree_node_read_err_fixable:
		ret = !silent
			? __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf)
			: -BCH_ERR_fsck_fix;
		if (ret != -BCH_ERR_fsck_fix &&
		    ret != -BCH_ERR_fsck_ignore)
			goto fsck_err;
		ret = -BCH_ERR_fsck_fix;
		break;
	case -BCH_ERR_btree_node_read_err_want_retry:
	case -BCH_ERR_btree_node_read_err_must_retry:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		break;
	case -BCH_ERR_btree_node_read_err_bad_node:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = bch2_topology_error(c);
		break;
	case -BCH_ERR_btree_node_read_err_incompatible:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = -BCH_ERR_fsck_errors_not_fixed;
		break;
	default:
		BUG();
	}
out:
fsck_err:
	printbuf_exit(&out);
	return ret;
}

#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)		\
({									\
	int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry,	\
			       BCH_FSCK_ERR_##_err_type,		\
			       msg, ##__VA_ARGS__);			\
									\
	if (_ret != -BCH_ERR_fsck_fix) {				\
		ret = _ret;						\
		goto fsck_err;						\
	}								\
									\
	*saw_error = true;						\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
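
/*
 * Usage note: btree_err()/btree_err_on() expect `write', `have_retry',
 * `saw_error' and a `ret' variable plus an fsck_err label in the calling
 * scope; any result other than -BCH_ERR_fsck_fix sets ret and jumps to
 * fsck_err, otherwise validation continues with *saw_error set.
 */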

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k;

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
				break;

		if (k != i->start) {
			unsigned shift = (u64 *) k - (u64 *) i->start;

			memmove_u64s_down(i->start, k,
					  (u64 *) vstruct_end(i) - (u64 *) k);
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
			set_btree_bset_end(b, t);
		}

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
				break;

		if (k != vstruct_last(i)) {
			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
			set_btree_bset_end(b, t);
		}
	}

	/*
	 * Always rebuild search trees: eytzinger search tree nodes directly
	 * depend on the values of min/max key:
	 */
	bch2_bset_set_no_aux_tree(b, b->set);
	bch2_btree_build_aux_trees(b);
	b->nr = bch2_btree_node_count_keys(b);

	struct bkey_s_c k;
	struct bkey unpacked;
	struct btree_node_iter iter;
	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
	}
}
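
/*
 * Example (illustrative): if topology repair moved min_key forward past some
 * existing keys, the first loop above finds the first key >= min_key and
 * memmove()s the remainder down over the dropped prefix; keys past max_key
 * are dropped by simply truncating i->u64s.
 */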

static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned offset, unsigned sectors,
			 int write, bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	btree_err_on(!bch2_version_compatible(version),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "unsupported bset version %u.%u",
		     BCH_VERSION_MAJOR(version),
		     BCH_VERSION_MINOR(version));

	if (btree_err_on(version < c->sb.version_min,
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_older_than_sb_min,
			 "bset version %u older than superblock version_min %u",
			 version, c->sb.version_min)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version_min = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	if (btree_err_on(BCH_VERSION_MAJOR(version) >
			 BCH_VERSION_MAJOR(c->sb.version),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_newer_than_sb,
			 "bset version %u newer than superblock version %u",
			 version, c->sb.version)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "BSET_SEPARATE_WHITEOUTS no longer supported");

	if (!write &&
	    btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, ca, b, i, NULL,
			 bset_past_end_of_btree_node,
			 "bset past end of btree node (offset %u len %u but written %u)",
			 offset, sectors, ptr_written ?: btree_sectors(c))) {
		i->u64s = 0;
		ret = 0;
		goto out;
	}

	btree_err_on(offset && !i->u64s,
		     -BCH_ERR_btree_node_read_err_fixable,
		     c, ca, b, i, NULL,
		     bset_empty,
		     "empty bset");

	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
		     -BCH_ERR_btree_node_read_err_want_retry,
		     c, ca, b, i, NULL,
		     bset_wrong_sector_offset,
		     "bset at wrong sector offset");

	if (!offset) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			/* XXX endianness */
			btree_err_on(bp->seq != bn->keys.seq,
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     bset_bad_seq,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_btree,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_level,
			     "incorrect level");

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bad_min_key,
				     "incorrect min_key: got %s should be %s",
				     (printbuf_reset(&buf1),
				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
				     (printbuf_reset(&buf2),
				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
		}

		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_max_key,
			     "incorrect max key %s",
			     (printbuf_reset(&buf1),
			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
			     -BCH_ERR_btree_node_read_err_bad_node,
			     c, ca, b, i, NULL,
			     btree_node_bad_format,
			     "invalid bkey format: %s\n  %s", buf1.buf,
			     (printbuf_reset(&buf2),
			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
		printbuf_reset(&buf1);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
out:
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static int bset_key_invalid(struct bch_fs *c, struct btree *b,
			    struct bkey_s_c k,
			    bool updated_range, int rw,
			    struct printbuf *err)
{
	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
}

static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
			 struct bset *i, struct bkey_packed *k)
{
	if (bkey_p_next(k) > vstruct_last(i))
		return false;

	if (k->format > KEY_FORMAT_CURRENT)
		return false;

	if (!bkeyp_u64s_valid(&b->format, k))
		return false;

	struct printbuf buf = PRINTBUF;
	struct bkey tmp;
	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
	/* __bch2_bkey_invalid() returns nonzero when the key is bad: */
	bool invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b), READ, &buf);
	printbuf_exit(&buf);
	return !invalid;
}

static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			 struct bset *i, int write,
			 bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	struct printbuf buf = PRINTBUF;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	int ret = 0;

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;
		unsigned next_good_key;

		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_past_bset_end,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_format,
				 "invalid bkey format %u", k->format))
			goto drop_this_key;

		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_u64s,
				 "bad k->u64s %u (min %u max %u)", k->u64s,
				 bkeyp_key_u64s(&b->format, k),
				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
			goto drop_this_key;

		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
				    BSET_BIG_ENDIAN(i), write,
				    &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		printbuf_reset(&buf);
		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
			printbuf_reset(&buf);
			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
			prt_printf(&buf, "\n  ");
			bch2_bkey_val_to_text(&buf, c, u.s_c);

			btree_err(-BCH_ERR_btree_node_read_err_fixable,
				  c, NULL, b, i, k,
				  btree_node_bad_bkey,
				  "invalid bkey: %s", buf.buf);
			goto drop_this_key;
		}

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
				    BSET_BIG_ENDIAN(i), write,
				    &b->format, k);

		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
			struct bkey up = bkey_unpack_key(b, prev);

			printbuf_reset(&buf);
			prt_printf(&buf, "keys out of order: ");
			bch2_bkey_to_text(&buf, &up);
			prt_printf(&buf, " > ");
			bch2_bkey_to_text(&buf, u.k);

			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
				      c, NULL, b, i, k,
				      btree_node_bkey_out_of_order,
				      "%s", buf.buf))
				goto drop_this_key;
		}

		prev = k;
		k = bkey_p_next(k);
		continue;
drop_this_key:
		next_good_key = k->u64s;

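		/*
		 * First guess: trust k->u64s and try the key right after this
		 * one; if that doesn't parse, scan forward one u64 at a time
		 * for something that does:
		 */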
		if (!next_good_key ||
		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
		     version >= bcachefs_metadata_version_snapshot)) {
			/*
			 * only do scanning if bch2_bkey_compat() has nothing to
			 * do
			 */

			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
				for (next_good_key = 1;
				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
				     next_good_key++)
					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
						goto got_good_key;
			}

			/*
			 * didn't find a good key, have to truncate the rest of
			 * the bset
			 */
			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
		}
got_good_key:
		le16_add_cpu(&i->u64s, -next_good_key);
		memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b, bool have_retry, bool *saw_error)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bset *i;
	bool used_mempool, blacklisted;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	unsigned u64s;
	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
	u64 max_journal_seq = 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0, retry_read = 0, write = READ;
	u64 start_time = local_clock();

	b->version_ondisk = U16_MAX;
	/* We might get called multiple times on read retry: */
	b->written = 0;

	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);

	if (bch2_meta_read_fault("btree"))
		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
			  c, ca, b, NULL, NULL,
			  btree_node_fault_injected,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     -BCH_ERR_btree_node_read_err_must_retry,
		     c, ca, b, NULL, NULL,
		     btree_node_bad_magic,
		     "bad magic: want %llx, got %llx",
		     bset_magic(c), le64_to_cpu(b->data->magic));

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		bch2_bpos_to_text(&buf, b->data->min_key);
		prt_str(&buf, "-");
		bch2_bpos_to_text(&buf, b->data->max_key);

		btree_err_on(b->data->keys.seq != bp->seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "got wrong btree node: got\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	} else {
		btree_err_on(!b->data->keys.seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "bad btree header: seq 0\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	}

	while (b->written < (ptr_written ?: btree_sectors(c))) {
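		/*
		 * Each iteration validates one bset: the first lives in the
		 * node header itself (b->data->keys), subsequent ones in the
		 * btree_node_entry at write_block(b):
		 */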
		unsigned sectors;
		struct nonce nonce;
		bool first = !b->written;
		bool csum_bad;

		if (!b->written) {
			i = &b->data->keys;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_unknown_csum,
				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);

			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
			csum_bad = bch2_crc_cmp(b->data->csum, csum);
			if (csum_bad)
				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

			btree_err_on(csum_bad,
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_bad_csum,
				     "%s",
				     (printbuf_reset(&buf),
				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
				      buf.buf));

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
					"decrypting btree node: %s", bch2_err_str(ret)))
				goto fsck_err;

			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
				     -BCH_ERR_btree_node_read_err_incompatible,
				     c, NULL, b, NULL, NULL,
				     btree_node_unsupported_version,
				     "btree node does not have NEW_EXTENT_OVERWRITE set");

			sectors = vstruct_sectors(b->data, c->block_bits);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_unknown_csum,
				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
			csum_bad = bch2_crc_cmp(bne->csum, csum);
			if (ca && csum_bad)
				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

			btree_err_on(csum_bad,
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_bad_csum,
				     "%s",
				     (printbuf_reset(&buf),
				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
				      buf.buf));

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
					"decrypting btree node: %s", bch2_err_str(ret)))
				goto fsck_err;

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		b->version_ondisk = min(b->version_ondisk,
					le16_to_cpu(i->version));

		ret = validate_bset(c, ca, b, i, b->written, sectors,
				    READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     bset_blacklisted_journal_seq,
			     "first btree node bset has blacklisted journal seq (%llu)",
			     le64_to_cpu(i->journal_seq));

		btree_err_on(blacklisted && ptr_written,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     first_bset_blacklisted_journal_seq,
			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
			     le64_to_cpu(i->journal_seq),
			     b->written, b->written + sectors, ptr_written);

		b->written += sectors;

		if (blacklisted && !first)
			continue;

		sort_iter_add(iter,
			      vstruct_idx(i, 0),
			      vstruct_last(i));

		max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq));
	}

	if (ptr_written) {
		btree_err_on(b->written < ptr_written,
			     -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_data_missing,
			     "btree node data missing: expected %u sectors, found %u",
			     ptr_written, b->written);
	} else {
		for (bne = write_block(b);
		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
		     bne = (void *) bne + block_bytes(c))
			btree_err_on(bne->keys.seq == b->data->keys.seq &&
				     !bch2_journal_seq_is_blacklisted(c,
								      le64_to_cpu(bne->keys.journal_seq),
								      true),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bset_after_end,
				     "found bset signature after last bset");
	}

	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
	sorted->keys.u64s = 0;

	set_btree_bset(b, b->set, &b->data->keys);

	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;
	b->data->keys.journal_seq = cpu_to_le64(max_journal_seq);

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);

	if (updated_range)
		bch2_btree_node_drop_keys_outside_node(b);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);

		printbuf_reset(&buf);

		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
		    (bch2_inject_invalid_keys &&
		     !bversion_cmp(u.k->version, MAX_VERSION))) {
			printbuf_reset(&buf);

			prt_printf(&buf, "invalid bkey: ");
			bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
			prt_printf(&buf, "\n  ");
			bch2_bkey_val_to_text(&buf, c, u.s_c);

			btree_err(-BCH_ERR_btree_node_read_err_fixable,
				  c, NULL, b, i, k,
				  btree_node_bad_bkey,
				  "%s", buf.buf);

			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_p_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			continue;
		}

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_p_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	rcu_read_lock();
	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
		struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);

		if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
			set_btree_node_need_rewrite(b);
	}
	rcu_read_unlock();

	if (!ptr_written)
		set_btree_node_need_rewrite(b);
out:
	mempool_free(iter, &c->fill_iter);
	printbuf_exit(&buf);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
	return retry_read;
fsck_err:
	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
	    ret == -BCH_ERR_btree_node_read_err_must_retry) {
		retry_read = 1;
	} else {
		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
	}
	goto out;
}

static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c	= rb->c;
	struct bch_dev *ca	= rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
	struct btree *b		= rb->b;
	struct bio *bio		= &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	struct printbuf buf = PRINTBUF;
	bool saw_error = false;
	bool retry = false;
	bool can_retry;

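	/* First pass uses the bio our caller already submitted: */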
	goto start;
	while (1) {
		retry = true;
		bch_info(c, "retrying read");
		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
		rb->have_ioref		= ca != NULL;
		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_buf_bytes(b);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}
start:
		printbuf_reset(&buf);
		bch2_btree_pos_to_text(&buf, c, b);
		bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read,
				   "btree read error %s for %s",
				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
		if (rb->have_ioref)
			percpu_ref_put(&ca->io_ref);
		rb->have_ioref = false;

		bch2_mark_io_failure(&failed, &rb->pick);

		can_retry = bch2_bkey_pick_read_device(c,
				bkey_i_to_s_c(&b->key),
				&failed, &rb->pick) > 0;

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
			if (retry)
				bch_info(c, "retry success");
			break;
		}

		saw_error = true;

		if (!can_retry) {
			set_btree_node_read_error(b);
			bch2_btree_lost_data(c, b->c.btree_id);
			break;
		}
	}

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);

	if (saw_error &&
	    !btree_node_read_error(b) &&
	    c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
		printbuf_reset(&buf);
		bch2_bpos_to_text(&buf, b->key.k.p);
		bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
			 __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);

		bch2_btree_node_rewrite_async(c, b);
	}

	printbuf_exit(&buf);
	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	queue_work(c->btree_read_complete_wq, &rb->work);
}

struct btree_node_read_all {
	struct closure		cl;
	struct bch_fs		*c;
	struct btree		*b;
	unsigned		nr;
	void			*buf[BCH_REPLICAS_MAX];
	struct bio		*bio[BCH_REPLICAS_MAX];
	blk_status_t		err[BCH_REPLICAS_MAX];
};

static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < btree_sectors(c)) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}
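
/*
 * On-disk layout sketch (illustrative): a btree node is a header bset (struct
 * btree_node) followed by zero or more block aligned struct btree_node_entry
 * bsets; data left over from a previous node that used the same bucket is
 * detected by a mismatched keys.seq.
 */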

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < btree_sectors(c)) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}

static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
	closure_type(ra, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	struct printbuf buf = PRINTBUF;
	bool dump_bset_maps = false;
	bool have_retry = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written = 0, written2 = 0;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
	bool _saw_error = false, *saw_error = &_saw_error;

	for (i = 0; i < ra->nr; i++) {
		struct btree_node *bn = ra->buf[i];

		if (ra->err[i])
			continue;

		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
		    (seq && seq != bn->keys.seq))
			continue;

		if (best < 0) {
			best = i;
			written = btree_node_sectors_written(c, bn);
			continue;
		}

		written2 = btree_node_sectors_written(c, ra->buf[i]);
		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_sectors_written_mismatch,
				 "btree node sectors written mismatch: %u != %u",
				 written, written2) ||
		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_bset_after_end,
				 "found bset signature after last bset") ||
		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_data_mismatch,
				 "btree node replicas content mismatch"))
			dump_bset_maps = true;

		if (written2 > written) {
			written = written2;
			best = i;
		}
	}
fsck_err:
	if (dump_bset_maps) {
		for (i = 0; i < ra->nr; i++) {
			struct btree_node *bn = ra->buf[i];
			struct btree_node_entry *bne = NULL;
			unsigned offset = 0, sectors;
			bool gap = false;

			if (ra->err[i])
				continue;

			printbuf_reset(&buf);

			while (offset < btree_sectors(c)) {
				if (!offset) {
					sectors = vstruct_sectors(bn, c->block_bits);
				} else {
					bne = ra->buf[i] + (offset << 9);
					if (bne->keys.seq != bn->keys.seq)
						break;
					sectors = vstruct_sectors(bne, c->block_bits);
				}

				prt_printf(&buf, " %u-%u", offset, offset + sectors);
				if (bne && bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
					prt_printf(&buf, "*");
				offset += sectors;
			}

			while (offset < btree_sectors(c)) {
				bne = ra->buf[i] + (offset << 9);
				if (bne->keys.seq == bn->keys.seq) {
					if (!gap)
						prt_printf(&buf, " GAP");
					gap = true;

					sectors = vstruct_sectors(bne, c->block_bits);
					prt_printf(&buf, " %u-%u", offset, offset + sectors);
					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
						prt_printf(&buf, "*");
				}
				offset++;
			}

			bch_err(c, "replica %u:%s", i, buf.buf);
		}
	}

	if (best >= 0) {
		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
	} else {
		ret = -1;
	}

	if (ret) {
		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
	} else if (*saw_error)
		bch2_btree_node_rewrite_async(c, b);

	for (i = 0; i < ra->nr; i++) {
		mempool_free(ra->buf[i], &c->btree_bounce_pool);
		bio_put(ra->bio[i]);
	}

	closure_debug_destroy(&ra->cl);
	kfree(ra);
	printbuf_exit(&buf);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_all_replicas_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;
	struct btree_node_read_all *ra = rb->ra;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	ra->err[rb->idx] = bio->bi_status;
	closure_put(&ra->cl);
}

/*
 * XXX This allocates multiple times from the same mempools, and can deadlock
 * under sufficient memory pressure (but is only a debug path)
 */
static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
{
	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded pick;
	struct btree_node_read_all *ra;
	unsigned i;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;

	closure_init(&ra->cl, NULL);
	ra->c	= c;
	ra->b	= b;
	ra->nr	= bch2_bkey_nr_ptrs(k);

	for (i = 0; i < ra->nr; i++) {
		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
		ra->bio[i] = bio_alloc_bioset(NULL,
					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
					      REQ_OP_READ|REQ_SYNC|REQ_META,
					      GFP_NOFS,
					      &c->btree_bio);
	}

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
		struct btree_read_bio *rb =
			container_of(ra->bio[i], struct btree_read_bio, bio);
		rb->c			= c;
		rb->b			= b;
		rb->ra			= ra;
		rb->start_time		= local_clock();
		rb->have_ioref		= ca != NULL;
		rb->idx			= i;
		rb->pick		= pick;
		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));

		if (rb->have_ioref) {
			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
				     bio_sectors(&rb->bio));
			bio_set_dev(&rb->bio, ca->disk_sb.bdev);

			closure_get(&ra->cl);
			submit_bio(&rb->bio);
		} else {
			ra->err[i] = BLK_STS_REMOVED;
		}

		i++;
	}

	if (sync) {
		closure_sync(&ra->cl);
		btree_node_read_all_replicas_done(&ra->cl.work);
	} else {
		continue_at(&ra->cl, btree_node_read_all_replicas_done,
			    c->btree_read_complete_wq);
	}

	return 0;
}

void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
			  bool sync)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_and_count(c, btree_node_read, trans, b);

	if (bch2_verify_all_btree_replicas &&
	    !btree_node_read_all_replicas(c, b, sync))
		return;

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick);

	if (ret <= 0) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "btree node read error: no device to read from\n at ");
		bch2_btree_pos_to_text(&buf, c, b);
		bch_err_ratelimited(c, "%s", buf.buf);

		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
			bch2_fatal_error(c);

		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
		clear_btree_node_read_in_flight(b);
		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
		printbuf_exit(&buf);
		return;
	}

	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);

	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_SYNC|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->b			= b;
	rb->ra			= NULL;
	rb->start_time		= local_clock();
	rb->have_ioref		= ca != NULL;
	rb->pick		= pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_end_io		= btree_node_read_endio;
	bch2_bio_map(bio, b->data, btree_buf_bytes(b));

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);
			bch2_latency_acct(ca, rb->start_time, READ);
			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(c->btree_read_complete_wq, &rb->work);
	}
}

static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
				  const struct bkey_i *k, unsigned level)
{
	struct bch_fs *c = trans->c;
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(trans, level != 0);
	bch2_btree_cache_cannibalize_unlock(trans);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	set_btree_node_read_in_flight(b);

	bch2_btree_node_read(trans, b, true);

	if (btree_node_read_error(b)) {
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		mutex_lock(&c->btree_cache.lock);
		list_move(&b->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		ret = -BCH_ERR_btree_node_read_error;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			const struct bkey_i *k, unsigned level)
{
	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
}

static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
				      struct btree_write *w)
{
	unsigned long old, new;

	old = READ_ONCE(b->will_make_reachable);
	do {
		new = old;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while (!try_cmpxchg(&b->will_make_reachable, &old, new));

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}
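
/*
 * Note: b->will_make_reachable stores a struct btree_update pointer with the
 * low bit used as an "update pending" tag; clearing that bit above drops the
 * closure ref that was pinning the interior update.
 */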

static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);
	unsigned long old, new;
	unsigned type = 0;

	bch2_btree_complete_write(c, b, w);

	old = READ_ONCE(b->flags);
	do {
		new = old;

		if ((old & (1U << BTREE_NODE_dirty)) &&
		    (old & (1U << BTREE_NODE_need_write)) &&
		    !(old & (1U << BTREE_NODE_never_write)) &&
		    !(old & (1U << BTREE_NODE_write_blocked)) &&
		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
			new &= ~(1U << BTREE_NODE_dirty);
			new &= ~(1U << BTREE_NODE_need_write);
			new |=  (1U << BTREE_NODE_write_in_flight);
			new |=  (1U << BTREE_NODE_write_in_flight_inner);
			new |=  (1U << BTREE_NODE_just_written);
			new ^=  (1U << BTREE_NODE_write_idx);

			type = new & BTREE_WRITE_TYPE_MASK;
			new &= ~BTREE_WRITE_TYPE_MASK;
		} else {
			new &= ~(1U << BTREE_NODE_write_in_flight);
			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
		}
	} while (!try_cmpxchg(&b->flags, &old, new));

	if (new & (1U << BTREE_NODE_write_in_flight))
		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
	else
		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}
1854 
1855 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1856 {
1857 	struct btree_trans *trans = bch2_trans_get(c);
1858 
1859 	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
1860 	__btree_node_write_done(c, b);
1861 	six_unlock_read(&b->c.lock);
1862 
1863 	bch2_trans_put(trans);
1864 }
1865 
1866 static void btree_node_write_work(struct work_struct *work)
1867 {
1868 	struct btree_write_bio *wbio =
1869 		container_of(work, struct btree_write_bio, work);
1870 	struct bch_fs *c	= wbio->wbio.c;
1871 	struct btree *b		= wbio->wbio.bio.bi_private;
1872 	int ret = 0;
1873 
1874 	btree_bounce_free(c,
1875 		wbio->data_bytes,
1876 		wbio->wbio.used_mempool,
1877 		wbio->data);
1878 
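	/*
	 * Drop pointers to the devices this write failed on; if no pointers
	 * remain, every replica of the node failed to write:
	 */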
1879 	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
1880 		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1881 
1882 	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
1883 		ret = -BCH_ERR_btree_node_write_all_failed;
1884 		goto err;
1885 	}
1886 
1887 	if (wbio->wbio.first_btree_write) {
1888 		if (wbio->wbio.failed.nr) {
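			/* XXX: a failed first write currently gets no special handling */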
1889 
1890 		}
1891 	} else {
1892 		ret = bch2_trans_do(c, NULL, NULL, 0,
1893 			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
1894 					BCH_WATERMARK_interior_updates|
1895 					BCH_TRANS_COMMIT_journal_reclaim|
1896 					BCH_TRANS_COMMIT_no_enospc|
1897 					BCH_TRANS_COMMIT_no_check_rw,
1898 					!wbio->wbio.failed.nr));
1899 		if (ret)
1900 			goto err;
1901 	}
1902 out:
1903 	bio_put(&wbio->wbio.bio);
1904 	btree_node_write_done(c, b);
1905 	return;
1906 err:
1907 	set_btree_node_noevict(b);
1908 	bch2_fs_fatal_err_on(!bch2_err_matches(ret, EROFS), c,
1909 			     "writing btree node: %s", bch2_err_str(ret));
1910 	goto out;
1911 }
1912 
1913 static void btree_node_write_endio(struct bio *bio)
1914 {
1915 	struct bch_write_bio *wbio	= to_wbio(bio);
1916 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
1917 	struct bch_write_bio *orig	= parent ?: wbio;
1918 	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
1919 	struct bch_fs *c		= wbio->c;
1920 	struct btree *b			= wbio->bio.bi_private;
1921 	struct bch_dev *ca		= wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
1922 	unsigned long flags;
1923 
1924 	if (wbio->have_ioref)
1925 		bch2_latency_acct(ca, wbio->submit_time, WRITE);
1926 
1927 	if (!ca ||
1928 	    bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
1929 			       "btree write error: %s",
1930 			       bch2_blk_status_to_str(bio->bi_status)) ||
1931 	    bch2_meta_write_fault("btree")) {
1932 		spin_lock_irqsave(&c->btree_write_error_lock, flags);
1933 		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1934 		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1935 	}
1936 
1937 	if (wbio->have_ioref)
1938 		percpu_ref_put(&ca->io_ref);
1939 
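	/*
	 * Split bios complete into their parent; only the last, original bio
	 * queues the completion work:
	 */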
1940 	if (parent) {
1941 		bio_put(bio);
1942 		bio_endio(&parent->bio);
1943 		return;
1944 	}
1945 
1946 	clear_btree_node_write_in_flight_inner(b);
1947 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
1948 	INIT_WORK(&wb->work, btree_node_write_work);
1949 	queue_work(c->btree_io_complete_wq, &wb->work);
1950 }
1951 
1952 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1953 				   struct bset *i, unsigned sectors)
1954 {
1955 	struct printbuf buf = PRINTBUF;
1956 	bool saw_error;
1957 	int ret;
1958 
1959 	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
1960 				BKEY_TYPE_btree, WRITE, &buf);
1961 
1962 	if (ret)
1963 		bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
1964 	printbuf_exit(&buf);
1965 	if (ret)
1966 		return ret;
1967 
1968 	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
1969 		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
1970 	if (ret) {
1971 		bch2_inconsistent_error(c);
1972 		dump_stack();
1973 	}
1974 
1975 	return ret;
1976 }
1977 
1978 static void btree_write_submit(struct work_struct *work)
1979 {
1980 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
1981 	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1982 
1983 	bkey_copy(&tmp.k, &wbio->key);
1984 
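	/*
	 * The key's pointers point at the start of the node, but writes append
	 * within it: bias the pointers by the offset of this write:
	 */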
1985 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
1986 		ptr->offset += wbio->sector_offset;
1987 
1988 	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
1989 				  &tmp.k, false);
1990 }
1991 
1992 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
1993 {
1994 	struct btree_write_bio *wbio;
1995 	struct bset *i;
1996 	struct btree_node *bn = NULL;
1997 	struct btree_node_entry *bne = NULL;
1998 	struct sort_iter_stack sort_iter;
1999 	struct nonce nonce;
2000 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
2001 	u64 seq = 0;
2002 	bool used_mempool;
2003 	unsigned long old, new;
2004 	bool validate_before_checksum = false;
2005 	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
2006 	void *data;
2007 	int ret;
2008 
2009 	if (flags & BTREE_WRITE_ALREADY_STARTED)
2010 		goto do_write;
2011 
2012 	/*
2013 	 * We may only have a read lock on the btree node - the dirty bit is our
2014 	 * "lock" against racing with other threads that may be trying to start
2015 	 * a write; we only do a write if we successfully clear the dirty bit.
2016 	 * Since setting the dirty bit requires a write lock, we can't race with
2017 	 * other threads redirtying it:
2018 	 */
2019 	old = READ_ONCE(b->flags);
2020 	do {
2021 		new = old;
2022 
2023 		if (!(old & (1 << BTREE_NODE_dirty)))
2024 			return;
2025 
2026 		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
2027 		    !(old & (1 << BTREE_NODE_need_write)))
2028 			return;
2029 
2030 		if (old &
2031 		    ((1 << BTREE_NODE_never_write)|
2032 		     (1 << BTREE_NODE_write_blocked)))
2033 			return;
2034 
2035 		if (b->written &&
2036 		    (old & (1 << BTREE_NODE_will_make_reachable)))
2037 			return;
2038 
2039 		if (old & (1 << BTREE_NODE_write_in_flight))
2040 			return;
2041 
2042 		if (flags & BTREE_WRITE_ONLY_IF_NEED)
2043 			type = new & BTREE_WRITE_TYPE_MASK;
2044 		new &= ~BTREE_WRITE_TYPE_MASK;
2045 
2046 		new &= ~(1 << BTREE_NODE_dirty);
2047 		new &= ~(1 << BTREE_NODE_need_write);
2048 		new |=  (1 << BTREE_NODE_write_in_flight);
2049 		new |=  (1 << BTREE_NODE_write_in_flight_inner);
2050 		new |=  (1 << BTREE_NODE_just_written);
2051 		new ^=  (1 << BTREE_NODE_write_idx);
2052 	} while (!try_cmpxchg_acquire(&b->flags, &old, new));
2053 
2054 	if (new & (1U << BTREE_NODE_need_write))
2055 		return;
2056 do_write:
2057 	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
2058 
2059 	atomic_dec(&c->btree_cache.dirty);
2060 
2061 	BUG_ON(btree_node_fake(b));
2062 	BUG_ON((b->will_make_reachable != 0) != !b->written);
2063 
2064 	BUG_ON(b->written >= btree_sectors(c));
2065 	BUG_ON(b->written & (block_sectors(c) - 1));
2066 	BUG_ON(bset_written(b, btree_bset_last(b)));
2067 	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
2068 	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
2069 
2070 	bch2_sort_whiteouts(c, b);
2071 
2072 	sort_iter_stack_init(&sort_iter, b);
2073 
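	/*
	 * Size the bounce buffer: the node (or bset entry) header, pending
	 * whiteouts, and every bset that hasn't been written yet:
	 */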
2074 	bytes = !b->written
2075 		? sizeof(struct btree_node)
2076 		: sizeof(struct btree_node_entry);
2077 
2078 	bytes += b->whiteout_u64s * sizeof(u64);
2079 
2080 	for_each_bset(b, t) {
2081 		i = bset(b, t);
2082 
2083 		if (bset_written(b, i))
2084 			continue;
2085 
2086 		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
2087 		sort_iter_add(&sort_iter.iter,
2088 			      btree_bkey_first(b, t),
2089 			      btree_bkey_last(b, t));
2090 		seq = max(seq, le64_to_cpu(i->journal_seq));
2091 	}
2092 
2093 	BUG_ON(b->written && !seq);
2094 
2095 	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
2096 	bytes += 8;
2097 
2098 	/* buffer must be a multiple of the block size */
2099 	bytes = round_up(bytes, block_bytes(c));
2100 
2101 	data = btree_bounce_alloc(c, bytes, &used_mempool);
2102 
2103 	if (!b->written) {
2104 		bn = data;
2105 		*bn = *b->data;
2106 		i = &bn->keys;
2107 	} else {
2108 		bne = data;
2109 		bne->keys = b->data->keys;
2110 		i = &bne->keys;
2111 	}
2112 
2113 	i->journal_seq	= cpu_to_le64(seq);
2114 	i->u64s		= 0;
2115 
2116 	sort_iter_add(&sort_iter.iter,
2117 		      unwritten_whiteouts_start(b),
2118 		      unwritten_whiteouts_end(b));
2119 	SET_BSET_SEPARATE_WHITEOUTS(i, false);
2120 
2121 	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
2122 	le16_add_cpu(&i->u64s, u64s);
2123 
2124 	b->whiteout_u64s = 0;
2125 
2126 	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2127 
2128 	set_needs_whiteout(i, false);
2129 
2130 	/* do we have data to write? */
2131 	if (b->written && !i->u64s)
2132 		goto nowrite;
2133 
2134 	bytes_to_write = vstruct_end(i) - data;
2135 	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2136 
2137 	if (!b->written &&
2138 	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
2139 		BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);
2140 
2141 	memset(data + bytes_to_write, 0,
2142 	       (sectors_to_write << 9) - bytes_to_write);
2143 
2144 	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2145 	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2146 	BUG_ON(i->seq != b->data->keys.seq);
2147 
2148 	i->version = cpu_to_le16(c->sb.version);
2149 	SET_BSET_OFFSET(i, b->written);
2150 	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2151 
2152 	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2153 		validate_before_checksum = true;
2154 
2155 	/* validate_bset will be modifying: */
2156 	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2157 		validate_before_checksum = true;
2158 
2159 	/* if we're going to be encrypting, check metadata validity first: */
2160 	if (validate_before_checksum &&
2161 	    validate_bset_for_write(c, b, i, sectors_to_write))
2162 		goto err;
2163 
2164 	ret = bset_encrypt(c, i, b->written << 9);
2165 	if (bch2_fs_fatal_err_on(ret, c,
2166 			"encrypting btree node: %s", bch2_err_str(ret)))
2167 		goto err;
2168 
2169 	nonce = btree_nonce(i, b->written << 9);
2170 
2171 	if (bn)
2172 		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2173 	else
2174 		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2175 
2176 	/* if we're not encrypting, check metadata after checksumming: */
2177 	if (!validate_before_checksum &&
2178 	    validate_bset_for_write(c, b, i, sectors_to_write))
2179 		goto err;
2180 
2181 	/*
2182 	 * We handle btree write errors by immediately halting the journal -
2183 	 * after we've done that, we can't issue any subsequent btree writes
2184 	 * because they might have pointers to new nodes that failed to write.
2185 	 *
2186 	 * Furthermore, there's no point in doing any more btree writes because
2187 	 * with the journal stopped, we're never going to update the journal to
2188 	 * reflect that those writes were done and the data flushed from the
2189 	 * journal:
2190 	 *
2191 	 * Also, on journal error the pending write may have updates that were
2192 	 * never journalled (interior nodes, see btree_update_nodes_written()) -
2193 	 * it's critical that we don't do the write in that case, as otherwise
2194 	 * we'd have updates visible that weren't in the journal:
2195 	 *
2196 	 * Make sure to update b->written so bch2_btree_init_next() doesn't
2197 	 * break:
2198 	 */
2199 	if (bch2_journal_error(&c->journal) ||
2200 	    c->opts.nochanges)
2201 		goto err;
2202 
2203 	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2204 
2205 	wbio = container_of(bio_alloc_bioset(NULL,
2206 				buf_pages(data, sectors_to_write << 9),
2207 				REQ_OP_WRITE|REQ_META,
2208 				GFP_NOFS,
2209 				&c->btree_bio),
2210 			    struct btree_write_bio, wbio.bio);
2211 	wbio_init(&wbio->wbio.bio);
2212 	wbio->data			= data;
2213 	wbio->data_bytes		= bytes;
2214 	wbio->sector_offset		= b->written;
2215 	wbio->wbio.c			= c;
2216 	wbio->wbio.used_mempool		= used_mempool;
2217 	wbio->wbio.first_btree_write	= !b->written;
2218 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
2219 	wbio->wbio.bio.bi_private	= b;
2220 
2221 	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2222 
2223 	bkey_copy(&wbio->key, &b->key);
2224 
2225 	b->written += sectors_to_write;
2226 
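	/*
	 * btree_ptr_v2 keys record sectors_written, so readers know how much
	 * of the node should contain valid data:
	 */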
2227 	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2228 		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2229 			cpu_to_le16(b->written);
2230 
2231 	atomic64_inc(&c->btree_write_stats[type].nr);
2232 	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2233 
2234 	INIT_WORK(&wbio->work, btree_write_submit);
2235 	queue_work(c->btree_write_submit_wq, &wbio->work);
2236 	return;
2237 err:
2238 	set_btree_node_noevict(b);
2239 	b->written += sectors_to_write;
2240 nowrite:
2241 	btree_bounce_free(c, bytes, used_mempool, data);
2242 	__btree_node_write_done(c, b);
2243 }
2244 
2245 /*
2246  * Work that must be done with write lock held:
2247  */
2248 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2249 {
2250 	bool invalidated_iter = false;
2251 	struct btree_node_entry *bne;
2252 
2253 	if (!btree_node_just_written(b))
2254 		return false;
2255 
2256 	BUG_ON(b->whiteout_u64s);
2257 
2258 	clear_btree_node_just_written(b);
2259 
2260 	/*
2261 	 * Note: immediately after write, bset_written() doesn't work - the
2262 	 * amount of data we had to write after compaction might have been
2263 	 * smaller than the offset of the last bset.
2264 	 *
2265 	 * However, we know that all bsets have been written here, as long as
2266 	 * we're still holding the write lock:
2267 	 */
2268 
2269 	/*
2270 	 * XXX: decide if we really want to unconditionally sort down to a
2271 	 * single bset:
2272 	 */
2273 	if (b->nsets > 1) {
2274 		btree_node_sort(c, b, 0, b->nsets);
2275 		invalidated_iter = true;
2276 	} else {
2277 		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2278 	}
2279 
2280 	for_each_bset(b, t)
2281 		set_needs_whiteout(bset(b, t), true);
2282 
2283 	bch2_btree_verify(c, b);
2284 
2285 	/*
2286 	 * If later we don't unconditionally sort down to a single bset, we have
2287 	 * to ensure this is still true:
2288 	 */
2289 	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2290 
2291 	bne = want_new_bset(c, b);
2292 	if (bne)
2293 		bch2_bset_init_next(b, bne);
2294 
2295 	bch2_btree_build_aux_trees(b);
2296 
2297 	return invalidated_iter;
2298 }
2299 
2300 /*
2301  * Use this one if the node is locked (handles read, intent and write locks):
2302  */
2303 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2304 			   enum six_lock_type lock_type_held,
2305 			   unsigned flags)
2306 {
2307 	if (lock_type_held == SIX_LOCK_intent ||
2308 	    (lock_type_held == SIX_LOCK_read &&
2309 	     six_lock_tryupgrade(&b->c.lock))) {
2310 		__bch2_btree_node_write(c, b, flags);
2311 
2312 		/* don't cycle lock unnecessarily: */
2313 		if (btree_node_just_written(b) &&
2314 		    six_trylock_write(&b->c.lock)) {
2315 			bch2_btree_post_write_cleanup(c, b);
2316 			six_unlock_write(&b->c.lock);
2317 		}
2318 
2319 		if (lock_type_held == SIX_LOCK_read)
2320 			six_lock_downgrade(&b->c.lock);
2321 	} else {
2322 		__bch2_btree_node_write(c, b, flags);
2323 		if (lock_type_held == SIX_LOCK_write &&
2324 		    btree_node_just_written(b))
2325 			bch2_btree_post_write_cleanup(c, b);
2326 	}
2327 }
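
/*
 * Illustrative call (hypothetical; assumes we hold an intent lock on b):
 * write out the node only if it's flagged as needing a write:
 *
 *	bch2_btree_node_write(c, b, SIX_LOCK_intent, BTREE_WRITE_ONLY_IF_NEED);
 */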
2328 
2329 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2330 {
2331 	struct bucket_table *tbl;
2332 	struct rhash_head *pos;
2333 	struct btree *b;
2334 	unsigned i;
2335 	bool ret = false;
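	/*
	 * We can't block in wait_on_bit_io() while holding rcu_read_lock(), so
	 * drop RCU before waiting and rescan the hash table from the top:
	 */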
2336 restart:
2337 	rcu_read_lock();
2338 	for_each_cached_btree(b, c, tbl, i, pos)
2339 		if (test_bit(flag, &b->flags)) {
2340 			rcu_read_unlock();
2341 			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2342 			ret = true;
2343 			goto restart;
2344 		}
2345 	rcu_read_unlock();
2346 
2347 	return ret;
2348 }
2349 
2350 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2351 {
2352 	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2353 }
2354 
2355 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2356 {
2357 	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2358 }
2359 
2360 static const char * const bch2_btree_write_types[] = {
2361 #define x(t, n) [n] = #t,
2362 	BCH_BTREE_WRITE_TYPES()
2363 	NULL
2364 };
2365 
2366 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2367 {
2368 	printbuf_tabstop_push(out, 20);
2369 	printbuf_tabstop_push(out, 10);
2370 
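	/* the size column is the mean write size: total bytes / nr of writes */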
2371 	prt_printf(out, "\tnr\tsize\n");
2372 
2373 	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2374 		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
2375 		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);
2376 
2377 		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
2378 		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2379 		prt_newline(out);
2380 	}
2381 }
2382