xref: /linux/fs/bcachefs/btree_io.c (revision 8b8eed05a1c650c27e78bc47d07f7d6c9ba779e8)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_sort.h"
6 #include "btree_cache.h"
7 #include "btree_io.h"
8 #include "btree_iter.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "debug.h"
15 #include "error.h"
16 #include "extents.h"
17 #include "io_write.h"
18 #include "journal_reclaim.h"
19 #include "journal_seq_blacklist.h"
20 #include "recovery.h"
21 #include "super-io.h"
22 #include "trace.h"
23 
24 #include <linux/sched/mm.h>
25 
26 void bch2_btree_node_io_unlock(struct btree *b)
27 {
28 	EBUG_ON(!btree_node_write_in_flight(b));
29 
30 	clear_btree_node_write_in_flight_inner(b);
31 	clear_btree_node_write_in_flight(b);
32 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
33 }
34 
35 void bch2_btree_node_io_lock(struct btree *b)
36 {
37 	bch2_assert_btree_nodes_not_locked();
38 
39 	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
40 			    TASK_UNINTERRUPTIBLE);
41 }
42 
43 void __bch2_btree_node_wait_on_read(struct btree *b)
44 {
45 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
46 		       TASK_UNINTERRUPTIBLE);
47 }
48 
49 void __bch2_btree_node_wait_on_write(struct btree *b)
50 {
51 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
52 		       TASK_UNINTERRUPTIBLE);
53 }
54 
55 void bch2_btree_node_wait_on_read(struct btree *b)
56 {
57 	bch2_assert_btree_nodes_not_locked();
58 
59 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
60 		       TASK_UNINTERRUPTIBLE);
61 }
62 
63 void bch2_btree_node_wait_on_write(struct btree *b)
64 {
65 	bch2_assert_btree_nodes_not_locked();
66 
67 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
68 		       TASK_UNINTERRUPTIBLE);
69 }
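
/*
 * A minimal usage sketch of the helpers above (illustrative only, not
 * called from this file): flush any in-flight write before touching a
 * node's buffers, without holding btree node locks.
 */
static inline void example_flush_in_flight_write(struct btree *b)
{
	if (btree_node_write_in_flight(b))
		bch2_btree_node_wait_on_write(b);
}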
70 
71 static void verify_no_dups(struct btree *b,
72 			   struct bkey_packed *start,
73 			   struct bkey_packed *end)
74 {
75 #ifdef CONFIG_BCACHEFS_DEBUG
76 	struct bkey_packed *k, *p;
77 
78 	if (start == end)
79 		return;
80 
81 	for (p = start, k = bkey_p_next(start);
82 	     k != end;
83 	     p = k, k = bkey_p_next(k)) {
84 		struct bkey l = bkey_unpack_key(b, p);
85 		struct bkey r = bkey_unpack_key(b, k);
86 
87 		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
88 	}
89 #endif
90 }
91 
92 static void set_needs_whiteout(struct bset *i, int v)
93 {
94 	struct bkey_packed *k;
95 
96 	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
97 		k->needs_whiteout = v;
98 }
99 
100 static void btree_bounce_free(struct bch_fs *c, size_t size,
101 			      bool used_mempool, void *p)
102 {
103 	if (used_mempool)
104 		mempool_free(p, &c->btree_bounce_pool);
105 	else
106 		vpfree(p, size);
107 }
108 
109 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
110 				bool *used_mempool)
111 {
112 	unsigned flags = memalloc_nofs_save();
113 	void *p;
114 
115 	BUG_ON(size > btree_bytes(c));
116 
117 	*used_mempool = false;
118 	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
119 	if (!p) {
120 		*used_mempool = true;
121 		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
122 	}
123 	memalloc_nofs_restore(flags);
124 	return p;
125 }
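
/*
 * A minimal round-trip sketch of the bounce buffer helpers above
 * (illustrative only): the used_mempool flag returned by the alloc must be
 * threaded through to the matching free.
 */
static inline void example_bounce_roundtrip(struct bch_fs *c)
{
	bool used_mempool;
	void *p = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);

	memset(p, 0, btree_bytes(c));
	btree_bounce_free(c, btree_bytes(c), used_mempool, p);
}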
126 
127 static void sort_bkey_ptrs(const struct btree *bt,
128 			   struct bkey_packed **ptrs, unsigned nr)
129 {
130 	unsigned n = nr, a = nr / 2, b, c, d;
131 
132 	if (!a)
133 		return;
134 
135 	/* Heap sort: see lib/sort.c: */
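	/*
	 * 'a' counts down while sifting elements into the initial heap; once
	 * it reaches zero, 'n' counts down as the maximum is repeatedly
	 * swapped out to the end of the array:
	 */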
136 	while (1) {
137 		if (a)
138 			a--;
139 		else if (--n)
140 			swap(ptrs[0], ptrs[n]);
141 		else
142 			break;
143 
144 		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
145 			b = bch2_bkey_cmp_packed(bt,
146 					    ptrs[c],
147 					    ptrs[d]) >= 0 ? c : d;
148 		if (d == n)
149 			b = c;
150 
151 		while (b != a &&
152 		       bch2_bkey_cmp_packed(bt,
153 				       ptrs[a],
154 				       ptrs[b]) >= 0)
155 			b = (b - 1) / 2;
156 		c = b;
157 		while (b != a) {
158 			b = (b - 1) / 2;
159 			swap(ptrs[b], ptrs[c]);
160 		}
161 	}
162 }
163 
164 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
165 {
166 	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
167 	bool used_mempool = false;
168 	size_t bytes = b->whiteout_u64s * sizeof(u64);
169 
170 	if (!b->whiteout_u64s)
171 		return;
172 
173 	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
174 
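	/*
	 * The pointer array shares the bounce buffer with the sorted result:
	 * pointers are filled in backwards from the end, and since every
	 * whiteout is at least one u64, the keys copied in from the front
	 * never overwrite pointers that haven't been consumed yet:
	 */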
175 	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
176 
177 	for (k = unwritten_whiteouts_start(c, b);
178 	     k != unwritten_whiteouts_end(c, b);
179 	     k = bkey_p_next(k))
180 		*--ptrs = k;
181 
182 	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
183 
184 	k = new_whiteouts;
185 
186 	while (ptrs != ptrs_end) {
187 		bkey_p_copy(k, *ptrs);
188 		k = bkey_p_next(k);
189 		ptrs++;
190 	}
191 
192 	verify_no_dups(b, new_whiteouts,
193 		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
194 
195 	memcpy_u64s(unwritten_whiteouts_start(c, b),
196 		    new_whiteouts, b->whiteout_u64s);
197 
198 	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
199 }
200 
201 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
202 				bool compacting, enum compact_mode mode)
203 {
204 	if (!bset_dead_u64s(b, t))
205 		return false;
206 
207 	switch (mode) {
208 	case COMPACT_LAZY:
209 		return should_compact_bset_lazy(b, t) ||
210 			(compacting && !bset_written(b, bset(b, t)));
211 	case COMPACT_ALL:
212 		return true;
213 	default:
214 		BUG();
215 	}
216 }
217 
218 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
219 {
220 	struct bset_tree *t;
221 	bool ret = false;
222 
223 	for_each_bset(b, t) {
224 		struct bset *i = bset(b, t);
225 		struct bkey_packed *k, *n, *out, *start, *end;
226 		struct btree_node_entry *src = NULL, *dst = NULL;
227 
228 		if (t != b->set && !bset_written(b, i)) {
229 			src = container_of(i, struct btree_node_entry, keys);
230 			dst = max(write_block(b),
231 				  (void *) btree_bkey_last(b, t - 1));
232 		}
233 
234 		if (src != dst)
235 			ret = true;
236 
237 		if (!should_compact_bset(b, t, ret, mode)) {
238 			if (src != dst) {
239 				memmove(dst, src, sizeof(*src) +
240 					le16_to_cpu(src->keys.u64s) *
241 					sizeof(u64));
242 				i = &dst->keys;
243 				set_btree_bset(b, t, i);
244 			}
245 			continue;
246 		}
247 
248 		start	= btree_bkey_first(b, t);
249 		end	= btree_bkey_last(b, t);
250 
251 		if (src != dst) {
252 			memmove(dst, src, sizeof(*src));
253 			i = &dst->keys;
254 			set_btree_bset(b, t, i);
255 		}
256 
257 		out = i->start;
258 
259 		for (k = start; k != end; k = n) {
260 			n = bkey_p_next(k);
261 
262 			if (!bkey_deleted(k)) {
263 				bkey_p_copy(out, k);
264 				out = bkey_p_next(out);
265 			} else {
266 				BUG_ON(k->needs_whiteout);
267 			}
268 		}
269 
270 		i->u64s = cpu_to_le16((u64 *) out - i->_data);
271 		set_btree_bset_end(b, t);
272 		bch2_bset_set_no_aux_tree(b, t);
273 		ret = true;
274 	}
275 
276 	bch2_verify_btree_nr_keys(b);
277 
278 	bch2_btree_build_aux_trees(b);
279 
280 	return ret;
281 }
282 
283 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
284 			    enum compact_mode mode)
285 {
286 	return bch2_drop_whiteouts(b, mode);
287 }
288 
289 static void btree_node_sort(struct bch_fs *c, struct btree *b,
290 			    unsigned start_idx,
291 			    unsigned end_idx,
292 			    bool filter_whiteouts)
293 {
294 	struct btree_node *out;
295 	struct sort_iter_stack sort_iter;
296 	struct bset_tree *t;
297 	struct bset *start_bset = bset(b, &b->set[start_idx]);
298 	bool used_mempool = false;
299 	u64 start_time, seq = 0;
300 	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
301 	bool sorting_entire_node = start_idx == 0 &&
302 		end_idx == b->nsets;
303 
304 	sort_iter_stack_init(&sort_iter, b);
305 
306 	for (t = b->set + start_idx;
307 	     t < b->set + end_idx;
308 	     t++) {
309 		u64s += le16_to_cpu(bset(b, t)->u64s);
310 		sort_iter_add(&sort_iter.iter,
311 			      btree_bkey_first(b, t),
312 			      btree_bkey_last(b, t));
313 	}
314 
315 	bytes = sorting_entire_node
316 		? btree_bytes(c)
317 		: __vstruct_bytes(struct btree_node, u64s);
318 
319 	out = btree_bounce_alloc(c, bytes, &used_mempool);
320 
321 	start_time = local_clock();
322 
323 	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);
324 
325 	out->keys.u64s = cpu_to_le16(u64s);
326 
327 	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
328 
329 	if (sorting_entire_node)
330 		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
331 				       start_time);
332 
333 	/* Make sure we preserve bset journal_seq: */
334 	for (t = b->set + start_idx; t < b->set + end_idx; t++)
335 		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
336 	start_bset->journal_seq = cpu_to_le64(seq);
337 
338 	if (sorting_entire_node) {
339 		u64s = le16_to_cpu(out->keys.u64s);
340 
341 		BUG_ON(bytes != btree_bytes(c));
342 
343 		/*
344 		 * Our temporary buffer is the same size as the btree node's
345 		 * buffer, so we can just swap buffers instead of doing a big
346 		 * memcpy()
347 		 */
348 		*out = *b->data;
349 		out->keys.u64s = cpu_to_le16(u64s);
350 		swap(out, b->data);
351 		set_btree_bset(b, b->set, &b->data->keys);
352 	} else {
353 		start_bset->u64s = out->keys.u64s;
354 		memcpy_u64s(start_bset->start,
355 			    out->keys.start,
356 			    le16_to_cpu(out->keys.u64s));
357 	}
358 
359 	for (i = start_idx + 1; i < end_idx; i++)
360 		b->nr.bset_u64s[start_idx] +=
361 			b->nr.bset_u64s[i];
362 
363 	b->nsets -= shift;
364 
365 	for (i = start_idx + 1; i < b->nsets; i++) {
366 		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
367 		b->set[i]		= b->set[i + shift];
368 	}
369 
370 	for (i = b->nsets; i < MAX_BSETS; i++)
371 		b->nr.bset_u64s[i] = 0;
372 
373 	set_btree_bset_end(b, &b->set[start_idx]);
374 	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
375 
376 	btree_bounce_free(c, bytes, used_mempool, out);
377 
378 	bch2_verify_btree_nr_keys(b);
379 }
380 
381 void bch2_btree_sort_into(struct bch_fs *c,
382 			 struct btree *dst,
383 			 struct btree *src)
384 {
385 	struct btree_nr_keys nr;
386 	struct btree_node_iter src_iter;
387 	u64 start_time = local_clock();
388 
389 	BUG_ON(dst->nsets != 1);
390 
391 	bch2_bset_set_no_aux_tree(dst, dst->set);
392 
393 	bch2_btree_node_iter_init_from_start(&src_iter, src);
394 
395 	nr = bch2_sort_repack(btree_bset_first(dst),
396 			src, &src_iter,
397 			&dst->format,
398 			true);
399 
400 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
401 			       start_time);
402 
403 	set_btree_bset_end(dst, dst->set);
404 
405 	dst->nr.live_u64s	+= nr.live_u64s;
406 	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
407 	dst->nr.packed_keys	+= nr.packed_keys;
408 	dst->nr.unpacked_keys	+= nr.unpacked_keys;
409 
410 	bch2_verify_btree_nr_keys(dst);
411 }
412 
413 /*
414  * We're about to add another bset to the btree node, so if there are
415  * currently too many bsets, sort some of them together:
416  */
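/*
 * e.g. with three bsets where only the first has been written, the two
 * unwritten bsets get sorted together; had the first two already been
 * written, those two would be merged instead.
 */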
417 static bool btree_node_compact(struct bch_fs *c, struct btree *b)
418 {
419 	unsigned unwritten_idx;
420 	bool ret = false;
421 
422 	for (unwritten_idx = 0;
423 	     unwritten_idx < b->nsets;
424 	     unwritten_idx++)
425 		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
426 			break;
427 
428 	if (b->nsets - unwritten_idx > 1) {
429 		btree_node_sort(c, b, unwritten_idx,
430 				b->nsets, false);
431 		ret = true;
432 	}
433 
434 	if (unwritten_idx > 1) {
435 		btree_node_sort(c, b, 0, unwritten_idx, false);
436 		ret = true;
437 	}
438 
439 	return ret;
440 }
441 
442 void bch2_btree_build_aux_trees(struct btree *b)
443 {
444 	struct bset_tree *t;
445 
446 	for_each_bset(b, t)
447 		bch2_bset_build_aux_tree(b, t,
448 				!bset_written(b, bset(b, t)) &&
449 				t == bset_tree_last(b));
450 }
451 
452 /*
453  * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
454  *
455  * The first bset is going to be of similar order to the size of the node, the
456  * last bset is bounded by btree_write_set_buffer(), which is set to keep the
457  * memmove on insert from being too expensive: the middle bset should, ideally,
458  * be the geometric mean of the first and the last.
459  *
460  * Returns true if the middle bset is greater than that geometric mean:
461  */
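/*
 * e.g. if btree_max_u64s(c) were 2^15 and BTREE_WRITE_SET_U64s_BITS were 9
 * (illustrative values, not necessarily the real constants), mid_u64s_bits
 * would be (15 + 9) / 2 == 12: compact once the middle bset exceeds
 * 4096 u64s, the geometric mean of 2^15 and 2^9 u64s.
 */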
462 static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
463 {
464 	unsigned mid_u64s_bits =
465 		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
466 
467 	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
468 }
469 
470 /*
471  * bch2_btree_init_next() - initialize a new (unwritten) bset that can then be
472  * inserted into
473  *
474  * Safe to call if there already is an unwritten bset - will only add a new bset
475  * if @b doesn't already have one.
476  *
477  * If we sorted (invalidating iterators), bch2_trans_node_reinit_iter() is called.
478  */
479 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
480 {
481 	struct bch_fs *c = trans->c;
482 	struct btree_node_entry *bne;
483 	bool reinit_iter = false;
484 
485 	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
486 	BUG_ON(bset_written(b, bset(b, &b->set[1])));
487 	BUG_ON(btree_node_just_written(b));
488 
489 	if (b->nsets == MAX_BSETS &&
490 	    !btree_node_write_in_flight(b) &&
491 	    should_compact_all(c, b)) {
492 		bch2_btree_node_write(c, b, SIX_LOCK_write,
493 				      BTREE_WRITE_init_next_bset);
494 		reinit_iter = true;
495 	}
496 
497 	if (b->nsets == MAX_BSETS &&
498 	    btree_node_compact(c, b))
499 		reinit_iter = true;
500 
501 	BUG_ON(b->nsets >= MAX_BSETS);
502 
503 	bne = want_new_bset(c, b);
504 	if (bne)
505 		bch2_bset_init_next(c, b, bne);
506 
507 	bch2_btree_build_aux_trees(b);
508 
509 	if (reinit_iter)
510 		bch2_trans_node_reinit_iter(trans, b);
511 }
512 
513 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
514 			  struct bch_dev *ca,
515 			  struct btree *b, struct bset *i,
516 			  unsigned offset, int write)
517 {
518 	prt_printf(out, bch2_log_msg(c, "%s"),
519 		   write == READ
520 		   ? "error validating btree node "
521 		   : "corrupt btree node before write ");
522 	if (ca)
523 		prt_printf(out, "on %s ", ca->name);
524 	prt_printf(out, "at btree ");
525 	bch2_btree_pos_to_text(out, c, b);
526 
527 	prt_printf(out, "\n  node offset %u", b->written);
528 	if (i)
529 		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
530 	prt_str(out, ": ");
531 }
532 
533 __printf(9, 10)
534 static int __btree_err(int ret,
535 		       struct bch_fs *c,
536 		       struct bch_dev *ca,
537 		       struct btree *b,
538 		       struct bset *i,
539 		       int write,
540 		       bool have_retry,
541 		       enum bch_sb_error_id err_type,
542 		       const char *fmt, ...)
543 {
544 	struct printbuf out = PRINTBUF;
545 	va_list args;
546 
547 	btree_err_msg(&out, c, ca, b, i, b->written, write);
548 
549 	va_start(args, fmt);
550 	prt_vprintf(&out, fmt, args);
551 	va_end(args);
552 
553 	if (write == WRITE) {
554 		bch2_print_string_as_lines(KERN_ERR, out.buf);
555 		ret = c->opts.errors == BCH_ON_ERROR_continue
556 			? 0
557 			: -BCH_ERR_fsck_errors_not_fixed;
558 		goto out;
559 	}
560 
561 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
562 		ret = -BCH_ERR_btree_node_read_err_fixable;
563 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
564 		ret = -BCH_ERR_btree_node_read_err_bad_node;
565 
566 	if (ret != -BCH_ERR_btree_node_read_err_fixable)
567 		bch2_sb_error_count(c, err_type);
568 
569 	switch (ret) {
570 	case -BCH_ERR_btree_node_read_err_fixable:
571 		ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf);
572 		if (ret != -BCH_ERR_fsck_fix &&
573 		    ret != -BCH_ERR_fsck_ignore)
574 			goto fsck_err;
575 		ret = -BCH_ERR_fsck_fix;
576 		break;
577 	case -BCH_ERR_btree_node_read_err_want_retry:
578 	case -BCH_ERR_btree_node_read_err_must_retry:
579 		bch2_print_string_as_lines(KERN_ERR, out.buf);
580 		break;
581 	case -BCH_ERR_btree_node_read_err_bad_node:
582 		bch2_print_string_as_lines(KERN_ERR, out.buf);
583 		bch2_topology_error(c);
584 		ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?: -EIO;
585 		break;
586 	case -BCH_ERR_btree_node_read_err_incompatible:
587 		bch2_print_string_as_lines(KERN_ERR, out.buf);
588 		ret = -BCH_ERR_fsck_errors_not_fixed;
589 		break;
590 	default:
591 		BUG();
592 	}
593 out:
594 fsck_err:
595 	printbuf_exit(&out);
596 	return ret;
597 }
598 
599 #define btree_err(type, c, ca, b, i, _err_type, msg, ...)		\
600 ({									\
601 	int _ret = __btree_err(type, c, ca, b, i, write, have_retry,	\
602 			       BCH_FSCK_ERR_##_err_type,		\
603 			       msg, ##__VA_ARGS__);			\
604 									\
605 	if (_ret != -BCH_ERR_fsck_fix) {				\
606 		ret = _ret;						\
607 		goto fsck_err;						\
608 	}								\
609 									\
610 	*saw_error = true;						\
611 })
612 
613 #define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
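
/*
 * Both macros above rely on 'write', 'have_retry', 'saw_error' and 'ret'
 * being in scope at the call site, plus an fsck_err label to jump to when
 * an error can't be fixed - see validate_bset() below for typical usage.
 */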
614 
615 /*
616  * When btree topology repair changes the start or end of a node, that might
617  * mean we have to drop keys that are no longer inside the node:
618  */
619 __cold
620 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
621 {
622 	struct bset_tree *t;
623 
624 	for_each_bset(b, t) {
625 		struct bset *i = bset(b, t);
626 		struct bkey_packed *k;
627 
628 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
629 			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
630 				break;
631 
632 		if (k != i->start) {
633 			unsigned shift = (u64 *) k - (u64 *) i->start;
634 
635 			memmove_u64s_down(i->start, k,
636 					  (u64 *) vstruct_end(i) - (u64 *) k);
637 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
638 			set_btree_bset_end(b, t);
639 		}
640 
641 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
642 			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
643 				break;
644 
645 		if (k != vstruct_last(i)) {
646 			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
647 			set_btree_bset_end(b, t);
648 		}
649 	}
650 
651 	/*
652 	 * Always rebuild search trees: eytzinger search tree nodes directly
653 	 * depend on the values of min/max key:
654 	 */
655 	bch2_bset_set_no_aux_tree(b, b->set);
656 	bch2_btree_build_aux_trees(b);
657 
658 	struct bkey_s_c k;
659 	struct bkey unpacked;
660 	struct btree_node_iter iter;
661 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
662 		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
663 		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
664 	}
665 }
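
/*
 * The memmove_u64s_down() + u64s adjustment above is the usual
 * truncate-from-the-front pattern; on a plain array the same operation
 * would look like this (a sketch, not used by the code above):
 */
static inline void example_drop_prefix(u64 *vals, u16 *nr, unsigned drop)
{
	memmove(vals, vals + drop, (*nr - drop) * sizeof(*vals));
	*nr -= drop;
}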
666 
667 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
668 			 struct btree *b, struct bset *i,
669 			 unsigned offset, unsigned sectors,
670 			 int write, bool have_retry, bool *saw_error)
671 {
672 	unsigned version = le16_to_cpu(i->version);
673 	struct printbuf buf1 = PRINTBUF;
674 	struct printbuf buf2 = PRINTBUF;
675 	int ret = 0;
676 
677 	btree_err_on(!bch2_version_compatible(version),
678 		     -BCH_ERR_btree_node_read_err_incompatible,
679 		     c, ca, b, i,
680 		     btree_node_unsupported_version,
681 		     "unsupported bset version %u.%u",
682 		     BCH_VERSION_MAJOR(version),
683 		     BCH_VERSION_MINOR(version));
684 
685 	if (btree_err_on(version < c->sb.version_min,
686 			 -BCH_ERR_btree_node_read_err_fixable,
687 			 c, NULL, b, i,
688 			 btree_node_bset_older_than_sb_min,
689 			 "bset version %u older than superblock version_min %u",
690 			 version, c->sb.version_min)) {
691 		mutex_lock(&c->sb_lock);
692 		c->disk_sb.sb->version_min = cpu_to_le16(version);
693 		bch2_write_super(c);
694 		mutex_unlock(&c->sb_lock);
695 	}
696 
697 	if (btree_err_on(BCH_VERSION_MAJOR(version) >
698 			 BCH_VERSION_MAJOR(c->sb.version),
699 			 -BCH_ERR_btree_node_read_err_fixable,
700 			 c, NULL, b, i,
701 			 btree_node_bset_newer_than_sb,
702 			 "bset version %u newer than superblock version %u",
703 			 version, c->sb.version)) {
704 		mutex_lock(&c->sb_lock);
705 		c->disk_sb.sb->version = cpu_to_le16(version);
706 		bch2_write_super(c);
707 		mutex_unlock(&c->sb_lock);
708 	}
709 
710 	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
711 		     -BCH_ERR_btree_node_read_err_incompatible,
712 		     c, ca, b, i,
713 		     btree_node_unsupported_version,
714 		     "BSET_SEPARATE_WHITEOUTS no longer supported");
715 
716 	if (btree_err_on(offset + sectors > btree_sectors(c),
717 			 -BCH_ERR_btree_node_read_err_fixable,
718 			 c, ca, b, i,
719 			 bset_past_end_of_btree_node,
720 			 "bset past end of btree node")) {
721 		i->u64s = 0;
722 		ret = 0;
723 		goto out;
724 	}
725 
726 	btree_err_on(offset && !i->u64s,
727 		     -BCH_ERR_btree_node_read_err_fixable,
728 		     c, ca, b, i,
729 		     bset_empty,
730 		     "empty bset");
731 
732 	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
733 		     -BCH_ERR_btree_node_read_err_want_retry,
734 		     c, ca, b, i,
735 		     bset_wrong_sector_offset,
736 		     "bset at wrong sector offset");
737 
738 	if (!offset) {
739 		struct btree_node *bn =
740 			container_of(i, struct btree_node, keys);
741 		/* These indicate that we read the wrong btree node: */
742 
743 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
744 			struct bch_btree_ptr_v2 *bp =
745 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
746 
747 			/* XXX endianness */
748 			btree_err_on(bp->seq != bn->keys.seq,
749 				     -BCH_ERR_btree_node_read_err_must_retry,
750 				     c, ca, b, NULL,
751 				     bset_bad_seq,
752 				     "incorrect sequence number (wrong btree node)");
753 		}
754 
755 		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
756 			     -BCH_ERR_btree_node_read_err_must_retry,
757 			     c, ca, b, i,
758 			     btree_node_bad_btree,
759 			     "incorrect btree id");
760 
761 		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
762 			     -BCH_ERR_btree_node_read_err_must_retry,
763 			     c, ca, b, i,
764 			     btree_node_bad_level,
765 			     "incorrect level");
766 
767 		if (!write)
768 			compat_btree_node(b->c.level, b->c.btree_id, version,
769 					  BSET_BIG_ENDIAN(i), write, bn);
770 
771 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
772 			struct bch_btree_ptr_v2 *bp =
773 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
774 
775 			if (BTREE_PTR_RANGE_UPDATED(bp)) {
776 				b->data->min_key = bp->min_key;
777 				b->data->max_key = b->key.k.p;
778 			}
779 
780 			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
781 				     -BCH_ERR_btree_node_read_err_must_retry,
782 				     c, ca, b, NULL,
783 				     btree_node_bad_min_key,
784 				     "incorrect min_key: got %s should be %s",
785 				     (printbuf_reset(&buf1),
786 				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
787 				     (printbuf_reset(&buf2),
788 				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
789 		}
790 
791 		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
792 			     -BCH_ERR_btree_node_read_err_must_retry,
793 			     c, ca, b, i,
794 			     btree_node_bad_max_key,
795 			     "incorrect max key %s",
796 			     (printbuf_reset(&buf1),
797 			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
798 
799 		if (write)
800 			compat_btree_node(b->c.level, b->c.btree_id, version,
801 					  BSET_BIG_ENDIAN(i), write, bn);
802 
803 		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
804 			     -BCH_ERR_btree_node_read_err_bad_node,
805 			     c, ca, b, i,
806 			     btree_node_bad_format,
807 			     "invalid bkey format: %s\n  %s", buf1.buf,
808 			     (printbuf_reset(&buf2),
809 			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
810 		printbuf_reset(&buf1);
811 
812 		compat_bformat(b->c.level, b->c.btree_id, version,
813 			       BSET_BIG_ENDIAN(i), write,
814 			       &bn->format);
815 	}
816 out:
817 fsck_err:
818 	printbuf_exit(&buf2);
819 	printbuf_exit(&buf1);
820 	return ret;
821 }
822 
823 static int bset_key_invalid(struct bch_fs *c, struct btree *b,
824 			    struct bkey_s_c k,
825 			    bool updated_range, int rw,
826 			    struct printbuf *err)
827 {
828 	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
829 		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
830 		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
831 }
832 
833 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
834 			 struct bset *i, int write,
835 			 bool have_retry, bool *saw_error)
836 {
837 	unsigned version = le16_to_cpu(i->version);
838 	struct bkey_packed *k, *prev = NULL;
839 	struct printbuf buf = PRINTBUF;
840 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
841 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
842 	int ret = 0;
843 
844 	for (k = i->start;
845 	     k != vstruct_last(i);) {
846 		struct bkey_s u;
847 		struct bkey tmp;
848 
849 		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
850 				 -BCH_ERR_btree_node_read_err_fixable,
851 				 c, NULL, b, i,
852 				 btree_node_bkey_past_bset_end,
853 				 "key extends past end of bset")) {
854 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
855 			break;
856 		}
857 
858 		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
859 				 -BCH_ERR_btree_node_read_err_fixable,
860 				 c, NULL, b, i,
861 				 btree_node_bkey_bad_format,
862 				 "invalid bkey format %u", k->format)) {
863 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
864 			memmove_u64s_down(k, bkey_p_next(k),
865 					  (u64 *) vstruct_end(i) - (u64 *) k);
866 			continue;
867 		}
868 
869 		/* XXX: validate k->u64s */
870 		if (!write)
871 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
872 				    BSET_BIG_ENDIAN(i), write,
873 				    &b->format, k);
874 
875 		u = __bkey_disassemble(b, k, &tmp);
876 
877 		printbuf_reset(&buf);
878 		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
879 			printbuf_reset(&buf);
880 			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
881 			prt_printf(&buf, "\n  ");
882 			bch2_bkey_val_to_text(&buf, c, u.s_c);
883 
884 			btree_err(-BCH_ERR_btree_node_read_err_fixable,
885 				  c, NULL, b, i,
886 				  btree_node_bad_bkey,
887 				  "invalid bkey: %s", buf.buf);
888 
889 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
890 			memmove_u64s_down(k, bkey_p_next(k),
891 					  (u64 *) vstruct_end(i) - (u64 *) k);
892 			continue;
893 		}
894 
895 		if (write)
896 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
897 				    BSET_BIG_ENDIAN(i), write,
898 				    &b->format, k);
899 
900 		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
901 			struct bkey up = bkey_unpack_key(b, prev);
902 
903 			printbuf_reset(&buf);
904 			prt_printf(&buf, "keys out of order: ");
905 			bch2_bkey_to_text(&buf, &up);
906 			prt_printf(&buf, " > ");
907 			bch2_bkey_to_text(&buf, u.k);
908 
909 			bch2_dump_bset(c, b, i, 0);
910 
911 			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
912 				      c, NULL, b, i,
913 				      btree_node_bkey_out_of_order,
914 				      "%s", buf.buf)) {
915 				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
916 				memmove_u64s_down(k, bkey_p_next(k),
917 						  (u64 *) vstruct_end(i) - (u64 *) k);
918 				continue;
919 			}
920 		}
921 
922 		prev = k;
923 		k = bkey_p_next(k);
924 	}
925 fsck_err:
926 	printbuf_exit(&buf);
927 	return ret;
928 }
929 
930 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
931 			      struct btree *b, bool have_retry, bool *saw_error)
932 {
933 	struct btree_node_entry *bne;
934 	struct sort_iter *iter;
935 	struct btree_node *sorted;
936 	struct bkey_packed *k;
937 	struct bch_extent_ptr *ptr;
938 	struct bset *i;
939 	bool used_mempool, blacklisted;
940 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
941 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
942 	unsigned u64s;
943 	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
944 	struct printbuf buf = PRINTBUF;
945 	int ret = 0, retry_read = 0, write = READ;
946 
947 	b->version_ondisk = U16_MAX;
948 	/* We might get called multiple times on read retry: */
949 	b->written = 0;
950 
951 	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
952 	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
953 
954 	if (bch2_meta_read_fault("btree"))
955 		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
956 			  c, ca, b, NULL,
957 			  btree_node_fault_injected,
958 			  "dynamic fault");
959 
960 	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
961 		     -BCH_ERR_btree_node_read_err_must_retry,
962 		     c, ca, b, NULL,
963 		     btree_node_bad_magic,
964 		     "bad magic: want %llx, got %llx",
965 		     bset_magic(c), le64_to_cpu(b->data->magic));
966 
967 	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
968 		struct bch_btree_ptr_v2 *bp =
969 			&bkey_i_to_btree_ptr_v2(&b->key)->v;
970 
971 		btree_err_on(b->data->keys.seq != bp->seq,
972 			     -BCH_ERR_btree_node_read_err_must_retry,
973 			     c, ca, b, NULL,
974 			     btree_node_bad_seq,
975 			     "got wrong btree node (seq %llx want %llx)",
976 			     b->data->keys.seq, bp->seq);
977 	} else {
978 		btree_err_on(!b->data->keys.seq,
979 			     -BCH_ERR_btree_node_read_err_must_retry,
980 			     c, ca, b, NULL,
981 			     btree_node_bad_seq,
982 			     "bad btree header: seq 0");
983 	}
984 
985 	while (b->written < (ptr_written ?: btree_sectors(c))) {
986 		unsigned sectors;
987 		struct nonce nonce;
988 		bool first = !b->written;
989 		bool csum_bad;
990 
991 		if (!b->written) {
992 			i = &b->data->keys;
993 
994 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
995 				     -BCH_ERR_btree_node_read_err_want_retry,
996 				     c, ca, b, i,
997 				     bset_unknown_csum,
998 				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
999 
1000 			nonce = btree_nonce(i, b->written << 9);
1001 
1002 			csum_bad = bch2_crc_cmp(b->data->csum,
1003 				csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data));
1004 			if (csum_bad)
1005 				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1006 
1007 			btree_err_on(csum_bad,
1008 				     -BCH_ERR_btree_node_read_err_want_retry,
1009 				     c, ca, b, i,
1010 				     bset_bad_csum,
1011 				     "invalid checksum");
1012 
1013 			ret = bset_encrypt(c, i, b->written << 9);
1014 			if (bch2_fs_fatal_err_on(ret, c,
1015 					"error decrypting btree node: %i", ret))
1016 				goto fsck_err;
1017 
1018 			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
1019 				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
1020 				     -BCH_ERR_btree_node_read_err_incompatible,
1021 				     c, NULL, b, NULL,
1022 				     btree_node_unsupported_version,
1023 				     "btree node does not have NEW_EXTENT_OVERWRITE set");
1024 
1025 			sectors = vstruct_sectors(b->data, c->block_bits);
1026 		} else {
1027 			bne = write_block(b);
1028 			i = &bne->keys;
1029 
1030 			if (i->seq != b->data->keys.seq)
1031 				break;
1032 
1033 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
1034 				     -BCH_ERR_btree_node_read_err_want_retry,
1035 				     c, ca, b, i,
1036 				     bset_unknown_csum,
1037 				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
1038 
1039 			nonce = btree_nonce(i, b->written << 9);
1040 			csum_bad = bch2_crc_cmp(bne->csum,
1041 				csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne));
1042 			if (csum_bad)
1043 				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1044 
1045 			btree_err_on(csum_bad,
1046 				     -BCH_ERR_btree_node_read_err_want_retry,
1047 				     c, ca, b, i,
1048 				     bset_bad_csum,
1049 				     "invalid checksum");
1050 
1051 			ret = bset_encrypt(c, i, b->written << 9);
1052 			if (bch2_fs_fatal_err_on(ret, c,
1053 					"error decrypting btree node: %i", ret))
1054 				goto fsck_err;
1055 
1056 			sectors = vstruct_sectors(bne, c->block_bits);
1057 		}
1058 
1059 		b->version_ondisk = min(b->version_ondisk,
1060 					le16_to_cpu(i->version));
1061 
1062 		ret = validate_bset(c, ca, b, i, b->written, sectors,
1063 				    READ, have_retry, saw_error);
1064 		if (ret)
1065 			goto fsck_err;
1066 
1067 		if (!b->written)
1068 			btree_node_set_format(b, b->data->format);
1069 
1070 		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
1071 		if (ret)
1072 			goto fsck_err;
1073 
1074 		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
1075 
1076 		blacklisted = bch2_journal_seq_is_blacklisted(c,
1077 					le64_to_cpu(i->journal_seq),
1078 					true);
1079 
1080 		btree_err_on(blacklisted && first,
1081 			     -BCH_ERR_btree_node_read_err_fixable,
1082 			     c, ca, b, i,
1083 			     bset_blacklisted_journal_seq,
1084 			     "first btree node bset has blacklisted journal seq (%llu)",
1085 			     le64_to_cpu(i->journal_seq));
1086 
1087 		btree_err_on(blacklisted && ptr_written,
1088 			     -BCH_ERR_btree_node_read_err_fixable,
1089 			     c, ca, b, i,
1090 			     first_bset_blacklisted_journal_seq,
1091 			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
1092 			     le64_to_cpu(i->journal_seq),
1093 			     b->written, b->written + sectors, ptr_written);
1094 
1095 		b->written += sectors;
1096 
1097 		if (blacklisted && !first)
1098 			continue;
1099 
1100 		sort_iter_add(iter,
1101 			      vstruct_idx(i, 0),
1102 			      vstruct_last(i));
1103 	}
1104 
1105 	if (ptr_written) {
1106 		btree_err_on(b->written < ptr_written,
1107 			     -BCH_ERR_btree_node_read_err_want_retry,
1108 			     c, ca, b, NULL,
1109 			     btree_node_data_missing,
1110 			     "btree node data missing: expected %u sectors, found %u",
1111 			     ptr_written, b->written);
1112 	} else {
1113 		for (bne = write_block(b);
1114 		     bset_byte_offset(b, bne) < btree_bytes(c);
1115 		     bne = (void *) bne + block_bytes(c))
1116 			btree_err_on(bne->keys.seq == b->data->keys.seq &&
1117 				     !bch2_journal_seq_is_blacklisted(c,
1118 								      le64_to_cpu(bne->keys.journal_seq),
1119 								      true),
1120 				     -BCH_ERR_btree_node_read_err_want_retry,
1121 				     c, ca, b, NULL,
1122 				     btree_node_bset_after_end,
1123 				     "found bset signature after last bset");
1124 	}
1125 
1126 	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
1127 	sorted->keys.u64s = 0;
1128 
1129 	set_btree_bset(b, b->set, &b->data->keys);
1130 
1131 	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
1132 
1133 	u64s = le16_to_cpu(sorted->keys.u64s);
1134 	*sorted = *b->data;
1135 	sorted->keys.u64s = cpu_to_le16(u64s);
1136 	swap(sorted, b->data);
1137 	set_btree_bset(b, b->set, &b->data->keys);
1138 	b->nsets = 1;
1139 
1140 	BUG_ON(b->nr.live_u64s != u64s);
1141 
1142 	btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
1143 
1144 	if (updated_range)
1145 		bch2_btree_node_drop_keys_outside_node(b);
1146 
1147 	i = &b->data->keys;
1148 	for (k = i->start; k != vstruct_last(i);) {
1149 		struct bkey tmp;
1150 		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
1151 
1152 		printbuf_reset(&buf);
1153 
1154 		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
1155 		    (bch2_inject_invalid_keys &&
1156 		     !bversion_cmp(u.k->version, MAX_VERSION))) {
1157 			printbuf_reset(&buf);
1158 
1159 			prt_printf(&buf, "invalid bkey: ");
1160 			bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
1161 			prt_printf(&buf, "\n  ");
1162 			bch2_bkey_val_to_text(&buf, c, u.s_c);
1163 
1164 			btree_err(-BCH_ERR_btree_node_read_err_fixable,
1165 				  c, NULL, b, i,
1166 				  btree_node_bad_bkey,
1167 				  "%s", buf.buf);
1168 
1169 			btree_keys_account_key_drop(&b->nr, 0, k);
1170 
1171 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1172 			memmove_u64s_down(k, bkey_p_next(k),
1173 					  (u64 *) vstruct_end(i) - (u64 *) k);
1174 			set_btree_bset_end(b, b->set);
1175 			continue;
1176 		}
1177 
1178 		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
1179 			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
1180 
1181 			bp.v->mem_ptr = 0;
1182 		}
1183 
1184 		k = bkey_p_next(k);
1185 	}
1186 
1187 	bch2_bset_build_aux_tree(b, b->set, false);
1188 
1189 	set_needs_whiteout(btree_bset_first(b), true);
1190 
1191 	btree_node_reset_sib_u64s(b);
1192 
1193 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
1194 		struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);
1195 
1196 		if (ca2->mi.state != BCH_MEMBER_STATE_rw)
1197 			set_btree_node_need_rewrite(b);
1198 	}
1199 
1200 	if (!ptr_written)
1201 		set_btree_node_need_rewrite(b);
1202 out:
1203 	mempool_free(iter, &c->fill_iter);
1204 	printbuf_exit(&buf);
1205 	return retry_read;
1206 fsck_err:
1207 	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
1208 	    ret == -BCH_ERR_btree_node_read_err_must_retry)
1209 		retry_read = 1;
1210 	else
1211 		set_btree_node_read_error(b);
1212 	goto out;
1213 }
1214 
1215 static void btree_node_read_work(struct work_struct *work)
1216 {
1217 	struct btree_read_bio *rb =
1218 		container_of(work, struct btree_read_bio, work);
1219 	struct bch_fs *c	= rb->c;
1220 	struct btree *b		= rb->b;
1221 	struct bch_dev *ca	= bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1222 	struct bio *bio		= &rb->bio;
1223 	struct bch_io_failures failed = { .nr = 0 };
1224 	struct printbuf buf = PRINTBUF;
1225 	bool saw_error = false;
1226 	bool retry = false;
1227 	bool can_retry;
1228 
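	/*
	 * The first read was already submitted by the caller; jump straight
	 * to handling its completion, and only loop back here on retry:
	 */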
1229 	goto start;
1230 	while (1) {
1231 		retry = true;
1232 		bch_info(c, "retrying read");
1233 		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1234 		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
1235 		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1236 		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
1237 		bio->bi_iter.bi_size	= btree_bytes(c);
1238 
1239 		if (rb->have_ioref) {
1240 			bio_set_dev(bio, ca->disk_sb.bdev);
1241 			submit_bio_wait(bio);
1242 		} else {
1243 			bio->bi_status = BLK_STS_REMOVED;
1244 		}
1245 start:
1246 		printbuf_reset(&buf);
1247 		bch2_btree_pos_to_text(&buf, c, b);
1248 		bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
1249 				   "btree read error %s for %s",
1250 				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
1251 		if (rb->have_ioref)
1252 			percpu_ref_put(&ca->io_ref);
1253 		rb->have_ioref = false;
1254 
1255 		bch2_mark_io_failure(&failed, &rb->pick);
1256 
1257 		can_retry = bch2_bkey_pick_read_device(c,
1258 				bkey_i_to_s_c(&b->key),
1259 				&failed, &rb->pick) > 0;
1260 
1261 		if (!bio->bi_status &&
1262 		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
1263 			if (retry)
1264 				bch_info(c, "retry success");
1265 			break;
1266 		}
1267 
1268 		saw_error = true;
1269 
1270 		if (!can_retry) {
1271 			set_btree_node_read_error(b);
1272 			break;
1273 		}
1274 	}
1275 
1276 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1277 			       rb->start_time);
1278 	bio_put(&rb->bio);
1279 
1280 	if (saw_error && !btree_node_read_error(b)) {
1281 		printbuf_reset(&buf);
1282 		bch2_bpos_to_text(&buf, b->key.k.p);
1283 		bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
1284 			 __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
1285 
1286 		bch2_btree_node_rewrite_async(c, b);
1287 	}
1288 
1289 	printbuf_exit(&buf);
1290 	clear_btree_node_read_in_flight(b);
1291 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1292 }
1293 
1294 static void btree_node_read_endio(struct bio *bio)
1295 {
1296 	struct btree_read_bio *rb =
1297 		container_of(bio, struct btree_read_bio, bio);
1298 	struct bch_fs *c	= rb->c;
1299 
1300 	if (rb->have_ioref) {
1301 		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1302 
1303 		bch2_latency_acct(ca, rb->start_time, READ);
1304 	}
1305 
1306 	queue_work(c->io_complete_wq, &rb->work);
1307 }
1308 
1309 struct btree_node_read_all {
1310 	struct closure		cl;
1311 	struct bch_fs		*c;
1312 	struct btree		*b;
1313 	unsigned		nr;
1314 	void			*buf[BCH_REPLICAS_MAX];
1315 	struct bio		*bio[BCH_REPLICAS_MAX];
1316 	blk_status_t		err[BCH_REPLICAS_MAX];
1317 };
1318 
1319 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1320 {
1321 	struct btree_node *bn = data;
1322 	struct btree_node_entry *bne;
1323 	unsigned offset = 0;
1324 
1325 	if (le64_to_cpu(bn->magic) !=  bset_magic(c))
1326 		return 0;
1327 
1328 	while (offset < btree_sectors(c)) {
1329 		if (!offset) {
1330 			offset += vstruct_sectors(bn, c->block_bits);
1331 		} else {
1332 			bne = data + (offset << 9);
1333 			if (bne->keys.seq != bn->keys.seq)
1334 				break;
1335 			offset += vstruct_sectors(bne, c->block_bits);
1336 		}
1337 	}
1338 
1339 	return offset;
1340 }
1341 
1342 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1343 {
1344 	struct btree_node *bn = data;
1345 	struct btree_node_entry *bne;
1346 
1347 	if (!offset)
1348 		return false;
1349 
1350 	while (offset < btree_sectors(c)) {
1351 		bne = data + (offset << 9);
1352 		if (bne->keys.seq == bn->keys.seq)
1353 			return true;
1354 		offset++;
1355 	}
1356 
1357 	return false;
1358 	return offset;
1359 }
1360 
1361 static void btree_node_read_all_replicas_done(struct closure *cl)
1362 {
1363 	struct btree_node_read_all *ra =
1364 		container_of(cl, struct btree_node_read_all, cl);
1365 	struct bch_fs *c = ra->c;
1366 	struct btree *b = ra->b;
1367 	struct printbuf buf = PRINTBUF;
1368 	bool dump_bset_maps = false;
1369 	bool have_retry = false;
1370 	int ret = 0, best = -1, write = READ;
1371 	unsigned i, written = 0, written2 = 0;
1372 	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1373 		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1374 	bool _saw_error = false, *saw_error = &_saw_error;
1375 
1376 	for (i = 0; i < ra->nr; i++) {
1377 		struct btree_node *bn = ra->buf[i];
1378 
1379 		if (ra->err[i])
1380 			continue;
1381 
1382 		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1383 		    (seq && seq != bn->keys.seq))
1384 			continue;
1385 
1386 		if (best < 0) {
1387 			best = i;
1388 			written = btree_node_sectors_written(c, bn);
1389 			continue;
1390 		}
1391 
1392 		written2 = btree_node_sectors_written(c, ra->buf[i]);
1393 		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
1394 				 c, NULL, b, NULL,
1395 				 btree_node_replicas_sectors_written_mismatch,
1396 				 "btree node sectors written mismatch: %u != %u",
1397 				 written, written2) ||
1398 		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1399 				 -BCH_ERR_btree_node_read_err_fixable,
1400 				 c, NULL, b, NULL,
1401 				 btree_node_bset_after_end,
1402 				 "found bset signature after last bset") ||
1403 		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1404 				 -BCH_ERR_btree_node_read_err_fixable,
1405 				 c, NULL, b, NULL,
1406 				 btree_node_replicas_data_mismatch,
1407 				 "btree node replicas content mismatch"))
1408 			dump_bset_maps = true;
1409 
1410 		if (written2 > written) {
1411 			written = written2;
1412 			best = i;
1413 		}
1414 	}
1415 fsck_err:
1416 	if (dump_bset_maps) {
1417 		for (i = 0; i < ra->nr; i++) {
1418 			struct btree_node *bn = ra->buf[i];
1419 			struct btree_node_entry *bne = NULL;
1420 			unsigned offset = 0, sectors;
1421 			bool gap = false;
1422 
1423 			if (ra->err[i])
1424 				continue;
1425 
1426 			printbuf_reset(&buf);
1427 
1428 			while (offset < btree_sectors(c)) {
1429 				if (!offset) {
1430 					sectors = vstruct_sectors(bn, c->block_bits);
1431 				} else {
1432 					bne = ra->buf[i] + (offset << 9);
1433 					if (bne->keys.seq != bn->keys.seq)
1434 						break;
1435 					sectors = vstruct_sectors(bne, c->block_bits);
1436 				}
1437 
1438 				prt_printf(&buf, " %u-%u", offset, offset + sectors);
1439 				if (bne && bch2_journal_seq_is_blacklisted(c,
1440 							le64_to_cpu(bne->keys.journal_seq), false))
1441 					prt_printf(&buf, "*");
1442 				offset += sectors;
1443 			}
1444 
1445 			while (offset < btree_sectors(c)) {
1446 				bne = ra->buf[i] + (offset << 9);
1447 				if (bne->keys.seq == bn->keys.seq) {
1448 					if (!gap)
1449 						prt_printf(&buf, " GAP");
1450 					gap = true;
1451 
1452 					sectors = vstruct_sectors(bne, c->block_bits);
1453 					prt_printf(&buf, " %u-%u", offset, offset + sectors);
1454 					if (bch2_journal_seq_is_blacklisted(c,
1455 							le64_to_cpu(bne->keys.journal_seq), false))
1456 						prt_printf(&buf, "*");
1457 				}
1458 				offset++;
1459 			}
1460 
1461 			bch_err(c, "replica %u:%s", i, buf.buf);
1462 		}
1463 	}
1464 
1465 	if (best >= 0) {
1466 		memcpy(b->data, ra->buf[best], btree_bytes(c));
1467 		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
1468 	} else {
1469 		ret = -1;
1470 	}
1471 
1472 	if (ret)
1473 		set_btree_node_read_error(b);
1474 	else if (*saw_error)
1475 		bch2_btree_node_rewrite_async(c, b);
1476 
1477 	for (i = 0; i < ra->nr; i++) {
1478 		mempool_free(ra->buf[i], &c->btree_bounce_pool);
1479 		bio_put(ra->bio[i]);
1480 	}
1481 
1482 	closure_debug_destroy(&ra->cl);
1483 	kfree(ra);
1484 	printbuf_exit(&buf);
1485 
1486 	clear_btree_node_read_in_flight(b);
1487 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1488 }
1489 
1490 static void btree_node_read_all_replicas_endio(struct bio *bio)
1491 {
1492 	struct btree_read_bio *rb =
1493 		container_of(bio, struct btree_read_bio, bio);
1494 	struct bch_fs *c	= rb->c;
1495 	struct btree_node_read_all *ra = rb->ra;
1496 
1497 	if (rb->have_ioref) {
1498 		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1499 
1500 		bch2_latency_acct(ca, rb->start_time, READ);
1501 	}
1502 
1503 	ra->err[rb->idx] = bio->bi_status;
1504 	closure_put(&ra->cl);
1505 }
1506 
1507 /*
1508  * XXX This allocates multiple times from the same mempools, and can deadlock
1509  * under sufficient memory pressure (but is only a debug path)
1510  */
1511 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1512 {
1513 	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1514 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1515 	const union bch_extent_entry *entry;
1516 	struct extent_ptr_decoded pick;
1517 	struct btree_node_read_all *ra;
1518 	unsigned i;
1519 
1520 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
1521 	if (!ra)
1522 		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1523 
1524 	closure_init(&ra->cl, NULL);
1525 	ra->c	= c;
1526 	ra->b	= b;
1527 	ra->nr	= bch2_bkey_nr_ptrs(k);
1528 
1529 	for (i = 0; i < ra->nr; i++) {
1530 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1531 		ra->bio[i] = bio_alloc_bioset(NULL,
1532 					      buf_pages(ra->buf[i], btree_bytes(c)),
1533 					      REQ_OP_READ|REQ_SYNC|REQ_META,
1534 					      GFP_NOFS,
1535 					      &c->btree_bio);
1536 	}
1537 
1538 	i = 0;
1539 	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1540 		struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1541 		struct btree_read_bio *rb =
1542 			container_of(ra->bio[i], struct btree_read_bio, bio);
1543 		rb->c			= c;
1544 		rb->b			= b;
1545 		rb->ra			= ra;
1546 		rb->start_time		= local_clock();
1547 		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
1548 		rb->idx			= i;
1549 		rb->pick		= pick;
1550 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1551 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
1552 		bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
1553 
1554 		if (rb->have_ioref) {
1555 			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1556 				     bio_sectors(&rb->bio));
1557 			bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1558 
1559 			closure_get(&ra->cl);
1560 			submit_bio(&rb->bio);
1561 		} else {
1562 			ra->err[i] = BLK_STS_REMOVED;
1563 		}
1564 
1565 		i++;
1566 	}
1567 
1568 	if (sync) {
1569 		closure_sync(&ra->cl);
1570 		btree_node_read_all_replicas_done(&ra->cl);
1571 	} else {
1572 		continue_at(&ra->cl, btree_node_read_all_replicas_done,
1573 			    c->io_complete_wq);
1574 	}
1575 
1576 	return 0;
1577 }
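
/*
 * On the deadlock noted above: btree_bounce_pool is a mempool with a fixed
 * reserve, and mempool_alloc() with GFP_NOFS may block until an element is
 * freed - so a single thread taking ra->nr elements before freeing any can
 * wait on itself if the reserve is smaller than the replica count.
 */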
1578 
1579 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1580 			  bool sync)
1581 {
1582 	struct extent_ptr_decoded pick;
1583 	struct btree_read_bio *rb;
1584 	struct bch_dev *ca;
1585 	struct bio *bio;
1586 	int ret;
1587 
1588 	trace_and_count(c, btree_node_read, c, b);
1589 
1590 	if (bch2_verify_all_btree_replicas &&
1591 	    !btree_node_read_all_replicas(c, b, sync))
1592 		return;
1593 
1594 	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1595 					 NULL, &pick);
1596 
1597 	if (ret <= 0) {
1598 		struct printbuf buf = PRINTBUF;
1599 
1600 		prt_str(&buf, "btree node read error: no device to read from\n at ");
1601 		bch2_btree_pos_to_text(&buf, c, b);
1602 		bch_err(c, "%s", buf.buf);
1603 
1604 		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1605 		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
1606 			bch2_fatal_error(c);
1607 
1608 		set_btree_node_read_error(b);
1609 		clear_btree_node_read_in_flight(b);
1610 		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1611 		printbuf_exit(&buf);
1612 		return;
1613 	}
1614 
1615 	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1616 
1617 	bio = bio_alloc_bioset(NULL,
1618 			       buf_pages(b->data, btree_bytes(c)),
1619 			       REQ_OP_READ|REQ_SYNC|REQ_META,
1620 			       GFP_NOFS,
1621 			       &c->btree_bio);
1622 	rb = container_of(bio, struct btree_read_bio, bio);
1623 	rb->c			= c;
1624 	rb->b			= b;
1625 	rb->ra			= NULL;
1626 	rb->start_time		= local_clock();
1627 	rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
1628 	rb->pick		= pick;
1629 	INIT_WORK(&rb->work, btree_node_read_work);
1630 	bio->bi_iter.bi_sector	= pick.ptr.offset;
1631 	bio->bi_end_io		= btree_node_read_endio;
1632 	bch2_bio_map(bio, b->data, btree_bytes(c));
1633 
1634 	if (rb->have_ioref) {
1635 		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1636 			     bio_sectors(bio));
1637 		bio_set_dev(bio, ca->disk_sb.bdev);
1638 
1639 		if (sync) {
1640 			submit_bio_wait(bio);
1641 
1642 			btree_node_read_work(&rb->work);
1643 		} else {
1644 			submit_bio(bio);
1645 		}
1646 	} else {
1647 		bio->bi_status = BLK_STS_REMOVED;
1648 
1649 		if (sync)
1650 			btree_node_read_work(&rb->work);
1651 		else
1652 			queue_work(c->io_complete_wq, &rb->work);
1653 	}
1654 }
1655 
1656 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1657 				  const struct bkey_i *k, unsigned level)
1658 {
1659 	struct bch_fs *c = trans->c;
1660 	struct closure cl;
1661 	struct btree *b;
1662 	int ret;
1663 
1664 	closure_init_stack(&cl);
1665 
1666 	do {
1667 		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1668 		closure_sync(&cl);
1669 	} while (ret);
1670 
1671 	b = bch2_btree_node_mem_alloc(trans, level != 0);
1672 	bch2_btree_cache_cannibalize_unlock(c);
1673 
1674 	BUG_ON(IS_ERR(b));
1675 
1676 	bkey_copy(&b->key, k);
1677 	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1678 
1679 	set_btree_node_read_in_flight(b);
1680 
1681 	bch2_btree_node_read(c, b, true);
1682 
1683 	if (btree_node_read_error(b)) {
1684 		bch2_btree_node_hash_remove(&c->btree_cache, b);
1685 
1686 		mutex_lock(&c->btree_cache.lock);
1687 		list_move(&b->list, &c->btree_cache.freeable);
1688 		mutex_unlock(&c->btree_cache.lock);
1689 
1690 		ret = -EIO;
1691 		goto err;
1692 	}
1693 
1694 	bch2_btree_set_root_for_read(c, b);
1695 err:
1696 	six_unlock_write(&b->c.lock);
1697 	six_unlock_intent(&b->c.lock);
1698 
1699 	return ret;
1700 }
1701 
1702 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1703 			const struct bkey_i *k, unsigned level)
1704 {
1705 	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
1706 }
1707 
1708 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1709 			      struct btree_write *w)
1710 {
1711 	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1712 
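	/*
	 * will_make_reachable is a tagged pointer to the btree_update that
	 * will make this node reachable; the low bit means the update still
	 * holds a closure ref on our behalf, cleared and dropped here:
	 */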
1713 	do {
1714 		old = new = v;
1715 		if (!(old & 1))
1716 			break;
1717 
1718 		new &= ~1UL;
1719 	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1720 
1721 	if (old & 1)
1722 		closure_put(&((struct btree_update *) new)->cl);
1723 
1724 	bch2_journal_pin_drop(&c->journal, &w->journal);
1725 }
1726 
1727 static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
1728 {
1729 	struct btree_write *w = btree_prev_write(b);
1730 	unsigned long old, new, v;
1731 	unsigned type = 0;
1732 
1733 	bch2_btree_complete_write(c, b, w);
1734 
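	/*
	 * If the node was redirtied while this write was in flight and
	 * nothing is blocking another write, keep write_in_flight set and
	 * start the next write below; otherwise clear the in-flight bits
	 * and wake waiters:
	 */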
1735 	v = READ_ONCE(b->flags);
1736 	do {
1737 		old = new = v;
1738 
1739 		if ((old & (1U << BTREE_NODE_dirty)) &&
1740 		    (old & (1U << BTREE_NODE_need_write)) &&
1741 		    !(old & (1U << BTREE_NODE_never_write)) &&
1742 		    !(old & (1U << BTREE_NODE_write_blocked)) &&
1743 		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
1744 			new &= ~(1U << BTREE_NODE_dirty);
1745 			new &= ~(1U << BTREE_NODE_need_write);
1746 			new |=  (1U << BTREE_NODE_write_in_flight);
1747 			new |=  (1U << BTREE_NODE_write_in_flight_inner);
1748 			new |=  (1U << BTREE_NODE_just_written);
1749 			new ^=  (1U << BTREE_NODE_write_idx);
1750 
1751 			type = new & BTREE_WRITE_TYPE_MASK;
1752 			new &= ~BTREE_WRITE_TYPE_MASK;
1753 		} else {
1754 			new &= ~(1U << BTREE_NODE_write_in_flight);
1755 			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
1756 		}
1757 	} while ((v = cmpxchg(&b->flags, old, new)) != old);
1758 
1759 	if (new & (1U << BTREE_NODE_write_in_flight))
1760 		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
1761 	else
1762 		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
1763 }
1764 
1765 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1766 {
1767 	struct btree_trans *trans = bch2_trans_get(c);
1768 
1769 	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
1770 	__btree_node_write_done(c, b);
1771 	six_unlock_read(&b->c.lock);
1772 
1773 	bch2_trans_put(trans);
1774 }
1775 
1776 static void btree_node_write_work(struct work_struct *work)
1777 {
1778 	struct btree_write_bio *wbio =
1779 		container_of(work, struct btree_write_bio, work);
1780 	struct bch_fs *c	= wbio->wbio.c;
1781 	struct btree *b		= wbio->wbio.bio.bi_private;
1782 	struct bch_extent_ptr *ptr;
1783 	int ret = 0;
1784 
1785 	btree_bounce_free(c,
1786 		wbio->data_bytes,
1787 		wbio->wbio.used_mempool,
1788 		wbio->data);
1789 
1790 	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
1791 		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1792 
1793 	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
1794 		goto err;
1795 
1796 	if (wbio->wbio.first_btree_write) {
1797 		if (wbio->wbio.failed.nr) {
1798 
1799 		}
1800 	} else {
1801 		ret = bch2_trans_do(c, NULL, NULL, 0,
1802 			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
1803 					BCH_WATERMARK_reclaim|
1804 					BTREE_INSERT_JOURNAL_RECLAIM|
1805 					BTREE_INSERT_NOFAIL|
1806 					BTREE_INSERT_NOCHECK_RW,
1807 					!wbio->wbio.failed.nr));
1808 		if (ret)
1809 			goto err;
1810 	}
1811 out:
1812 	bio_put(&wbio->wbio.bio);
1813 	btree_node_write_done(c, b);
1814 	return;
1815 err:
1816 	set_btree_node_noevict(b);
1817 	if (!bch2_err_matches(ret, EROFS))
1818 		bch2_fs_fatal_error(c, "fatal error writing btree node: %s", bch2_err_str(ret));
1819 	goto out;
1820 }
1821 
1822 static void btree_node_write_endio(struct bio *bio)
1823 {
1824 	struct bch_write_bio *wbio	= to_wbio(bio);
1825 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
1826 	struct bch_write_bio *orig	= parent ?: wbio;
1827 	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
1828 	struct bch_fs *c		= wbio->c;
1829 	struct btree *b			= wbio->bio.bi_private;
1830 	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);
1831 	unsigned long flags;
1832 
1833 	if (wbio->have_ioref)
1834 		bch2_latency_acct(ca, wbio->submit_time, WRITE);
1835 
1836 	if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
1837 			       "btree write error: %s",
1838 			       bch2_blk_status_to_str(bio->bi_status)) ||
1839 	    bch2_meta_write_fault("btree")) {
1840 		spin_lock_irqsave(&c->btree_write_error_lock, flags);
1841 		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1842 		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1843 	}
1844 
1845 	if (wbio->have_ioref)
1846 		percpu_ref_put(&ca->io_ref);
1847 
1848 	if (parent) {
1849 		bio_put(bio);
1850 		bio_endio(&parent->bio);
1851 		return;
1852 	}
1853 
1854 	clear_btree_node_write_in_flight_inner(b);
1855 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
1856 	INIT_WORK(&wb->work, btree_node_write_work);
1857 	queue_work(c->btree_io_complete_wq, &wb->work);
1858 }
1859 
1860 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1861 				   struct bset *i, unsigned sectors)
1862 {
1863 	struct printbuf buf = PRINTBUF;
1864 	bool saw_error;
1865 	int ret;
1866 
1867 	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
1868 				BKEY_TYPE_btree, WRITE, &buf);
1869 
1870 	if (ret)
1871 		bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
1872 	printbuf_exit(&buf);
1873 	if (ret)
1874 		return ret;
1875 
1876 	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
1877 		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
1878 	if (ret) {
1879 		bch2_inconsistent_error(c);
1880 		dump_stack();
1881 	}
1882 
1883 	return ret;
1884 }
1885 
1886 static void btree_write_submit(struct work_struct *work)
1887 {
1888 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
1889 	struct bch_extent_ptr *ptr;
1890 	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1891 
1892 	bkey_copy(&tmp.k, &wbio->key);
1893 
1894 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
1895 		ptr->offset += wbio->sector_offset;
1896 
1897 	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
1898 				  &tmp.k, false);
1899 }
1900 
1901 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
1902 {
1903 	struct btree_write_bio *wbio;
1904 	struct bset_tree *t;
1905 	struct bset *i;
1906 	struct btree_node *bn = NULL;
1907 	struct btree_node_entry *bne = NULL;
1908 	struct sort_iter_stack sort_iter;
1909 	struct nonce nonce;
1910 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
1911 	u64 seq = 0;
1912 	bool used_mempool;
1913 	unsigned long old, new;
1914 	bool validate_before_checksum = false;
1915 	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
1916 	void *data;
1917 	int ret;
1918 
1919 	if (flags & BTREE_WRITE_ALREADY_STARTED)
1920 		goto do_write;
1921 
1922 	/*
1923 	 * We may only have a read lock on the btree node - the dirty bit is
1924 	 * our "lock" against racing with other threads that may be trying to
1925 	 * start a write: we do a write iff we successfully clear the dirty
1926 	 * bit. Since setting the dirty bit requires a write lock, we can't
1927 	 * race with other threads redirtying it:
1928 	 */
1929 	do {
1930 		old = new = READ_ONCE(b->flags);
1931 
1932 		if (!(old & (1 << BTREE_NODE_dirty)))
1933 			return;
1934 
1935 		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
1936 		    !(old & (1 << BTREE_NODE_need_write)))
1937 			return;
1938 
1939 		if (old &
1940 		    ((1 << BTREE_NODE_never_write)|
1941 		     (1 << BTREE_NODE_write_blocked)))
1942 			return;
1943 
1944 		if (b->written &&
1945 		    (old & (1 << BTREE_NODE_will_make_reachable)))
1946 			return;
1947 
1948 		if (old & (1 << BTREE_NODE_write_in_flight))
1949 			return;
1950 
1951 		if (flags & BTREE_WRITE_ONLY_IF_NEED)
1952 			type = new & BTREE_WRITE_TYPE_MASK;
1953 		new &= ~BTREE_WRITE_TYPE_MASK;
1954 
1955 		new &= ~(1 << BTREE_NODE_dirty);
1956 		new &= ~(1 << BTREE_NODE_need_write);
1957 		new |=  (1 << BTREE_NODE_write_in_flight);
1958 		new |=  (1 << BTREE_NODE_write_in_flight_inner);
1959 		new |=  (1 << BTREE_NODE_just_written);
1960 		new ^=  (1 << BTREE_NODE_write_idx);
1961 	} while (cmpxchg_acquire(&b->flags, old, new) != old);
1962 
1963 	if (new & (1U << BTREE_NODE_need_write))
1964 		return;
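	/*
	 * A minimal sketch of the claim-by-cmpxchg pattern used above
	 * (hypothetical DIRTY/IN_FLIGHT flags, illustration only) -
	 * whichever thread's cmpxchg observes and clears the dirty bit
	 * owns the write:
	 *
	 *	unsigned long old, new;
	 *	do {
	 *		old = new = READ_ONCE(flags);
	 *		if (!(old & DIRTY))
	 *			return;		// already claimed
	 *		new &= ~DIRTY;
	 *		new |= IN_FLIGHT;
	 *	} while (cmpxchg_acquire(&flags, old, new) != old);
	 *	// this thread now owns the write
	 */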
1965 do_write:
1966 	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
1967 
1968 	atomic_dec(&c->btree_cache.dirty);
1969 
1970 	BUG_ON(btree_node_fake(b));
1971 	BUG_ON((b->will_make_reachable != 0) != !b->written);
1972 
1973 	BUG_ON(b->written >= btree_sectors(c));
1974 	BUG_ON(b->written & (block_sectors(c) - 1));
1975 	BUG_ON(bset_written(b, btree_bset_last(b)));
1976 	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1977 	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1978 
1979 	bch2_sort_whiteouts(c, b);
1980 
1981 	sort_iter_stack_init(&sort_iter, b);
1982 
1983 	bytes = !b->written
1984 		? sizeof(struct btree_node)
1985 		: sizeof(struct btree_node_entry);
1986 
1987 	bytes += b->whiteout_u64s * sizeof(u64);
1988 
1989 	for_each_bset(b, t) {
1990 		i = bset(b, t);
1991 
1992 		if (bset_written(b, i))
1993 			continue;
1994 
1995 		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1996 		sort_iter_add(&sort_iter.iter,
1997 			      btree_bkey_first(b, t),
1998 			      btree_bkey_last(b, t));
1999 		seq = max(seq, le64_to_cpu(i->journal_seq));
2000 	}
2001 
2002 	BUG_ON(b->written && !seq);
2003 
2004 	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
2005 	bytes += 8;
2006 
2007 	/* buffer must be a multiple of the block size */
2008 	bytes = round_up(bytes, block_bytes(c));
2009 
2010 	data = btree_bounce_alloc(c, bytes, &used_mempool);
2011 
2012 	if (!b->written) {
2013 		bn = data;
2014 		*bn = *b->data;
2015 		i = &bn->keys;
2016 	} else {
2017 		bne = data;
2018 		bne->keys = b->data->keys;
2019 		i = &bne->keys;
2020 	}
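	/*
	 * On-disk layout sketch (illustrative, not a format reference):
	 * the first write of a node begins with a struct btree_node
	 * header, and every subsequent write appends a struct
	 * btree_node_entry at the next block boundary:
	 *
	 *	sector 0:		btree_node	 { csum, magic, format, keys: bset }
	 *	sector b->written:	btree_node_entry { csum, keys: bset }
	 */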
2021 
2022 	i->journal_seq	= cpu_to_le64(seq);
2023 	i->u64s		= 0;
2024 
2025 	sort_iter_add(&sort_iter.iter,
2026 		      unwritten_whiteouts_start(c, b),
2027 		      unwritten_whiteouts_end(c, b));
2028 	SET_BSET_SEPARATE_WHITEOUTS(i, false);
2029 
2030 	b->whiteout_u64s = 0;
2031 
2032 	u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
2033 	le16_add_cpu(&i->u64s, u64s);
2034 
2035 	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2036 
2037 	set_needs_whiteout(i, false);
2038 
2039 	/* do we have data to write? */
2040 	if (b->written && !i->u64s)
2041 		goto nowrite;
2042 
2043 	bytes_to_write = vstruct_end(i) - data;
2044 	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2045 
2046 	if (!b->written &&
2047 	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
2048 		BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
2049 
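	/* zero the unused tail of the last block, so we never write stale
	 * memory contents to disk: */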
2050 	memset(data + bytes_to_write, 0,
2051 	       (sectors_to_write << 9) - bytes_to_write);
2052 
2053 	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2054 	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2055 	BUG_ON(i->seq != b->data->keys.seq);
2056 
2057 	i->version = cpu_to_le16(c->sb.version);
2058 	SET_BSET_OFFSET(i, b->written);
2059 	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2060 
2061 	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2062 		validate_before_checksum = true;
2063 
2064 	/* validate_bset will be modifying the bset, so validate before checksumming: */
2065 	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2066 		validate_before_checksum = true;
2067 
2068 	/* if we're going to be encrypting, check metadata validity first: */
2069 	if (validate_before_checksum &&
2070 	    validate_bset_for_write(c, b, i, sectors_to_write))
2071 		goto err;
2072 
2073 	ret = bset_encrypt(c, i, b->written << 9);
2074 	if (bch2_fs_fatal_err_on(ret, c,
2075 			"error encrypting btree node: %i\n", ret))
2076 		goto err;
2077 
2078 	nonce = btree_nonce(i, b->written << 9);
2079 
2080 	if (bn)
2081 		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2082 	else
2083 		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2084 
2085 	/* if we're not encrypting, check metadata after checksumming: */
2086 	if (!validate_before_checksum &&
2087 	    validate_bset_for_write(c, b, i, sectors_to_write))
2088 		goto err;
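	/*
	 * Validation/checksum ordering summary (sketch of the logic above):
	 *
	 *	encrypted:		validate, encrypt, checksum
	 *	unencrypted, old format:	validate, checksum
	 *	unencrypted, current:	checksum, validate
	 *
	 * since validation can't be done on encrypted contents, and
	 * validation of older-version bsets may modify them.
	 */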
2089 
2090 	/*
2091 	 * We handle btree write errors by immediately halting the journal -
2092 	 * after we've done that, we can't issue any subsequent btree writes
2093 	 * because they might have pointers to new nodes that failed to write.
2094 	 *
2095 	 * Furthermore, there's no point in doing any more btree writes: with
2096 	 * the journal stopped, we're never going to update the journal to
2097 	 * reflect that those writes were done and the data flushed from the
2098 	 * journal.
2099 	 *
2100 	 * Also, on journal error the pending write may have updates that were
2101 	 * never journalled (interior nodes, see btree_update_nodes_written()) -
2102 	 * it's critical that we don't do the write in that case, otherwise we
2103 	 * would have updates visible that weren't in the journal.
2104 	 *
2105 	 * Make sure to update b->written so bch2_btree_init_next() doesn't
2106 	 * break:
2107 	 */
2108 	if (bch2_journal_error(&c->journal) ||
2109 	    c->opts.nochanges)
2110 		goto err;
2111 
2112 	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2113 
2114 	wbio = container_of(bio_alloc_bioset(NULL,
2115 				buf_pages(data, sectors_to_write << 9),
2116 				REQ_OP_WRITE|REQ_META,
2117 				GFP_NOFS,
2118 				&c->btree_bio),
2119 			    struct btree_write_bio, wbio.bio);
2120 	wbio_init(&wbio->wbio.bio);
2121 	wbio->data			= data;
2122 	wbio->data_bytes		= bytes;
2123 	wbio->sector_offset		= b->written;
2124 	wbio->wbio.c			= c;
2125 	wbio->wbio.used_mempool		= used_mempool;
2126 	wbio->wbio.first_btree_write	= !b->written;
2127 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
2128 	wbio->wbio.bio.bi_private	= b;
2129 
2130 	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2131 
2132 	bkey_copy(&wbio->key, &b->key);
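	/*
	 * wbio->key holds the node's key as of submission;
	 * btree_write_submit() adds sector_offset to each pointer so this
	 * bset lands immediately after the sectors already written.
	 */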
2133 
2134 	b->written += sectors_to_write;
2135 
2136 	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2137 		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2138 			cpu_to_le16(b->written);
2139 
2140 	atomic64_inc(&c->btree_write_stats[type].nr);
2141 	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2142 
2143 	INIT_WORK(&wbio->work, btree_write_submit);
2144 	queue_work(c->io_complete_wq, &wbio->work);
2145 	return;
2146 err:
2147 	set_btree_node_noevict(b);
2148 	b->written += sectors_to_write;
2149 nowrite:
2150 	btree_bounce_free(c, bytes, used_mempool, data);
2151 	__btree_node_write_done(c, b);
2152 }
2153 
2154 /*
2155  * Work that must be done with write lock held:
2156  */
2157 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2158 {
2159 	bool invalidated_iter = false;
2160 	struct btree_node_entry *bne;
2161 	struct bset_tree *t;
2162 
2163 	if (!btree_node_just_written(b))
2164 		return false;
2165 
2166 	BUG_ON(b->whiteout_u64s);
2167 
2168 	clear_btree_node_just_written(b);
2169 
2170 	/*
2171 	 * Note: immediately after write, bset_written() doesn't work - the
2172 	 * amount of data we had to write after compaction might have been
2173 	 * smaller than the offset of the last bset.
2174 	 *
2175 	 * However, we know that all bsets have been written here, as long as
2176 	 * we're still holding the write lock:
2177 	 */
2178 
2179 	/*
2180 	 * XXX: decide if we really want to unconditionally sort down to a
2181 	 * single bset:
2182 	 */
2183 	if (b->nsets > 1) {
2184 		btree_node_sort(c, b, 0, b->nsets, true);
2185 		invalidated_iter = true;
2186 	} else {
2187 		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2188 	}
2189 
2190 	for_each_bset(b, t)
2191 		set_needs_whiteout(bset(b, t), true);
2192 
2193 	bch2_btree_verify(c, b);
2194 
2195 	/*
2196 	 * If later we don't unconditionally sort down to a single bset, we have
2197 	 * to ensure this is still true:
2198 	 */
2199 	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2200 
2201 	bne = want_new_bset(c, b);
2202 	if (bne)
2203 		bch2_bset_init_next(c, b, bne);
2204 
2205 	bch2_btree_build_aux_trees(b);
2206 
2207 	return invalidated_iter;
2208 }
2209 
2210 /*
2211  * Use this one when already holding a lock on the node (pass the lock type held):
2212  */
2213 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2214 			   enum six_lock_type lock_type_held,
2215 			   unsigned flags)
2216 {
2217 	if (lock_type_held == SIX_LOCK_intent ||
2218 	    (lock_type_held == SIX_LOCK_read &&
2219 	     six_lock_tryupgrade(&b->c.lock))) {
2220 		__bch2_btree_node_write(c, b, flags);
2221 
2222 		/* don't cycle lock unnecessarily: */
2223 		if (btree_node_just_written(b) &&
2224 		    six_trylock_write(&b->c.lock)) {
2225 			bch2_btree_post_write_cleanup(c, b);
2226 			six_unlock_write(&b->c.lock);
2227 		}
2228 
2229 		if (lock_type_held == SIX_LOCK_read)
2230 			six_lock_downgrade(&b->c.lock);
2231 	} else {
2232 		__bch2_btree_node_write(c, b, flags);
2233 		if (lock_type_held == SIX_LOCK_write &&
2234 		    btree_node_just_written(b))
2235 			bch2_btree_post_write_cleanup(c, b);
2236 	}
2237 }
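/*
 * Lock-state sketch for bch2_btree_node_write() (six-lock semantics as
 * used above; illustration only):
 *
 *	held: intent	write directly, trylock write for cleanup
 *	held: read	try upgrading to intent, downgrade afterwards
 *	held: write	write, then run cleanup inline
 *
 * Post-write cleanup needs the write lock because it re-sorts bsets and
 * rebuilds the aux search trees, invalidating concurrent iterators.
 */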
2238 
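/*
 * Wait for all in-flight reads or writes on cached btree nodes. We can't
 * sleep inside rcu_read_lock(), so on finding a busy node we drop the RCU
 * lock, sleep on that node's flag bit, and restart the scan from the top -
 * the hash table may have changed while we slept.
 */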
2239 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2240 {
2241 	struct bucket_table *tbl;
2242 	struct rhash_head *pos;
2243 	struct btree *b;
2244 	unsigned i;
2245 	bool ret = false;
2246 restart:
2247 	rcu_read_lock();
2248 	for_each_cached_btree(b, c, tbl, i, pos)
2249 		if (test_bit(flag, &b->flags)) {
2250 			rcu_read_unlock();
2251 			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2252 			ret = true;
2253 			goto restart;
2254 		}
2255 	rcu_read_unlock();
2256 
2257 	return ret;
2258 }
2259 
2260 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2261 {
2262 	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2263 }
2264 
2265 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2266 {
2267 	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2268 }
2269 
2270 static const char * const bch2_btree_write_types[] = {
2271 #define x(t, n) [n] = #t,
2272 	BCH_BTREE_WRITE_TYPES()
2273 	NULL
2274 };
2275 
2276 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2277 {
2278 	printbuf_tabstop_push(out, 20);
2279 	printbuf_tabstop_push(out, 10);
2280 
2281 	prt_tab(out);
2282 	prt_str(out, "nr");
2283 	prt_tab(out);
2284 	prt_str(out, "size");
2285 	prt_newline(out);
2286 
2287 	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2288 		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
2289 		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);
2290 
2291 		prt_printf(out, "%s:", bch2_btree_write_types[i]);
2292 		prt_tab(out);
2293 		prt_u64(out, nr);
2294 		prt_tab(out);
2295 		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2296 		prt_newline(out);
2297 	}
2298 }
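/*
 * Example output (hypothetical values; one row per write type from
 * BCH_BTREE_WRITE_TYPES(), "size" being mean bytes per write):
 *
 *				nr	size
 *	initial:		42	12k
 *	journal_reclaim:	1337	4.0k
 */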
2299