xref: /linux/fs/bcachefs/btree_io.c (revision 482deed9dfa065cf3f68372dadac857541c7d504)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "async_objs.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "enumerated_ref.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{
	bch2_btree_id_level_to_text(out, BTREE_NODE_ID(bn), BTREE_NODE_LEVEL(bn));
	prt_printf(out, " seq %llx %llu\n", bn->keys.seq, BTREE_NODE_SEQ(bn));
	prt_str(out, "min: ");
	bch2_bpos_to_text(out, bn->min_key);
	prt_newline(out);
	prt_str(out, "max: ");
	bch2_bpos_to_text(out, bn->max_key);
}

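/*
 * A node's write_in_flight flag doubles as a simple IO lock: writers take it
 * with wait_on_bit_lock_io(), and unlock clears both in-flight bits before
 * waking waiters. The barrier orders the flag clears against the waitqueue
 * check in wake_up_bit().
 */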
void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight_inner(b);
	clear_btree_node_write_in_flight(b);
	smp_mb__after_atomic();
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_p_next(start);
	     k != end;
	     p = k, k = bkey_p_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
	}
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
		k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		kvfree(p);
}

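/*
 * Bounce buffers are used by the sort and read paths: allocation first tries
 * a non-blocking kvmalloc() and only falls back to the (guaranteed, but
 * limited) mempool if that fails. memalloc_nofs_save() keeps the allocation
 * from recursing into filesystem reclaim.
 */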
static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > c->opts.btree_node_size);

	*used_mempool = false;
	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
	}
	memalloc_nofs_restore(flags);
	return p;
}

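/*
 * Heapsort over an array of pointers to packed keys: sorting pointers lets
 * bch2_sort_whiteouts() put variable-size bkeys in order without repeatedly
 * memmove()ing them.
 */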
static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
					    ptrs[c],
					    ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
				       ptrs[a],
				       ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
		}
	}
}

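/*
 * Sort the unwritten whiteouts into key order: build an array of pointers to
 * them in a bounce buffer, sort the pointers, then copy the whiteouts back
 * in sorted order.
 */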
static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(b);
	     k != unwritten_whiteouts_end(b);
	     k = bkey_p_next(k))
		*--ptrs = k;

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_p_copy(k, *ptrs);
		k = bkey_p_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

	memcpy_u64s(unwritten_whiteouts_start(b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

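/*
 * COMPACT_LAZY defers to should_compact_bset_lazy()'s threshold (or compacts
 * unwritten bsets when we're compacting anyway); COMPACT_ALL compacts any
 * bset with dead u64s.
 */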
static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}

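/*
 * Drop deleted keys (whiteouts) from each bset we're allowed to touch,
 * sliding unwritten bsets down over freed space as we go. Returns true if
 * any keys were dropped or bsets moved.
 */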
static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (src != dst)
			ret = true;

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start	= btree_bkey_first(b, t);
		end	= btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_p_next(k);

			if (!bkey_deleted(k)) {
				bkey_p_copy(out, k);
				out = bkey_p_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return bch2_drop_whiteouts(b, mode);
}

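/*
 * Merge sort bsets [start_idx, end_idx) into a single bset, in a bounce
 * buffer. When sorting the entire node the bounce buffer is node-sized and
 * can simply be swapped with the node's buffer; otherwise the result is
 * copied back into the first bset being sorted.
 */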
static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx)
{
	struct btree_node *out;
	struct sort_iter_stack sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_stack_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_buf_bytes(b)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_buf_bytes(b));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
			 struct btree *dst,
			 struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	nr = bch2_sort_repack(btree_bset_first(dst),
			src, &src_iter,
			&dst->format,
			true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}

/*
 * We're about to add another bset to the btree node, so if there are
 * currently too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx, b->nsets);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx);
		ret = true;
	}

	return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
	unsigned mid_u64s_bits =
		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * If we sorted (i.e. invalidated iterators), iterators are reinitialized via
 * bch2_trans_node_reinit_iter().
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
	BUG_ON(bset_written(b, bset(b, &b->set[1])));
	BUG_ON(btree_node_just_written(b));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b) &&
	    should_compact_all(c, b)) {
		bch2_btree_node_write_trans(trans, b, SIX_LOCK_write,
					    BTREE_WRITE_init_next_bset);
		reinit_iter = true;
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	if (reinit_iter)
		bch2_trans_node_reinit_iter(trans, b);
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  bool print_pos,
			  struct btree *b, struct bset *i, struct bkey_packed *k,
			  unsigned offset, int rw)
{
	if (print_pos) {
		prt_str(out, rw == READ
			? "error validating btree node "
			: "corrupt btree node before write ");
		prt_printf(out, "at btree ");
		bch2_btree_pos_to_text(out, c, b);
		prt_newline(out);
	}

	if (ca)
		prt_printf(out, "%s ", ca->name);

	prt_printf(out, "node offset %u/%u",
		   b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
	if (i)
		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
	if (k)
		prt_printf(out, " bset byte offset %lu",
			   (unsigned long)(void *)k -
			   ((unsigned long)(void *)i & ~511UL));
	prt_str(out, ": ");
}

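/*
 * Central btree read/write error policy: the incoming @ret encodes severity
 * (fixable, want_retry, must_retry, bad_node, incompatible). Retryable
 * errors are downgraded to fixable/bad_node once there's no other replica
 * left to read from. On reads, messages normally accumulate in @err_msg for
 * the caller to print; when fsck is running with fix_errors=ask they're
 * printed immediately so the fix prompt includes them.
 */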
__printf(11, 12)
static int __btree_err(int ret,
		       struct bch_fs *c,
		       struct bch_dev *ca,
		       struct btree *b,
		       struct bset *i,
		       struct bkey_packed *k,
		       int rw,
		       enum bch_sb_error_id err_type,
		       struct bch_io_failures *failed,
		       struct printbuf *err_msg,
		       const char *fmt, ...)
{
	if (c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes)
		return ret == -BCH_ERR_btree_node_read_err_fixable
			? bch_err_throw(c, fsck_fix)
			: ret;

	bool have_retry = false;
	int ret2;

	if (ca) {
		bch2_mark_btree_validate_failure(failed, ca->dev_idx);

		struct extent_ptr_decoded pick;
		have_retry = !bch2_bkey_pick_read_device(c,
					bkey_i_to_s_c(&b->key),
					failed, &pick, -1);
	}

	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
		ret = bch_err_throw(c, btree_node_read_err_fixable);
	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
		ret = bch_err_throw(c, btree_node_read_err_bad_node);

	bch2_sb_error_count(c, err_type);

	bool print_deferred = err_msg &&
		rw == READ &&
		!(test_bit(BCH_FS_in_fsck, &c->flags) &&
		  c->opts.fix_errors == FSCK_FIX_ask);

	struct printbuf out = PRINTBUF;
	bch2_log_msg_start(c, &out);

	if (!print_deferred)
		err_msg = &out;

	btree_err_msg(err_msg, c, ca, !print_deferred, b, i, k, b->written, rw);

	va_list args;
	va_start(args, fmt);
	prt_vprintf(err_msg, fmt, args);
	va_end(args);

	if (print_deferred) {
		prt_newline(err_msg);

		switch (ret) {
		case -BCH_ERR_btree_node_read_err_fixable:
			ret2 = bch2_fsck_err_opt(c, FSCK_CAN_FIX, err_type);
			if (!bch2_err_matches(ret2, BCH_ERR_fsck_fix) &&
			    !bch2_err_matches(ret2, BCH_ERR_fsck_ignore)) {
				ret = ret2;
				goto fsck_err;
			}

			if (!have_retry)
				ret = bch_err_throw(c, fsck_fix);
			goto out;
		case -BCH_ERR_btree_node_read_err_bad_node:
			prt_str(&out, ", ");
			ret = __bch2_topology_error(c, &out);
			break;
		}

		goto out;
	}

	if (rw == WRITE) {
		prt_str(&out, ", ");
		ret = __bch2_inconsistent_error(c, &out)
			? -BCH_ERR_fsck_errors_not_fixed
			: 0;
		goto print;
	}

	switch (ret) {
	case -BCH_ERR_btree_node_read_err_fixable:
		ret2 = __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf);
		if (!bch2_err_matches(ret2, BCH_ERR_fsck_fix) &&
		    !bch2_err_matches(ret2, BCH_ERR_fsck_ignore)) {
			ret = ret2;
			goto fsck_err;
		}

		if (!have_retry)
			ret = bch_err_throw(c, fsck_fix);
		goto out;
	case -BCH_ERR_btree_node_read_err_bad_node:
		prt_str(&out, ", ");
		ret = __bch2_topology_error(c, &out);
		break;
	}
print:
	bch2_print_str(c, KERN_ERR, out.buf);
out:
fsck_err:
	printbuf_exit(&out);
	return ret;
}

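/*
 * Wrapper around __btree_err() that picks up locals from the calling
 * context: @write, @failed, @err_msg, a local @ret, and an fsck_err label.
 * Evaluates to true if the error was fixed and validation can continue;
 * otherwise it stashes the error in @ret and jumps to fsck_err.
 */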
#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)		\
({									\
	int _ret = __btree_err(type, c, ca, b, i, k, write,		\
			       BCH_FSCK_ERR_##_err_type,		\
			       failed, err_msg,				\
			       msg, ##__VA_ARGS__);			\
									\
	if (!bch2_err_matches(_ret, BCH_ERR_fsck_fix)) {		\
		ret = _ret;						\
		goto fsck_err;						\
	}								\
									\
	true;								\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k;

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
				break;

		if (k != i->start) {
			unsigned shift = (u64 *) k - (u64 *) i->start;

			memmove_u64s_down(i->start, k,
					  (u64 *) vstruct_end(i) - (u64 *) k);
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
			set_btree_bset_end(b, t);
		}

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
				break;

		if (k != vstruct_last(i)) {
			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
			set_btree_bset_end(b, t);
		}
	}

	/*
	 * Always rebuild search trees: eytzinger search tree nodes directly
	 * depend on the values of min/max key:
	 */
	bch2_bset_set_no_aux_tree(b, b->set);
	bch2_btree_build_aux_trees(b);
	b->nr = bch2_btree_node_count_keys(b);

	struct bkey_s_c k;
	struct bkey unpacked;
	struct btree_node_iter iter;
	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
	}
}

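/*
 * Validate a single bset's header fields. Only the first bset (offset 0) is
 * part of the btree_node header proper, so that's where the checks for
 * having read the wrong node entirely (seq, btree id, level, min/max keys,
 * format) live.
 */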
static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned offset, int write,
			 struct bch_io_failures *failed,
			 struct printbuf *err_msg)
{
	unsigned version = le16_to_cpu(i->version);
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	btree_err_on(!bch2_version_compatible(version),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "unsupported bset version %u.%u",
		     BCH_VERSION_MAJOR(version),
		     BCH_VERSION_MINOR(version));

	if (c->recovery.curr_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes &&
	    btree_err_on(version < c->sb.version_min,
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_older_than_sb_min,
			 "bset version %u older than superblock version_min %u",
			 version, c->sb.version_min)) {
		if (bch2_version_compatible(version)) {
			mutex_lock(&c->sb_lock);
			c->disk_sb.sb->version_min = cpu_to_le16(version);
			bch2_write_super(c);
			mutex_unlock(&c->sb_lock);
		} else {
			/* We have no idea what's going on: */
			i->version = cpu_to_le16(c->sb.version);
		}
	}

	if (btree_err_on(BCH_VERSION_MAJOR(version) >
			 BCH_VERSION_MAJOR(c->sb.version),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_newer_than_sb,
			 "bset version %u newer than superblock version %u",
			 version, c->sb.version)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "BSET_SEPARATE_WHITEOUTS no longer supported");

	btree_err_on(offset && !i->u64s,
		     -BCH_ERR_btree_node_read_err_fixable,
		     c, ca, b, i, NULL,
		     bset_empty,
		     "empty bset");

	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
		     -BCH_ERR_btree_node_read_err_want_retry,
		     c, ca, b, i, NULL,
		     bset_wrong_sector_offset,
		     "bset at wrong sector offset");

	if (!offset) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			/* XXX endianness */
			btree_err_on(bp->seq != bn->keys.seq,
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     bset_bad_seq,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_btree,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_level,
			     "incorrect level");

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bad_min_key,
				     "incorrect min_key: got %s should be %s",
				     (printbuf_reset(&buf1),
				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
				     (printbuf_reset(&buf2),
				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
		}

		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_max_key,
			     "incorrect max key %s",
			     (printbuf_reset(&buf1),
			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
			     -BCH_ERR_btree_node_read_err_bad_node,
			     c, ca, b, i, NULL,
			     btree_node_bad_format,
			     "invalid bkey format: %s\n%s", buf1.buf,
			     (printbuf_reset(&buf2),
			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
		printbuf_reset(&buf1);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b,
					struct bkey_s_c k,
					enum bch_validate_flags flags)
{
	return bch2_bkey_val_validate(c, k, (struct bkey_validate_context) {
		.from	= BKEY_VALIDATE_btree_node,
		.level	= b->c.level,
		.btree	= b->c.btree_id,
		.flags	= flags
	});
}

static int bset_key_validate(struct bch_fs *c, struct btree *b,
			     struct bkey_s_c k,
			     bool updated_range,
			     enum bch_validate_flags flags)
{
	struct bkey_validate_context from = (struct bkey_validate_context) {
		.from	= BKEY_VALIDATE_btree_node,
		.level	= b->c.level,
		.btree	= b->c.btree_id,
		.flags	= flags,
	};
	return __bch2_bkey_validate(c, k, from) ?:
		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?:
		(flags & BCH_VALIDATE_write ? btree_node_bkey_val_validate(c, b, k, flags) : 0);
}

static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
			 struct bset *i, struct bkey_packed *k)
{
	if (bkey_p_next(k) > vstruct_last(i))
		return false;

	if (k->format > KEY_FORMAT_CURRENT)
		return false;

	if (!bkeyp_u64s_valid(&b->format, k))
		return false;

	struct bkey tmp;
	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
	return !__bch2_bkey_validate(c, u.s_c,
				     (struct bkey_validate_context) {
					.from	= BKEY_VALIDATE_btree_node,
					.level	= b->c.level,
					.btree	= b->c.btree_id,
					.flags	= BCH_VALIDATE_silent
				     });
}

static inline int btree_node_read_bkey_cmp(const struct btree *b,
				const struct bkey_packed *l,
				const struct bkey_packed *r)
{
	return bch2_bkey_cmp_packed(b, l, r)
		?: (int) bkey_deleted(r) - (int) bkey_deleted(l);
}

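/*
 * Walk and validate the keys in one bset, dropping keys that fail
 * validation: when k->u64s can't be trusted (and bch2_bkey_compat() has
 * nothing to do), scan forward a u64 at a time for the next key that passes
 * bkey_packed_valid(), truncating the bset if none is found.
 */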
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			 struct bset *i, int write,
			 struct bch_io_failures *failed,
			 struct printbuf *err_msg)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	struct printbuf buf = PRINTBUF;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	int ret = 0;

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;
		unsigned next_good_key;

		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_past_bset_end,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_format,
				 "invalid bkey format %u", k->format))
			goto drop_this_key;

		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_u64s,
				 "bad k->u64s %u (min %u max %zu)", k->u64s,
				 bkeyp_key_u64s(&b->format, k),
				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
			goto drop_this_key;

		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
				    BSET_BIG_ENDIAN(i), write,
				    &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		ret = bset_key_validate(c, b, u.s_c, updated_range, write);
		if (ret == -BCH_ERR_fsck_delete_bkey)
			goto drop_this_key;
		if (ret)
			goto fsck_err;

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
				    BSET_BIG_ENDIAN(i), write,
				    &b->format, k);

		if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) {
			struct bkey up = bkey_unpack_key(b, prev);

			printbuf_reset(&buf);
			prt_printf(&buf, "keys out of order: ");
			bch2_bkey_to_text(&buf, &up);
			prt_printf(&buf, " > ");
			bch2_bkey_to_text(&buf, u.k);

			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
				      c, NULL, b, i, k,
				      btree_node_bkey_out_of_order,
				      "%s", buf.buf))
				goto drop_this_key;
		}

		prev = k;
		k = bkey_p_next(k);
		continue;
drop_this_key:
		next_good_key = k->u64s;

		if (!next_good_key ||
		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
		     version >= bcachefs_metadata_version_snapshot)) {
			/*
			 * only do scanning if bch2_bkey_compat() has nothing to
			 * do
			 */

			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
				for (next_good_key = 1;
				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
				     next_good_key++)
					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
						goto got_good_key;
			}

			/*
			 * didn't find a good key, have to truncate the rest of
			 * the bset
			 */
			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
		}
got_good_key:
		le16_add_cpu(&i->u64s, -next_good_key);
		memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
		set_btree_node_need_rewrite(b);
		set_btree_node_need_rewrite_error(b);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

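/*
 * Read path for a whole btree node: validate each bset in turn (skipping
 * blacklisted bsets past the first), merge the survivors into a single
 * sorted bset with bch2_key_sort_fix_overlapping(), then validate key
 * values and mark the node for rewrite if it was degraded or had errors.
 */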
int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b,
			      struct bch_io_failures *failed,
			      struct printbuf *err_msg)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bset *i;
	bool used_mempool, blacklisted;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
	u64 max_journal_seq = 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0, write = READ;
	u64 start_time = local_clock();

	b->version_ondisk = U16_MAX;
	/* We might get called multiple times on read retry: */
	b->written = 0;

	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);

	if (bch2_meta_read_fault("btree"))
		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
			  c, ca, b, NULL, NULL,
			  btree_node_fault_injected,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     -BCH_ERR_btree_node_read_err_must_retry,
		     c, ca, b, NULL, NULL,
		     btree_node_bad_magic,
		     "bad magic: want %llx, got %llx",
		     bset_magic(c), le64_to_cpu(b->data->magic));

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		bch2_bpos_to_text(&buf, b->data->min_key);
		prt_str(&buf, "-");
		bch2_bpos_to_text(&buf, b->data->max_key);

		btree_err_on(b->data->keys.seq != bp->seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "got wrong btree node: got\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	} else {
		btree_err_on(!b->data->keys.seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "bad btree header: seq 0\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	}

	while (b->written < (ptr_written ?: btree_sectors(c))) {
		unsigned sectors;
		bool first = !b->written;

		if (first) {
			bne = NULL;
			i = &b->data->keys;
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;
		}

		struct nonce nonce = btree_nonce(i, b->written << 9);
		bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));

		btree_err_on(!good_csum_type,
			     bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i))
			     ? -BCH_ERR_btree_node_read_err_must_retry
			     : -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, i, NULL,
			     bset_unknown_csum,
			     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

		if (first) {
			sectors = vstruct_sectors(b->data, c->block_bits);
			if (btree_err_on(b->written + sectors > (ptr_written ?: btree_sectors(c)),
					 -BCH_ERR_btree_node_read_err_fixable,
					 c, ca, b, i, NULL,
					 bset_past_end_of_btree_node,
					 "bset past end of btree node (offset %u len %u but written %zu)",
					 b->written, sectors, ptr_written ?: btree_sectors(c)))
				i->u64s = 0;
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
				bool csum_bad = bch2_crc_cmp(b->data->csum, csum);
				if (csum_bad)
					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

				btree_err_on(csum_bad,
					     -BCH_ERR_btree_node_read_err_want_retry,
					     c, ca, b, i, NULL,
					     bset_bad_csum,
					     "%s",
					     (printbuf_reset(&buf),
					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
					      buf.buf));

				ret = bset_encrypt(c, i, b->written << 9);
				if (bch2_fs_fatal_err_on(ret, c,
							 "decrypting btree node: %s", bch2_err_str(ret)))
					goto fsck_err;
			}

			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
				     -BCH_ERR_btree_node_read_err_incompatible,
				     c, NULL, b, NULL, NULL,
				     btree_node_unsupported_version,
				     "btree node does not have NEW_EXTENT_OVERWRITE set");
		} else {
			sectors = vstruct_sectors(bne, c->block_bits);
			if (btree_err_on(b->written + sectors > (ptr_written ?: btree_sectors(c)),
					 -BCH_ERR_btree_node_read_err_fixable,
					 c, ca, b, i, NULL,
					 bset_past_end_of_btree_node,
					 "bset past end of btree node (offset %u len %u but written %zu)",
					 b->written, sectors, ptr_written ?: btree_sectors(c)))
				i->u64s = 0;
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
				bool csum_bad = bch2_crc_cmp(bne->csum, csum);
				if (ca && csum_bad)
					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

				btree_err_on(csum_bad,
					     -BCH_ERR_btree_node_read_err_want_retry,
					     c, ca, b, i, NULL,
					     bset_bad_csum,
					     "%s",
					     (printbuf_reset(&buf),
					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
					      buf.buf));

				ret = bset_encrypt(c, i, b->written << 9);
				if (bch2_fs_fatal_err_on(ret, c,
						"decrypting btree node: %s", bch2_err_str(ret)))
					goto fsck_err;
			}
		}

		b->version_ondisk = min(b->version_ondisk,
					le16_to_cpu(i->version));

		ret = validate_bset(c, ca, b, i, b->written, READ, failed, err_msg);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, READ, failed, err_msg);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     bset_blacklisted_journal_seq,
			     "first btree node bset has blacklisted journal seq (%llu)",
			     le64_to_cpu(i->journal_seq));

		btree_err_on(blacklisted && ptr_written,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     first_bset_blacklisted_journal_seq,
			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
			     le64_to_cpu(i->journal_seq),
			     b->written, b->written + sectors, ptr_written);

		b->written = min(b->written + sectors, btree_sectors(c));

		if (blacklisted && !first)
			continue;

		sort_iter_add(iter,
			      vstruct_idx(i, 0),
			      vstruct_last(i));

		max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq));
	}

	if (ptr_written) {
		btree_err_on(b->written < ptr_written,
			     -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_data_missing,
			     "btree node data missing: expected %u sectors, found %u",
			     ptr_written, b->written);
	} else {
		for (bne = write_block(b);
		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
		     bne = (void *) bne + block_bytes(c))
			btree_err_on(bne->keys.seq == b->data->keys.seq &&
				     !bch2_journal_seq_is_blacklisted(c,
								      le64_to_cpu(bne->keys.journal_seq),
								      true),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bset_after_end,
				     "found bset signature after last bset");
	}

	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
	sorted->keys.u64s = 0;

	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
	memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
			btree_buf_bytes(b) -
			sizeof(struct btree_node) -
			b->nr.live_u64s * sizeof(u64));

	b->data->keys.u64s = sorted->keys.u64s;
	*sorted = *b->data;
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;
	b->data->keys.journal_seq = cpu_to_le64(max_journal_seq);

	BUG_ON(b->nr.live_u64s != le16_to_cpu(b->data->keys.u64s));

	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);

	if (updated_range)
		bch2_btree_node_drop_keys_outside_node(b);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);

		ret = btree_node_bkey_val_validate(c, b, u.s_c, READ);
		if (ret == -BCH_ERR_fsck_delete_bkey ||
		    (static_branch_unlikely(&bch2_inject_invalid_keys) &&
		     !bversion_cmp(u.k->bversion, MAX_VERSION))) {
			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_p_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			set_btree_node_need_rewrite(b);
			set_btree_node_need_rewrite_error(b);
			continue;
		}
		if (ret)
			goto fsck_err;

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_p_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	/*
	 * XXX:
	 *
	 * We deadlock if too many btree updates require node rewrites while
	 * we're still in journal replay.
	 *
	 * This is because btree node rewrites generate more updates for the
	 * interior updates (alloc, backpointers), and if those updates touch
	 * new nodes and generate more rewrites - well, you see the problem.
	 *
	 * The biggest cause is that we don't use the btree write buffer (for
	 * the backpointer updates - this needs some real thought on locking in
	 * order to fix).
	 *
	 * The problem with this workaround (not doing the rewrite for degraded
	 * nodes in journal replay) is that those degraded nodes persist, and we
	 * don't want that (this is a real bug when a btree node write completes
	 * with fewer replicas than we wanted and leaves a degraded node due to
	 * device _removal_, i.e. the device went away mid write).
	 *
	 * It's less of a bug here, but still a problem because we don't yet
	 * have a way of tracking degraded data - we need another index (all
	 * extents/btree nodes, by replicas entry) in order to fix properly
	 * (re-replicate degraded data at the earliest possible time).
	 */
	if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_journal_replay)) {
		scoped_guard(rcu)
			bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
				struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);

				if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw) {
					set_btree_node_need_rewrite(b);
					set_btree_node_need_rewrite_degraded(b);
				}
			}
	}

	if (!ptr_written) {
		set_btree_node_need_rewrite(b);
		set_btree_node_need_rewrite_ptr_written_zero(b);
	}
fsck_err:
	mempool_free(iter, &c->fill_iter);
	printbuf_exit(&buf);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
	return ret;
}

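/*
 * Read retry worker: the initial read was already submitted by
 * bch2_btree_node_read(), hence the goto into the middle of the loop - the
 * first pass only processes that completion, and subsequent iterations pick
 * a different replica (via the accumulated failure list) and resubmit.
 */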
static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c	= rb->c;
	struct bch_dev *ca	= rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
	struct btree *b		= rb->b;
	struct bio *bio		= &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	int ret = 0;

	struct printbuf buf = PRINTBUF;
	bch2_log_msg_start(c, &buf);

	prt_printf(&buf, "btree node read error at btree ");
	bch2_btree_pos_to_text(&buf, c, b);
	prt_newline(&buf);

	goto start;
	while (1) {
		ret = bch2_bkey_pick_read_device(c,
					bkey_i_to_s_c(&b->key),
					&failed, &rb->pick, -1);
		if (ret) {
			set_btree_node_read_error(b);
			break;
		}

		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);
		rb->have_ioref		= ca != NULL;
		rb->start_time		= local_clock();
		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_buf_bytes(b);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}

		bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
					   rb->start_time, !bio->bi_status);
start:
		if (rb->have_ioref)
			enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_read);
		rb->have_ioref = false;

		if (bio->bi_status) {
			bch2_mark_io_failure(&failed, &rb->pick, false);
			continue;
		}

		ret = bch2_btree_node_read_done(c, ca, b, &failed, &buf);
		if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
		    ret == -BCH_ERR_btree_node_read_err_must_retry)
			continue;

		if (ret)
			set_btree_node_read_error(b);

		break;
	}

	bch2_io_failures_to_text(&buf, c, &failed);

	if (btree_node_read_error(b))
		bch2_btree_lost_data(c, &buf, b->c.btree_id);

	/*
	 * only print retry success if we read from a replica with no errors
	 */
	if (btree_node_read_error(b))
		prt_printf(&buf, "ret %s", bch2_err_str(ret));
	else if (failed.nr) {
		if (!bch2_dev_io_failures(&failed, rb->pick.ptr.dev))
			prt_printf(&buf, "retry success");
		else
			prt_printf(&buf, "repair success");
	}

	if ((failed.nr ||
	     btree_node_need_rewrite(b)) &&
	    !btree_node_read_error(b) &&
	    c->recovery.curr_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
		prt_printf(&buf, " (rewriting node)");
		bch2_btree_node_rewrite_async(c, b);
	}
	prt_newline(&buf);

	if (failed.nr)
		bch2_print_str_ratelimited(c, KERN_ERR, buf.buf);

	async_object_list_del(c, btree_read_bio, rb->list_idx);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);
	printbuf_exit(&buf);
	clear_btree_node_read_in_flight(b);
	smp_mb__after_atomic();
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;
	struct bch_dev *ca	= rb->have_ioref
		? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;

	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
				   rb->start_time, !bio->bi_status);

	queue_work(c->btree_read_complete_wq, &rb->work);
}

void bch2_btree_read_bio_to_text(struct printbuf *out, struct btree_read_bio *rbio)
{
	bch2_bio_to_text(out, &rbio->bio);
}

struct btree_node_read_all {
	struct closure		cl;
	struct bch_fs		*c;
	struct btree		*b;
	unsigned		nr;
	void			*buf[BCH_REPLICAS_MAX];
	struct bio		*bio[BCH_REPLICAS_MAX];
	blk_status_t		err[BCH_REPLICAS_MAX];
};

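/*
 * Count how many sectors of this buffer hold valid bsets: walk forward from
 * the header, following vstruct sizes, until we hit a bset whose seq doesn't
 * match the node's.
 */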
static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < btree_sectors(c)) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < btree_sectors(c)) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}

static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
	closure_type(ra, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	struct printbuf buf = PRINTBUF;
	bool dump_bset_maps = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written = 0, written2 = 0;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
	bool _saw_error = false, *saw_error = &_saw_error;
	struct printbuf *err_msg = NULL;
	struct bch_io_failures *failed = NULL;

	for (i = 0; i < ra->nr; i++) {
		struct btree_node *bn = ra->buf[i];

		if (ra->err[i])
			continue;

		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
		    (seq && seq != bn->keys.seq))
			continue;

		if (best < 0) {
			best = i;
			written = btree_node_sectors_written(c, bn);
			continue;
		}

		written2 = btree_node_sectors_written(c, ra->buf[i]);
		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_sectors_written_mismatch,
				 "btree node sectors written mismatch: %u != %u",
				 written, written2) ||
		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_bset_after_end,
				 "found bset signature after last bset") ||
		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_data_mismatch,
				 "btree node replicas content mismatch"))
			dump_bset_maps = true;

		if (written2 > written) {
			written = written2;
			best = i;
		}
	}
fsck_err:
	if (dump_bset_maps) {
		for (i = 0; i < ra->nr; i++) {
			struct btree_node *bn = ra->buf[i];
			struct btree_node_entry *bne = NULL;
			unsigned offset = 0, sectors;
			bool gap = false;

			if (ra->err[i])
				continue;

			printbuf_reset(&buf);

			while (offset < btree_sectors(c)) {
				if (!offset) {
					sectors = vstruct_sectors(bn, c->block_bits);
				} else {
					bne = ra->buf[i] + (offset << 9);
					if (bne->keys.seq != bn->keys.seq)
						break;
					sectors = vstruct_sectors(bne, c->block_bits);
				}

				prt_printf(&buf, " %u-%u", offset, offset + sectors);
				if (bne && bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
					prt_printf(&buf, "*");
				offset += sectors;
			}

			while (offset < btree_sectors(c)) {
				bne = ra->buf[i] + (offset << 9);
				if (bne->keys.seq == bn->keys.seq) {
					if (!gap)
						prt_printf(&buf, " GAP");
					gap = true;

					sectors = vstruct_sectors(bne, c->block_bits);
					prt_printf(&buf, " %u-%u", offset, offset + sectors);
					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
						prt_printf(&buf, "*");
				}
				offset++;
			}

			bch_err(c, "replica %u:%s", i, buf.buf);
		}
	}

	if (best >= 0) {
		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
		ret = bch2_btree_node_read_done(c, NULL, b, NULL, NULL);
	} else {
		ret = -1;
	}

	if (ret) {
		set_btree_node_read_error(b);

		struct printbuf buf = PRINTBUF;
		bch2_btree_lost_data(c, &buf, b->c.btree_id);
		if (buf.pos)
			bch_err(c, "%s", buf.buf);
		printbuf_exit(&buf);
	} else if (*saw_error)
		bch2_btree_node_rewrite_async(c, b);

	for (i = 0; i < ra->nr; i++) {
		mempool_free(ra->buf[i], &c->btree_bounce_pool);
		bio_put(ra->bio[i]);
	}

	closure_debug_destroy(&ra->cl);
	kfree(ra);
	printbuf_exit(&buf);

	clear_btree_node_read_in_flight(b);
	smp_mb__after_atomic();
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_all_replicas_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;
	struct btree_node_read_all *ra = rb->ra;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
		enumerated_ref_put(&ca->io_ref[READ],
			BCH_DEV_READ_REF_btree_node_read_all_replicas);
	}

	ra->err[rb->idx] = bio->bi_status;
	closure_put(&ra->cl);
}

1718 /*
1719  * XXX This allocates multiple times from the same mempools, and can deadlock
1720  * under sufficient memory pressure (but is only a debug path)
1721  */
btree_node_read_all_replicas(struct bch_fs * c,struct btree * b,bool sync)1722 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1723 {
1724 	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1725 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1726 	const union bch_extent_entry *entry;
1727 	struct extent_ptr_decoded pick;
1728 	struct btree_node_read_all *ra;
1729 	unsigned i;
1730 
1731 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
1732 	if (!ra)
1733 		return bch_err_throw(c, ENOMEM_btree_node_read_all_replicas);
1734 
1735 	closure_init(&ra->cl, NULL);
1736 	ra->c	= c;
1737 	ra->b	= b;
1738 	ra->nr	= bch2_bkey_nr_ptrs(k);
1739 
1740 	for (i = 0; i < ra->nr; i++) {
1741 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1742 		ra->bio[i] = bio_alloc_bioset(NULL,
1743 					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
1744 					      REQ_OP_READ|REQ_SYNC|REQ_META,
1745 					      GFP_NOFS,
1746 					      &c->btree_bio);
1747 	}
1748 
1749 	i = 0;
1750 	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1751 		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
1752 					BCH_DEV_READ_REF_btree_node_read_all_replicas);
1753 		struct btree_read_bio *rb =
1754 			container_of(ra->bio[i], struct btree_read_bio, bio);
1755 		rb->c			= c;
1756 		rb->b			= b;
1757 		rb->ra			= ra;
1758 		rb->start_time		= local_clock();
1759 		rb->have_ioref		= ca != NULL;
1760 		rb->idx			= i;
1761 		rb->pick		= pick;
1762 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1763 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
1764 		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
1765 
1766 		if (rb->have_ioref) {
1767 			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1768 				     bio_sectors(&rb->bio));
1769 			bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1770 
1771 			closure_get(&ra->cl);
1772 			submit_bio(&rb->bio);
1773 		} else {
1774 			ra->err[i] = BLK_STS_REMOVED;
1775 		}
1776 
1777 		i++;
1778 	}
1779 
1780 	if (sync) {
1781 		closure_sync(&ra->cl);
1782 		btree_node_read_all_replicas_done(&ra->cl.work);
1783 	} else {
1784 		continue_at(&ra->cl, btree_node_read_all_replicas_done,
1785 			    c->btree_read_complete_wq);
1786 	}
1787 
1788 	return 0;
1789 }
1790 
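/*
 * Read in a single btree node: pick the best replica via
 * bch2_bkey_pick_read_device() (or read every replica, when the
 * bch2_verify_all_btree_replicas debug option is enabled), then either wait
 * for completion (@sync) or let the endio/workqueue path finish up.
 */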
void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
			  bool sync)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_and_count(c, btree_node_read, trans, b);

	if (static_branch_unlikely(&bch2_verify_all_btree_replicas) &&
	    !btree_node_read_all_replicas(c, b, sync))
		return;

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick, -1);

	if (ret <= 0) {
		bool ratelimit = true;
		struct printbuf buf = PRINTBUF;
		bch2_log_msg_start(c, &buf);

		prt_str(&buf, "btree node read error: no device to read from\n at ");
		bch2_btree_pos_to_text(&buf, c, b);
		prt_newline(&buf);
		bch2_btree_lost_data(c, &buf, b->c.btree_id);

		if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
		    bch2_fs_emergency_read_only2(c, &buf))
			ratelimit = false;

		static DEFINE_RATELIMIT_STATE(rs,
					      DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
		if (!ratelimit || __ratelimit(&rs))
			bch2_print_str(c, KERN_ERR, buf.buf);
		printbuf_exit(&buf);

		set_btree_node_read_error(b);
		clear_btree_node_read_in_flight(b);
		smp_mb__after_atomic();
		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
		return;
	}

	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);

	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_SYNC|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->b			= b;
	rb->ra			= NULL;
	rb->start_time		= local_clock();
	rb->have_ioref		= ca != NULL;
	rb->pick		= pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_end_io		= btree_node_read_endio;
	bch2_bio_map(bio, b->data, btree_buf_bytes(b));

	async_object_list_add(c, btree_read_bio, rb, &rb->list_idx);

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);
			bch2_latency_acct(ca, rb->start_time, READ);
			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(c->btree_read_complete_wq, &rb->work);
	}
}

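/*
 * Read in a btree root: allocate a node (taking the cannibalize lock so the
 * allocation cannot fail), read it in synchronously, and on success install
 * it as the root for this btree.
 */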
static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
				  const struct bkey_i *k, unsigned level)
{
	struct bch_fs *c = trans->c;
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(trans, level != 0);
	bch2_btree_cache_cannibalize_unlock(trans);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	set_btree_node_read_in_flight(b);

	/* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */
	bch2_trans_unlock(trans);
	bch2_btree_node_read(trans, b, true);

	if (btree_node_read_error(b)) {
		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, b);
		mutex_unlock(&c->btree_cache.lock);

		ret = bch_err_throw(c, btree_node_read_error);
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			const struct bkey_i *k, unsigned level)
{
	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
}

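/* State for an asynchronous btree node scrub read: */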
struct btree_node_scrub {
	struct bch_fs		*c;
	struct bch_dev		*ca;
	void			*buf;
	bool			used_mempool;
	unsigned		written;

	enum btree_id		btree;
	unsigned		level;
	struct bkey_buf		key;
	__le64			seq;

	struct work_struct	work;
	struct bio		bio;
};

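/*
 * Lightweight validation for scrub: walk the bsets, checking only the magic
 * number and the per-bset checksums - keys are not parsed or validated here.
 * Returns false (with an explanation appended to @err) if the node fails.
 */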
static bool btree_node_scrub_check(struct bch_fs *c, struct btree_node *data, unsigned ptr_written,
				   struct printbuf *err)
{
	unsigned written = 0;

	if (le64_to_cpu(data->magic) != bset_magic(c)) {
		prt_printf(err, "bad magic: want %llx, got %llx",
			   bset_magic(c), le64_to_cpu(data->magic));
		return false;
	}

	while (written < (ptr_written ?: btree_sectors(c))) {
		struct btree_node_entry *bne;
		struct bset *i;
		bool first = !written;

		if (first) {
			bne = NULL;
			i = &data->keys;
		} else {
			bne = (void *) data + (written << 9);
			i = &bne->keys;

			if (!ptr_written && i->seq != data->keys.seq)
				break;
		}

		struct nonce nonce = btree_nonce(i, written << 9);
		bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));

		if (first) {
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, data);
				if (bch2_crc_cmp(data->csum, csum)) {
					bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), data->csum, csum);
					return false;
				}
			}

			written += vstruct_sectors(data, c->block_bits);
		} else {
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
				if (bch2_crc_cmp(bne->csum, csum)) {
					bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), bne->csum, csum);
					return false;
				}
			}

			written += vstruct_sectors(bne, c->block_bits);
		}
	}

	return true;
}

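/*
 * Scrub completion: if the copy we read fails the checksum walk, kick off a
 * rewrite of the node, then release all the refs and buffers taken in
 * bch2_btree_node_scrub().
 */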
static void btree_node_scrub_work(struct work_struct *work)
{
	struct btree_node_scrub *scrub = container_of(work, struct btree_node_scrub, work);
	struct bch_fs *c = scrub->c;
	struct printbuf err = PRINTBUF;

	__bch2_btree_pos_to_text(&err, c, scrub->btree, scrub->level,
				 bkey_i_to_s_c(scrub->key.k));
	prt_newline(&err);

	if (!btree_node_scrub_check(c, scrub->buf, scrub->written, &err)) {
		int ret = bch2_trans_do(c,
			bch2_btree_node_rewrite_key(trans, scrub->btree, scrub->level - 1,
						    scrub->key.k, 0));
		if (!bch2_err_matches(ret, ENOENT) &&
		    !bch2_err_matches(ret, EROFS))
			bch_err_fn_ratelimited(c, ret);
	}

	printbuf_exit(&err);
	bch2_bkey_buf_exit(&scrub->key, c);
	btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
	enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
	kfree(scrub);
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub);
}

static void btree_node_scrub_endio(struct bio *bio)
{
	struct btree_node_scrub *scrub = container_of(bio, struct btree_node_scrub, bio);

	queue_work(scrub->c->btree_read_complete_wq, &scrub->work);
}

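/*
 * Scrub one btree node: read the copy on device @dev into a bounce buffer
 * and verify its checksums from the workqueue; a failed check triggers a
 * rewrite of the node.
 */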
int bch2_btree_node_scrub(struct btree_trans *trans,
			  enum btree_id btree, unsigned level,
			  struct bkey_s_c k, unsigned dev)
{
	if (k.k->type != KEY_TYPE_btree_ptr_v2)
		return 0;

	struct bch_fs *c = trans->c;

	if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_node_scrub))
		return bch_err_throw(c, erofs_no_writes);

	struct extent_ptr_decoded pick;
	int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick, dev);
	if (ret <= 0)
		goto err;

	struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
						BCH_DEV_READ_REF_btree_node_scrub);
	if (!ca) {
		ret = bch_err_throw(c, device_offline);
		goto err;
	}

	bool used_mempool = false;
	void *buf = btree_bounce_alloc(c, c->opts.btree_node_size, &used_mempool);

	unsigned vecs = buf_pages(buf, c->opts.btree_node_size);

	struct btree_node_scrub *scrub =
		kzalloc(sizeof(*scrub) + sizeof(struct bio_vec) * vecs, GFP_KERNEL);
	if (!scrub) {
		ret = -ENOMEM;
		goto err_free;
	}

	scrub->c		= c;
	scrub->ca		= ca;
	scrub->buf		= buf;
	scrub->used_mempool	= used_mempool;
	scrub->written		= btree_ptr_sectors_written(k);

	scrub->btree		= btree;
	scrub->level		= level;
	bch2_bkey_buf_init(&scrub->key);
	bch2_bkey_buf_reassemble(&scrub->key, c, k);
	scrub->seq		= bkey_s_c_to_btree_ptr_v2(k).v->seq;

	INIT_WORK(&scrub->work, btree_node_scrub_work);

	bio_init(&scrub->bio, ca->disk_sb.bdev, scrub->bio.bi_inline_vecs, vecs, REQ_OP_READ);
	bch2_bio_map(&scrub->bio, scrub->buf, c->opts.btree_node_size);
	scrub->bio.bi_iter.bi_sector	= pick.ptr.offset;
	scrub->bio.bi_end_io		= btree_node_scrub_endio;
	submit_bio(&scrub->bio);
	return 0;
err_free:
	btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
	enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
err:
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub);
	return ret;
}

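/*
 * A write completed: if this node was going to be made reachable by an
 * in-flight btree_update (low bit of will_make_reachable set), clear that
 * and drop the update's closure ref, then drop the journal pin held for this
 * write.
 */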
static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
				      struct btree_write *w)
{
	unsigned long old, new;

	old = READ_ONCE(b->will_make_reachable);
	do {
		new = old;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while (!try_cmpxchg(&b->will_make_reachable, &old, new));

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}

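/*
 * Tear down state for a completed write; if the node was redirtied and needs
 * another write (and nothing blocks it), restart the write immediately with
 * BTREE_WRITE_ALREADY_STARTED instead of clearing write_in_flight.
 */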
static void __btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
{
	struct btree_write *w = btree_prev_write(b);
	unsigned long old, new;
	unsigned type = 0;

	bch2_btree_complete_write(c, b, w);

	if (start_time)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_write], start_time);

	old = READ_ONCE(b->flags);
	do {
		new = old;

		if ((old & (1U << BTREE_NODE_dirty)) &&
		    (old & (1U << BTREE_NODE_need_write)) &&
		    !(old & (1U << BTREE_NODE_never_write)) &&
		    !(old & (1U << BTREE_NODE_write_blocked)) &&
		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
			new &= ~(1U << BTREE_NODE_dirty);
			new &= ~(1U << BTREE_NODE_need_write);
			new |=  (1U << BTREE_NODE_write_in_flight);
			new |=  (1U << BTREE_NODE_write_in_flight_inner);
			new |=  (1U << BTREE_NODE_just_written);
			new ^=  (1U << BTREE_NODE_write_idx);

			type = new & BTREE_WRITE_TYPE_MASK;
			new &= ~BTREE_WRITE_TYPE_MASK;
		} else {
			new &= ~(1U << BTREE_NODE_write_in_flight);
			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
		}
	} while (!try_cmpxchg(&b->flags, &old, new));

	if (new & (1U << BTREE_NODE_write_in_flight))
		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
	else {
		smp_mb__after_atomic();
		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
	}
}

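/*
 * Write completion needs the node read locked: take the lock via a temporary
 * transaction, which is dropped again as soon as we have the lock.
 */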
static void btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
{
	struct btree_trans *trans = bch2_trans_get(c);

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);

	/* we don't need transaction context anymore after we got the lock. */
	bch2_trans_put(trans);
	__btree_node_write_done(c, b, start_time);
	six_unlock_read(&b->c.lock);
}

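/*
 * Work item run after a btree node write completes: drop pointers to any
 * devices where the write failed, and if this wasn't the node's first write,
 * update the node's key with the surviving pointers.
 */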
static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c	= wbio->wbio.c;
	struct btree *b		= wbio->wbio.bio.bi_private;
	u64 start_time		= wbio->start_time;
	int ret = 0;

	btree_bounce_free(c,
		wbio->data_bytes,
		wbio->wbio.used_mempool,
		wbio->data);

	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
		ret = bch_err_throw(c, btree_node_write_all_failed);
		goto err;
	}

	if (wbio->wbio.first_btree_write) {
		if (wbio->wbio.failed.nr) {
			/*
			 * XXX: empty placeholder - partial failures of a
			 * node's first write are not handled specially here
			 * yet
			 */
		}
	} else {
		ret = bch2_trans_do(c,
			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
					BCH_WATERMARK_interior_updates|
					BCH_TRANS_COMMIT_journal_reclaim|
					BCH_TRANS_COMMIT_no_enospc|
					BCH_TRANS_COMMIT_no_check_rw,
					!wbio->wbio.failed.nr));
		if (ret)
			goto err;
	}
out:
	async_object_list_del(c, btree_write_bio, wbio->list_idx);
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b, start_time);
	return;
err:
	set_btree_node_noevict(b);

	if (!bch2_err_matches(ret, EROFS)) {
		struct printbuf buf = PRINTBUF;
		prt_printf(&buf, "writing btree node: %s\n  ", bch2_err_str(ret));
		bch2_btree_pos_to_text(&buf, c, b);
		bch2_fs_fatal_error(c, "%s", buf.buf);
		printbuf_exit(&buf);
	}
	goto out;
}

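/*
 * Endio for a btree node write: account the completion, record any failed
 * device, and punt the rest to btree_node_write_work() - for split bios,
 * only the final completion (the parent) queues the work item.
 */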
static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig	= parent ?: wbio;
	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
	struct bch_fs *c		= wbio->c;
	struct btree *b			= wbio->bio.bi_private;
	struct bch_dev *ca		= wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;

	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
				   wbio->submit_time, !bio->bi_status);

	if (ca && bio->bi_status) {
		struct printbuf buf = PRINTBUF;
		buf.atomic++;
		prt_printf(&buf, "btree write error: %s\n  ",
			   bch2_blk_status_to_str(bio->bi_status));
		bch2_btree_pos_to_text(&buf, c, b);
		bch_err_dev_ratelimited(ca, "%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (bio->bi_status) {
		unsigned long flags;
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	/*
	 * XXX: we should be using io_ref[WRITE], but we aren't retrying failed
	 * btree writes yet (due to device removal/ro):
	 */
	if (wbio->have_ioref)
		enumerated_ref_put(&ca->io_ref[READ],
				   BCH_DEV_READ_REF_btree_node_write);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
		return;
	}

	clear_btree_node_write_in_flight_inner(b);
	smp_mb__after_atomic();
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
	INIT_WORK(&wb->work, btree_node_write_work);
	queue_work(c->btree_write_complete_wq, &wb->work);
}

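/*
 * Sanity check a bset (and the node's key) before writing it out; an error
 * here indicates in-memory corruption, so we flag the filesystem
 * inconsistent rather than attempting repair.
 */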
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i)
{
	int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key),
				     (struct bkey_validate_context) {
					.from	= BKEY_VALIDATE_btree_node,
					.level	= b->c.level + 1,
					.btree	= b->c.btree_id,
					.flags	= BCH_VALIDATE_write,
				     });
	if (ret) {
		bch2_fs_inconsistent(c, "invalid btree node key before write");
		return ret;
	}

	ret = validate_bset_keys(c, b, i, WRITE, NULL, NULL) ?:
		validate_bset(c, NULL, b, i, b->written, WRITE, NULL, NULL);
	if (ret) {
		bch2_inconsistent_error(c);
		dump_stack();
	}

	return ret;
}

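/*
 * Submit from process context (via workqueue): the pointers in a copy of the
 * key are offset by this write's position within the node before handing off
 * to the replicated write path.
 */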
static void btree_write_submit(struct work_struct *work)
{
	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;

	bkey_copy(&tmp.k, &wbio->key);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
		ptr->offset += wbio->sector_offset;

	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
				  &tmp.k, false);
}

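/*
 * Write out the dirty portion of a btree node: sort the unwritten bsets into
 * a bounce buffer, checksum (and possibly encrypt) the result, and submit.
 * Races with other writers are resolved through the node's dirty bit; see
 * the comment below.
 */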
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
	struct btree_write_bio *wbio;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	struct sort_iter_stack sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	bool validate_before_checksum = false;
	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
	void *data;
	u64 start_time = local_clock();
	int ret;

	if (flags & BTREE_WRITE_ALREADY_STARTED)
		goto do_write;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * redirtying it:
	 */
	old = READ_ONCE(b->flags);
	do {
		new = old;

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
		    !(old & (1 << BTREE_NODE_need_write)))
			return;

		if (old &
		    ((1 << BTREE_NODE_never_write)|
		     (1 << BTREE_NODE_write_blocked)))
			return;

		if (b->written &&
		    (old & (1 << BTREE_NODE_will_make_reachable)))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight))
			return;

		if (flags & BTREE_WRITE_ONLY_IF_NEED)
			type = new & BTREE_WRITE_TYPE_MASK;
		new &= ~BTREE_WRITE_TYPE_MASK;

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |=  (1 << BTREE_NODE_write_in_flight);
		new |=  (1 << BTREE_NODE_write_in_flight_inner);
		new |=  (1 << BTREE_NODE_just_written);
		new ^=  (1 << BTREE_NODE_write_idx);
	} while (!try_cmpxchg_acquire(&b->flags, &old, new));

	if (new & (1U << BTREE_NODE_need_write))
		return;
do_write:
	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));

	atomic_long_dec(&c->btree_cache.nr_dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= btree_sectors(c));
	BUG_ON(b->written & (block_sectors(c) - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	bch2_sort_whiteouts(c, b);

	sort_iter_stack_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	BUG_ON(b->written && !seq);

	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;

	/* buffer must be a multiple of the block size */
	bytes = round_up(bytes, block_bytes(c));

	data = btree_bounce_alloc(c, bytes, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq	= cpu_to_le64(seq);
	i->u64s		= 0;

	sort_iter_add(&sort_iter.iter,
		      unwritten_whiteouts_start(b),
		      unwritten_whiteouts_end(b));
	SET_BSET_SEPARATE_WHITEOUTS(i, false);

	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
	le16_add_cpu(&i->u64s, u64s);

	b->whiteout_u64s = 0;

	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);

	set_needs_whiteout(i, false);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	if (!b->written &&
	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
		BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = cpu_to_le16(c->sb.version);
	SET_BSET_OFFSET(i, b->written);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
		validate_before_checksum = true;

	/* validate_bset will be modifying: */
	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	/* if we're going to be encrypting, check metadata validity first: */
	if (validate_before_checksum &&
	    validate_bset_for_write(c, b, i))
		goto err;

	ret = bset_encrypt(c, i, b->written << 9);
	if (bch2_fs_fatal_err_on(ret, c,
			"encrypting btree node: %s", bch2_err_str(ret)))
		goto err;

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!validate_before_checksum &&
	    validate_bset_for_write(c, b, i))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Also on journal error, the pending write may have updates that were
	 * never journalled (interior nodes, see btree_update_nodes_written()) -
	 * it's critical that we don't do the write in that case otherwise we
	 * will have updates visible that weren't in the journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(NULL,
				buf_pages(data, sectors_to_write << 9),
				REQ_OP_WRITE|REQ_META,
				GFP_NOFS,
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data			= data;
	wbio->data_bytes		= bytes;
	wbio->sector_offset		= b->written;
	wbio->start_time		= start_time;
	wbio->wbio.c			= c;
	wbio->wbio.used_mempool		= used_mempool;
	wbio->wbio.first_btree_write	= !b->written;
	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
	wbio->wbio.bio.bi_private	= b;

	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

	bkey_copy(&wbio->key, &b->key);

	b->written += sectors_to_write;

	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
			cpu_to_le16(b->written);

	atomic64_inc(&c->btree_write_stats[type].nr);
	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);

	async_object_list_add(c, btree_write_bio, wbio, &wbio->list_idx);

	INIT_WORK(&wbio->work, btree_write_submit);
	queue_work(c->btree_write_submit_wq, &wbio->work);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, bytes, used_mempool, data);
	__btree_node_write_done(c, b, 0);
}

/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, 0, b->nsets);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t), true);

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}

/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held,
			   unsigned flags)
{
	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->c.lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}

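/*
 * Like bch2_btree_node_write(), but for callers in a btree_trans context:
 * identical except that the write lock, if taken, is released via
 * __bch2_btree_node_unlock_write() so the transaction's lock state stays
 * consistent.
 */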
void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b,
				 enum six_lock_type lock_type_held,
				 unsigned flags)
{
	struct bch_fs *c = trans->c;

	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			__bch2_btree_node_unlock_write(trans, b);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}

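/*
 * Wait for all in-flight reads or writes (selected by @flag) on cached btree
 * nodes to complete; returns true if we had to wait for anything.
 */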
static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
	bool ret = false;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			ret = true;
			goto restart;
		}
	rcu_read_unlock();

	return ret;
}

bool bch2_btree_flush_all_reads(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

bool bch2_btree_flush_all_writes(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}

static const char * const bch2_btree_write_types[] = {
#define x(t, n) [n] = #t,
	BCH_BTREE_WRITE_TYPES()
	NULL
};

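/* Print, for each btree write type, the number of writes and the average size: */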
void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 20);
	printbuf_tabstop_push(out, 10);

	prt_printf(out, "\tnr\tsize\n");

	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);

		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
		prt_newline(out);
	}
}
2745