xref: /linux/fs/bcachefs/btree_io.c (revision 4a4b30ea80d8cb5e8c4c62bb86201f4ea0d9b030)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "bkey_methods.h"
6 #include "bkey_sort.h"
7 #include "btree_cache.h"
8 #include "btree_io.h"
9 #include "btree_iter.h"
10 #include "btree_locking.h"
11 #include "btree_update.h"
12 #include "btree_update_interior.h"
13 #include "buckets.h"
14 #include "checksum.h"
15 #include "debug.h"
16 #include "error.h"
17 #include "extents.h"
18 #include "io_write.h"
19 #include "journal_reclaim.h"
20 #include "journal_seq_blacklist.h"
21 #include "recovery.h"
22 #include "super-io.h"
23 #include "trace.h"
24 
25 #include <linux/sched/mm.h>
26 
27 static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
28 {
29 	bch2_btree_id_level_to_text(out, BTREE_NODE_ID(bn), BTREE_NODE_LEVEL(bn));
30 	prt_printf(out, " seq %llx %llu\n", bn->keys.seq, BTREE_NODE_SEQ(bn));
31 	prt_str(out, "min: ");
32 	bch2_bpos_to_text(out, bn->min_key);
33 	prt_newline(out);
34 	prt_str(out, "max: ");
35 	bch2_bpos_to_text(out, bn->max_key);
36 }
37 
38 void bch2_btree_node_io_unlock(struct btree *b)
39 {
40 	EBUG_ON(!btree_node_write_in_flight(b));
41 
42 	clear_btree_node_write_in_flight_inner(b);
43 	clear_btree_node_write_in_flight(b);
44 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
45 }
46 
47 void bch2_btree_node_io_lock(struct btree *b)
48 {
49 	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
50 			    TASK_UNINTERRUPTIBLE);
51 }
52 
53 void __bch2_btree_node_wait_on_read(struct btree *b)
54 {
55 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
56 		       TASK_UNINTERRUPTIBLE);
57 }
58 
59 void __bch2_btree_node_wait_on_write(struct btree *b)
60 {
61 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
62 		       TASK_UNINTERRUPTIBLE);
63 }
64 
65 void bch2_btree_node_wait_on_read(struct btree *b)
66 {
67 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
68 		       TASK_UNINTERRUPTIBLE);
69 }
70 
71 void bch2_btree_node_wait_on_write(struct btree *b)
72 {
73 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
74 		       TASK_UNINTERRUPTIBLE);
75 }
76 
77 static void verify_no_dups(struct btree *b,
78 			   struct bkey_packed *start,
79 			   struct bkey_packed *end)
80 {
81 #ifdef CONFIG_BCACHEFS_DEBUG
82 	struct bkey_packed *k, *p;
83 
84 	if (start == end)
85 		return;
86 
87 	for (p = start, k = bkey_p_next(start);
88 	     k != end;
89 	     p = k, k = bkey_p_next(k)) {
90 		struct bkey l = bkey_unpack_key(b, p);
91 		struct bkey r = bkey_unpack_key(b, k);
92 
93 		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
94 	}
95 #endif
96 }
97 
98 static void set_needs_whiteout(struct bset *i, int v)
99 {
100 	struct bkey_packed *k;
101 
102 	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
103 		k->needs_whiteout = v;
104 }
105 
106 static void btree_bounce_free(struct bch_fs *c, size_t size,
107 			      bool used_mempool, void *p)
108 {
109 	if (used_mempool)
110 		mempool_free(p, &c->btree_bounce_pool);
111 	else
112 		kvfree(p);
113 }
114 
115 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
116 				bool *used_mempool)
117 {
118 	unsigned flags = memalloc_nofs_save();
119 	void *p;
120 
121 	BUG_ON(size > c->opts.btree_node_size);
122 
123 	*used_mempool = false;
124 	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
125 	if (!p) {
126 		*used_mempool = true;
127 		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
128 	}
129 	memalloc_nofs_restore(flags);
130 	return p;
131 }
132 
133 static void sort_bkey_ptrs(const struct btree *bt,
134 			   struct bkey_packed **ptrs, unsigned nr)
135 {
136 	unsigned n = nr, a = nr / 2, b, c, d;
137 
138 	if (!a)
139 		return;
140 
141 	/* Heap sort: see lib/sort.c: */
142 	while (1) {
143 		if (a)
144 			a--;
145 		else if (--n)
146 			swap(ptrs[0], ptrs[n]);
147 		else
148 			break;
149 
150 		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
151 			b = bch2_bkey_cmp_packed(bt,
152 					    ptrs[c],
153 					    ptrs[d]) >= 0 ? c : d;
154 		if (d == n)
155 			b = c;
156 
157 		while (b != a &&
158 		       bch2_bkey_cmp_packed(bt,
159 				       ptrs[a],
160 				       ptrs[b]) >= 0)
161 			b = (b - 1) / 2;
162 		c = b;
163 		while (b != a) {
164 			b = (b - 1) / 2;
165 			swap(ptrs[b], ptrs[c]);
166 		}
167 	}
168 }
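/*
 * Editor's sketch: sort_bkey_ptrs() above is the bottom-up heapsort from
 * lib/sort.c, specialized to an array of bkey_packed pointers.  A minimal
 * stand-alone version over plain ints (illustrative only, not kernel API)
 * would look like this:
 */
#if 0
#define SWAP_INT(x, y) do { int _t = (x); (x) = (y); (y) = _t; } while (0)

static void heapsort_ints(int *v, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	while (1) {
		if (a)			/* phase 1: build the max-heap */
			a--;
		else if (--n)		/* phase 2: pop the max to the end */
			SWAP_INT(v[0], v[n]);
		else
			break;

		/* Descend along the larger child: */
		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = v[c] >= v[d] ? c : d;
		if (d == n)		/* last leaf has no sibling */
			b = c;

		/* Climb back up to where v[a] belongs: */
		while (b != a && v[a] >= v[b])
			b = (b - 1) / 2;

		/* Rotate v[a] into place via its chain of ancestors: */
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			SWAP_INT(v[b], v[c]);
		}
	}
}
#endif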
169 
170 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
171 {
172 	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
173 	bool used_mempool = false;
174 	size_t bytes = b->whiteout_u64s * sizeof(u64);
175 
176 	if (!b->whiteout_u64s)
177 		return;
178 
179 	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
180 
181 	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
182 
183 	for (k = unwritten_whiteouts_start(b);
184 	     k != unwritten_whiteouts_end(b);
185 	     k = bkey_p_next(k))
186 		*--ptrs = k;
187 
188 	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
189 
190 	k = new_whiteouts;
191 
192 	while (ptrs != ptrs_end) {
193 		bkey_p_copy(k, *ptrs);
194 		k = bkey_p_next(k);
195 		ptrs++;
196 	}
197 
198 	verify_no_dups(b, new_whiteouts,
199 		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
200 
201 	memcpy_u64s(unwritten_whiteouts_start(b),
202 		    new_whiteouts, b->whiteout_u64s);
203 
204 	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
205 }
206 
207 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
208 				bool compacting, enum compact_mode mode)
209 {
210 	if (!bset_dead_u64s(b, t))
211 		return false;
212 
213 	switch (mode) {
214 	case COMPACT_LAZY:
215 		return should_compact_bset_lazy(b, t) ||
216 			(compacting && !bset_written(b, bset(b, t)));
217 	case COMPACT_ALL:
218 		return true;
219 	default:
220 		BUG();
221 	}
222 }
223 
224 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
225 {
226 	bool ret = false;
227 
228 	for_each_bset(b, t) {
229 		struct bset *i = bset(b, t);
230 		struct bkey_packed *k, *n, *out, *start, *end;
231 		struct btree_node_entry *src = NULL, *dst = NULL;
232 
233 		if (t != b->set && !bset_written(b, i)) {
234 			src = container_of(i, struct btree_node_entry, keys);
235 			dst = max(write_block(b),
236 				  (void *) btree_bkey_last(b, t - 1));
237 		}
238 
239 		if (src != dst)
240 			ret = true;
241 
242 		if (!should_compact_bset(b, t, ret, mode)) {
243 			if (src != dst) {
244 				memmove(dst, src, sizeof(*src) +
245 					le16_to_cpu(src->keys.u64s) *
246 					sizeof(u64));
247 				i = &dst->keys;
248 				set_btree_bset(b, t, i);
249 			}
250 			continue;
251 		}
252 
253 		start	= btree_bkey_first(b, t);
254 		end	= btree_bkey_last(b, t);
255 
256 		if (src != dst) {
257 			memmove(dst, src, sizeof(*src));
258 			i = &dst->keys;
259 			set_btree_bset(b, t, i);
260 		}
261 
262 		out = i->start;
263 
264 		for (k = start; k != end; k = n) {
265 			n = bkey_p_next(k);
266 
267 			if (!bkey_deleted(k)) {
268 				bkey_p_copy(out, k);
269 				out = bkey_p_next(out);
270 			} else {
271 				BUG_ON(k->needs_whiteout);
272 			}
273 		}
274 
275 		i->u64s = cpu_to_le16((u64 *) out - i->_data);
276 		set_btree_bset_end(b, t);
277 		bch2_bset_set_no_aux_tree(b, t);
278 		ret = true;
279 	}
280 
281 	bch2_verify_btree_nr_keys(b);
282 
283 	bch2_btree_build_aux_trees(b);
284 
285 	return ret;
286 }
287 
288 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
289 			    enum compact_mode mode)
290 {
291 	return bch2_drop_whiteouts(b, mode);
292 }
293 
294 static void btree_node_sort(struct bch_fs *c, struct btree *b,
295 			    unsigned start_idx,
296 			    unsigned end_idx)
297 {
298 	struct btree_node *out;
299 	struct sort_iter_stack sort_iter;
300 	struct bset_tree *t;
301 	struct bset *start_bset = bset(b, &b->set[start_idx]);
302 	bool used_mempool = false;
303 	u64 start_time, seq = 0;
304 	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
305 	bool sorting_entire_node = start_idx == 0 &&
306 		end_idx == b->nsets;
307 
308 	sort_iter_stack_init(&sort_iter, b);
309 
310 	for (t = b->set + start_idx;
311 	     t < b->set + end_idx;
312 	     t++) {
313 		u64s += le16_to_cpu(bset(b, t)->u64s);
314 		sort_iter_add(&sort_iter.iter,
315 			      btree_bkey_first(b, t),
316 			      btree_bkey_last(b, t));
317 	}
318 
319 	bytes = sorting_entire_node
320 		? btree_buf_bytes(b)
321 		: __vstruct_bytes(struct btree_node, u64s);
322 
323 	out = btree_bounce_alloc(c, bytes, &used_mempool);
324 
325 	start_time = local_clock();
326 
327 	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);
328 
329 	out->keys.u64s = cpu_to_le16(u64s);
330 
331 	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
332 
333 	if (sorting_entire_node)
334 		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
335 				       start_time);
336 
337 	/* Make sure we preserve bset journal_seq: */
338 	for (t = b->set + start_idx; t < b->set + end_idx; t++)
339 		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
340 	start_bset->journal_seq = cpu_to_le64(seq);
341 
342 	if (sorting_entire_node) {
343 		u64s = le16_to_cpu(out->keys.u64s);
344 
345 		BUG_ON(bytes != btree_buf_bytes(b));
346 
347 		/*
348 		 * Our temporary buffer is the same size as the btree node's
349 		 * buffer; we can just swap buffers instead of doing a big
350 		 * memcpy()
351 		 */
352 		*out = *b->data;
353 		out->keys.u64s = cpu_to_le16(u64s);
354 		swap(out, b->data);
355 		set_btree_bset(b, b->set, &b->data->keys);
356 	} else {
357 		start_bset->u64s = out->keys.u64s;
358 		memcpy_u64s(start_bset->start,
359 			    out->keys.start,
360 			    le16_to_cpu(out->keys.u64s));
361 	}
362 
363 	for (i = start_idx + 1; i < end_idx; i++)
364 		b->nr.bset_u64s[start_idx] +=
365 			b->nr.bset_u64s[i];
366 
367 	b->nsets -= shift;
368 
369 	for (i = start_idx + 1; i < b->nsets; i++) {
370 		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
371 		b->set[i]		= b->set[i + shift];
372 	}
373 
374 	for (i = b->nsets; i < MAX_BSETS; i++)
375 		b->nr.bset_u64s[i] = 0;
376 
377 	set_btree_bset_end(b, &b->set[start_idx]);
378 	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
379 
380 	btree_bounce_free(c, bytes, used_mempool, out);
381 
382 	bch2_verify_btree_nr_keys(b);
383 }
384 
385 void bch2_btree_sort_into(struct bch_fs *c,
386 			 struct btree *dst,
387 			 struct btree *src)
388 {
389 	struct btree_nr_keys nr;
390 	struct btree_node_iter src_iter;
391 	u64 start_time = local_clock();
392 
393 	BUG_ON(dst->nsets != 1);
394 
395 	bch2_bset_set_no_aux_tree(dst, dst->set);
396 
397 	bch2_btree_node_iter_init_from_start(&src_iter, src);
398 
399 	nr = bch2_sort_repack(btree_bset_first(dst),
400 			src, &src_iter,
401 			&dst->format,
402 			true);
403 
404 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
405 			       start_time);
406 
407 	set_btree_bset_end(dst, dst->set);
408 
409 	dst->nr.live_u64s	+= nr.live_u64s;
410 	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
411 	dst->nr.packed_keys	+= nr.packed_keys;
412 	dst->nr.unpacked_keys	+= nr.unpacked_keys;
413 
414 	bch2_verify_btree_nr_keys(dst);
415 }
416 
417 /*
418  * We're about to add another bset to the btree node, so if there are
419  * currently too many bsets, sort some of them together:
420  */
421 static bool btree_node_compact(struct bch_fs *c, struct btree *b)
422 {
423 	unsigned unwritten_idx;
424 	bool ret = false;
425 
426 	for (unwritten_idx = 0;
427 	     unwritten_idx < b->nsets;
428 	     unwritten_idx++)
429 		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
430 			break;
431 
432 	if (b->nsets - unwritten_idx > 1) {
433 		btree_node_sort(c, b, unwritten_idx, b->nsets);
434 		ret = true;
435 	}
436 
437 	if (unwritten_idx > 1) {
438 		btree_node_sort(c, b, 0, unwritten_idx);
439 		ret = true;
440 	}
441 
442 	return ret;
443 }
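/*
 * Example (editor's note): with b->nsets == 3 where only set 0 has been
 * written, unwritten_idx == 1 and the two unwritten bsets [1, 3) get
 * merged by btree_node_sort(); if all three bsets were still unwritten,
 * unwritten_idx == 0 and [0, 3) is merged down to a single bset.
 */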
444 
445 void bch2_btree_build_aux_trees(struct btree *b)
446 {
447 	for_each_bset(b, t)
448 		bch2_bset_build_aux_tree(b, t,
449 				!bset_written(b, bset(b, t)) &&
450 				t == bset_tree_last(b));
451 }
452 
453 /*
454  * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
455  *
456  * The first bset is going to be of similar order to the size of the node, the
457  * last bset is bounded by btree_write_set_buffer(), which is set to keep the
458  * memmove on insert from being too expensive: the middle bset should, ideally,
459  * be the geometric mean of the first and the last.
460  *
461  * Returns true if the middle bset is greater than that geometric mean:
462  */
463 static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
464 {
465 	unsigned mid_u64s_bits =
466 		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
467 
468 	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
469 }
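/*
 * Worked example (editor's note; the constants here are illustrative):
 * if ilog2(btree_max_u64s(c)) == 15 and BTREE_WRITE_SET_U64s_BITS == 9,
 * then mid_u64s_bits = (15 + 9) / 2 = 12, so we compact once the middle
 * bset exceeds 2^12 = 4096 u64s -- the geometric mean of 2^15 and 2^9.
 */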
470 
471 /*
472  * @bch2_btree_init_next - initialize a new (unwritten) bset that can then be
473  * inserted into
474  *
475  * Safe to call if there already is an unwritten bset - will only add a new bset
476  * if @b doesn't already have one.
477  *
478  * Returns true if we sorted (i.e. invalidated iterators)
479  */
480 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
481 {
482 	struct bch_fs *c = trans->c;
483 	struct btree_node_entry *bne;
484 	bool reinit_iter = false;
485 
486 	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
487 	BUG_ON(bset_written(b, bset(b, &b->set[1])));
488 	BUG_ON(btree_node_just_written(b));
489 
490 	if (b->nsets == MAX_BSETS &&
491 	    !btree_node_write_in_flight(b) &&
492 	    should_compact_all(c, b)) {
493 		bch2_btree_node_write_trans(trans, b, SIX_LOCK_write,
494 					    BTREE_WRITE_init_next_bset);
495 		reinit_iter = true;
496 	}
497 
498 	if (b->nsets == MAX_BSETS &&
499 	    btree_node_compact(c, b))
500 		reinit_iter = true;
501 
502 	BUG_ON(b->nsets >= MAX_BSETS);
503 
504 	bne = want_new_bset(c, b);
505 	if (bne)
506 		bch2_bset_init_next(b, bne);
507 
508 	bch2_btree_build_aux_trees(b);
509 
510 	if (reinit_iter)
511 		bch2_trans_node_reinit_iter(trans, b);
512 }
513 
514 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
515 			  struct bch_dev *ca,
516 			  struct btree *b, struct bset *i, struct bkey_packed *k,
517 			  unsigned offset, int write)
518 {
519 	prt_printf(out, bch2_log_msg(c, "%s"),
520 		   write == READ
521 		   ? "error validating btree node "
522 		   : "corrupt btree node before write ");
523 	if (ca)
524 		prt_printf(out, "on %s ", ca->name);
525 	prt_printf(out, "at btree ");
526 	bch2_btree_pos_to_text(out, c, b);
527 
528 	printbuf_indent_add(out, 2);
529 
530 	prt_printf(out, "\nnode offset %u/%u",
531 		   b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
532 	if (i)
533 		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
534 	if (k)
535 		prt_printf(out, " bset byte offset %lu",
536 			   (unsigned long)(void *)k -
537 			   ((unsigned long)(void *)i & ~511UL));
538 	prt_str(out, ": ");
539 }
540 
541 __printf(10, 11)
542 static int __btree_err(int ret,
543 		       struct bch_fs *c,
544 		       struct bch_dev *ca,
545 		       struct btree *b,
546 		       struct bset *i,
547 		       struct bkey_packed *k,
548 		       int write,
549 		       bool have_retry,
550 		       enum bch_sb_error_id err_type,
551 		       const char *fmt, ...)
552 {
553 	struct printbuf out = PRINTBUF;
554 	bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
555 	va_list args;
556 
557 	btree_err_msg(&out, c, ca, b, i, k, b->written, write);
558 
559 	va_start(args, fmt);
560 	prt_vprintf(&out, fmt, args);
561 	va_end(args);
562 
563 	if (write == WRITE) {
564 		bch2_print_string_as_lines(KERN_ERR, out.buf);
565 		ret = c->opts.errors == BCH_ON_ERROR_continue
566 			? 0
567 			: -BCH_ERR_fsck_errors_not_fixed;
568 		goto out;
569 	}
570 
571 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
572 		ret = -BCH_ERR_btree_node_read_err_fixable;
573 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
574 		ret = -BCH_ERR_btree_node_read_err_bad_node;
575 
576 	if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
577 		bch2_sb_error_count(c, err_type);
578 
579 	switch (ret) {
580 	case -BCH_ERR_btree_node_read_err_fixable:
581 		ret = !silent
582 			? __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf)
583 			: -BCH_ERR_fsck_fix;
584 		if (ret != -BCH_ERR_fsck_fix &&
585 		    ret != -BCH_ERR_fsck_ignore)
586 			goto fsck_err;
587 		ret = -BCH_ERR_fsck_fix;
588 		break;
589 	case -BCH_ERR_btree_node_read_err_want_retry:
590 	case -BCH_ERR_btree_node_read_err_must_retry:
591 		if (!silent)
592 			bch2_print_string_as_lines(KERN_ERR, out.buf);
593 		break;
594 	case -BCH_ERR_btree_node_read_err_bad_node:
595 		if (!silent)
596 			bch2_print_string_as_lines(KERN_ERR, out.buf);
597 		ret = bch2_topology_error(c);
598 		break;
599 	case -BCH_ERR_btree_node_read_err_incompatible:
600 		if (!silent)
601 			bch2_print_string_as_lines(KERN_ERR, out.buf);
602 		ret = -BCH_ERR_fsck_errors_not_fixed;
603 		break;
604 	default:
605 		BUG();
606 	}
607 out:
608 fsck_err:
609 	printbuf_exit(&out);
610 	return ret;
611 }
612 
613 #define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)		\
614 ({									\
615 	int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry,	\
616 			       BCH_FSCK_ERR_##_err_type,		\
617 			       msg, ##__VA_ARGS__);			\
618 									\
619 	if (_ret != -BCH_ERR_fsck_fix) {				\
620 		ret = _ret;						\
621 		goto fsck_err;						\
622 	}								\
623 									\
624 	*saw_error = true;						\
625 })
626 
627 #define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
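/*
 * Usage sketch (editor's note): callers write, e.g.,
 *
 *	if (btree_err_on(cond,
 *			 -BCH_ERR_btree_node_read_err_fixable,
 *			 c, ca, b, i, NULL,
 *			 some_err_type, "message"))
 *		repair_in_place();
 *
 * The expression is true when the error fired and __btree_err() decided
 * it was fixed; any other outcome jumps to the fsck_err label.  `write',
 * `have_retry', `saw_error', `ret' and a `fsck_err:' label must all be
 * in scope at the call site.
 */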
628 
629 /*
630  * When btree topology repair changes the start or end of a node, that might
631  * mean we have to drop keys that are no longer inside the node:
632  */
633 __cold
634 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
635 {
636 	for_each_bset(b, t) {
637 		struct bset *i = bset(b, t);
638 		struct bkey_packed *k;
639 
640 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
641 			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
642 				break;
643 
644 		if (k != i->start) {
645 			unsigned shift = (u64 *) k - (u64 *) i->start;
646 
647 			memmove_u64s_down(i->start, k,
648 					  (u64 *) vstruct_end(i) - (u64 *) k);
649 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
650 			set_btree_bset_end(b, t);
651 		}
652 
653 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
654 			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
655 				break;
656 
657 		if (k != vstruct_last(i)) {
658 			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
659 			set_btree_bset_end(b, t);
660 		}
661 	}
662 
663 	/*
664 	 * Always rebuild search trees: eytzinger search tree nodes directly
665 	 * depend on the values of min/max key:
666 	 */
667 	bch2_bset_set_no_aux_tree(b, b->set);
668 	bch2_btree_build_aux_trees(b);
669 	b->nr = bch2_btree_node_count_keys(b);
670 
671 	struct bkey_s_c k;
672 	struct bkey unpacked;
673 	struct btree_node_iter iter;
674 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
675 		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
676 		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
677 	}
678 }
679 
680 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
681 			 struct btree *b, struct bset *i,
682 			 unsigned offset, unsigned sectors,
683 			 int write, bool have_retry, bool *saw_error)
684 {
685 	unsigned version = le16_to_cpu(i->version);
686 	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
687 	struct printbuf buf1 = PRINTBUF;
688 	struct printbuf buf2 = PRINTBUF;
689 	int ret = 0;
690 
691 	btree_err_on(!bch2_version_compatible(version),
692 		     -BCH_ERR_btree_node_read_err_incompatible,
693 		     c, ca, b, i, NULL,
694 		     btree_node_unsupported_version,
695 		     "unsupported bset version %u.%u",
696 		     BCH_VERSION_MAJOR(version),
697 		     BCH_VERSION_MINOR(version));
698 
699 	if (btree_err_on(version < c->sb.version_min,
700 			 -BCH_ERR_btree_node_read_err_fixable,
701 			 c, NULL, b, i, NULL,
702 			 btree_node_bset_older_than_sb_min,
703 			 "bset version %u older than superblock version_min %u",
704 			 version, c->sb.version_min)) {
705 		mutex_lock(&c->sb_lock);
706 		c->disk_sb.sb->version_min = cpu_to_le16(version);
707 		bch2_write_super(c);
708 		mutex_unlock(&c->sb_lock);
709 	}
710 
711 	if (btree_err_on(BCH_VERSION_MAJOR(version) >
712 			 BCH_VERSION_MAJOR(c->sb.version),
713 			 -BCH_ERR_btree_node_read_err_fixable,
714 			 c, NULL, b, i, NULL,
715 			 btree_node_bset_newer_than_sb,
716 			 "bset version %u newer than superblock version %u",
717 			 version, c->sb.version)) {
718 		mutex_lock(&c->sb_lock);
719 		c->disk_sb.sb->version = cpu_to_le16(version);
720 		bch2_write_super(c);
721 		mutex_unlock(&c->sb_lock);
722 	}
723 
724 	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
725 		     -BCH_ERR_btree_node_read_err_incompatible,
726 		     c, ca, b, i, NULL,
727 		     btree_node_unsupported_version,
728 		     "BSET_SEPARATE_WHITEOUTS no longer supported");
729 
730 	if (!write &&
731 	    btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
732 			 -BCH_ERR_btree_node_read_err_fixable,
733 			 c, ca, b, i, NULL,
734 			 bset_past_end_of_btree_node,
735 			 "bset past end of btree node (offset %u len %u but written %zu)",
736 			 offset, sectors, ptr_written ?: btree_sectors(c)))
737 		i->u64s = 0;
738 
739 	btree_err_on(offset && !i->u64s,
740 		     -BCH_ERR_btree_node_read_err_fixable,
741 		     c, ca, b, i, NULL,
742 		     bset_empty,
743 		     "empty bset");
744 
745 	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
746 		     -BCH_ERR_btree_node_read_err_want_retry,
747 		     c, ca, b, i, NULL,
748 		     bset_wrong_sector_offset,
749 		     "bset at wrong sector offset");
750 
751 	if (!offset) {
752 		struct btree_node *bn =
753 			container_of(i, struct btree_node, keys);
754 		/* These indicate that we read the wrong btree node: */
755 
756 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
757 			struct bch_btree_ptr_v2 *bp =
758 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
759 
760 			/* XXX endianness */
761 			btree_err_on(bp->seq != bn->keys.seq,
762 				     -BCH_ERR_btree_node_read_err_must_retry,
763 				     c, ca, b, NULL, NULL,
764 				     bset_bad_seq,
765 				     "incorrect sequence number (wrong btree node)");
766 		}
767 
768 		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
769 			     -BCH_ERR_btree_node_read_err_must_retry,
770 			     c, ca, b, i, NULL,
771 			     btree_node_bad_btree,
772 			     "incorrect btree id");
773 
774 		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
775 			     -BCH_ERR_btree_node_read_err_must_retry,
776 			     c, ca, b, i, NULL,
777 			     btree_node_bad_level,
778 			     "incorrect level");
779 
780 		if (!write)
781 			compat_btree_node(b->c.level, b->c.btree_id, version,
782 					  BSET_BIG_ENDIAN(i), write, bn);
783 
784 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
785 			struct bch_btree_ptr_v2 *bp =
786 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
787 
788 			if (BTREE_PTR_RANGE_UPDATED(bp)) {
789 				b->data->min_key = bp->min_key;
790 				b->data->max_key = b->key.k.p;
791 			}
792 
793 			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
794 				     -BCH_ERR_btree_node_read_err_must_retry,
795 				     c, ca, b, NULL, NULL,
796 				     btree_node_bad_min_key,
797 				     "incorrect min_key: got %s should be %s",
798 				     (printbuf_reset(&buf1),
799 				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
800 				     (printbuf_reset(&buf2),
801 				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
802 		}
803 
804 		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
805 			     -BCH_ERR_btree_node_read_err_must_retry,
806 			     c, ca, b, i, NULL,
807 			     btree_node_bad_max_key,
808 			     "incorrect max key %s",
809 			     (printbuf_reset(&buf1),
810 			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
811 
812 		if (write)
813 			compat_btree_node(b->c.level, b->c.btree_id, version,
814 					  BSET_BIG_ENDIAN(i), write, bn);
815 
816 		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
817 			     -BCH_ERR_btree_node_read_err_bad_node,
818 			     c, ca, b, i, NULL,
819 			     btree_node_bad_format,
820 			     "invalid bkey format: %s\n  %s", buf1.buf,
821 			     (printbuf_reset(&buf2),
822 			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
823 		printbuf_reset(&buf1);
824 
825 		compat_bformat(b->c.level, b->c.btree_id, version,
826 			       BSET_BIG_ENDIAN(i), write,
827 			       &bn->format);
828 	}
829 fsck_err:
830 	printbuf_exit(&buf2);
831 	printbuf_exit(&buf1);
832 	return ret;
833 }
834 
835 static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b,
836 					struct bkey_s_c k,
837 					enum bch_validate_flags flags)
838 {
839 	return bch2_bkey_val_validate(c, k, (struct bkey_validate_context) {
840 		.from	= BKEY_VALIDATE_btree_node,
841 		.level	= b->c.level,
842 		.btree	= b->c.btree_id,
843 		.flags	= flags
844 	});
845 }
846 
847 static int bset_key_validate(struct bch_fs *c, struct btree *b,
848 			     struct bkey_s_c k,
849 			     bool updated_range,
850 			     enum bch_validate_flags flags)
851 {
852 	struct bkey_validate_context from = (struct bkey_validate_context) {
853 		.from	= BKEY_VALIDATE_btree_node,
854 		.level	= b->c.level,
855 		.btree	= b->c.btree_id,
856 		.flags	= flags,
857 	};
858 	return __bch2_bkey_validate(c, k, from) ?:
859 		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?:
860 		(flags & BCH_VALIDATE_write ? btree_node_bkey_val_validate(c, b, k, flags) : 0);
861 }
862 
863 static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
864 			 struct bset *i, struct bkey_packed *k)
865 {
866 	if (bkey_p_next(k) > vstruct_last(i))
867 		return false;
868 
869 	if (k->format > KEY_FORMAT_CURRENT)
870 		return false;
871 
872 	if (!bkeyp_u64s_valid(&b->format, k))
873 		return false;
874 
875 	struct bkey tmp;
876 	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
877 	return !__bch2_bkey_validate(c, u.s_c,
878 				     (struct bkey_validate_context) {
879 					.from	= BKEY_VALIDATE_btree_node,
880 					.level	= b->c.level,
881 					.btree	= b->c.btree_id,
882 					.flags	= BCH_VALIDATE_silent
883 				     });
884 }
885 
886 static inline int btree_node_read_bkey_cmp(const struct btree *b,
887 				const struct bkey_packed *l,
888 				const struct bkey_packed *r)
889 {
890 	return bch2_bkey_cmp_packed(b, l, r)
891 		?: (int) bkey_deleted(r) - (int) bkey_deleted(l);
892 }
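/*
 * Editor's note: at equal positions this orders deleted keys (whiteouts)
 * before live keys -- with prev live and k deleted at the same pos,
 * btree_node_read_bkey_cmp(b, prev, k) is +1, which validate_bset_keys()
 * below flags as "keys out of order".
 */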
893 
894 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
895 			 struct bset *i, int write,
896 			 bool have_retry, bool *saw_error)
897 {
898 	unsigned version = le16_to_cpu(i->version);
899 	struct bkey_packed *k, *prev = NULL;
900 	struct printbuf buf = PRINTBUF;
901 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
902 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
903 	int ret = 0;
904 
905 	for (k = i->start;
906 	     k != vstruct_last(i);) {
907 		struct bkey_s u;
908 		struct bkey tmp;
909 		unsigned next_good_key;
910 
911 		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
912 				 -BCH_ERR_btree_node_read_err_fixable,
913 				 c, NULL, b, i, k,
914 				 btree_node_bkey_past_bset_end,
915 				 "key extends past end of bset")) {
916 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
917 			break;
918 		}
919 
920 		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
921 				 -BCH_ERR_btree_node_read_err_fixable,
922 				 c, NULL, b, i, k,
923 				 btree_node_bkey_bad_format,
924 				 "invalid bkey format %u", k->format))
925 			goto drop_this_key;
926 
927 		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
928 				 -BCH_ERR_btree_node_read_err_fixable,
929 				 c, NULL, b, i, k,
930 				 btree_node_bkey_bad_u64s,
931 				 "bad k->u64s %u (min %u max %zu)", k->u64s,
932 				 bkeyp_key_u64s(&b->format, k),
933 				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
934 			goto drop_this_key;
935 
936 		if (!write)
937 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
938 				    BSET_BIG_ENDIAN(i), write,
939 				    &b->format, k);
940 
941 		u = __bkey_disassemble(b, k, &tmp);
942 
943 		ret = bset_key_validate(c, b, u.s_c, updated_range, write);
944 		if (ret == -BCH_ERR_fsck_delete_bkey)
945 			goto drop_this_key;
946 		if (ret)
947 			goto fsck_err;
948 
949 		if (write)
950 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
951 				    BSET_BIG_ENDIAN(i), write,
952 				    &b->format, k);
953 
954 		if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) {
955 			struct bkey up = bkey_unpack_key(b, prev);
956 
957 			printbuf_reset(&buf);
958 			prt_printf(&buf, "keys out of order: ");
959 			bch2_bkey_to_text(&buf, &up);
960 			prt_printf(&buf, " > ");
961 			bch2_bkey_to_text(&buf, u.k);
962 
963 			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
964 				      c, NULL, b, i, k,
965 				      btree_node_bkey_out_of_order,
966 				      "%s", buf.buf))
967 				goto drop_this_key;
968 		}
969 
970 		prev = k;
971 		k = bkey_p_next(k);
972 		continue;
973 drop_this_key:
974 		next_good_key = k->u64s;
975 
976 		if (!next_good_key ||
977 		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
978 		     version >= bcachefs_metadata_version_snapshot)) {
979 			/*
980 			 * only do scanning if bch2_bkey_compat() has nothing to
981 			 * do
982 			 */
983 
984 			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
985 				for (next_good_key = 1;
986 				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
987 				     next_good_key++)
988 					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
989 						goto got_good_key;
990 			}
991 
992 			/*
993 			 * didn't find a good key, have to truncate the rest of
994 			 * the bset
995 			 */
996 			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
997 		}
998 got_good_key:
999 		le16_add_cpu(&i->u64s, -next_good_key);
1000 		memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
1001 		set_btree_node_need_rewrite(b);
1002 	}
1003 fsck_err:
1004 	printbuf_exit(&buf);
1005 	return ret;
1006 }
1007 
1008 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
1009 			      struct btree *b, bool have_retry, bool *saw_error)
1010 {
1011 	struct btree_node_entry *bne;
1012 	struct sort_iter *iter;
1013 	struct btree_node *sorted;
1014 	struct bkey_packed *k;
1015 	struct bset *i;
1016 	bool used_mempool, blacklisted;
1017 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
1018 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
1019 	unsigned u64s;
1020 	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
1021 	u64 max_journal_seq = 0;
1022 	struct printbuf buf = PRINTBUF;
1023 	int ret = 0, retry_read = 0, write = READ;
1024 	u64 start_time = local_clock();
1025 
1026 	b->version_ondisk = U16_MAX;
1027 	/* We might get called multiple times on read retry: */
1028 	b->written = 0;
1029 
1030 	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
1031 	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
1032 
1033 	if (bch2_meta_read_fault("btree"))
1034 		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
1035 			  c, ca, b, NULL, NULL,
1036 			  btree_node_fault_injected,
1037 			  "dynamic fault");
1038 
1039 	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
1040 		     -BCH_ERR_btree_node_read_err_must_retry,
1041 		     c, ca, b, NULL, NULL,
1042 		     btree_node_bad_magic,
1043 		     "bad magic: want %llx, got %llx",
1044 		     bset_magic(c), le64_to_cpu(b->data->magic));
1045 
1046 	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
1047 		struct bch_btree_ptr_v2 *bp =
1048 			&bkey_i_to_btree_ptr_v2(&b->key)->v;
1049 
1050 		bch2_bpos_to_text(&buf, b->data->min_key);
1051 		prt_str(&buf, "-");
1052 		bch2_bpos_to_text(&buf, b->data->max_key);
1053 
1054 		btree_err_on(b->data->keys.seq != bp->seq,
1055 			     -BCH_ERR_btree_node_read_err_must_retry,
1056 			     c, ca, b, NULL, NULL,
1057 			     btree_node_bad_seq,
1058 			     "got wrong btree node: got\n%s",
1059 			     (printbuf_reset(&buf),
1060 			      bch2_btree_node_header_to_text(&buf, b->data),
1061 			      buf.buf));
1062 	} else {
1063 		btree_err_on(!b->data->keys.seq,
1064 			     -BCH_ERR_btree_node_read_err_must_retry,
1065 			     c, ca, b, NULL, NULL,
1066 			     btree_node_bad_seq,
1067 			     "bad btree header: seq 0\n%s",
1068 			     (printbuf_reset(&buf),
1069 			      bch2_btree_node_header_to_text(&buf, b->data),
1070 			      buf.buf));
1071 	}
1072 
1073 	while (b->written < (ptr_written ?: btree_sectors(c))) {
1074 		unsigned sectors;
1075 		bool first = !b->written;
1076 
1077 		if (first) {
1078 			bne = NULL;
1079 			i = &b->data->keys;
1080 		} else {
1081 			bne = write_block(b);
1082 			i = &bne->keys;
1083 
1084 			if (i->seq != b->data->keys.seq)
1085 				break;
1086 		}
1087 
1088 		struct nonce nonce = btree_nonce(i, b->written << 9);
1089 		bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));
1090 
1091 		btree_err_on(!good_csum_type,
1092 			     bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i))
1093 			     ? -BCH_ERR_btree_node_read_err_must_retry
1094 			     : -BCH_ERR_btree_node_read_err_want_retry,
1095 			     c, ca, b, i, NULL,
1096 			     bset_unknown_csum,
1097 			     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
1098 
1099 		if (first) {
1100 			if (good_csum_type) {
1101 				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
1102 				bool csum_bad = bch2_crc_cmp(b->data->csum, csum);
1103 				if (csum_bad)
1104 					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1105 
1106 				btree_err_on(csum_bad,
1107 					     -BCH_ERR_btree_node_read_err_want_retry,
1108 					     c, ca, b, i, NULL,
1109 					     bset_bad_csum,
1110 					     "%s",
1111 					     (printbuf_reset(&buf),
1112 					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
1113 					      buf.buf));
1114 
1115 				ret = bset_encrypt(c, i, b->written << 9);
1116 				if (bch2_fs_fatal_err_on(ret, c,
1117 							 "decrypting btree node: %s", bch2_err_str(ret)))
1118 					goto fsck_err;
1119 			}
1120 
1121 			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
1122 				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
1123 				     -BCH_ERR_btree_node_read_err_incompatible,
1124 				     c, NULL, b, NULL, NULL,
1125 				     btree_node_unsupported_version,
1126 				     "btree node does not have NEW_EXTENT_OVERWRITE set");
1127 
1128 			sectors = vstruct_sectors(b->data, c->block_bits);
1129 		} else {
1130 			if (good_csum_type) {
1131 				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1132 				bool csum_bad = bch2_crc_cmp(bne->csum, csum);
1133 				if (ca && csum_bad)
1134 					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1135 
1136 				btree_err_on(csum_bad,
1137 					     -BCH_ERR_btree_node_read_err_want_retry,
1138 					     c, ca, b, i, NULL,
1139 					     bset_bad_csum,
1140 					     "%s",
1141 					     (printbuf_reset(&buf),
1142 					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
1143 					      buf.buf));
1144 
1145 				ret = bset_encrypt(c, i, b->written << 9);
1146 				if (bch2_fs_fatal_err_on(ret, c,
1147 						"decrypting btree node: %s", bch2_err_str(ret)))
1148 					goto fsck_err;
1149 			}
1150 
1151 			sectors = vstruct_sectors(bne, c->block_bits);
1152 		}
1153 
1154 		b->version_ondisk = min(b->version_ondisk,
1155 					le16_to_cpu(i->version));
1156 
1157 		ret = validate_bset(c, ca, b, i, b->written, sectors,
1158 				    READ, have_retry, saw_error);
1159 		if (ret)
1160 			goto fsck_err;
1161 
1162 		if (!b->written)
1163 			btree_node_set_format(b, b->data->format);
1164 
1165 		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
1166 		if (ret)
1167 			goto fsck_err;
1168 
1169 		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
1170 
1171 		blacklisted = bch2_journal_seq_is_blacklisted(c,
1172 					le64_to_cpu(i->journal_seq),
1173 					true);
1174 
1175 		btree_err_on(blacklisted && first,
1176 			     -BCH_ERR_btree_node_read_err_fixable,
1177 			     c, ca, b, i, NULL,
1178 			     bset_blacklisted_journal_seq,
1179 			     "first btree node bset has blacklisted journal seq (%llu)",
1180 			     le64_to_cpu(i->journal_seq));
1181 
1182 		btree_err_on(blacklisted && ptr_written,
1183 			     -BCH_ERR_btree_node_read_err_fixable,
1184 			     c, ca, b, i, NULL,
1185 			     first_bset_blacklisted_journal_seq,
1186 			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
1187 			     le64_to_cpu(i->journal_seq),
1188 			     b->written, b->written + sectors, ptr_written);
1189 
1190 		b->written = min(b->written + sectors, btree_sectors(c));
1191 
1192 		if (blacklisted && !first)
1193 			continue;
1194 
1195 		sort_iter_add(iter,
1196 			      vstruct_idx(i, 0),
1197 			      vstruct_last(i));
1198 
1199 		max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq));
1200 	}
1201 
1202 	if (ptr_written) {
1203 		btree_err_on(b->written < ptr_written,
1204 			     -BCH_ERR_btree_node_read_err_want_retry,
1205 			     c, ca, b, NULL, NULL,
1206 			     btree_node_data_missing,
1207 			     "btree node data missing: expected %u sectors, found %u",
1208 			     ptr_written, b->written);
1209 	} else {
1210 		for (bne = write_block(b);
1211 		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
1212 		     bne = (void *) bne + block_bytes(c))
1213 			btree_err_on(bne->keys.seq == b->data->keys.seq &&
1214 				     !bch2_journal_seq_is_blacklisted(c,
1215 								      le64_to_cpu(bne->keys.journal_seq),
1216 								      true),
1217 				     -BCH_ERR_btree_node_read_err_want_retry,
1218 				     c, ca, b, NULL, NULL,
1219 				     btree_node_bset_after_end,
1220 				     "found bset signature after last bset");
1221 	}
1222 
1223 	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
1224 	sorted->keys.u64s = 0;
1225 
1226 	set_btree_bset(b, b->set, &b->data->keys);
1227 
1228 	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
1229 	memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
1230 			btree_buf_bytes(b) -
1231 			sizeof(struct btree_node) -
1232 			b->nr.live_u64s * sizeof(u64));
1233 
1234 	u64s = le16_to_cpu(sorted->keys.u64s);
1235 	*sorted = *b->data;
1236 	sorted->keys.u64s = cpu_to_le16(u64s);
1237 	swap(sorted, b->data);
1238 	set_btree_bset(b, b->set, &b->data->keys);
1239 	b->nsets = 1;
1240 	b->data->keys.journal_seq = cpu_to_le64(max_journal_seq);
1241 
1242 	BUG_ON(b->nr.live_u64s != u64s);
1243 
1244 	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
1245 
1246 	if (updated_range)
1247 		bch2_btree_node_drop_keys_outside_node(b);
1248 
1249 	i = &b->data->keys;
1250 	for (k = i->start; k != vstruct_last(i);) {
1251 		struct bkey tmp;
1252 		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
1253 
1254 		ret = btree_node_bkey_val_validate(c, b, u.s_c, READ);
1255 		if (ret == -BCH_ERR_fsck_delete_bkey ||
1256 		    (bch2_inject_invalid_keys &&
1257 		     !bversion_cmp(u.k->bversion, MAX_VERSION))) {
1258 			btree_keys_account_key_drop(&b->nr, 0, k);
1259 
1260 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1261 			memmove_u64s_down(k, bkey_p_next(k),
1262 					  (u64 *) vstruct_end(i) - (u64 *) k);
1263 			set_btree_bset_end(b, b->set);
1264 			set_btree_node_need_rewrite(b);
1265 			continue;
1266 		}
1267 		if (ret)
1268 			goto fsck_err;
1269 
1270 		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
1271 			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
1272 
1273 			bp.v->mem_ptr = 0;
1274 		}
1275 
1276 		k = bkey_p_next(k);
1277 	}
1278 
1279 	bch2_bset_build_aux_tree(b, b->set, false);
1280 
1281 	set_needs_whiteout(btree_bset_first(b), true);
1282 
1283 	btree_node_reset_sib_u64s(b);
1284 
1285 	rcu_read_lock();
1286 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
1287 		struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
1288 
1289 		if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
1290 			set_btree_node_need_rewrite(b);
1291 	}
1292 	rcu_read_unlock();
1293 
1294 	if (!ptr_written)
1295 		set_btree_node_need_rewrite(b);
1296 out:
1297 	mempool_free(iter, &c->fill_iter);
1298 	printbuf_exit(&buf);
1299 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
1300 	return retry_read;
1301 fsck_err:
1302 	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
1303 	    ret == -BCH_ERR_btree_node_read_err_must_retry) {
1304 		retry_read = 1;
1305 	} else {
1306 		set_btree_node_read_error(b);
1307 		bch2_btree_lost_data(c, b->c.btree_id);
1308 	}
1309 	goto out;
1310 }
1311 
1312 static void btree_node_read_work(struct work_struct *work)
1313 {
1314 	struct btree_read_bio *rb =
1315 		container_of(work, struct btree_read_bio, work);
1316 	struct bch_fs *c	= rb->c;
1317 	struct bch_dev *ca	= rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
1318 	struct btree *b		= rb->b;
1319 	struct bio *bio		= &rb->bio;
1320 	struct bch_io_failures failed = { .nr = 0 };
1321 	struct printbuf buf = PRINTBUF;
1322 	bool saw_error = false;
1323 	bool retry = false;
1324 	bool can_retry;
1325 
1326 	goto start;
1327 	while (1) {
1328 		retry = true;
1329 		bch_info(c, "retrying read");
1330 		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
1331 		rb->have_ioref		= ca != NULL;
1332 		rb->start_time		= local_clock();
1333 		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1334 		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
1335 		bio->bi_iter.bi_size	= btree_buf_bytes(b);
1336 
1337 		if (rb->have_ioref) {
1338 			bio_set_dev(bio, ca->disk_sb.bdev);
1339 			submit_bio_wait(bio);
1340 		} else {
1341 			bio->bi_status = BLK_STS_REMOVED;
1342 		}
1343 
1344 		bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
1345 					   rb->start_time, !bio->bi_status);
1346 start:
1347 		printbuf_reset(&buf);
1348 		bch2_btree_pos_to_text(&buf, c, b);
1349 
1350 		if (ca && bio->bi_status)
1351 			bch_err_dev_ratelimited(ca,
1352 					"btree read error %s for %s",
1353 					bch2_blk_status_to_str(bio->bi_status), buf.buf);
1354 		if (rb->have_ioref)
1355 			percpu_ref_put(&ca->io_ref);
1356 		rb->have_ioref = false;
1357 
1358 		bch2_mark_io_failure(&failed, &rb->pick, false);
1359 
1360 		can_retry = bch2_bkey_pick_read_device(c,
1361 				bkey_i_to_s_c(&b->key),
1362 				&failed, &rb->pick, -1) > 0;
1363 
1364 		if (!bio->bi_status &&
1365 		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
1366 			if (retry)
1367 				bch_info(c, "retry success");
1368 			break;
1369 		}
1370 
1371 		saw_error = true;
1372 
1373 		if (!can_retry) {
1374 			set_btree_node_read_error(b);
1375 			bch2_btree_lost_data(c, b->c.btree_id);
1376 			break;
1377 		}
1378 	}
1379 
1380 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1381 			       rb->start_time);
1382 	bio_put(&rb->bio);
1383 
1384 	if ((saw_error ||
1385 	     btree_node_need_rewrite(b)) &&
1386 	    !btree_node_read_error(b) &&
1387 	    c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
1388 		if (saw_error) {
1389 			printbuf_reset(&buf);
1390 			bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
1391 			prt_str(&buf, " ");
1392 			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
1393 			bch_err_ratelimited(c, "%s: rewriting btree node due to error\n  %s",
1394 					    __func__, buf.buf);
1395 		}
1396 
1397 		bch2_btree_node_rewrite_async(c, b);
1398 	}
1399 
1400 	printbuf_exit(&buf);
1401 	clear_btree_node_read_in_flight(b);
1402 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1403 }
1404 
1405 static void btree_node_read_endio(struct bio *bio)
1406 {
1407 	struct btree_read_bio *rb =
1408 		container_of(bio, struct btree_read_bio, bio);
1409 	struct bch_fs *c	= rb->c;
1410 	struct bch_dev *ca	= rb->have_ioref
1411 		? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
1412 
1413 	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
1414 				   rb->start_time, !bio->bi_status);
1415 
1416 	queue_work(c->btree_read_complete_wq, &rb->work);
1417 }
1418 
1419 struct btree_node_read_all {
1420 	struct closure		cl;
1421 	struct bch_fs		*c;
1422 	struct btree		*b;
1423 	unsigned		nr;
1424 	void			*buf[BCH_REPLICAS_MAX];
1425 	struct bio		*bio[BCH_REPLICAS_MAX];
1426 	blk_status_t		err[BCH_REPLICAS_MAX];
1427 };
1428 
1429 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1430 {
1431 	struct btree_node *bn = data;
1432 	struct btree_node_entry *bne;
1433 	unsigned offset = 0;
1434 
1435 	if (le64_to_cpu(bn->magic) != bset_magic(c))
1436 		return 0;
1437 
1438 	while (offset < btree_sectors(c)) {
1439 		if (!offset) {
1440 			offset += vstruct_sectors(bn, c->block_bits);
1441 		} else {
1442 			bne = data + (offset << 9);
1443 			if (bne->keys.seq != bn->keys.seq)
1444 				break;
1445 			offset += vstruct_sectors(bne, c->block_bits);
1446 		}
1447 	}
1448 
1449 	return offset;
1450 }
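/*
 * On-disk layout sketch (editor's note): a written node is a btree_node
 * header containing the first bset, followed by zero or more
 * btree_node_entry blocks (checksum + bset), all sharing keys.seq:
 *
 *	sector 0	struct btree_node	{ csum, magic, ..., keys }
 *	sector s1	struct btree_node_entry	{ csum, keys }
 *	sector s2	struct btree_node_entry	{ csum, keys }
 *
 * The scan above accumulates vstruct_sectors() for each entry and stops
 * at the first one whose seq doesn't match.
 */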
1451 
1452 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1453 {
1454 	struct btree_node *bn = data;
1455 	struct btree_node_entry *bne;
1456 
1457 	if (!offset)
1458 		return false;
1459 
1460 	while (offset < btree_sectors(c)) {
1461 		bne = data + (offset << 9);
1462 		if (bne->keys.seq == bn->keys.seq)
1463 			return true;
1464 		offset++;
1465 	}
1466 
1467 	return false;
1469 }
1470 
1471 static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
1472 {
1473 	closure_type(ra, struct btree_node_read_all, cl);
1474 	struct bch_fs *c = ra->c;
1475 	struct btree *b = ra->b;
1476 	struct printbuf buf = PRINTBUF;
1477 	bool dump_bset_maps = false;
1478 	bool have_retry = false;
1479 	int ret = 0, best = -1, write = READ;
1480 	unsigned i, written = 0, written2 = 0;
1481 	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1482 		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1483 	bool _saw_error = false, *saw_error = &_saw_error;
1484 
1485 	for (i = 0; i < ra->nr; i++) {
1486 		struct btree_node *bn = ra->buf[i];
1487 
1488 		if (ra->err[i])
1489 			continue;
1490 
1491 		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1492 		    (seq && seq != bn->keys.seq))
1493 			continue;
1494 
1495 		if (best < 0) {
1496 			best = i;
1497 			written = btree_node_sectors_written(c, bn);
1498 			continue;
1499 		}
1500 
1501 		written2 = btree_node_sectors_written(c, ra->buf[i]);
1502 		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
1503 				 c, NULL, b, NULL, NULL,
1504 				 btree_node_replicas_sectors_written_mismatch,
1505 				 "btree node sectors written mismatch: %u != %u",
1506 				 written, written2) ||
1507 		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1508 				 -BCH_ERR_btree_node_read_err_fixable,
1509 				 c, NULL, b, NULL, NULL,
1510 				 btree_node_bset_after_end,
1511 				 "found bset signature after last bset") ||
1512 		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1513 				 -BCH_ERR_btree_node_read_err_fixable,
1514 				 c, NULL, b, NULL, NULL,
1515 				 btree_node_replicas_data_mismatch,
1516 				 "btree node replicas content mismatch"))
1517 			dump_bset_maps = true;
1518 
1519 		if (written2 > written) {
1520 			written = written2;
1521 			best = i;
1522 		}
1523 	}
1524 fsck_err:
1525 	if (dump_bset_maps) {
1526 		for (i = 0; i < ra->nr; i++) {
1527 			struct btree_node *bn = ra->buf[i];
1528 			struct btree_node_entry *bne = NULL;
1529 			unsigned offset = 0, sectors;
1530 			bool gap = false;
1531 
1532 			if (ra->err[i])
1533 				continue;
1534 
1535 			printbuf_reset(&buf);
1536 
1537 			while (offset < btree_sectors(c)) {
1538 				if (!offset) {
1539 					sectors = vstruct_sectors(bn, c->block_bits);
1540 				} else {
1541 					bne = ra->buf[i] + (offset << 9);
1542 					if (bne->keys.seq != bn->keys.seq)
1543 						break;
1544 					sectors = vstruct_sectors(bne, c->block_bits);
1545 				}
1546 
1547 				prt_printf(&buf, " %u-%u", offset, offset + sectors);
1548 				if (bne && bch2_journal_seq_is_blacklisted(c,
1549 							le64_to_cpu(bne->keys.journal_seq), false))
1550 					prt_printf(&buf, "*");
1551 				offset += sectors;
1552 			}
1553 
1554 			while (offset < btree_sectors(c)) {
1555 				bne = ra->buf[i] + (offset << 9);
1556 				if (bne->keys.seq == bn->keys.seq) {
1557 					if (!gap)
1558 						prt_printf(&buf, " GAP");
1559 					gap = true;
1560 
1561 					sectors = vstruct_sectors(bne, c->block_bits);
1562 					prt_printf(&buf, " %u-%u", offset, offset + sectors);
1563 					if (bch2_journal_seq_is_blacklisted(c,
1564 							le64_to_cpu(bne->keys.journal_seq), false))
1565 						prt_printf(&buf, "*");
1566 				}
1567 				offset++;
1568 			}
1569 
1570 			bch_err(c, "replica %u:%s", i, buf.buf);
1571 		}
1572 	}
1573 
1574 	if (best >= 0) {
1575 		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
1576 		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
1577 	} else {
1578 		ret = -1;
1579 	}
1580 
1581 	if (ret) {
1582 		set_btree_node_read_error(b);
1583 		bch2_btree_lost_data(c, b->c.btree_id);
1584 	} else if (*saw_error)
1585 		bch2_btree_node_rewrite_async(c, b);
1586 
1587 	for (i = 0; i < ra->nr; i++) {
1588 		mempool_free(ra->buf[i], &c->btree_bounce_pool);
1589 		bio_put(ra->bio[i]);
1590 	}
1591 
1592 	closure_debug_destroy(&ra->cl);
1593 	kfree(ra);
1594 	printbuf_exit(&buf);
1595 
1596 	clear_btree_node_read_in_flight(b);
1597 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1598 }
1599 
1600 static void btree_node_read_all_replicas_endio(struct bio *bio)
1601 {
1602 	struct btree_read_bio *rb =
1603 		container_of(bio, struct btree_read_bio, bio);
1604 	struct bch_fs *c	= rb->c;
1605 	struct btree_node_read_all *ra = rb->ra;
1606 
1607 	if (rb->have_ioref) {
1608 		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
1609 
1610 		bch2_latency_acct(ca, rb->start_time, READ);
1611 	}
1612 
1613 	ra->err[rb->idx] = bio->bi_status;
1614 	closure_put(&ra->cl);
1615 }
1616 
1617 /*
1618  * XXX This allocates multiple times from the same mempools, and can deadlock
1619  * under sufficient memory pressure (but is only a debug path)
1620  */
1621 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1622 {
1623 	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1624 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1625 	const union bch_extent_entry *entry;
1626 	struct extent_ptr_decoded pick;
1627 	struct btree_node_read_all *ra;
1628 	unsigned i;
1629 
1630 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
1631 	if (!ra)
1632 		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1633 
1634 	closure_init(&ra->cl, NULL);
1635 	ra->c	= c;
1636 	ra->b	= b;
1637 	ra->nr	= bch2_bkey_nr_ptrs(k);
1638 
1639 	for (i = 0; i < ra->nr; i++) {
1640 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1641 		ra->bio[i] = bio_alloc_bioset(NULL,
1642 					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
1643 					      REQ_OP_READ|REQ_SYNC|REQ_META,
1644 					      GFP_NOFS,
1645 					      &c->btree_bio);
1646 	}
1647 
1648 	i = 0;
1649 	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1650 		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1651 		struct btree_read_bio *rb =
1652 			container_of(ra->bio[i], struct btree_read_bio, bio);
1653 		rb->c			= c;
1654 		rb->b			= b;
1655 		rb->ra			= ra;
1656 		rb->start_time		= local_clock();
1657 		rb->have_ioref		= ca != NULL;
1658 		rb->idx			= i;
1659 		rb->pick		= pick;
1660 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1661 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
1662 		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
1663 
1664 		if (rb->have_ioref) {
1665 			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1666 				     bio_sectors(&rb->bio));
1667 			bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1668 
1669 			closure_get(&ra->cl);
1670 			submit_bio(&rb->bio);
1671 		} else {
1672 			ra->err[i] = BLK_STS_REMOVED;
1673 		}
1674 
1675 		i++;
1676 	}
1677 
1678 	if (sync) {
1679 		closure_sync(&ra->cl);
1680 		btree_node_read_all_replicas_done(&ra->cl.work);
1681 	} else {
1682 		continue_at(&ra->cl, btree_node_read_all_replicas_done,
1683 			    c->btree_read_complete_wq);
1684 	}
1685 
1686 	return 0;
1687 }
1688 
1689 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
1690 			  bool sync)
1691 {
1692 	struct bch_fs *c = trans->c;
1693 	struct extent_ptr_decoded pick;
1694 	struct btree_read_bio *rb;
1695 	struct bch_dev *ca;
1696 	struct bio *bio;
1697 	int ret;
1698 
1699 	trace_and_count(c, btree_node_read, trans, b);
1700 
1701 	if (bch2_verify_all_btree_replicas &&
1702 	    !btree_node_read_all_replicas(c, b, sync))
1703 		return;
1704 
1705 	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1706 					 NULL, &pick, -1);
1707 
1708 	if (ret <= 0) {
1709 		struct printbuf buf = PRINTBUF;
1710 
1711 		prt_str(&buf, "btree node read error: no device to read from\n at ");
1712 		bch2_btree_pos_to_text(&buf, c, b);
1713 		bch_err_ratelimited(c, "%s", buf.buf);
1714 
1715 		if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1716 		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
1717 			bch2_fatal_error(c);
1718 
1719 		set_btree_node_read_error(b);
1720 		bch2_btree_lost_data(c, b->c.btree_id);
1721 		clear_btree_node_read_in_flight(b);
1722 		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1723 		printbuf_exit(&buf);
1724 		return;
1725 	}
1726 
1727 	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1728 
1729 	bio = bio_alloc_bioset(NULL,
1730 			       buf_pages(b->data, btree_buf_bytes(b)),
1731 			       REQ_OP_READ|REQ_SYNC|REQ_META,
1732 			       GFP_NOFS,
1733 			       &c->btree_bio);
1734 	rb = container_of(bio, struct btree_read_bio, bio);
1735 	rb->c			= c;
1736 	rb->b			= b;
1737 	rb->ra			= NULL;
1738 	rb->start_time		= local_clock();
1739 	rb->have_ioref		= ca != NULL;
1740 	rb->pick		= pick;
1741 	INIT_WORK(&rb->work, btree_node_read_work);
1742 	bio->bi_iter.bi_sector	= pick.ptr.offset;
1743 	bio->bi_end_io		= btree_node_read_endio;
1744 	bch2_bio_map(bio, b->data, btree_buf_bytes(b));
1745 
1746 	if (rb->have_ioref) {
1747 		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1748 			     bio_sectors(bio));
1749 		bio_set_dev(bio, ca->disk_sb.bdev);
1750 
1751 		if (sync) {
1752 			submit_bio_wait(bio);
1753 			bch2_latency_acct(ca, rb->start_time, READ);
1754 			btree_node_read_work(&rb->work);
1755 		} else {
1756 			submit_bio(bio);
1757 		}
1758 	} else {
1759 		bio->bi_status = BLK_STS_REMOVED;
1760 
1761 		if (sync)
1762 			btree_node_read_work(&rb->work);
1763 		else
1764 			queue_work(c->btree_read_complete_wq, &rb->work);
1765 	}
1766 }
1767 
1768 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1769 				  const struct bkey_i *k, unsigned level)
1770 {
1771 	struct bch_fs *c = trans->c;
1772 	struct closure cl;
1773 	struct btree *b;
1774 	int ret;
1775 
1776 	closure_init_stack(&cl);
1777 
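	/*
	 * Taking the cannibalize lock guarantees the node allocation below
	 * can't fail - retry until we get it, waiting on the closure between
	 * attempts:
	 */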
1778 	do {
1779 		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1780 		closure_sync(&cl);
1781 	} while (ret);
1782 
1783 	b = bch2_btree_node_mem_alloc(trans, level != 0);
1784 	bch2_btree_cache_cannibalize_unlock(trans);
1785 
1786 	BUG_ON(IS_ERR(b));
1787 
1788 	bkey_copy(&b->key, k);
1789 	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1790 
1791 	set_btree_node_read_in_flight(b);
1792 
1793 	/* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */
1794 	bch2_trans_unlock(trans);
1795 	bch2_btree_node_read(trans, b, true);
1796 
1797 	if (btree_node_read_error(b)) {
1798 		mutex_lock(&c->btree_cache.lock);
1799 		bch2_btree_node_hash_remove(&c->btree_cache, b);
1800 		mutex_unlock(&c->btree_cache.lock);
1801 
1802 		ret = -BCH_ERR_btree_node_read_error;
1803 		goto err;
1804 	}
1805 
1806 	bch2_btree_set_root_for_read(c, b);
1807 err:
1808 	six_unlock_write(&b->c.lock);
1809 	six_unlock_intent(&b->c.lock);
1810 
1811 	return ret;
1812 }
1813 
1814 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1815 			const struct bkey_i *k, unsigned level)
1816 {
1817 	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
1818 }
1819 
1820 struct btree_node_scrub {
1821 	struct bch_fs		*c;
1822 	struct bch_dev		*ca;
1823 	void			*buf;
1824 	bool			used_mempool;
1825 	unsigned		written;
1826 
1827 	enum btree_id		btree;
1828 	unsigned		level;
1829 	struct bkey_buf		key;
1830 	__le64			seq;
1831 
1832 	struct work_struct	work;
1833 	struct bio		bio;
1834 };
1835 
1836 static bool btree_node_scrub_check(struct bch_fs *c, struct btree_node *data, unsigned ptr_written,
1837 				   struct printbuf *err)
1838 {
1839 	unsigned written = 0;
1840 
1841 	if (le64_to_cpu(data->magic) != bset_magic(c)) {
1842 		prt_printf(err, "bad magic: want %llx, got %llx",
1843 			   bset_magic(c), le64_to_cpu(data->magic));
1844 		return false;
1845 	}
1846 
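	/*
	 * Walk the bsets in order: the first lives in the btree_node header
	 * itself, later ones in btree_node_entries at 512 byte sector
	 * offsets. When we don't know how much was written, a seq that
	 * doesn't match the header marks the end of valid data:
	 */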
1847 	while (written < (ptr_written ?: btree_sectors(c))) {
1848 		struct btree_node_entry *bne;
1849 		struct bset *i;
1850 		bool first = !written;
1851 
1852 		if (first) {
1853 			bne = NULL;
1854 			i = &data->keys;
1855 		} else {
1856 			bne = (void *) data + (written << 9);
1857 			i = &bne->keys;
1858 
1859 			if (!ptr_written && i->seq != data->keys.seq)
1860 				break;
1861 		}
1862 
1863 		struct nonce nonce = btree_nonce(i, written << 9);
1864 		bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));
1865 
1866 		if (first) {
1867 			if (good_csum_type) {
1868 				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, data);
1869 				if (bch2_crc_cmp(data->csum, csum)) {
1870 					bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), data->csum, csum);
1871 					return false;
1872 				}
1873 			}
1874 
1875 			written += vstruct_sectors(data, c->block_bits);
1876 		} else {
1877 			if (good_csum_type) {
1878 				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1879 				if (bch2_crc_cmp(bne->csum, csum)) {
1880 					bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), bne->csum, csum);
1881 					return false;
1882 				}
1883 			}
1884 
1885 			written += vstruct_sectors(bne, c->block_bits);
1886 		}
1887 	}
1888 
1889 	return true;
1890 }
1891 
1892 static void btree_node_scrub_work(struct work_struct *work)
1893 {
1894 	struct btree_node_scrub *scrub = container_of(work, struct btree_node_scrub, work);
1895 	struct bch_fs *c = scrub->c;
1896 	struct printbuf err = PRINTBUF;
1897 
1898 	__bch2_btree_pos_to_text(&err, c, scrub->btree, scrub->level,
1899 				 bkey_i_to_s_c(scrub->key.k));
1900 	prt_newline(&err);
1901 
1902 	if (!btree_node_scrub_check(c, scrub->buf, scrub->written, &err)) {
1903 		struct btree_trans *trans = bch2_trans_get(c);
1904 
1905 		struct btree_iter iter;
1906 		bch2_trans_node_iter_init(trans, &iter, scrub->btree,
1907 					  scrub->key.k->k.p, 0, scrub->level - 1, 0);
1908 
1909 		struct btree *b;
1910 		int ret = lockrestart_do(trans, PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(&iter)));
1911 		if (ret)
1912 			goto err;
1913 
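		/*
		 * Only rewrite if the node's seq still matches what we
		 * scrubbed - otherwise the node was rewritten after the scrub
		 * was issued and the bad replica is already stale:
		 */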
1914 		if (bkey_i_to_btree_ptr_v2(&b->key)->v.seq == scrub->seq) {
1915 			bch_err(c, "error validating btree node during scrub on %s at btree %s",
1916 				scrub->ca->name, err.buf);
1917 
1918 			ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
1919 		}
1920 err:
1921 		bch2_trans_iter_exit(trans, &iter);
1922 		bch2_trans_begin(trans);
1923 		bch2_trans_put(trans);
1924 	}
1925 
1926 	printbuf_exit(&err);
1927 	bch2_bkey_buf_exit(&scrub->key, c);
1928 	btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
1929 	percpu_ref_put(&scrub->ca->io_ref);
1930 	kfree(scrub);
1931 	bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
1932 }
1933 
1934 static void btree_node_scrub_endio(struct bio *bio)
1935 {
1936 	struct btree_node_scrub *scrub = container_of(bio, struct btree_node_scrub, bio);
1937 
1938 	queue_work(scrub->c->btree_read_complete_wq, &scrub->work);
1939 }
1940 
1941 int bch2_btree_node_scrub(struct btree_trans *trans,
1942 			  enum btree_id btree, unsigned level,
1943 			  struct bkey_s_c k, unsigned dev)
1944 {
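	/*
	 * Only btree_ptr_v2 keys carry sectors_written and a node seq, both
	 * of which the scrub depends on:
	 */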
1945 	if (k.k->type != KEY_TYPE_btree_ptr_v2)
1946 		return 0;
1947 
1948 	struct bch_fs *c = trans->c;
1949 
1950 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_node_scrub))
1951 		return -BCH_ERR_erofs_no_writes;
1952 
1953 	struct extent_ptr_decoded pick;
1954 	int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick, dev);
1955 	if (ret <= 0)
1956 		goto err;
1957 
1958 	struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1959 	if (!ca) {
1960 		ret = -BCH_ERR_device_offline;
1961 		goto err;
1962 	}
1963 
1964 	bool used_mempool = false;
1965 	void *buf = btree_bounce_alloc(c, c->opts.btree_node_size, &used_mempool);
1966 
1967 	unsigned vecs = buf_pages(buf, c->opts.btree_node_size);
1968 
1969 	struct btree_node_scrub *scrub =
1970 		kzalloc(sizeof(*scrub) + sizeof(struct bio_vec) * vecs, GFP_KERNEL);
1971 	if (!scrub) {
1972 		ret = -ENOMEM;
1973 		goto err_free;
1974 	}
1975 
1976 	scrub->c		= c;
1977 	scrub->ca		= ca;
1978 	scrub->buf		= buf;
1979 	scrub->used_mempool	= used_mempool;
1980 	scrub->written		= btree_ptr_sectors_written(k);
1981 
1982 	scrub->btree		= btree;
1983 	scrub->level		= level;
1984 	bch2_bkey_buf_init(&scrub->key);
1985 	bch2_bkey_buf_reassemble(&scrub->key, c, k);
1986 	scrub->seq		= bkey_s_c_to_btree_ptr_v2(k).v->seq;
1987 
1988 	INIT_WORK(&scrub->work, btree_node_scrub_work);
1989 
1990 	bio_init(&scrub->bio, ca->disk_sb.bdev, scrub->bio.bi_inline_vecs, vecs, REQ_OP_READ);
1991 	bch2_bio_map(&scrub->bio, scrub->buf, c->opts.btree_node_size);
1992 	scrub->bio.bi_iter.bi_sector	= pick.ptr.offset;
1993 	scrub->bio.bi_end_io		= btree_node_scrub_endio;
1994 	submit_bio(&scrub->bio);
1995 	return 0;
1996 err_free:
1997 	btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
1998 	percpu_ref_put(&ca->io_ref);
1999 err:
2000 	bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
2001 	return ret;
2002 }
2003 
2004 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
2005 				      struct btree_write *w)
2006 {
2007 	unsigned long old, new;
2008 
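	/*
	 * will_make_reachable is a tagged pointer: if the low bit is set, a
	 * btree_update holds a closure ref on our behalf. Clear the tag
	 * atomically, and drop the ref iff we're the ones who cleared it:
	 */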
2009 	old = READ_ONCE(b->will_make_reachable);
2010 	do {
2011 		new = old;
2012 		if (!(old & 1))
2013 			break;
2014 
2015 		new &= ~1UL;
2016 	} while (!try_cmpxchg(&b->will_make_reachable, &old, new));
2017 
2018 	if (old & 1)
2019 		closure_put(&((struct btree_update *) new)->cl);
2020 
2021 	bch2_journal_pin_drop(&c->journal, &w->journal);
2022 }
2023 
2024 static void __btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
2025 {
2026 	struct btree_write *w = btree_prev_write(b);
2027 	unsigned long old, new;
2028 	unsigned type = 0;
2029 
2030 	bch2_btree_complete_write(c, b, w);
2031 
2032 	if (start_time)
2033 		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_write], start_time);
2034 
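	/*
	 * If the node was redirtied and still needs a write (and nothing
	 * blocks one), claim the next write by leaving write_in_flight set
	 * and flipping write_idx; otherwise clear the in-flight bits and
	 * wake any waiters:
	 */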
2035 	old = READ_ONCE(b->flags);
2036 	do {
2037 		new = old;
2038 
2039 		if ((old & (1U << BTREE_NODE_dirty)) &&
2040 		    (old & (1U << BTREE_NODE_need_write)) &&
2041 		    !(old & (1U << BTREE_NODE_never_write)) &&
2042 		    !(old & (1U << BTREE_NODE_write_blocked)) &&
2043 		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
2044 			new &= ~(1U << BTREE_NODE_dirty);
2045 			new &= ~(1U << BTREE_NODE_need_write);
2046 			new |=  (1U << BTREE_NODE_write_in_flight);
2047 			new |=  (1U << BTREE_NODE_write_in_flight_inner);
2048 			new |=  (1U << BTREE_NODE_just_written);
2049 			new ^=  (1U << BTREE_NODE_write_idx);
2050 
2051 			type = new & BTREE_WRITE_TYPE_MASK;
2052 			new &= ~BTREE_WRITE_TYPE_MASK;
2053 		} else {
2054 			new &= ~(1U << BTREE_NODE_write_in_flight);
2055 			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
2056 		}
2057 	} while (!try_cmpxchg(&b->flags, &old, new));
2058 
2059 	if (new & (1U << BTREE_NODE_write_in_flight))
2060 		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
2061 	else
2062 		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
2063 }
2064 
2065 static void btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
2066 {
2067 	struct btree_trans *trans = bch2_trans_get(c);
2068 
2069 	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
2070 
2071 	/* we don't need transaction context anymore after we got the lock. */
2072 	bch2_trans_put(trans);
2073 	__btree_node_write_done(c, b, start_time);
2074 	six_unlock_read(&b->c.lock);
2075 }
2076 
2077 static void btree_node_write_work(struct work_struct *work)
2078 {
2079 	struct btree_write_bio *wbio =
2080 		container_of(work, struct btree_write_bio, work);
2081 	struct bch_fs *c	= wbio->wbio.c;
2082 	struct btree *b		= wbio->wbio.bio.bi_private;
2083 	u64 start_time		= wbio->start_time;
2084 	int ret = 0;
2085 
2086 	btree_bounce_free(c,
2087 		wbio->data_bytes,
2088 		wbio->wbio.used_mempool,
2089 		wbio->data);
2090 
2091 	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
2092 		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
2093 
2094 	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
2095 		ret = -BCH_ERR_btree_node_write_all_failed;
2096 		goto err;
2097 	}
2098 
2099 	if (wbio->wbio.first_btree_write) {
2100 		if (wbio->wbio.failed.nr) {
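			/* XXX: failed replicas on a node's first write aren't
			 * handled here yet */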
2101 
2102 		}
2103 	} else {
2104 		ret = bch2_trans_do(c,
2105 			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
2106 					BCH_WATERMARK_interior_updates|
2107 					BCH_TRANS_COMMIT_journal_reclaim|
2108 					BCH_TRANS_COMMIT_no_enospc|
2109 					BCH_TRANS_COMMIT_no_check_rw,
2110 					!wbio->wbio.failed.nr));
2111 		if (ret)
2112 			goto err;
2113 	}
2114 out:
2115 	bio_put(&wbio->wbio.bio);
2116 	btree_node_write_done(c, b, start_time);
2117 	return;
2118 err:
2119 	set_btree_node_noevict(b);
2120 
2121 	if (!bch2_err_matches(ret, EROFS)) {
2122 		struct printbuf buf = PRINTBUF;
2123 		prt_printf(&buf, "writing btree node: %s\n  ", bch2_err_str(ret));
2124 		bch2_btree_pos_to_text(&buf, c, b);
2125 		bch2_fs_fatal_error(c, "%s", buf.buf);
2126 		printbuf_exit(&buf);
2127 	}
2128 	goto out;
2129 }
2130 
2131 static void btree_node_write_endio(struct bio *bio)
2132 {
2133 	struct bch_write_bio *wbio	= to_wbio(bio);
2134 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
2135 	struct bch_write_bio *orig	= parent ?: wbio;
2136 	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
2137 	struct bch_fs *c		= wbio->c;
2138 	struct btree *b			= wbio->bio.bi_private;
2139 	struct bch_dev *ca		= wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
2140 
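	/*
	 * Replicated writes may have been split into one bio per device;
	 * splits complete into their parent, and only the final completion
	 * (no parent) punts to btree_node_write_work():
	 */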
2141 	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
2142 				   wbio->submit_time, !bio->bi_status);
2143 
2144 	if (ca && bio->bi_status) {
2145 		struct printbuf buf = PRINTBUF;
2146 		prt_printf(&buf, "btree write error: %s\n  ",
2147 			   bch2_blk_status_to_str(bio->bi_status));
2148 		bch2_btree_pos_to_text(&buf, c, b);
2149 		bch_err_dev_ratelimited(ca, "%s", buf.buf);
2150 		printbuf_exit(&buf);
2151 	}
2152 
2153 	if (bio->bi_status) {
2154 		unsigned long flags;
2155 		spin_lock_irqsave(&c->btree_write_error_lock, flags);
2156 		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
2157 		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
2158 	}
2159 
2160 	if (wbio->have_ioref)
2161 		percpu_ref_put(&ca->io_ref);
2162 
2163 	if (parent) {
2164 		bio_put(bio);
2165 		bio_endio(&parent->bio);
2166 		return;
2167 	}
2168 
2169 	clear_btree_node_write_in_flight_inner(b);
2170 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
2171 	INIT_WORK(&wb->work, btree_node_write_work);
2172 	queue_work(c->btree_io_complete_wq, &wb->work);
2173 }
2174 
2175 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
2176 				   struct bset *i, unsigned sectors)
2177 {
2178 	bool saw_error;
2179 
2180 	int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key),
2181 				     (struct bkey_validate_context) {
2182 					.from	= BKEY_VALIDATE_btree_node,
2183 					.level	= b->c.level + 1,
2184 					.btree	= b->c.btree_id,
2185 					.flags	= BCH_VALIDATE_write,
2186 				     });
2187 	if (ret) {
2188 		bch2_fs_inconsistent(c, "invalid btree node key before write");
2189 		return ret;
2190 	}
2191 
2192 	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
2193 		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
2194 	if (ret) {
2195 		bch2_inconsistent_error(c);
2196 		dump_stack();
2197 	}
2198 
2199 	return ret;
2200 }
2201 
2202 static void btree_write_submit(struct work_struct *work)
2203 {
2204 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
2205 	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
2206 
2207 	bkey_copy(&tmp.k, &wbio->key);
2208 
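	/*
	 * wbio->key points at the start of the node; offset each pointer by
	 * sector_offset (b->written at submit time) so this write lands
	 * after the sectors already written:
	 */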
2209 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
2210 		ptr->offset += wbio->sector_offset;
2211 
2212 	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
2213 				  &tmp.k, false);
2214 }
2215 
2216 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
2217 {
2218 	struct btree_write_bio *wbio;
2219 	struct bset *i;
2220 	struct btree_node *bn = NULL;
2221 	struct btree_node_entry *bne = NULL;
2222 	struct sort_iter_stack sort_iter;
2223 	struct nonce nonce;
2224 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
2225 	u64 seq = 0;
2226 	bool used_mempool;
2227 	unsigned long old, new;
2228 	bool validate_before_checksum = false;
2229 	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
2230 	void *data;
2231 	u64 start_time = local_clock();
2232 	int ret;
2233 
2234 	if (flags & BTREE_WRITE_ALREADY_STARTED)
2235 		goto do_write;
2236 
2237 	/*
2238 	 * We may only have a read lock on the btree node - the dirty bit is our
2239 	 * "lock" against racing with other threads that may be trying to start
2240 	 * a write, we do a write iff we clear the dirty bit. Since setting the
2241 	 * dirty bit requires a write lock, we can't race with other threads
2242 	 * redirtying it:
2243 	 */
2244 	old = READ_ONCE(b->flags);
2245 	do {
2246 		new = old;
2247 
2248 		if (!(old & (1 << BTREE_NODE_dirty)))
2249 			return;
2250 
2251 		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
2252 		    !(old & (1 << BTREE_NODE_need_write)))
2253 			return;
2254 
2255 		if (old &
2256 		    ((1 << BTREE_NODE_never_write)|
2257 		     (1 << BTREE_NODE_write_blocked)))
2258 			return;
2259 
2260 		if (b->written &&
2261 		    (old & (1 << BTREE_NODE_will_make_reachable)))
2262 			return;
2263 
2264 		if (old & (1 << BTREE_NODE_write_in_flight))
2265 			return;
2266 
2267 		if (flags & BTREE_WRITE_ONLY_IF_NEED)
2268 			type = new & BTREE_WRITE_TYPE_MASK;
2269 		new &= ~BTREE_WRITE_TYPE_MASK;
2270 
2271 		new &= ~(1 << BTREE_NODE_dirty);
2272 		new &= ~(1 << BTREE_NODE_need_write);
2273 		new |=  (1 << BTREE_NODE_write_in_flight);
2274 		new |=  (1 << BTREE_NODE_write_in_flight_inner);
2275 		new |=  (1 << BTREE_NODE_just_written);
2276 		new ^=  (1 << BTREE_NODE_write_idx);
2277 	} while (!try_cmpxchg_acquire(&b->flags, &old, new));
2278 
2279 	if (new & (1U << BTREE_NODE_need_write))
2280 		return;
2281 do_write:
2282 	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
2283 
2284 	atomic_long_dec(&c->btree_cache.nr_dirty);
2285 
2286 	BUG_ON(btree_node_fake(b));
2287 	BUG_ON((b->will_make_reachable != 0) != !b->written);
2288 
2289 	BUG_ON(b->written >= btree_sectors(c));
2290 	BUG_ON(b->written & (block_sectors(c) - 1));
2291 	BUG_ON(bset_written(b, btree_bset_last(b)));
2292 	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
2293 	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
2294 
2295 	bch2_sort_whiteouts(c, b);
2296 
2297 	sort_iter_stack_init(&sort_iter, b);
2298 
2299 	bytes = !b->written
2300 		? sizeof(struct btree_node)
2301 		: sizeof(struct btree_node_entry);
2302 
2303 	bytes += b->whiteout_u64s * sizeof(u64);
2304 
2305 	for_each_bset(b, t) {
2306 		i = bset(b, t);
2307 
2308 		if (bset_written(b, i))
2309 			continue;
2310 
2311 		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
2312 		sort_iter_add(&sort_iter.iter,
2313 			      btree_bkey_first(b, t),
2314 			      btree_bkey_last(b, t));
2315 		seq = max(seq, le64_to_cpu(i->journal_seq));
2316 	}
2317 
2318 	BUG_ON(b->written && !seq);
2319 
2320 	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
2321 	bytes += 8;
2322 
2323 	/* buffer must be a multiple of the block size */
2324 	bytes = round_up(bytes, block_bytes(c));
2325 
2326 	data = btree_bounce_alloc(c, bytes, &used_mempool);
2327 
2328 	if (!b->written) {
2329 		bn = data;
2330 		*bn = *b->data;
2331 		i = &bn->keys;
2332 	} else {
2333 		bne = data;
2334 		bne->keys = b->data->keys;
2335 		i = &bne->keys;
2336 	}
2337 
2338 	i->journal_seq	= cpu_to_le64(seq);
2339 	i->u64s		= 0;
2340 
2341 	sort_iter_add(&sort_iter.iter,
2342 		      unwritten_whiteouts_start(b),
2343 		      unwritten_whiteouts_end(b));
2344 	SET_BSET_SEPARATE_WHITEOUTS(i, false);
2345 
2346 	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
2347 	le16_add_cpu(&i->u64s, u64s);
2348 
2349 	b->whiteout_u64s = 0;
2350 
2351 	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2352 
2353 	set_needs_whiteout(i, false);
2354 
2355 	/* do we have data to write? */
2356 	if (b->written && !i->u64s)
2357 		goto nowrite;
2358 
2359 	bytes_to_write = vstruct_end(i) - data;
2360 	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2361 
2362 	if (!b->written &&
2363 	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
2364 		BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);
2365 
2366 	memset(data + bytes_to_write, 0,
2367 	       (sectors_to_write << 9) - bytes_to_write);
2368 
2369 	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2370 	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2371 	BUG_ON(i->seq != b->data->keys.seq);
2372 
2373 	i->version = cpu_to_le16(c->sb.version);
2374 	SET_BSET_OFFSET(i, b->written);
2375 	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2376 
2377 	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2378 		validate_before_checksum = true;
2379 
2380 	/* validate_bset will be modifying: */
2381 	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2382 		validate_before_checksum = true;
2383 
2384 	/* if we're going to be encrypting, check metadata validity first: */
2385 	if (validate_before_checksum &&
2386 	    validate_bset_for_write(c, b, i, sectors_to_write))
2387 		goto err;
2388 
2389 	ret = bset_encrypt(c, i, b->written << 9);
2390 	if (bch2_fs_fatal_err_on(ret, c,
2391 			"encrypting btree node: %s", bch2_err_str(ret)))
2392 		goto err;
2393 
2394 	nonce = btree_nonce(i, b->written << 9);
2395 
2396 	if (bn)
2397 		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2398 	else
2399 		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2400 
2401 	/* if we're not encrypting, check metadata after checksumming: */
2402 	if (!validate_before_checksum &&
2403 	    validate_bset_for_write(c, b, i, sectors_to_write))
2404 		goto err;
2405 
2406 	/*
2407 	 * We handle btree write errors by immediately halting the journal -
2408 	 * after we've done that, we can't issue any subsequent btree writes
2409 	 * because they might have pointers to new nodes that failed to write.
2410 	 *
2411 	 * Furthermore, there's no point in doing any more btree writes because
2412 	 * with the journal stopped, we're never going to update the journal to
2413 	 * reflect that those writes were done and the data flushed from the
2414 	 * journal:
2415 	 *
2416 	 * Also on journal error, the pending write may have updates that were
2417 	 * never journalled (interior nodes, see btree_update_nodes_written()) -
2418 	 * it's critical that we don't do the write in that case otherwise we
2419 	 * will have updates visible that weren't in the journal:
2420 	 *
2421 	 * Make sure to update b->written so bch2_btree_init_next() doesn't
2422 	 * break:
2423 	 */
2424 	if (bch2_journal_error(&c->journal) ||
2425 	    c->opts.nochanges)
2426 		goto err;
2427 
2428 	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2429 
2430 	wbio = container_of(bio_alloc_bioset(NULL,
2431 				buf_pages(data, sectors_to_write << 9),
2432 				REQ_OP_WRITE|REQ_META,
2433 				GFP_NOFS,
2434 				&c->btree_bio),
2435 			    struct btree_write_bio, wbio.bio);
2436 	wbio_init(&wbio->wbio.bio);
2437 	wbio->data			= data;
2438 	wbio->data_bytes		= bytes;
2439 	wbio->sector_offset		= b->written;
2440 	wbio->start_time		= start_time;
2441 	wbio->wbio.c			= c;
2442 	wbio->wbio.used_mempool		= used_mempool;
2443 	wbio->wbio.first_btree_write	= !b->written;
2444 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
2445 	wbio->wbio.bio.bi_private	= b;
2446 
2447 	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2448 
2449 	bkey_copy(&wbio->key, &b->key);
2450 
2451 	b->written += sectors_to_write;
2452 
2453 	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2454 		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2455 			cpu_to_le16(b->written);
2456 
2457 	atomic64_inc(&c->btree_write_stats[type].nr);
2458 	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2459 
2460 	INIT_WORK(&wbio->work, btree_write_submit);
2461 	queue_work(c->btree_write_submit_wq, &wbio->work);
2462 	return;
2463 err:
2464 	set_btree_node_noevict(b);
2465 	b->written += sectors_to_write;
2466 nowrite:
2467 	btree_bounce_free(c, bytes, used_mempool, data);
2468 	__btree_node_write_done(c, b, 0);
2469 }
2470 
2471 /*
2472  * Work that must be done with write lock held:
2473  */
2474 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2475 {
2476 	bool invalidated_iter = false;
2477 	struct btree_node_entry *bne;
2478 
2479 	if (!btree_node_just_written(b))
2480 		return false;
2481 
2482 	BUG_ON(b->whiteout_u64s);
2483 
2484 	clear_btree_node_just_written(b);
2485 
2486 	/*
2487 	 * Note: immediately after write, bset_written() doesn't work - the
2488 	 * amount of data we had to write after compaction might have been
2489 	 * smaller than the offset of the last bset.
2490 	 *
2491 	 * However, we know that all bsets have been written here, as long as
2492 	 * we're still holding the write lock:
2493 	 */
2494 
2495 	/*
2496 	 * XXX: decide if we really want to unconditionally sort down to a
2497 	 * single bset:
2498 	 */
2499 	if (b->nsets > 1) {
2500 		btree_node_sort(c, b, 0, b->nsets);
2501 		invalidated_iter = true;
2502 	} else {
2503 		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2504 	}
2505 
2506 	for_each_bset(b, t)
2507 		set_needs_whiteout(bset(b, t), true);
2508 
2509 	bch2_btree_verify(c, b);
2510 
2511 	/*
2512 	 * If later we don't unconditionally sort down to a single bset, we have
2513 	 * to ensure this is still true:
2514 	 */
2515 	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2516 
2517 	bne = want_new_bset(c, b);
2518 	if (bne)
2519 		bch2_bset_init_next(b, bne);
2520 
2521 	bch2_btree_build_aux_trees(b);
2522 
2523 	return invalidated_iter;
2524 }
2525 
2526 /*
2527  * Use this one if the node is intent locked:
2528  */
2529 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2530 			   enum six_lock_type lock_type_held,
2531 			   unsigned flags)
2532 {
2533 	if (lock_type_held == SIX_LOCK_intent ||
2534 	    (lock_type_held == SIX_LOCK_read &&
2535 	     six_lock_tryupgrade(&b->c.lock))) {
2536 		__bch2_btree_node_write(c, b, flags);
2537 
2538 		/* don't cycle lock unnecessarily: */
2539 		if (btree_node_just_written(b) &&
2540 		    six_trylock_write(&b->c.lock)) {
2541 			bch2_btree_post_write_cleanup(c, b);
2542 			six_unlock_write(&b->c.lock);
2543 		}
2544 
2545 		if (lock_type_held == SIX_LOCK_read)
2546 			six_lock_downgrade(&b->c.lock);
2547 	} else {
2548 		__bch2_btree_node_write(c, b, flags);
2549 		if (lock_type_held == SIX_LOCK_write &&
2550 		    btree_node_just_written(b))
2551 			bch2_btree_post_write_cleanup(c, b);
2552 	}
2553 }
2554 
2555 void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b,
2556 				 enum six_lock_type lock_type_held,
2557 				 unsigned flags)
2558 {
2559 	struct bch_fs *c = trans->c;
2560 
2561 	if (lock_type_held == SIX_LOCK_intent ||
2562 	    (lock_type_held == SIX_LOCK_read &&
2563 	     six_lock_tryupgrade(&b->c.lock))) {
2564 		__bch2_btree_node_write(c, b, flags);
2565 
2566 		/* don't cycle lock unnecessarily: */
2567 		if (btree_node_just_written(b) &&
2568 		    six_trylock_write(&b->c.lock)) {
2569 			bch2_btree_post_write_cleanup(c, b);
2570 			__bch2_btree_node_unlock_write(trans, b);
2571 		}
2572 
2573 		if (lock_type_held == SIX_LOCK_read)
2574 			six_lock_downgrade(&b->c.lock);
2575 	} else {
2576 		__bch2_btree_node_write(c, b, flags);
2577 		if (lock_type_held == SIX_LOCK_write &&
2578 		    btree_node_just_written(b))
2579 			bch2_btree_post_write_cleanup(c, b);
2580 	}
2581 }
2582 
2583 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2584 {
2585 	struct bucket_table *tbl;
2586 	struct rhash_head *pos;
2587 	struct btree *b;
2588 	unsigned i;
2589 	bool ret = false;
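	/*
	 * We can't sleep while holding rcu_read_lock(), so once we find a
	 * node with the flag set, drop RCU, wait for that node's IO to
	 * finish, and rescan from the top:
	 */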
2590 restart:
2591 	rcu_read_lock();
2592 	for_each_cached_btree(b, c, tbl, i, pos)
2593 		if (test_bit(flag, &b->flags)) {
2594 			rcu_read_unlock();
2595 			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2596 			ret = true;
2597 			goto restart;
2598 		}
2599 	rcu_read_unlock();
2600 
2601 	return ret;
2602 }
2603 
2604 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2605 {
2606 	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2607 }
2608 
2609 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2610 {
2611 	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2612 }
2613 
2614 static const char * const bch2_btree_write_types[] = {
2615 #define x(t, n) [n] = #t,
2616 	BCH_BTREE_WRITE_TYPES()
2617 	NULL
2618 };
2619 
2620 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2621 {
2622 	printbuf_tabstop_push(out, 20);
2623 	printbuf_tabstop_push(out, 10);
2624 
2625 	prt_printf(out, "\tnr\tsize\n");
2626 
2627 	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2628 		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
2629 		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);
2630 
2631 		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
2632 		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2633 		prt_newline(out);
2634 	}
2635 }
2636