// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{
	prt_printf(out, "btree=%s l=%u seq %llx\n",
		   bch2_btree_id_str(BTREE_NODE_ID(bn)),
		   (unsigned) BTREE_NODE_LEVEL(bn), bn->keys.seq);
	prt_str(out, "min: ");
	bch2_bpos_to_text(out, bn->min_key);
	prt_newline(out);
	prt_str(out, "max: ");
	bch2_bpos_to_text(out, bn->max_key);
}

void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight_inner(b);
	clear_btree_node_write_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_p_next(start);
	     k != end;
	     p = k, k = bkey_p_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
	}
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
		k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		kvfree(p);
}

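/*
 * Editor's note: allocation strategy below - first try an opportunistic
 * kvmalloc() that can fail without blocking, then fall back to the bounce
 * mempool, which may block but is guaranteed to succeed.
 * memalloc_nofs_save() keeps reclaim from recursing into the filesystem
 * while we allocate.
 */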
static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > c->opts.btree_node_size);

	*used_mempool = false;
	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
	}
	memalloc_nofs_restore(flags);
	return p;
}

static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
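	/*
	 * Editor's note: n is the live heap size and a counts elements left
	 * to heapify during the initial build; b, c and d walk parent/child
	 * indices while sifting down, with the children of b at 2*b+1 and
	 * 2*b+2 (e.g. for nr == 5, ptrs[1]'s children are ptrs[3] and
	 * ptrs[4]).
	 */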
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
					ptrs[c],
					ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
				       ptrs[a],
				       ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
		}
	}
}

static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(b);
	     k != unwritten_whiteouts_end(b);
	     k = bkey_p_next(k))
		*--ptrs = k;
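	/*
	 * Editor's note: the pointer array is built downward from the end of
	 * the bounce buffer, while sorted keys are then copied upward from
	 * its start; every key is at least one u64, the size of the pointer
	 * slot it frees, so the two regions cannot collide.
	 */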

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_p_copy(k, *ptrs);
		k = bkey_p_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

	memcpy_u64s(unwritten_whiteouts_start(b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}

static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (src != dst)
			ret = true;

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start	= btree_bkey_first(b, t);
		end	= btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_p_next(k);

			if (!bkey_deleted(k)) {
				bkey_p_copy(out, k);
				out = bkey_p_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return bch2_drop_whiteouts(b, mode);
}

static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx)
{
	struct btree_node *out;
	struct sort_iter_stack sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_stack_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_buf_bytes(b)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_buf_bytes(b));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	nr = bch2_sort_repack(btree_bset_first(dst),
			      src, &src_iter,
			      &dst->format,
			      true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}

/*
 * We're about to add another bset to the btree node, so if there are
 * currently too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx, b->nsets);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx);
		ret = true;
	}

	return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
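/*
 * Editor's note, worked example with illustrative numbers: if
 * ilog2(btree_max_u64s(c)) == 15 and BTREE_WRITE_SET_U64s_BITS == 9, then
 * mid_u64s_bits == 12, i.e. we compact once the middle bset exceeds 4096
 * u64s - the geometric mean of 2^15 and 2^9.
 */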
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
	unsigned mid_u64s_bits =
		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new
 * bset if @b doesn't already have one.
 *
 * May sort existing bsets (which invalidates iterators); the caller is
 * notified via bch2_trans_node_reinit_iter().
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
	BUG_ON(bset_written(b, bset(b, &b->set[1])));
	BUG_ON(btree_node_just_written(b));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b) &&
	    should_compact_all(c, b)) {
		bch2_btree_node_write(c, b, SIX_LOCK_write,
				      BTREE_WRITE_init_next_bset);
		reinit_iter = true;
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	if (reinit_iter)
		bch2_trans_node_reinit_iter(trans, b);
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  struct btree *b, struct bset *i, struct bkey_packed *k,
			  unsigned offset, int write)
{
	prt_printf(out, bch2_log_msg(c, "%s"),
		   write == READ
		   ? "error validating btree node "
		   : "corrupt btree node before write ");
	if (ca)
		prt_printf(out, "on %s ", ca->name);
	prt_printf(out, "at btree ");
	bch2_btree_pos_to_text(out, c, b);

	printbuf_indent_add(out, 2);

	prt_printf(out, "\nnode offset %u/%u",
		   b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
	if (i)
		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
	if (k)
		prt_printf(out, " bset byte offset %lu",
			   (unsigned long)(void *)k -
			   ((unsigned long)(void *)i & ~511UL));
	prt_str(out, ": ");
}

__printf(10, 11)
static int __btree_err(int ret,
		       struct bch_fs *c,
		       struct bch_dev *ca,
		       struct btree *b,
		       struct bset *i,
		       struct bkey_packed *k,
		       int write,
		       bool have_retry,
		       enum bch_sb_error_id err_type,
		       const char *fmt, ...)
{
	struct printbuf out = PRINTBUF;
	bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
	va_list args;

	btree_err_msg(&out, c, ca, b, i, k, b->written, write);

	va_start(args, fmt);
	prt_vprintf(&out, fmt, args);
	va_end(args);

	if (write == WRITE) {
		bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = c->opts.errors == BCH_ON_ERROR_continue
			? 0
			: -BCH_ERR_fsck_errors_not_fixed;
		goto out;
	}

	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
		ret = -BCH_ERR_btree_node_read_err_fixable;
	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
		ret = -BCH_ERR_btree_node_read_err_bad_node;

	if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
		bch2_sb_error_count(c, err_type);

	switch (ret) {
	case -BCH_ERR_btree_node_read_err_fixable:
		ret = !silent
			? __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf)
			: -BCH_ERR_fsck_fix;
		if (ret != -BCH_ERR_fsck_fix &&
		    ret != -BCH_ERR_fsck_ignore)
			goto fsck_err;
		ret = -BCH_ERR_fsck_fix;
		break;
	case -BCH_ERR_btree_node_read_err_want_retry:
	case -BCH_ERR_btree_node_read_err_must_retry:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		break;
	case -BCH_ERR_btree_node_read_err_bad_node:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = bch2_topology_error(c);
		break;
	case -BCH_ERR_btree_node_read_err_incompatible:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = -BCH_ERR_fsck_errors_not_fixed;
		break;
	default:
		BUG();
	}
out:
fsck_err:
	printbuf_exit(&out);
	return ret;
}

#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)		\
({									\
	int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry,	\
			       BCH_FSCK_ERR_##_err_type,		\
			       msg, ##__VA_ARGS__);			\
									\
	if (_ret != -BCH_ERR_fsck_fix) {				\
		ret = _ret;						\
		goto fsck_err;						\
	}								\
									\
	*saw_error = true;						\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
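
/*
 * Editor's note: btree_err_on() evaluates to its condition, so callers can
 * branch on whether the error fired and was fixed; __btree_err() results
 * other than -BCH_ERR_fsck_fix bail out through the caller's fsck_err
 * label, and fixed errors set *saw_error.
 */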

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k;

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
				break;

		if (k != i->start) {
			unsigned shift = (u64 *) k - (u64 *) i->start;

			memmove_u64s_down(i->start, k,
					  (u64 *) vstruct_end(i) - (u64 *) k);
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
			set_btree_bset_end(b, t);
		}

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
				break;

		if (k != vstruct_last(i)) {
			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
			set_btree_bset_end(b, t);
		}
	}

	/*
	 * Always rebuild search trees: eytzinger search tree nodes directly
	 * depend on the values of min/max key:
	 */
	bch2_bset_set_no_aux_tree(b, b->set);
	bch2_btree_build_aux_trees(b);
	b->nr = bch2_btree_node_count_keys(b);

	struct bkey_s_c k;
	struct bkey unpacked;
	struct btree_node_iter iter;
	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
	}
}

static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned offset, unsigned sectors,
			 int write, bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	btree_err_on(!bch2_version_compatible(version),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "unsupported bset version %u.%u",
		     BCH_VERSION_MAJOR(version),
		     BCH_VERSION_MINOR(version));

	if (btree_err_on(version < c->sb.version_min,
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_older_than_sb_min,
			 "bset version %u older than superblock version_min %u",
			 version, c->sb.version_min)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version_min = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	if (btree_err_on(BCH_VERSION_MAJOR(version) >
			 BCH_VERSION_MAJOR(c->sb.version),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_newer_than_sb,
			 "bset version %u newer than superblock version %u",
			 version, c->sb.version)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "BSET_SEPARATE_WHITEOUTS no longer supported");

	if (!write &&
	    btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, ca, b, i, NULL,
			 bset_past_end_of_btree_node,
			 "bset past end of btree node (offset %u len %u but written %zu)",
			 offset, sectors, ptr_written ?: btree_sectors(c))) {
		i->u64s = 0;
		ret = 0;
		goto out;
	}

	btree_err_on(offset && !i->u64s,
		     -BCH_ERR_btree_node_read_err_fixable,
		     c, ca, b, i, NULL,
		     bset_empty,
		     "empty bset");

	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
		     -BCH_ERR_btree_node_read_err_want_retry,
		     c, ca, b, i, NULL,
		     bset_wrong_sector_offset,
		     "bset at wrong sector offset");

	if (!offset) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			/* XXX endianness */
			btree_err_on(bp->seq != bn->keys.seq,
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     bset_bad_seq,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_btree,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_level,
			     "incorrect level");

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bad_min_key,
				     "incorrect min_key: got %s should be %s",
				     (printbuf_reset(&buf1),
				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
				     (printbuf_reset(&buf2),
				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
		}

		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_max_key,
			     "incorrect max key %s",
			     (printbuf_reset(&buf1),
			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
			     -BCH_ERR_btree_node_read_err_bad_node,
			     c, ca, b, i, NULL,
			     btree_node_bad_format,
			     "invalid bkey format: %s\n %s", buf1.buf,
			     (printbuf_reset(&buf2),
			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
		printbuf_reset(&buf1);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
out:
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

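/*
 * Editor's note: the ?: chain below returns the first nonzero error -
 * structural key validation, then containment in the node's key range
 * (skipped when topology repair already updated the range), then full
 * value validation on the write path only.
 */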
static int bset_key_validate(struct bch_fs *c, struct btree *b,
			     struct bkey_s_c k,
			     bool updated_range, int rw)
{
	return __bch2_bkey_validate(c, k, btree_node_type(b), 0) ?:
		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, 0) : 0) ?:
		(rw == WRITE ? bch2_bkey_val_validate(c, k, 0) : 0);
}

static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
			      struct bset *i, struct bkey_packed *k)
{
	if (bkey_p_next(k) > vstruct_last(i))
		return false;

	if (k->format > KEY_FORMAT_CURRENT)
		return false;

	if (!bkeyp_u64s_valid(&b->format, k))
		return false;

	struct bkey tmp;
	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
	return !__bch2_bkey_validate(c, u.s_c, btree_node_type(b), BCH_VALIDATE_silent);
}

static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			      struct bset *i, int write,
			      bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	struct printbuf buf = PRINTBUF;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	int ret = 0;

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;
		unsigned next_good_key;

		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_past_bset_end,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_format,
				 "invalid bkey format %u", k->format))
			goto drop_this_key;

		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_u64s,
				 "bad k->u64s %u (min %u max %zu)", k->u64s,
				 bkeyp_key_u64s(&b->format, k),
				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
			goto drop_this_key;

		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		ret = bset_key_validate(c, b, u.s_c, updated_range, write);
		if (ret == -BCH_ERR_fsck_delete_bkey)
			goto drop_this_key;
		if (ret)
			goto fsck_err;

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
			struct bkey up = bkey_unpack_key(b, prev);

			printbuf_reset(&buf);
			prt_printf(&buf, "keys out of order: ");
			bch2_bkey_to_text(&buf, &up);
			prt_printf(&buf, " > ");
			bch2_bkey_to_text(&buf, u.k);

			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
				      c, NULL, b, i, k,
				      btree_node_bkey_out_of_order,
				      "%s", buf.buf))
				goto drop_this_key;
		}

		prev = k;
		k = bkey_p_next(k);
		continue;
drop_this_key:
		next_good_key = k->u64s;

		if (!next_good_key ||
		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
		     version >= bcachefs_metadata_version_snapshot)) {
			/*
			 * only do scanning if bch2_bkey_compat() has nothing to
			 * do
			 */

			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
				for (next_good_key = 1;
				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
				     next_good_key++)
					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
						goto got_good_key;
			}

			/*
			 * didn't find a good key, have to truncate the rest of
			 * the bset
			 */
			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
		}
got_good_key:
		le16_add_cpu(&i->u64s, -next_good_key);
		memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b, bool have_retry, bool *saw_error)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bset *i;
	bool used_mempool, blacklisted;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	unsigned u64s;
	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
	u64 max_journal_seq = 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0, retry_read = 0, write = READ;
	u64 start_time = local_clock();

	b->version_ondisk = U16_MAX;
	/* We might get called multiple times on read retry: */
	b->written = 0;

	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);

	if (bch2_meta_read_fault("btree"))
		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
			  c, ca, b, NULL, NULL,
			  btree_node_fault_injected,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     -BCH_ERR_btree_node_read_err_must_retry,
		     c, ca, b, NULL, NULL,
		     btree_node_bad_magic,
		     "bad magic: want %llx, got %llx",
		     bset_magic(c), le64_to_cpu(b->data->magic));

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		bch2_bpos_to_text(&buf, b->data->min_key);
		prt_str(&buf, "-");
		bch2_bpos_to_text(&buf, b->data->max_key);

		btree_err_on(b->data->keys.seq != bp->seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "got wrong btree node: got\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	} else {
		btree_err_on(!b->data->keys.seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "bad btree header: seq 0\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	}

	while (b->written < (ptr_written ?: btree_sectors(c))) {
		unsigned sectors;
		struct nonce nonce;
		bool first = !b->written;
		bool csum_bad;

		if (!b->written) {
			i = &b->data->keys;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_unknown_csum,
				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);

			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
			csum_bad = bch2_crc_cmp(b->data->csum, csum);
			if (csum_bad)
				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

			btree_err_on(csum_bad,
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_bad_csum,
				     "%s",
				     (printbuf_reset(&buf),
				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
				      buf.buf));

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
						 "decrypting btree node: %s", bch2_err_str(ret)))
				goto fsck_err;

			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
				     -BCH_ERR_btree_node_read_err_incompatible,
				     c, NULL, b, NULL, NULL,
				     btree_node_unsupported_version,
				     "btree node does not have NEW_EXTENT_OVERWRITE set");

			sectors = vstruct_sectors(b->data, c->block_bits);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_unknown_csum,
				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
			csum_bad = bch2_crc_cmp(bne->csum, csum);
			if (ca && csum_bad)
				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

			btree_err_on(csum_bad,
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_bad_csum,
				     "%s",
				     (printbuf_reset(&buf),
				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
				      buf.buf));

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
						 "decrypting btree node: %s", bch2_err_str(ret)))
				goto fsck_err;

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		b->version_ondisk = min(b->version_ondisk,
					le16_to_cpu(i->version));

		ret = validate_bset(c, ca, b, i, b->written, sectors,
				    READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     bset_blacklisted_journal_seq,
			     "first btree node bset has blacklisted journal seq (%llu)",
			     le64_to_cpu(i->journal_seq));

		btree_err_on(blacklisted && ptr_written,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     first_bset_blacklisted_journal_seq,
			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
			     le64_to_cpu(i->journal_seq),
			     b->written, b->written + sectors, ptr_written);

		b->written += sectors;

		if (blacklisted && !first)
			continue;

		sort_iter_add(iter,
			      vstruct_idx(i, 0),
			      vstruct_last(i));

		max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq));
	}

	if (ptr_written) {
		btree_err_on(b->written < ptr_written,
			     -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_data_missing,
			     "btree node data missing: expected %u sectors, found %u",
			     ptr_written, b->written);
	} else {
		for (bne = write_block(b);
		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
		     bne = (void *) bne + block_bytes(c))
			btree_err_on(bne->keys.seq == b->data->keys.seq &&
				     !bch2_journal_seq_is_blacklisted(c,
								      le64_to_cpu(bne->keys.journal_seq),
								      true),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bset_after_end,
				     "found bset signature after last bset");
	}

	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
	sorted->keys.u64s = 0;

	set_btree_bset(b, b->set, &b->data->keys);

	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
	memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
	       btree_buf_bytes(b) -
	       sizeof(struct btree_node) -
	       b->nr.live_u64s * sizeof(u64));

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;
	b->data->keys.journal_seq = cpu_to_le64(max_journal_seq);

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);

	if (updated_range)
		bch2_btree_node_drop_keys_outside_node(b);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);

		ret = bch2_bkey_val_validate(c, u.s_c, READ);
		if (ret == -BCH_ERR_fsck_delete_bkey ||
		    (bch2_inject_invalid_keys &&
		     !bversion_cmp(u.k->bversion, MAX_VERSION))) {
			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_p_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			continue;
		}
		if (ret)
			goto fsck_err;

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_p_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	rcu_read_lock();
	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
		struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);

		if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
			set_btree_node_need_rewrite(b);
	}
	rcu_read_unlock();

	if (!ptr_written)
		set_btree_node_need_rewrite(b);
out:
	mempool_free(iter, &c->fill_iter);
	printbuf_exit(&buf);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
	return retry_read;
fsck_err:
	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
	    ret == -BCH_ERR_btree_node_read_err_must_retry) {
		retry_read = 1;
	} else {
		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
	}
	goto out;
}

static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c	= rb->c;
	struct bch_dev *ca	= rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
	struct btree *b		= rb->b;
	struct bio *bio		= &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	struct printbuf buf = PRINTBUF;
	bool saw_error = false;
	bool retry = false;
	bool can_retry;

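	/*
	 * Editor's note: we jump into the middle of the retry loop - the
	 * first pass validates the bio that already completed; later passes
	 * reissue the read, possibly to a different replica.
	 */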
	goto start;
	while (1) {
		retry = true;
		bch_info(c, "retrying read");
		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
		rb->have_ioref		= ca != NULL;
		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_buf_bytes(b);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}
start:
		printbuf_reset(&buf);
		bch2_btree_pos_to_text(&buf, c, b);
		bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read,
				   "btree read error %s for %s",
				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
		if (rb->have_ioref)
			percpu_ref_put(&ca->io_ref);
		rb->have_ioref = false;

		bch2_mark_io_failure(&failed, &rb->pick);

		can_retry = bch2_bkey_pick_read_device(c,
					bkey_i_to_s_c(&b->key),
					&failed, &rb->pick) > 0;

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
			if (retry)
				bch_info(c, "retry success");
			break;
		}

		saw_error = true;

		if (!can_retry) {
			set_btree_node_read_error(b);
			bch2_btree_lost_data(c, b->c.btree_id);
			break;
		}
	}

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);

	if (saw_error &&
	    !btree_node_read_error(b) &&
	    c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
		printbuf_reset(&buf);
		bch2_bpos_to_text(&buf, b->key.k.p);
		bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
				    __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);

		bch2_btree_node_rewrite_async(c, b);
	}

	printbuf_exit(&buf);
	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	queue_work(c->btree_read_complete_wq, &rb->work);
}

struct btree_node_read_all {
	struct closure		cl;
	struct bch_fs		*c;
	struct btree		*b;
	unsigned		nr;
	void			*buf[BCH_REPLICAS_MAX];
	struct bio		*bio[BCH_REPLICAS_MAX];
	blk_status_t		err[BCH_REPLICAS_MAX];
};

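/*
 * Editor's note: on disk a node is a btree_node header (containing the first
 * bset) followed by block-aligned btree_node_entries carrying the same seq
 * as the header; we walk them to count how many sectors were actually
 * written.
 */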
static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < btree_sectors(c)) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < btree_sectors(c)) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}

static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
	closure_type(ra, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	struct printbuf buf = PRINTBUF;
	bool dump_bset_maps = false;
	bool have_retry = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written = 0, written2 = 0;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
	bool _saw_error = false, *saw_error = &_saw_error;

	for (i = 0; i < ra->nr; i++) {
		struct btree_node *bn = ra->buf[i];

		if (ra->err[i])
			continue;

		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
		    (seq && seq != bn->keys.seq))
			continue;

		if (best < 0) {
			best = i;
			written = btree_node_sectors_written(c, bn);
			continue;
		}

		written2 = btree_node_sectors_written(c, ra->buf[i]);
		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_sectors_written_mismatch,
				 "btree node sectors written mismatch: %u != %u",
				 written, written2) ||
		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_bset_after_end,
				 "found bset signature after last bset") ||
		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_data_mismatch,
				 "btree node replicas content mismatch"))
			dump_bset_maps = true;

		if (written2 > written) {
			written = written2;
			best = i;
		}
	}
fsck_err:
	if (dump_bset_maps) {
		for (i = 0; i < ra->nr; i++) {
			struct btree_node *bn = ra->buf[i];
			struct btree_node_entry *bne = NULL;
			unsigned offset = 0, sectors;
			bool gap = false;

			if (ra->err[i])
				continue;

			printbuf_reset(&buf);

			while (offset < btree_sectors(c)) {
				if (!offset) {
					sectors = vstruct_sectors(bn, c->block_bits);
				} else {
					bne = ra->buf[i] + (offset << 9);
					if (bne->keys.seq != bn->keys.seq)
						break;
					sectors = vstruct_sectors(bne, c->block_bits);
				}

				prt_printf(&buf, " %u-%u", offset, offset + sectors);
				if (bne && bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
					prt_printf(&buf, "*");
				offset += sectors;
			}

			while (offset < btree_sectors(c)) {
				bne = ra->buf[i] + (offset << 9);
				if (bne->keys.seq == bn->keys.seq) {
					if (!gap)
						prt_printf(&buf, " GAP");
					gap = true;

					sectors = vstruct_sectors(bne, c->block_bits);
					prt_printf(&buf, " %u-%u", offset, offset + sectors);
					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
						prt_printf(&buf, "*");
				}
				offset++;
			}

			bch_err(c, "replica %u:%s", i, buf.buf);
		}
	}

	if (best >= 0) {
		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
	} else {
		ret = -1;
	}

	if (ret) {
		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
	} else if (*saw_error)
		bch2_btree_node_rewrite_async(c, b);

	for (i = 0; i < ra->nr; i++) {
		mempool_free(ra->buf[i], &c->btree_bounce_pool);
		bio_put(ra->bio[i]);
	}

	closure_debug_destroy(&ra->cl);
	kfree(ra);
	printbuf_exit(&buf);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_all_replicas_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;
	struct btree_node_read_all *ra = rb->ra;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	ra->err[rb->idx] = bio->bi_status;
	closure_put(&ra->cl);
}

/*
 * XXX This allocates multiple times from the same mempools, and can deadlock
 * under sufficient memory pressure (but is only a debug path)
 */
static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
{
	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded pick;
	struct btree_node_read_all *ra;
	unsigned i;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;

	closure_init(&ra->cl, NULL);
	ra->c	= c;
	ra->b	= b;
	ra->nr	= bch2_bkey_nr_ptrs(k);

	for (i = 0; i < ra->nr; i++) {
		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
		ra->bio[i] = bio_alloc_bioset(NULL,
					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
					      REQ_OP_READ|REQ_SYNC|REQ_META,
					      GFP_NOFS,
					      &c->btree_bio);
	}

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
		struct btree_read_bio *rb =
			container_of(ra->bio[i], struct btree_read_bio, bio);
		rb->c			= c;
		rb->b			= b;
		rb->ra			= ra;
		rb->start_time		= local_clock();
		rb->have_ioref		= ca != NULL;
		rb->idx			= i;
		rb->pick		= pick;
		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));

		if (rb->have_ioref) {
			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
				     bio_sectors(&rb->bio));
			bio_set_dev(&rb->bio, ca->disk_sb.bdev);

			closure_get(&ra->cl);
			submit_bio(&rb->bio);
		} else {
			ra->err[i] = BLK_STS_REMOVED;
		}

		i++;
	}

	if (sync) {
		closure_sync(&ra->cl);
		btree_node_read_all_replicas_done(&ra->cl.work);
	} else {
		continue_at(&ra->cl, btree_node_read_all_replicas_done,
			    c->btree_read_complete_wq);
	}

	return 0;
}

void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
			  bool sync)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_and_count(c, btree_node_read, trans, b);

	if (bch2_verify_all_btree_replicas &&
	    !btree_node_read_all_replicas(c, b, sync))
		return;

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick);

	if (ret <= 0) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "btree node read error: no device to read from\n at ");
		bch2_btree_pos_to_text(&buf, c, b);
		bch_err_ratelimited(c, "%s", buf.buf);

		if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
			bch2_fatal_error(c);

		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
		clear_btree_node_read_in_flight(b);
		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
		printbuf_exit(&buf);
		return;
	}

	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);

	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_SYNC|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->b			= b;
	rb->ra			= NULL;
	rb->start_time		= local_clock();
	rb->have_ioref		= ca != NULL;
	rb->pick		= pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_end_io		= btree_node_read_endio;
	bch2_bio_map(bio, b->data, btree_buf_bytes(b));

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);
			bch2_latency_acct(ca, rb->start_time, READ);
			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(c->btree_read_complete_wq, &rb->work);
	}
}

static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
				  const struct bkey_i *k, unsigned level)
{
	struct bch_fs *c = trans->c;
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(trans, level != 0);
	bch2_btree_cache_cannibalize_unlock(trans);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	set_btree_node_read_in_flight(b);

	/* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */
	bch2_trans_unlock(trans);
	bch2_btree_node_read(trans, b, true);

	if (btree_node_read_error(b)) {
		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, b);
		mutex_unlock(&c->btree_cache.lock);

		ret = -BCH_ERR_btree_node_read_error;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
}

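/*
 * Editor's note: b->will_make_reachable holds a pointer to the btree_update
 * that will make this node reachable, with bit 0 set as a "still pending"
 * tag; the cmpxchg loop below clears the tag, and if it was set we drop the
 * update's closure reference.
 */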
static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
				      struct btree_write *w)
{
	unsigned long old, new;

	old = READ_ONCE(b->will_make_reachable);
	do {
		new = old;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while (!try_cmpxchg(&b->will_make_reachable, &old, new));

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}

static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);
	unsigned long old, new;
	unsigned type = 0;

	bch2_btree_complete_write(c, b, w);

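	/*
	 * Editor's note: if the node was redirtied while this write was in
	 * flight (and nothing blocks writing it), restart the write
	 * immediately, flipping write_idx so new updates pin the other
	 * btree_write slot; otherwise just clear the in-flight bits.
	 */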
1806 old = READ_ONCE(b->flags);
1807 do {
1808 new = old;
1809
1810 if ((old & (1U << BTREE_NODE_dirty)) &&
1811 (old & (1U << BTREE_NODE_need_write)) &&
1812 !(old & (1U << BTREE_NODE_never_write)) &&
1813 !(old & (1U << BTREE_NODE_write_blocked)) &&
1814 !(old & (1U << BTREE_NODE_will_make_reachable))) {
1815 new &= ~(1U << BTREE_NODE_dirty);
1816 new &= ~(1U << BTREE_NODE_need_write);
1817 new |= (1U << BTREE_NODE_write_in_flight);
1818 new |= (1U << BTREE_NODE_write_in_flight_inner);
1819 new |= (1U << BTREE_NODE_just_written);
1820 new ^= (1U << BTREE_NODE_write_idx);
1821
1822 type = new & BTREE_WRITE_TYPE_MASK;
1823 new &= ~BTREE_WRITE_TYPE_MASK;
1824 } else {
1825 new &= ~(1U << BTREE_NODE_write_in_flight);
1826 new &= ~(1U << BTREE_NODE_write_in_flight_inner);
1827 }
1828 } while (!try_cmpxchg(&b->flags, &old, new));
1829
1830 if (new & (1U << BTREE_NODE_write_in_flight))
1831 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
1832 else
1833 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
1834 }
1835
btree_node_write_done(struct bch_fs * c,struct btree * b)1836 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1837 {
1838 struct btree_trans *trans = bch2_trans_get(c);
1839
1840 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
1841
1842 /* we don't need transaction context anymore after we got the lock. */
1843 bch2_trans_put(trans);
1844 __btree_node_write_done(c, b);
1845 six_unlock_read(&b->c.lock);
1846 }
1847
static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c = wbio->wbio.c;
	struct btree *b = wbio->wbio.bio.bi_private;
	int ret = 0;

	btree_bounce_free(c,
		wbio->data_bytes,
		wbio->wbio.used_mempool,
		wbio->data);

	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
		ret = -BCH_ERR_btree_node_write_all_failed;
		goto err;
	}

	if (wbio->wbio.first_btree_write) {
		if (wbio->wbio.failed.nr) {
			/*
			 * presumably nothing to do here: on a node's first
			 * write it isn't reachable on disk yet, so there's no
			 * existing key to update
			 */
		}
	} else {
		ret = bch2_trans_do(c,
			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
					BCH_WATERMARK_interior_updates|
					BCH_TRANS_COMMIT_journal_reclaim|
					BCH_TRANS_COMMIT_no_enospc|
					BCH_TRANS_COMMIT_no_check_rw,
					!wbio->wbio.failed.nr));
		if (ret)
			goto err;
	}
out:
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
err:
	set_btree_node_noevict(b);
	bch2_fs_fatal_err_on(!bch2_err_matches(ret, EROFS), c,
			     "writing btree node: %s", bch2_err_str(ret));
	goto out;
}

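/*
 * Endio for a single (possibly split) btree write bio: record per-device
 * errors against the original bio's failed list, then either complete the
 * parent bio or - for the last bio of the node - punt the rest of
 * completion to btree_node_write_work().
 */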
static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio = to_wbio(bio);
	struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig = parent ?: wbio;
	struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
	struct bch_fs *c = wbio->c;
	struct btree *b = wbio->bio.bi_private;
	struct bch_dev *ca = wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
	unsigned long flags;

	if (wbio->have_ioref)
		bch2_latency_acct(ca, wbio->submit_time, WRITE);

	if (!ca ||
	    bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
			       "btree write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("btree")) {
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	if (wbio->have_ioref)
		percpu_ref_put(&ca->io_ref);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
		return;
	}

	clear_btree_node_write_in_flight_inner(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
	INIT_WORK(&wb->work, btree_node_write_work);
	queue_work(c->btree_io_complete_wq, &wb->work);
}

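/*
 * Last-chance validation before a bset goes out to disk: writing keys that
 * don't validate would persist corruption, so flag the filesystem
 * inconsistent and fail the write instead.
 */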
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{
	bool saw_error;

	int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key),
				     BKEY_TYPE_btree, WRITE);
	if (ret) {
		bch2_fs_inconsistent(c, "invalid btree node key before write");
		return ret;
	}

	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
	if (ret) {
		bch2_inconsistent_error(c);
		dump_stack();
	}

	return ret;
}

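/*
 * The pointers in wbio->key point at the start of the btree node; bsets
 * after the first are written at an offset within the node, so bump the
 * pointer offsets by sector_offset before submitting:
 */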
static void btree_write_submit(struct work_struct *work)
{
	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;

	bkey_copy(&tmp.k, &wbio->key);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
		ptr->offset += wbio->sector_offset;

	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
				  &tmp.k, false);
}

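/*
 * Write out the dirty bsets in a btree node: sort them into one contiguous
 * bounce buffer, checksum (and possibly encrypt) it, then submit via
 * btree_write_submit(). Callers may hold only a read lock; the dirty bit
 * serves as the lock against concurrent writers.
 *
 * Typical use, a sketch (most callers go through bch2_btree_node_write()
 * below, which also handles lock upgrade for post-write cleanup):
 *
 *	__bch2_btree_node_write(c, b, BTREE_WRITE_ONLY_IF_NEED);
 */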
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
	struct btree_write_bio *wbio;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	struct sort_iter_stack sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	bool validate_before_checksum = false;
	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
	void *data;
	int ret;

	if (flags & BTREE_WRITE_ALREADY_STARTED)
		goto do_write;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is
	 * our "lock" against racing with other threads that may be trying to
	 * start a write: we do a write iff we clear the dirty bit. Since
	 * setting the dirty bit requires a write lock, we can't race with
	 * other threads redirtying it:
	 */
	old = READ_ONCE(b->flags);
	do {
		new = old;

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
		    !(old & (1 << BTREE_NODE_need_write)))
			return;

		if (old &
		    ((1 << BTREE_NODE_never_write)|
		     (1 << BTREE_NODE_write_blocked)))
			return;

		if (b->written &&
		    (old & (1 << BTREE_NODE_will_make_reachable)))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight))
			return;

		if (flags & BTREE_WRITE_ONLY_IF_NEED)
			type = new & BTREE_WRITE_TYPE_MASK;
		new &= ~BTREE_WRITE_TYPE_MASK;

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |=  (1 << BTREE_NODE_write_in_flight);
		new |=  (1 << BTREE_NODE_write_in_flight_inner);
		new |=  (1 << BTREE_NODE_just_written);
		new ^=  (1 << BTREE_NODE_write_idx);
	} while (!try_cmpxchg_acquire(&b->flags, &old, new));

	if (new & (1U << BTREE_NODE_need_write))
		return;
do_write:
	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));

	atomic_long_dec(&c->btree_cache.nr_dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= btree_sectors(c));
	BUG_ON(b->written & (block_sectors(c) - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	bch2_sort_whiteouts(c, b);

	sort_iter_stack_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	BUG_ON(b->written && !seq);

	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;

	/* buffer must be a multiple of the block size */
	bytes = round_up(bytes, block_bytes(c));

	data = btree_bounce_alloc(c, bytes, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq = cpu_to_le64(seq);
	i->u64s = 0;

	sort_iter_add(&sort_iter.iter,
		      unwritten_whiteouts_start(b),
		      unwritten_whiteouts_end(b));
	SET_BSET_SEPARATE_WHITEOUTS(i, false);

	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
	le16_add_cpu(&i->u64s, u64s);

	b->whiteout_u64s = 0;

	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);

	set_needs_whiteout(i, false);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	if (!b->written &&
	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
		BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = cpu_to_le16(c->sb.version);
	SET_BSET_OFFSET(i, b->written);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
		validate_before_checksum = true;

	/* validate_bset will be modifying: */
	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	/* if we're going to be encrypting, check metadata validity first: */
	if (validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	ret = bset_encrypt(c, i, b->written << 9);
	if (bch2_fs_fatal_err_on(ret, c,
			"encrypting btree node: %s", bch2_err_str(ret)))
		goto err;

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal.
	 *
	 * Also, on journal error the pending write may contain updates that
	 * were never journalled (interior nodes - see
	 * btree_update_nodes_written()); it's critical that we don't do the
	 * write in that case, as we'd otherwise make updates visible that
	 * weren't in the journal.
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(NULL,
				buf_pages(data, sectors_to_write << 9),
				REQ_OP_WRITE|REQ_META,
				GFP_NOFS,
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data			= data;
	wbio->data_bytes		= bytes;
	wbio->sector_offset		= b->written;
	wbio->wbio.c			= c;
	wbio->wbio.used_mempool		= used_mempool;
	wbio->wbio.first_btree_write	= !b->written;
	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
	wbio->wbio.bio.bi_private	= b;

	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

	bkey_copy(&wbio->key, &b->key);

	b->written += sectors_to_write;

	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
			cpu_to_le16(b->written);

	atomic64_inc(&c->btree_write_stats[type].nr);
	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);

	INIT_WORK(&wbio->work, btree_write_submit);
	queue_work(c->btree_write_submit_wq, &wbio->work);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, bytes, used_mempool, data);
	__btree_node_write_done(c, b);
}

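/*
 * The write path continues asynchronously from here:
 * btree_write_submit() -> btree_node_write_endio() ->
 * btree_node_write_work() -> __btree_node_write_done(), which may kick off
 * another write if the node was redirtied in the meantime.
 */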
/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, 0, b->nsets);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t), true);

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we
	 * have to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}

/*
 * Use this when the caller already holds a lock on the node: with a read
 * lock we try to upgrade, so that post-write cleanup can be done
 * immediately rather than deferred to the next write:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held,
			   unsigned flags)
{
	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->c.lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}

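/*
 * Wait for all in-flight reads or writes (per @flag) on cached btree nodes
 * to complete. We can't sleep inside rcu_read_lock(), so when we find a
 * node with the flag set we drop RCU, wait on the bit, and restart the
 * walk from the top.
 */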
static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
	bool ret = false;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			ret = true;
			goto restart;
		}
	rcu_read_unlock();

	return ret;
}

bool bch2_btree_flush_all_reads(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

bool bch2_btree_flush_all_writes(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}

static const char * const bch2_btree_write_types[] = {
#define x(t, n)	[n] = #t,
	BCH_BTREE_WRITE_TYPES()
	NULL
};

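/*
 * Emit per-type btree write statistics: number of writes and mean write
 * size for each write type. Illustrative output (type names come from
 * BCH_BTREE_WRITE_TYPES(); the numbers here are made up):
 *
 *	                nr      size
 *	initial:        12      4.0k
 *	...
 */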
void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 20);
	printbuf_tabstop_push(out, 10);

	prt_printf(out, "\tnr\tsize\n");

	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
		u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
		u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);

		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
		prt_newline(out);
	}
}