// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{
	bch2_btree_id_level_to_text(out, BTREE_NODE_ID(bn), BTREE_NODE_LEVEL(bn));
	prt_printf(out, " seq %llx %llu\n", bn->keys.seq, BTREE_NODE_SEQ(bn));
	prt_str(out, "min: ");
	bch2_bpos_to_text(out, bn->min_key);
	prt_newline(out);
	prt_str(out, "max: ");
	bch2_bpos_to_text(out, bn->max_key);
}

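/*
 * Write-in-flight tracking: write_in_flight_inner covers just the device level
 * IO (it's cleared from the endio path), while write_in_flight is held until
 * the write has fully completed; waiters sleep on the corresponding flag bits.
 */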
void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight_inner(b);
	clear_btree_node_write_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_p_next(start);
	     k != end;
	     p = k, k = bkey_p_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
	}
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
		k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		kvfree(p);
}

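/*
 * Allocate a bounce buffer for sorting/writing btree nodes: try a cheap
 * non-blocking kvmalloc() first, and fall back to the preallocated mempool
 * (which may block but can't fail) under memory pressure. The nofs section
 * keeps reclaim from recursing back into the filesystem.
 */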
static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > c->opts.btree_node_size);

	*used_mempool = false;
	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
	}
	memalloc_nofs_restore(flags);
	return p;
}

static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
						 ptrs[c],
						 ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
					    ptrs[a],
					    ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
		}
	}
}

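/*
 * Unwritten whiteouts accumulate in a region at the end of the node buffer;
 * sort an array of pointers to them rather than the keys themselves, so keys
 * aren't shuffled around during the sort, then copy them back in order.
 */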
static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(b);
	     k != unwritten_whiteouts_end(b);
	     k = bkey_p_next(k))
		*--ptrs = k;

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_p_copy(k, *ptrs);
		k = bkey_p_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

	memcpy_u64s(unwritten_whiteouts_start(b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}

static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (src != dst)
			ret = true;

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start = btree_bkey_first(b, t);
		end = btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_p_next(k);

			if (!bkey_deleted(k)) {
				bkey_p_copy(out, k);
				out = bkey_p_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return bch2_drop_whiteouts(b, mode);
}

static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx)
{
	struct btree_node *out;
	struct sort_iter_stack sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_stack_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_buf_bytes(b)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_buf_bytes(b));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
		b->set[i] = b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	nr = bch2_sort_repack(btree_bset_first(dst),
			      src, &src_iter,
			      &dst->format,
			      true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s += nr.live_u64s;
	dst->nr.bset_u64s[0] += nr.bset_u64s[0];
	dst->nr.packed_keys += nr.packed_keys;
	dst->nr.unpacked_keys += nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}

/*
 * We're about to add another bset to the btree node, so if there are currently
 * too many bsets, sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx, b->nsets);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx);
		ret = true;
	}

	return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
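/*
 * (mid_u64s_bits below is the arithmetic mean of the logs of the two sizes, so
 * 1 << mid_u64s_bits is their geometric mean: the square root of the product
 * of btree_max_u64s(c) and the write set size, 1 << BTREE_WRITE_SET_U64s_BITS.)
 */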
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
	unsigned mid_u64s_bits =
		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * If we had to sort, iterators pointing into @b were invalidated and are
 * reinitialized here.
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
	BUG_ON(bset_written(b, bset(b, &b->set[1])));
	BUG_ON(btree_node_just_written(b));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b) &&
	    should_compact_all(c, b)) {
		bch2_btree_node_write_trans(trans, b, SIX_LOCK_write,
					    BTREE_WRITE_init_next_bset);
		reinit_iter = true;
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	if (reinit_iter)
		bch2_trans_node_reinit_iter(trans, b);
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  struct btree *b, struct bset *i, struct bkey_packed *k,
			  unsigned offset, int write)
{
	prt_printf(out, bch2_log_msg(c, "%s"),
		   write == READ
		   ? "error validating btree node "
		   : "corrupt btree node before write ");
	if (ca)
		prt_printf(out, "on %s ", ca->name);
	prt_printf(out, "at btree ");
	bch2_btree_pos_to_text(out, c, b);

	printbuf_indent_add(out, 2);

	prt_printf(out, "\nnode offset %u/%u",
		   b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
	if (i)
		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
	if (k)
		prt_printf(out, " bset byte offset %lu",
			   (unsigned long)(void *)k -
			   ((unsigned long)(void *)i & ~511UL));
	prt_str(out, ": ");
}

__printf(10, 11)
static int __btree_err(int ret,
		       struct bch_fs *c,
		       struct bch_dev *ca,
		       struct btree *b,
		       struct bset *i,
		       struct bkey_packed *k,
		       int write,
		       bool have_retry,
		       enum bch_sb_error_id err_type,
		       const char *fmt, ...)
{
	struct printbuf out = PRINTBUF;
	bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
	va_list args;

	btree_err_msg(&out, c, ca, b, i, k, b->written, write);

	va_start(args, fmt);
	prt_vprintf(&out, fmt, args);
	va_end(args);

	if (write == WRITE) {
		bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = c->opts.errors == BCH_ON_ERROR_continue
			? 0
			: -BCH_ERR_fsck_errors_not_fixed;
		goto out;
	}

	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
		ret = -BCH_ERR_btree_node_read_err_fixable;
	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
		ret = -BCH_ERR_btree_node_read_err_bad_node;

	if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
		bch2_sb_error_count(c, err_type);

	switch (ret) {
	case -BCH_ERR_btree_node_read_err_fixable:
		ret = !silent
			? __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf)
			: -BCH_ERR_fsck_fix;
		if (ret != -BCH_ERR_fsck_fix &&
		    ret != -BCH_ERR_fsck_ignore)
			goto fsck_err;
		ret = -BCH_ERR_fsck_fix;
		break;
	case -BCH_ERR_btree_node_read_err_want_retry:
	case -BCH_ERR_btree_node_read_err_must_retry:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		break;
	case -BCH_ERR_btree_node_read_err_bad_node:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = bch2_topology_error(c);
		break;
	case -BCH_ERR_btree_node_read_err_incompatible:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = -BCH_ERR_fsck_errors_not_fixed;
		break;
	default:
		BUG();
	}
out:
fsck_err:
	printbuf_exit(&out);
	return ret;
}

#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)		\
({									\
	int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry,	\
			       BCH_FSCK_ERR_##_err_type,		\
			       msg, ##__VA_ARGS__);			\
									\
	if (_ret != -BCH_ERR_fsck_fix) {				\
		ret = _ret;						\
		goto fsck_err;						\
	}								\
									\
	*saw_error = true;						\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
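
/*
 * btree_err()/btree_err_on() evaluate to true when the error was present and
 * fixed (so callers can apply the fix inline), and to false when there was no
 * error; when the error can't be fixed, they jump to the enclosing function's
 * fsck_err label with ret set.
 */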

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k;

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
				break;

		if (k != i->start) {
			unsigned shift = (u64 *) k - (u64 *) i->start;

			memmove_u64s_down(i->start, k,
					  (u64 *) vstruct_end(i) - (u64 *) k);
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
			set_btree_bset_end(b, t);
		}

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
				break;

		if (k != vstruct_last(i)) {
			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
			set_btree_bset_end(b, t);
		}
	}

	/*
	 * Always rebuild search trees: eytzinger search tree nodes directly
	 * depend on the values of min/max key:
	 */
	bch2_bset_set_no_aux_tree(b, b->set);
	bch2_btree_build_aux_trees(b);
	b->nr = bch2_btree_node_count_keys(b);

	struct bkey_s_c k;
	struct bkey unpacked;
	struct btree_node_iter iter;
	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
	}
}

static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned offset, unsigned sectors,
			 int write, bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	btree_err_on(!bch2_version_compatible(version),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "unsupported bset version %u.%u",
		     BCH_VERSION_MAJOR(version),
		     BCH_VERSION_MINOR(version));

	if (btree_err_on(version < c->sb.version_min,
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_older_than_sb_min,
			 "bset version %u older than superblock version_min %u",
			 version, c->sb.version_min)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version_min = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	if (btree_err_on(BCH_VERSION_MAJOR(version) >
			 BCH_VERSION_MAJOR(c->sb.version),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_newer_than_sb,
			 "bset version %u newer than superblock version %u",
			 version, c->sb.version)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "BSET_SEPARATE_WHITEOUTS no longer supported");

	if (!write &&
	    btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, ca, b, i, NULL,
			 bset_past_end_of_btree_node,
			 "bset past end of btree node (offset %u len %u but written %zu)",
			 offset, sectors, ptr_written ?: btree_sectors(c)))
		i->u64s = 0;

	btree_err_on(offset && !i->u64s,
		     -BCH_ERR_btree_node_read_err_fixable,
		     c, ca, b, i, NULL,
		     bset_empty,
		     "empty bset");

	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
		     -BCH_ERR_btree_node_read_err_want_retry,
		     c, ca, b, i, NULL,
		     bset_wrong_sector_offset,
		     "bset at wrong sector offset");

	if (!offset) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			/* XXX endianness */
			btree_err_on(bp->seq != bn->keys.seq,
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     bset_bad_seq,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_btree,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_level,
			     "incorrect level");

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bad_min_key,
				     "incorrect min_key: got %s should be %s",
				     (printbuf_reset(&buf1),
				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
				     (printbuf_reset(&buf2),
				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
		}

		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_max_key,
			     "incorrect max key %s",
			     (printbuf_reset(&buf1),
			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
			     -BCH_ERR_btree_node_read_err_bad_node,
			     c, ca, b, i, NULL,
			     btree_node_bad_format,
			     "invalid bkey format: %s\n %s", buf1.buf,
			     (printbuf_reset(&buf2),
			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
		printbuf_reset(&buf1);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b,
					struct bkey_s_c k,
					enum bch_validate_flags flags)
{
	return bch2_bkey_val_validate(c, k, (struct bkey_validate_context) {
		.from = BKEY_VALIDATE_btree_node,
		.level = b->c.level,
		.btree = b->c.btree_id,
		.flags = flags
	});
}

static int bset_key_validate(struct bch_fs *c, struct btree *b,
			     struct bkey_s_c k,
			     bool updated_range,
			     enum bch_validate_flags flags)
{
	struct bkey_validate_context from = (struct bkey_validate_context) {
		.from = BKEY_VALIDATE_btree_node,
		.level = b->c.level,
		.btree = b->c.btree_id,
		.flags = flags,
	};
	return __bch2_bkey_validate(c, k, from) ?:
		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?:
		(flags & BCH_VALIDATE_write ? btree_node_bkey_val_validate(c, b, k, flags) : 0);
}

static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
			      struct bset *i, struct bkey_packed *k)
{
	if (bkey_p_next(k) > vstruct_last(i))
		return false;

	if (k->format > KEY_FORMAT_CURRENT)
		return false;

	if (!bkeyp_u64s_valid(&b->format, k))
		return false;

	struct bkey tmp;
	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
	return !__bch2_bkey_validate(c, u.s_c,
				     (struct bkey_validate_context) {
					.from = BKEY_VALIDATE_btree_node,
					.level = b->c.level,
					.btree = b->c.btree_id,
					.flags = BCH_VALIDATE_silent
				     });
}

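/*
 * Keys that compare equal are allowed within a bset only if the whiteout
 * (deleted key) comes first: the deleted-key tiebreak below makes a live key
 * followed by an equal deleted key (or an exact duplicate) read as out of
 * order.
 */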
static inline int btree_node_read_bkey_cmp(const struct btree *b,
					   const struct bkey_packed *l,
					   const struct bkey_packed *r)
{
	return bch2_bkey_cmp_packed(b, l, r)
		?: (int) bkey_deleted(r) - (int) bkey_deleted(l);
}

static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			      struct bset *i, int write,
			      bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	struct printbuf buf = PRINTBUF;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	int ret = 0;

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;
		unsigned next_good_key;

		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_past_bset_end,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_format,
				 "invalid bkey format %u", k->format))
			goto drop_this_key;

		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_u64s,
				 "bad k->u64s %u (min %u max %zu)", k->u64s,
				 bkeyp_key_u64s(&b->format, k),
				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
			goto drop_this_key;

		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		ret = bset_key_validate(c, b, u.s_c, updated_range, write);
		if (ret == -BCH_ERR_fsck_delete_bkey)
			goto drop_this_key;
		if (ret)
			goto fsck_err;

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) {
			struct bkey up = bkey_unpack_key(b, prev);

			printbuf_reset(&buf);
			prt_printf(&buf, "keys out of order: ");
			bch2_bkey_to_text(&buf, &up);
			prt_printf(&buf, " > ");
			bch2_bkey_to_text(&buf, u.k);

			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
				      c, NULL, b, i, k,
				      btree_node_bkey_out_of_order,
				      "%s", buf.buf))
				goto drop_this_key;
		}

		prev = k;
		k = bkey_p_next(k);
		continue;
drop_this_key:
		next_good_key = k->u64s;

		if (!next_good_key ||
		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
		     version >= bcachefs_metadata_version_snapshot)) {
			/*
			 * only do scanning if bch2_bkey_compat() has nothing to
			 * do
			 */

			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
				for (next_good_key = 1;
				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
				     next_good_key++)
					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
						goto got_good_key;
			}

			/*
			 * didn't find a good key, have to truncate the rest of
			 * the bset
			 */
			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
		}
got_good_key:
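		/* Shrink the bset and shift the remaining keys down over the bad key(s): */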
		le16_add_cpu(&i->u64s, -next_good_key);
		memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
		set_btree_node_need_rewrite(b);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b, bool have_retry, bool *saw_error)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bset *i;
	bool used_mempool, blacklisted;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	unsigned u64s;
	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
	u64 max_journal_seq = 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0, retry_read = 0, write = READ;
	u64 start_time = local_clock();

	b->version_ondisk = U16_MAX;
	/* We might get called multiple times on read retry: */
	b->written = 0;

	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);

	if (bch2_meta_read_fault("btree"))
		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
			  c, ca, b, NULL, NULL,
			  btree_node_fault_injected,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     -BCH_ERR_btree_node_read_err_must_retry,
		     c, ca, b, NULL, NULL,
		     btree_node_bad_magic,
		     "bad magic: want %llx, got %llx",
		     bset_magic(c), le64_to_cpu(b->data->magic));

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		bch2_bpos_to_text(&buf, b->data->min_key);
		prt_str(&buf, "-");
		bch2_bpos_to_text(&buf, b->data->max_key);

		btree_err_on(b->data->keys.seq != bp->seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "got wrong btree node: got\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	} else {
		btree_err_on(!b->data->keys.seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "bad btree header: seq 0\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	}

	while (b->written < (ptr_written ?: btree_sectors(c))) {
		unsigned sectors;
		bool first = !b->written;

		if (first) {
			bne = NULL;
			i = &b->data->keys;
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;
		}

		struct nonce nonce = btree_nonce(i, b->written << 9);
		bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));

		btree_err_on(!good_csum_type,
			     bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i))
			     ? -BCH_ERR_btree_node_read_err_must_retry
			     : -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, i, NULL,
			     bset_unknown_csum,
			     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

		if (first) {
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
				bool csum_bad = bch2_crc_cmp(b->data->csum, csum);
				if (csum_bad)
					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

				btree_err_on(csum_bad,
					     -BCH_ERR_btree_node_read_err_want_retry,
					     c, ca, b, i, NULL,
					     bset_bad_csum,
					     "%s",
					     (printbuf_reset(&buf),
					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
					      buf.buf));

				ret = bset_encrypt(c, i, b->written << 9);
				if (bch2_fs_fatal_err_on(ret, c,
							 "decrypting btree node: %s", bch2_err_str(ret)))
					goto fsck_err;
			}

			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
				     -BCH_ERR_btree_node_read_err_incompatible,
				     c, NULL, b, NULL, NULL,
				     btree_node_unsupported_version,
				     "btree node does not have NEW_EXTENT_OVERWRITE set");

			sectors = vstruct_sectors(b->data, c->block_bits);
		} else {
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
				bool csum_bad = bch2_crc_cmp(bne->csum, csum);
				if (ca && csum_bad)
					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

				btree_err_on(csum_bad,
					     -BCH_ERR_btree_node_read_err_want_retry,
					     c, ca, b, i, NULL,
					     bset_bad_csum,
					     "%s",
					     (printbuf_reset(&buf),
					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
					      buf.buf));

				ret = bset_encrypt(c, i, b->written << 9);
				if (bch2_fs_fatal_err_on(ret, c,
							 "decrypting btree node: %s", bch2_err_str(ret)))
					goto fsck_err;
			}

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		b->version_ondisk = min(b->version_ondisk,
					le16_to_cpu(i->version));

		ret = validate_bset(c, ca, b, i, b->written, sectors,
				    READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     bset_blacklisted_journal_seq,
			     "first btree node bset has blacklisted journal seq (%llu)",
			     le64_to_cpu(i->journal_seq));

		btree_err_on(blacklisted && ptr_written,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     first_bset_blacklisted_journal_seq,
			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
			     le64_to_cpu(i->journal_seq),
			     b->written, b->written + sectors, ptr_written);

		b->written = min(b->written + sectors, btree_sectors(c));

		if (blacklisted && !first)
			continue;

		sort_iter_add(iter,
			      vstruct_idx(i, 0),
			      vstruct_last(i));

		max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq));
	}

	if (ptr_written) {
		btree_err_on(b->written < ptr_written,
			     -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_data_missing,
			     "btree node data missing: expected %u sectors, found %u",
			     ptr_written, b->written);
	} else {
		for (bne = write_block(b);
		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
		     bne = (void *) bne + block_bytes(c))
			btree_err_on(bne->keys.seq == b->data->keys.seq &&
				     !bch2_journal_seq_is_blacklisted(c,
								      le64_to_cpu(bne->keys.journal_seq),
								      true),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bset_after_end,
				     "found bset signature after last bset");
	}

	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
	sorted->keys.u64s = 0;

	set_btree_bset(b, b->set, &b->data->keys);

	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
	memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
	       btree_buf_bytes(b) -
	       sizeof(struct btree_node) -
	       b->nr.live_u64s * sizeof(u64));

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;
	b->data->keys.journal_seq = cpu_to_le64(max_journal_seq);

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);

	if (updated_range)
		bch2_btree_node_drop_keys_outside_node(b);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);

		ret = btree_node_bkey_val_validate(c, b, u.s_c, READ);
		if (ret == -BCH_ERR_fsck_delete_bkey ||
		    (bch2_inject_invalid_keys &&
		     !bversion_cmp(u.k->bversion, MAX_VERSION))) {
			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_p_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			set_btree_node_need_rewrite(b);
			continue;
		}
		if (ret)
			goto fsck_err;

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_p_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	rcu_read_lock();
	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
		struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);

		if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
			set_btree_node_need_rewrite(b);
	}
	rcu_read_unlock();

	if (!ptr_written)
		set_btree_node_need_rewrite(b);
out:
	mempool_free(iter, &c->fill_iter);
	printbuf_exit(&buf);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
	return retry_read;
fsck_err:
	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
	    ret == -BCH_ERR_btree_node_read_err_must_retry) {
		retry_read = 1;
	} else {
		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
	}
	goto out;
}

static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c = rb->c;
	struct bch_dev *ca = rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
	struct btree *b = rb->b;
	struct bio *bio = &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	struct printbuf buf = PRINTBUF;
	bool saw_error = false;
	bool retry = false;
	bool can_retry;

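	/*
	 * The bio submitted by our caller has already completed, so the first
	 * pass jumps straight into the loop body to validate it; only retries
	 * resubmit the read.
	 */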
	goto start;
	while (1) {
		retry = true;
		bch_info(c, "retrying read");
		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
		rb->have_ioref = ca != NULL;
		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector = rb->pick.ptr.offset;
		bio->bi_iter.bi_size = btree_buf_bytes(b);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}
start:
		printbuf_reset(&buf);
		bch2_btree_pos_to_text(&buf, c, b);
		bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read,
				   "btree read error %s for %s",
				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
		if (rb->have_ioref)
			percpu_ref_put(&ca->io_ref);
		rb->have_ioref = false;

		bch2_mark_io_failure(&failed, &rb->pick);

		can_retry = bch2_bkey_pick_read_device(c,
						       bkey_i_to_s_c(&b->key),
						       &failed, &rb->pick) > 0;

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
			if (retry)
				bch_info(c, "retry success");
			break;
		}

		saw_error = true;

		if (!can_retry) {
			set_btree_node_read_error(b);
			bch2_btree_lost_data(c, b->c.btree_id);
			break;
		}
	}

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);

	if ((saw_error ||
	     btree_node_need_rewrite(b)) &&
	    !btree_node_read_error(b) &&
	    c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
		if (saw_error) {
			printbuf_reset(&buf);
			bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
			prt_str(&buf, " ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
			bch_err_ratelimited(c, "%s: rewriting btree node due to error\n %s",
					    __func__, buf.buf);
		}

		bch2_btree_node_rewrite_async(c, b);
	}

	printbuf_exit(&buf);
	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c = rb->c;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	queue_work(c->btree_read_complete_wq, &rb->work);
}

struct btree_node_read_all {
	struct closure cl;
	struct bch_fs *c;
	struct btree *b;
	unsigned nr;
	void *buf[BCH_REPLICAS_MAX];
	struct bio *bio[BCH_REPLICAS_MAX];
	blk_status_t err[BCH_REPLICAS_MAX];
};

static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < btree_sectors(c)) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < btree_sectors(c)) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}

static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
	closure_type(ra, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	struct printbuf buf = PRINTBUF;
	bool dump_bset_maps = false;
	bool have_retry = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written = 0, written2 = 0;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
	bool _saw_error = false, *saw_error = &_saw_error;

	for (i = 0; i < ra->nr; i++) {
		struct btree_node *bn = ra->buf[i];

		if (ra->err[i])
			continue;

		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
		    (seq && seq != bn->keys.seq))
			continue;

		if (best < 0) {
			best = i;
			written = btree_node_sectors_written(c, bn);
			continue;
		}

		written2 = btree_node_sectors_written(c, ra->buf[i]);
		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_sectors_written_mismatch,
				 "btree node sectors written mismatch: %u != %u",
				 written, written2) ||
		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_bset_after_end,
				 "found bset signature after last bset") ||
		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_data_mismatch,
				 "btree node replicas content mismatch"))
			dump_bset_maps = true;

		if (written2 > written) {
			written = written2;
			best = i;
		}
	}
fsck_err:
	if (dump_bset_maps) {
		for (i = 0; i < ra->nr; i++) {
			struct btree_node *bn = ra->buf[i];
			struct btree_node_entry *bne = NULL;
			unsigned offset = 0, sectors;
			bool gap = false;

			if (ra->err[i])
				continue;

			printbuf_reset(&buf);

			while (offset < btree_sectors(c)) {
				if (!offset) {
					sectors = vstruct_sectors(bn, c->block_bits);
				} else {
					bne = ra->buf[i] + (offset << 9);
					if (bne->keys.seq != bn->keys.seq)
						break;
					sectors = vstruct_sectors(bne, c->block_bits);
				}

				prt_printf(&buf, " %u-%u", offset, offset + sectors);
				if (bne && bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
					prt_printf(&buf, "*");
				offset += sectors;
			}

			while (offset < btree_sectors(c)) {
				bne = ra->buf[i] + (offset << 9);
				if (bne->keys.seq == bn->keys.seq) {
					if (!gap)
						prt_printf(&buf, " GAP");
					gap = true;

					sectors = vstruct_sectors(bne, c->block_bits);
					prt_printf(&buf, " %u-%u", offset, offset + sectors);
					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
						prt_printf(&buf, "*");
				}
				offset++;
			}

			bch_err(c, "replica %u:%s", i, buf.buf);
		}
	}

	if (best >= 0) {
		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
	} else {
		ret = -1;
	}

	if (ret) {
		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
	} else if (*saw_error)
		bch2_btree_node_rewrite_async(c, b);

	for (i = 0; i < ra->nr; i++) {
		mempool_free(ra->buf[i], &c->btree_bounce_pool);
		bio_put(ra->bio[i]);
	}

	closure_debug_destroy(&ra->cl);
	kfree(ra);
	printbuf_exit(&buf);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_all_replicas_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c = rb->c;
	struct btree_node_read_all *ra = rb->ra;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	ra->err[rb->idx] = bio->bi_status;
	closure_put(&ra->cl);
}

/*
 * XXX This allocates multiple times from the same mempools, and can deadlock
 * under sufficient memory pressure (but is only a debug path)
 */
static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
{
	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded pick;
	struct btree_node_read_all *ra;
	unsigned i;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;

	closure_init(&ra->cl, NULL);
	ra->c = c;
	ra->b = b;
	ra->nr = bch2_bkey_nr_ptrs(k);

	for (i = 0; i < ra->nr; i++) {
		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
		ra->bio[i] = bio_alloc_bioset(NULL,
					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
					      REQ_OP_READ|REQ_SYNC|REQ_META,
					      GFP_NOFS,
					      &c->btree_bio);
	}

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
		struct btree_read_bio *rb =
			container_of(ra->bio[i], struct btree_read_bio, bio);
		rb->c = c;
		rb->b = b;
		rb->ra = ra;
		rb->start_time = local_clock();
		rb->have_ioref = ca != NULL;
		rb->idx = i;
		rb->pick = pick;
		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
		rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));

		if (rb->have_ioref) {
			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
				     bio_sectors(&rb->bio));
			bio_set_dev(&rb->bio, ca->disk_sb.bdev);

			closure_get(&ra->cl);
			submit_bio(&rb->bio);
		} else {
			ra->err[i] = BLK_STS_REMOVED;
		}

		i++;
	}

	if (sync) {
		closure_sync(&ra->cl);
		btree_node_read_all_replicas_done(&ra->cl.work);
	} else {
		continue_at(&ra->cl, btree_node_read_all_replicas_done,
			    c->btree_read_complete_wq);
	}

	return 0;
}

void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
			  bool sync)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_and_count(c, btree_node_read, trans, b);

	if (bch2_verify_all_btree_replicas &&
	    !btree_node_read_all_replicas(c, b, sync))
		return;

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick);

	if (ret <= 0) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "btree node read error: no device to read from\n at ");
		bch2_btree_pos_to_text(&buf, c, b);
		bch_err_ratelimited(c, "%s", buf.buf);

		if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
			bch2_fatal_error(c);

		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
		clear_btree_node_read_in_flight(b);
		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
		printbuf_exit(&buf);
		return;
	}

	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);

	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_SYNC|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c = c;
	rb->b = b;
	rb->ra = NULL;
	rb->start_time = local_clock();
	rb->have_ioref = ca != NULL;
	rb->pick = pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_iter.bi_sector = pick.ptr.offset;
	bio->bi_end_io = btree_node_read_endio;
	bch2_bio_map(bio, b->data, btree_buf_bytes(b));

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);
			bch2_latency_acct(ca, rb->start_time, READ);
			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(c->btree_read_complete_wq, &rb->work);
	}
}

static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
				  const struct bkey_i *k, unsigned level)
{
	struct bch_fs *c = trans->c;
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(trans, level != 0);
	bch2_btree_cache_cannibalize_unlock(trans);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	set_btree_node_read_in_flight(b);

	/* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */
	bch2_trans_unlock(trans);
	bch2_btree_node_read(trans, b, true);

	if (btree_node_read_error(b)) {
		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, b);
		mutex_unlock(&c->btree_cache.lock);

		ret = -BCH_ERR_btree_node_read_error;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
}

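/*
 * b->will_make_reachable is a tagged pointer to the btree_update that will
 * make this node reachable; bit 0 is set while that update still holds a
 * closure ref on our behalf, which we drop here now that the node has been
 * written.
 */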
static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
				      struct btree_write *w)
{
	unsigned long old, new;

	old = READ_ONCE(b->will_make_reachable);
	do {
		new = old;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while (!try_cmpxchg(&b->will_make_reachable, &old, new));

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}

static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);
	unsigned long old, new;
	unsigned type = 0;

	bch2_btree_complete_write(c, b, w);

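	/*
	 * If the node was redirtied while the write was in flight and nothing
	 * is blocking another write, flip write_idx (switching to the other
	 * write buffer) and immediately kick off the next write; otherwise
	 * clear the write-in-flight bits.
	 */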
1842 old = READ_ONCE(b->flags);
1843 do {
1844 new = old;
1845
1846 if ((old & (1U << BTREE_NODE_dirty)) &&
1847 (old & (1U << BTREE_NODE_need_write)) &&
1848 !(old & (1U << BTREE_NODE_never_write)) &&
1849 !(old & (1U << BTREE_NODE_write_blocked)) &&
1850 !(old & (1U << BTREE_NODE_will_make_reachable))) {
1851 new &= ~(1U << BTREE_NODE_dirty);
1852 new &= ~(1U << BTREE_NODE_need_write);
1853 new |= (1U << BTREE_NODE_write_in_flight);
1854 new |= (1U << BTREE_NODE_write_in_flight_inner);
1855 new |= (1U << BTREE_NODE_just_written);
1856 new ^= (1U << BTREE_NODE_write_idx);
1857
1858 type = new & BTREE_WRITE_TYPE_MASK;
1859 new &= ~BTREE_WRITE_TYPE_MASK;
1860 } else {
1861 new &= ~(1U << BTREE_NODE_write_in_flight);
1862 new &= ~(1U << BTREE_NODE_write_in_flight_inner);
1863 }
1864 } while (!try_cmpxchg(&b->flags, &old, new));
1865
1866 if (new & (1U << BTREE_NODE_write_in_flight))
1867 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
1868 else
1869 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
1870 }
1871
static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_trans *trans = bch2_trans_get(c);

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);

	/* we don't need the transaction context anymore once we have the lock: */
	bch2_trans_put(trans);
	__btree_node_write_done(c, b);
	six_unlock_read(&b->c.lock);
}

static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c = wbio->wbio.c;
	struct btree *b = wbio->wbio.bio.bi_private;
	int ret = 0;

	btree_bounce_free(c,
		wbio->data_bytes,
		wbio->wbio.used_mempool,
		wbio->data);

	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
		ret = -BCH_ERR_btree_node_write_all_failed;
		goto err;
	}

	if (wbio->wbio.first_btree_write) {
		if (wbio->wbio.failed.nr) {
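			/*
			 * Intentionally empty, as far as this code goes: this
			 * was the first write of a node that isn't reachable
			 * yet - no parent has a key pointing at it - so there
			 * is no key anywhere else to update with the dropped
			 * pointers.
			 */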
		}
	} else {
		ret = bch2_trans_do(c,
			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
					BCH_WATERMARK_interior_updates|
					BCH_TRANS_COMMIT_journal_reclaim|
					BCH_TRANS_COMMIT_no_enospc|
					BCH_TRANS_COMMIT_no_check_rw,
					!wbio->wbio.failed.nr));
		if (ret)
			goto err;
	}
out:
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
err:
	set_btree_node_noevict(b);
	bch2_fs_fatal_err_on(!bch2_err_matches(ret, EROFS), c,
			     "writing btree node: %s", bch2_err_str(ret));
	goto out;
}

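/*
 * Per-replica write completion: split bios complete into their parent; the
 * final completion records any failed devices, then punts to process context
 * (btree_node_write_work), since updating the node's key can't be done from
 * the interrupt path:
 */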
static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio = to_wbio(bio);
	struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig = parent ?: wbio;
	struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
	struct bch_fs *c = wbio->c;
	struct btree *b = wbio->bio.bi_private;
	struct bch_dev *ca = wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
	unsigned long flags;

	if (wbio->have_ioref)
		bch2_latency_acct(ca, wbio->submit_time, WRITE);

	if (!ca ||
	    bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
			       "btree write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("btree")) {
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	if (wbio->have_ioref)
		percpu_ref_put(&ca->io_ref);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
		return;
	}

	clear_btree_node_write_in_flight_inner(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
	INIT_WORK(&wb->work, btree_node_write_work);
	queue_work(c->btree_io_complete_wq, &wb->work);
}

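/*
 * Re-validate the node's key and the bset we're about to write, so that
 * in-memory corruption is caught before it reaches disk:
 */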
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{
	bool saw_error;

	int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key),
				     (struct bkey_validate_context) {
					.from	= BKEY_VALIDATE_btree_node,
					.level	= b->c.level + 1,
					.btree	= b->c.btree_id,
					.flags	= BCH_VALIDATE_write,
				     });
	if (ret) {
		bch2_fs_inconsistent(c, "invalid btree node key before write");
		return ret;
	}

	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
	if (ret) {
		bch2_inconsistent_error(c);
		dump_stack();
	}

	return ret;
}

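/*
 * The pointers in wbio->key point at the start of the node, but this write may
 * begin partway in; offset each pointer by the node-relative sector offset
 * before submitting to the replicas:
 */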
static void btree_write_submit(struct work_struct *work)
{
	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;

	bkey_copy(&tmp.k, &wbio->key);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
		ptr->offset += wbio->sector_offset;

	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
				  &tmp.k, false);
}

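/*
 * Main write path: claim the write by atomically clearing the dirty bit, merge
 * all unwritten bsets into one contiguous bounce buffer, checksum (and
 * possibly encrypt) it, then submit one bio per replica. Every write after the
 * first appends a new btree_node_entry at b->written sectors:
 */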
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
	struct btree_write_bio *wbio;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	struct sort_iter_stack sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	bool validate_before_checksum = false;
	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
	void *data;
	int ret;

	if (flags & BTREE_WRITE_ALREADY_STARTED)
		goto do_write;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * redirtying it:
	 */
	old = READ_ONCE(b->flags);
	do {
		new = old;

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
		    !(old & (1 << BTREE_NODE_need_write)))
			return;

		if (old &
		    ((1 << BTREE_NODE_never_write)|
		     (1 << BTREE_NODE_write_blocked)))
			return;

		if (b->written &&
		    (old & (1 << BTREE_NODE_will_make_reachable)))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight))
			return;

		if (flags & BTREE_WRITE_ONLY_IF_NEED)
			type = new & BTREE_WRITE_TYPE_MASK;
		new &= ~BTREE_WRITE_TYPE_MASK;

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |= (1 << BTREE_NODE_write_in_flight);
		new |= (1 << BTREE_NODE_write_in_flight_inner);
		new |= (1 << BTREE_NODE_just_written);
		new ^= (1 << BTREE_NODE_write_idx);
	} while (!try_cmpxchg_acquire(&b->flags, &old, new));

	if (new & (1U << BTREE_NODE_need_write))
		return;
do_write:
	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));

	atomic_long_dec(&c->btree_cache.nr_dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= btree_sectors(c));
	BUG_ON(b->written & (block_sectors(c) - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	bch2_sort_whiteouts(c, b);

	sort_iter_stack_init(&sort_iter, b);

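	/*
	 * The first write of a node starts with a full btree_node header;
	 * subsequent writes append a btree_node_entry after the sectors
	 * already written:
	 */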
	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	BUG_ON(b->written && !seq);

	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;

	/* buffer must be a multiple of the block size */
	bytes = round_up(bytes, block_bytes(c));

	data = btree_bounce_alloc(c, bytes, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq = cpu_to_le64(seq);
	i->u64s = 0;

	sort_iter_add(&sort_iter.iter,
		      unwritten_whiteouts_start(b),
		      unwritten_whiteouts_end(b));
	SET_BSET_SEPARATE_WHITEOUTS(i, false);

	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
	le16_add_cpu(&i->u64s, u64s);

	b->whiteout_u64s = 0;

	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);

	set_needs_whiteout(i, false);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	if (!b->written &&
	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
		BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = cpu_to_le16(c->sb.version);
	SET_BSET_OFFSET(i, b->written);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
		validate_before_checksum = true;

	/* validate_bset will be modifying: */
	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	/* if we're going to be encrypting, check metadata validity first: */
	if (validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	ret = bset_encrypt(c, i, b->written << 9);
	if (bch2_fs_fatal_err_on(ret, c,
			"encrypting btree node: %s", bch2_err_str(ret)))
		goto err;

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Also on journal error, the pending write may have updates that were
	 * never journalled (interior nodes, see btree_update_nodes_written()) -
	 * it's critical that we don't do the write in that case otherwise we
	 * will have updates visible that weren't in the journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(NULL,
				buf_pages(data, sectors_to_write << 9),
				REQ_OP_WRITE|REQ_META,
				GFP_NOFS,
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data = data;
	wbio->data_bytes = bytes;
	wbio->sector_offset = b->written;
	wbio->wbio.c = c;
	wbio->wbio.used_mempool = used_mempool;
	wbio->wbio.first_btree_write = !b->written;
	wbio->wbio.bio.bi_end_io = btree_node_write_endio;
	wbio->wbio.bio.bi_private = b;

	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

	bkey_copy(&wbio->key, &b->key);

	b->written += sectors_to_write;

	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
			cpu_to_le16(b->written);

	atomic64_inc(&c->btree_write_stats[type].nr);
	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);

	INIT_WORK(&wbio->work, btree_write_submit);
	queue_work(c->btree_write_submit_wq, &wbio->work);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, bytes, used_mempool, data);
	__btree_node_write_done(c, b);
}

/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, 0, b->nsets);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t), true);

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}

/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held,
			   unsigned flags)
{
	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->c.lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}

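/*
 * Same as bch2_btree_node_write(), but for use within a transaction: the write
 * lock taken for post-write cleanup is released via
 * __bch2_btree_node_unlock_write(), keeping lock state consistent with the
 * transaction's paths:
 */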
void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b,
				 enum six_lock_type lock_type_held,
				 unsigned flags)
{
	struct bch_fs *c = trans->c;

	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			__bch2_btree_node_unlock_write(trans, b);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}

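/*
 * Scan the btree node cache for nodes with the given flag set. We can't sleep
 * under rcu_read_lock(), so when we find one we drop RCU, wait for the bit to
 * clear, and restart the scan from the beginning:
 */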
static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
	bool ret = false;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			ret = true;
			goto restart;
		}
	rcu_read_unlock();

	return ret;
}

bool bch2_btree_flush_all_reads(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

bool bch2_btree_flush_all_writes(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}

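/* expands the BCH_BTREE_WRITE_TYPES() x-macro into a name table indexed by type: */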
static const char * const bch2_btree_write_types[] = {
#define x(t, n) [n] = #t,
	BCH_BTREE_WRITE_TYPES()
	NULL
};

void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 20);
	printbuf_tabstop_push(out, 10);

	prt_printf(out, "\tnr\tsize\n");

	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
		u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
		u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);

		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
		prt_newline(out);
	}
}