// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "error.h"
#include "fs-common.h"
#include "journal.h"
#include "recovery_passes.h"
#include "super.h"
#include "thread_with_file.h"

#define FSCK_ERR_RATELIMIT_NR	10

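/*
 * Mark the filesystem inconsistent and apply the policy selected by the
 * errors= option: keep going, flip to emergency read-only, or panic.
 * Returns true if the error should be treated as fatal to further progress.
 */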
bool bch2_inconsistent_error(struct bch_fs *c)
{
	set_bit(BCH_FS_error, &c->flags);

	switch (c->opts.errors) {
	case BCH_ON_ERROR_continue:
		return false;
	case BCH_ON_ERROR_fix_safe:
	case BCH_ON_ERROR_ro:
		if (bch2_fs_emergency_read_only(c))
			bch_err(c, "inconsistency detected - emergency read only at journal seq %llu",
				journal_cur_seq(&c->journal));
		return true;
	case BCH_ON_ERROR_panic:
		panic(bch2_fmt(c, "panic after error"));
		return true;
	default:
		BUG();
	}
}

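/*
 * Record a btree topology error: outside of recovery all we can do is report
 * an inconsistency; while recovery is running we instead schedule the
 * check_topology pass so the damage can be repaired.
 */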
int bch2_topology_error(struct bch_fs *c)
{
	set_bit(BCH_FS_topology_error, &c->flags);
	if (!test_bit(BCH_FS_recovery_running, &c->flags)) {
		bch2_inconsistent_error(c);
		return -BCH_ERR_btree_need_topology_repair;
	} else {
		return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?:
			-BCH_ERR_btree_node_read_validate_error;
	}
}

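/*
 * Fatal errors force the filesystem into emergency read-only; the message is
 * only printed if this call was the one that performed the transition.
 */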
void bch2_fatal_error(struct bch_fs *c)
{
	if (bch2_fs_emergency_read_only(c))
		bch_err(c, "fatal error - emergency read only");
}

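/*
 * Work item run after a device accumulates too many IO errors: set just that
 * device read-only if the rest of the filesystem can continue degraded,
 * otherwise take the whole filesystem emergency read-only.
 */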
void bch2_io_error_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, io_error_work);
	struct bch_fs *c = ca->fs;
	bool dev;

	down_write(&c->state_lock);
	dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_ro,
				     BCH_FORCE_IF_DEGRADED);
	if (dev
	    ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro,
				   BCH_FORCE_IF_DEGRADED)
	    : bch2_fs_emergency_read_only(c))
		bch_err(ca,
			"too many IO errors, setting %s RO",
			dev ? "device" : "filesystem");
	up_write(&c->state_lock);
}

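/*
 * Count an IO error against a device; queueing io_error_work (which would
 * take the device read-only) is currently disabled, so only the per-type
 * counter is bumped.
 */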
void bch2_io_error(struct bch_dev *ca, enum bch_member_error_type type)
{
	atomic64_inc(&ca->errors[type]);
	//queue_work(system_long_wq, &ca->io_error_work);
}

enum ask_yn {
	YN_NO,
	YN_YES,
	YN_ALLNO,
	YN_ALLYES,
};

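/*
 * Parse a single-character reply to an fsck prompt: y/n answers just this
 * error, Y/N answers all future errors of the same type. Returns -1 on
 * anything unrecognized so the caller can prompt again.
 */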
static enum ask_yn parse_yn_response(char *buf)
{
	buf = strim(buf);

	if (strlen(buf) == 1)
		switch (buf[0]) {
		case 'n':
			return YN_NO;
		case 'y':
			return YN_YES;
		case 'N':
			return YN_ALLNO;
		case 'Y':
			return YN_ALLYES;
		}
	return -1;
}

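/*
 * Prompt the user for a fix/ignore decision: in the kernel this reads a line
 * from the stdio redirect (if one is attached), unlocking the btree
 * transaction while we wait; in userspace it simply reads stdin.
 */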
#ifdef __KERNEL__
static enum ask_yn bch2_fsck_ask_yn(struct bch_fs *c, struct btree_trans *trans)
{
	struct stdio_redirect *stdio = c->stdio;

	if (c->stdio_filter && c->stdio_filter != current)
		stdio = NULL;

	if (!stdio)
		return YN_NO;

	if (trans)
		bch2_trans_unlock(trans);

	unsigned long unlock_long_at = trans ? jiffies + HZ * 2 : 0;
	darray_char line = {};
	int ret;

	do {
		unsigned long t;
		bch2_print(c, " (y,n, or Y,N for all errors of this type) ");
rewait:
		t = unlock_long_at
			? max_t(long, unlock_long_at - jiffies, 0)
			: MAX_SCHEDULE_TIMEOUT;

		int r = bch2_stdio_redirect_readline_timeout(stdio, &line, t);
		if (r == -ETIME) {
			bch2_trans_unlock_long(trans);
			unlock_long_at = 0;
			goto rewait;
		}

		if (r < 0) {
			ret = YN_NO;
			break;
		}

		darray_last(line) = '\0';
	} while ((ret = parse_yn_response(line.data)) < 0);

	darray_exit(&line);
	return ret;
}
#else

#include "tools-util.h"

static enum ask_yn bch2_fsck_ask_yn(struct bch_fs *c, struct btree_trans *trans)
{
	char *buf = NULL;
	size_t buflen = 0;
	int ret;

	do {
		fputs(" (y,n, or Y,N for all errors of this type) ", stdout);
		fflush(stdout);

		if (getline(&buf, &buflen, stdin) < 0)
			die("error reading from standard input");
	} while ((ret = parse_yn_response(buf)) < 0);

	free(buf);
	return ret;
}

#endif

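/*
 * Look up (or allocate) the per-format-string state used to ratelimit and
 * memoize fsck errors; returns NULL if fsck isn't running or on allocation
 * failure.
 */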
static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
{
	struct fsck_err_state *s;

	if (!test_bit(BCH_FS_fsck_running, &c->flags))
		return NULL;

	list_for_each_entry(s, &c->fsck_error_msgs, list)
		if (s->fmt == fmt) {
			/*
			 * move it to the head of the list: repeated fsck errors
			 * are common
			 */
			list_move(&s->list, &c->fsck_error_msgs);
			return s;
		}

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s) {
		if (!c->fsck_alloc_msgs_err)
			bch_err(c, "kmalloc err, cannot ratelimit fsck errs");
		c->fsck_alloc_msgs_err = true;
		return NULL;
	}

	INIT_LIST_HEAD(&s->list);
	s->fmt = fmt;
	list_add(&s->list, &c->fsck_error_msgs);
	return s;
}

/* s/fix?/fixing/ s/recreate?/recreating/ */
static void prt_actioning(struct printbuf *out, const char *action)
{
	unsigned len = strlen(action);

	BUG_ON(action[len - 1] != '?');
	--len;

	if (action[len - 1] == 'e')
		--len;

	prt_bytes(out, action, len);
	prt_str(out, "ing");
}

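/* Per-error-type fsck flags, generated from the BCH_SB_ERRS() table: */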
static const u8 fsck_flags_extra[] = {
#define x(t, n, flags)		[BCH_FSCK_ERR_##t] = flags,
	BCH_SB_ERRS()
#undef x
};

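/*
 * Append the proposed action to the error message, print it, and ask the
 * user; relocks the transaction (if any) before returning the answer.
 */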
static int do_fsck_ask_yn(struct bch_fs *c,
			  struct btree_trans *trans,
			  struct printbuf *question,
			  const char *action)
{
	prt_str(question, ", ");
	prt_str(question, action);

	if (bch2_fs_stdio_redirect(c))
		bch2_print(c, "%s", question->buf);
	else
		bch2_print_string_as_lines(KERN_ERR, question->buf);

	int ask = bch2_fsck_ask_yn(c, trans);

	if (trans) {
		int ret = bch2_trans_relock(trans);
		if (ret)
			return ret;
	}

	return ask;
}

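/*
 * Core fsck error path: log the error, then decide whether to fix it based
 * on the error's flags, the errors=/fix_errors options and, if requested, a
 * user prompt; repeated instances of the same error are ratelimited.
 *
 * Returns -BCH_ERR_fsck_fix if the caller should repair the error,
 * -BCH_ERR_fsck_ignore if it should be left alone, or
 * -BCH_ERR_fsck_errors_not_fixed (or another negative error code) if we
 * cannot continue.
 */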
int __bch2_fsck_err(struct bch_fs *c,
		    struct btree_trans *trans,
		    enum bch_fsck_flags flags,
		    enum bch_sb_error_id err,
		    const char *fmt, ...)
{
	struct fsck_err_state *s = NULL;
	va_list args;
	bool print = true, suppressing = false, inconsistent = false, exiting = false;
	struct printbuf buf = PRINTBUF, *out = &buf;
	int ret = -BCH_ERR_fsck_ignore;
	const char *action_orig = "fix?", *action = action_orig;

	might_sleep();

	if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra)))
		flags |= fsck_flags_extra[err];

	if (!c)
		c = trans->c;

	/*
	 * Ugly: if there's a transaction in the current task it has to be
	 * passed in to unlock if we prompt for user input.
	 *
	 * But plumbing a transaction and transaction restarts into
	 * bkey_validate() is problematic.
	 *
	 * So:
	 * - make all bkey errors AUTOFIX, they're simple anyway (we just
	 *   delete the key)
	 * - and we don't need to warn if we're not prompting
	 */
	WARN_ON((flags & FSCK_CAN_FIX) &&
		!(flags & FSCK_AUTOFIX) &&
		!trans &&
		bch2_current_has_btree_trans(c));

	if (test_bit(err, c->sb.errors_silent))
		return flags & FSCK_CAN_FIX
			? -BCH_ERR_fsck_fix
			: -BCH_ERR_fsck_ignore;

	bch2_sb_error_count(c, err);

	va_start(args, fmt);
	prt_vprintf(out, fmt, args);
	va_end(args);

	/* Custom fix/continue/recreate/etc.? */
	if (out->buf[out->pos - 1] == '?') {
		const char *p = strrchr(out->buf, ',');
		if (p) {
			out->pos = p - out->buf;
			action = kstrdup(p + 2, GFP_KERNEL);
			if (!action) {
				ret = -ENOMEM;
				goto err;
			}
		}
	}

	mutex_lock(&c->fsck_error_msgs_lock);
	s = fsck_err_get(c, fmt);
	if (s) {
		/*
		 * We may be called multiple times for the same error on
		 * transaction restart - this memoizes instead of asking the user
		 * multiple times for the same error:
		 */
		if (s->last_msg && !strcmp(buf.buf, s->last_msg)) {
			ret = s->ret;
			goto err_unlock;
		}

		kfree(s->last_msg);
		s->last_msg = kstrdup(buf.buf, GFP_KERNEL);
		if (!s->last_msg) {
			ret = -ENOMEM;
			goto err_unlock;
		}

		if (c->opts.ratelimit_errors &&
		    !(flags & FSCK_NO_RATELIMIT) &&
		    s->nr >= FSCK_ERR_RATELIMIT_NR) {
			if (s->nr == FSCK_ERR_RATELIMIT_NR)
				suppressing = true;
			else
				print = false;
		}

		s->nr++;
	}

#ifdef BCACHEFS_LOG_PREFIX
	if (!strncmp(fmt, "bcachefs:", 9))
		prt_printf(out, bch2_log_msg(c, ""));
#endif

	if ((flags & FSCK_AUTOFIX) &&
	    (c->opts.errors == BCH_ON_ERROR_continue ||
	     c->opts.errors == BCH_ON_ERROR_fix_safe)) {
		prt_str(out, ", ");
		if (flags & FSCK_CAN_FIX) {
			prt_actioning(out, action);
			ret = -BCH_ERR_fsck_fix;
		} else {
			prt_str(out, ", continuing");
			ret = -BCH_ERR_fsck_ignore;
		}

		goto print;
	} else if (!test_bit(BCH_FS_fsck_running, &c->flags)) {
		if (c->opts.errors != BCH_ON_ERROR_continue ||
		    !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) {
			prt_str(out, ", shutting down");
			inconsistent = true;
			ret = -BCH_ERR_fsck_errors_not_fixed;
		} else if (flags & FSCK_CAN_FIX) {
			prt_str(out, ", ");
			prt_actioning(out, action);
			ret = -BCH_ERR_fsck_fix;
		} else {
			prt_str(out, ", continuing");
			ret = -BCH_ERR_fsck_ignore;
		}
	} else if (c->opts.fix_errors == FSCK_FIX_exit) {
		prt_str(out, ", exiting");
		ret = -BCH_ERR_fsck_errors_not_fixed;
	} else if (flags & FSCK_CAN_FIX) {
		int fix = s && s->fix
			? s->fix
			: c->opts.fix_errors;

		if (fix == FSCK_FIX_ask) {
			print = false;

			ret = do_fsck_ask_yn(c, trans, out, action);
			if (ret < 0)
				goto err_unlock;

			if (ret >= YN_ALLNO && s)
				s->fix = ret == YN_ALLNO
					? FSCK_FIX_no
					: FSCK_FIX_yes;

			ret = ret & 1
				? -BCH_ERR_fsck_fix
				: -BCH_ERR_fsck_ignore;
		} else if (fix == FSCK_FIX_yes ||
			   (c->opts.nochanges &&
			    !(flags & FSCK_CAN_IGNORE))) {
			prt_str(out, ", ");
			prt_actioning(out, action);
			ret = -BCH_ERR_fsck_fix;
		} else {
			prt_str(out, ", not ");
			prt_actioning(out, action);
		}
	} else if (!(flags & FSCK_CAN_IGNORE)) {
		prt_str(out, " (repair unimplemented)");
	}

	if (ret == -BCH_ERR_fsck_ignore &&
	    (c->opts.fix_errors == FSCK_FIX_exit ||
	     !(flags & FSCK_CAN_IGNORE)))
		ret = -BCH_ERR_fsck_errors_not_fixed;

	if (test_bit(BCH_FS_fsck_running, &c->flags) &&
	    (ret != -BCH_ERR_fsck_fix &&
	     ret != -BCH_ERR_fsck_ignore)) {
		exiting = true;
		print = true;
	}
print:
	if (print) {
		if (bch2_fs_stdio_redirect(c))
			bch2_print(c, "%s\n", out->buf);
		else
			bch2_print_string_as_lines(KERN_ERR, out->buf);
	}

	if (exiting)
		bch_err(c, "Unable to continue, halting");
	else if (suppressing)
		bch_err(c, "Ratelimiting new instances of previous error");

	if (s)
		s->ret = ret;

	if (inconsistent)
		bch2_inconsistent_error(c);

	/*
	 * We don't yet track whether the filesystem currently has errors for
	 * log_fsck_err()s: that would require tracking, for every error type,
	 * which recovery pass corrects it, in order to get the fsck exit
	 * status right.
	 */
	if (flags & FSCK_CAN_FIX) {
		if (ret == -BCH_ERR_fsck_fix) {
			set_bit(BCH_FS_errors_fixed, &c->flags);
		} else {
			set_bit(BCH_FS_errors_not_fixed, &c->flags);
			set_bit(BCH_FS_error, &c->flags);
		}
	}
err_unlock:
	mutex_unlock(&c->fsck_error_msgs_lock);
err:
	if (action != action_orig)
		kfree(action);
	printbuf_exit(&buf);
	return ret;
}

static const char * const bch2_bkey_validate_contexts[] = {
#define x(n) #n,
	BKEY_VALIDATE_CONTEXTS()
#undef x
	NULL
};

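/*
 * Bkey validation errors: build a message describing where the bad key was
 * found (validation context, btree, level) and hand it to __bch2_fsck_err();
 * the proposed fix is always to delete the key.
 */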
int __bch2_bkey_fsck_err(struct bch_fs *c,
			 struct bkey_s_c k,
			 struct bkey_validate_context from,
			 enum bch_sb_error_id err,
			 const char *fmt, ...)
{
	if (from.flags & BCH_VALIDATE_silent)
		return -BCH_ERR_fsck_delete_bkey;

	unsigned fsck_flags = 0;
	if (!(from.flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit))) {
		if (test_bit(err, c->sb.errors_silent))
			return -BCH_ERR_fsck_delete_bkey;

		fsck_flags |= FSCK_AUTOFIX|FSCK_CAN_FIX;
	}
	if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra)))
		fsck_flags |= fsck_flags_extra[err];

	struct printbuf buf = PRINTBUF;
	prt_printf(&buf, "invalid bkey in %s",
		   bch2_bkey_validate_contexts[from.from]);

	if (from.from == BKEY_VALIDATE_journal)
		prt_printf(&buf, " journal seq=%llu offset=%u",
			   from.journal_seq, from.journal_offset);

	prt_str(&buf, " btree=");
	bch2_btree_id_to_text(&buf, from.btree);
	prt_printf(&buf, " level=%u: ", from.level);

	bch2_bkey_val_to_text(&buf, c, k);
	prt_str(&buf, "\n ");

	va_list args;
	va_start(args, fmt);
	prt_vprintf(&buf, fmt, args);
	va_end(args);

	prt_str(&buf, ": delete?");

	int ret = __bch2_fsck_err(c, NULL, fsck_flags, err, "%s", buf.buf);
	printbuf_exit(&buf);
	return ret;
}

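/*
 * Called when fsck is done with error reporting: print a summary line for
 * every error that was ratelimited, then free the accumulated per-error
 * state.
 */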
void bch2_flush_fsck_errs(struct bch_fs *c)
{
	struct fsck_err_state *s, *n;

	mutex_lock(&c->fsck_error_msgs_lock);

	list_for_each_entry_safe(s, n, &c->fsck_error_msgs, list) {
		if (s->ratelimited && s->last_msg)
			bch_err(c, "Saw %llu errors like:\n %s", s->nr, s->last_msg);

		list_del(&s->list);
		kfree(s->last_msg);
		kfree(s);
	}

	mutex_unlock(&c->fsck_error_msgs_lock);
}

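/*
 * Helpers for prefixing error messages with the path of the affected inode,
 * falling back to "inum subvol:inum" when the path can't be resolved.
 */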
int bch2_inum_err_msg_trans(struct btree_trans *trans, struct printbuf *out, subvol_inum inum)
{
	u32 restart_count = trans->restart_count;
	int ret = 0;

	/* XXX: we don't yet attempt to print paths when we don't know the subvol */
	if (inum.subvol)
		ret = lockrestart_do(trans, bch2_inum_to_path(trans, inum, out));
	if (!inum.subvol || ret)
		prt_printf(out, "inum %llu:%llu", inum.subvol, inum.inum);

	return trans_was_restarted(trans, restart_count);
}

int bch2_inum_offset_err_msg_trans(struct btree_trans *trans, struct printbuf *out,
				   subvol_inum inum, u64 offset)
{
	int ret = bch2_inum_err_msg_trans(trans, out, inum);
	prt_printf(out, " offset %llu: ", offset);
	return ret;
}

void bch2_inum_err_msg(struct bch_fs *c, struct printbuf *out, subvol_inum inum)
{
	bch2_trans_run(c, bch2_inum_err_msg_trans(trans, out, inum));
}

void bch2_inum_offset_err_msg(struct bch_fs *c, struct printbuf *out,
			      subvol_inum inum, u64 offset)
{
	bch2_trans_run(c, bch2_inum_offset_err_msg_trans(trans, out, inum, offset));
}