xref: /linux/fs/bcachefs/recovery_passes.c (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_gc.h"
#include "btree_node_scan.h"
#include "disk_accounting.h"
#include "ec.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "lru.h"
#include "logged_ops.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"

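/*
 * Human-readable pass names, generated from the BCH_RECOVERY_PASSES() x-macro
 * so this table stays in sync with the central pass list. Each x() entry
 * expands to the stringified pass function name; a sketch of the expansion
 * (pass names here are illustrative only):
 *
 *	const char * const bch2_recovery_passes[] = {
 *		"check_allocations",
 *		"set_may_go_rw",
 *		...
 *		NULL
 *	};
 */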
const char * const bch2_recovery_passes[] = {
#define x(_fn, ...)	#_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

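/*
 * Recovery pass: flip BCH_FS_may_go_rw and, if there is work that needs the
 * filesystem writable (journal keys to replay, fsck, an unclean shutdown, or
 * explicitly requested passes), go read-write early.
 */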
static int bch2_set_may_go_rw(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	/*
	 * After we go RW, the journal keys buffer can't be modified (except for
	 * setting journal_key->overwritten): it will be accessed by multiple
	 * threads.
	 */
	move_gap(keys, keys->nr);

	set_bit(BCH_FS_may_go_rw, &c->flags);

	if (keys->nr || c->opts.fsck || !c->sb.clean || c->recovery_passes_explicit)
		return bch2_fs_read_write_early(c);
	return 0;
}

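/*
 * Each recovery pass is a callback plus scheduling flags (PASS_ALWAYS,
 * PASS_FSCK, PASS_UNCLEAN, PASS_SILENT, PASS_ONLINE, ...), again generated
 * from BCH_RECOVERY_PASSES(). A sketch of what one x(_fn, _id, _when)
 * expansion produces (the pass name and flag are illustrative):
 *
 *	{ .fn = bch2_check_allocations, .when = PASS_FSCK },
 */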
struct recovery_pass_fn {
	int		(*fn)(struct bch_fs *);
	unsigned	when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _id, _when)	{ .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};

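/*
 * Pass numbering comes in two flavours: the in-memory enum bch_recovery_pass,
 * which may be reordered between versions, and the stable numbering
 * (enum bch_recovery_pass_stable) recorded in the superblock. The maps below
 * translate between the two so that on-disk fields keep referring to the same
 * passes across kernel versions.
 */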
static const u8 passes_to_stable_map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
	BCH_RECOVERY_PASSES()
#undef x
};

static enum bch_recovery_pass_stable bch2_recovery_pass_to_stable(enum bch_recovery_pass pass)
{
	return passes_to_stable_map[pass];
}

u64 bch2_recovery_passes_to_stable(u64 v)
{
	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(passes_to_stable_map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(passes_to_stable_map[i]);
	return ret;
}

u64 bch2_recovery_passes_from_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}
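
/*
 * The two helpers above translate whole bitmasks bit by bit: if bit i is set
 * in the input, bit passes_to_stable_map[i] (or map[i], going the other way)
 * is set in the output. Worked example with hypothetical numbering where
 * in-memory pass 3 has stable number 5:
 *
 *	bch2_recovery_passes_to_stable(BIT_ULL(3))   == BIT_ULL(5)
 *	bch2_recovery_passes_from_stable(BIT_ULL(5)) == BIT_ULL(3)
 */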

/*
 * For when we need to rewind recovery passes and run a pass we skipped:
 */
int bch2_run_explicit_recovery_pass(struct bch_fs *c,
				    enum bch_recovery_pass pass)
{
	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return 0;

	bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
		 bch2_recovery_passes[pass], pass,
		 bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);

	c->recovery_passes_explicit |= BIT_ULL(pass);

	if (c->curr_recovery_pass >= pass) {
		c->curr_recovery_pass = pass;
		c->recovery_passes_complete &= (1ULL << pass) >> 1;
		return -BCH_ERR_restart_recovery;
	} else {
		return 0;
	}
}

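/*
 * Like bch2_run_explicit_recovery_pass(), but also records the pass (in stable
 * numbering) in the superblock's recovery_passes_required bitmap, so the
 * request survives a crash or an interrupted recovery.
 */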
int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c,
					       enum bch_recovery_pass pass)
{
	enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	if (!test_bit_le64(s, ext->recovery_passes_required)) {
		__set_bit_le64(s, ext->recovery_passes_required);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	return bch2_run_explicit_recovery_pass(c, pass);
}

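/*
 * Inverse of the persistent variant above: drop a pass from the superblock's
 * recovery_passes_required bitmap once it is no longer needed.
 */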
static void bch2_clear_recovery_pass_required(struct bch_fs *c,
					      enum bch_recovery_pass pass)
{
	enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	if (test_bit_le64(s, ext->recovery_passes_required)) {
		__clear_bit_le64(s, ext->recovery_passes_required);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);
}

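/* Bitmask (in-memory numbering) of every pass that is part of fsck: */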
u64 bch2_fsck_recovery_passes(void)
{
	u64 ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
		if (recovery_pass_fns[i].when & PASS_FSCK)
			ret |= BIT_ULL(i);
	return ret;
}

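/*
 * A pass runs if it was explicitly requested, if fsck was requested and the
 * pass is part of fsck, if the filesystem was shut down uncleanly and the pass
 * is marked PASS_UNCLEAN, or if it is marked PASS_ALWAYS.
 */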
static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;

	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return true;
	if ((p->when & PASS_FSCK) && c->opts.fsck)
		return true;
	if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
		return true;
	if (p->when & PASS_ALWAYS)
		return true;
	return false;
}

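/*
 * Run a single pass, logging "<pass name>... done" around it unless the pass
 * is marked PASS_SILENT.
 */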
static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;
	int ret;

	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
			   bch2_recovery_passes[pass]);
	ret = p->fn(c);
	if (ret)
		return ret;
	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_CONT " done\n");

	return 0;
}

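/*
 * Run every pass marked PASS_ONLINE against an already-mounted filesystem. If
 * a pass asks for a restart (BCH_ERR_restart_recovery), resume iterating from
 * whatever curr_recovery_pass was rewound to.
 */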
int bch2_run_online_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	down_read(&c->state_lock);

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
		struct recovery_pass_fn *p = recovery_pass_fns + i;

		if (!(p->when & PASS_ONLINE))
			continue;

		ret = bch2_run_recovery_pass(c, i);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
			i = c->curr_recovery_pass;
			continue;
		}
		if (ret)
			break;
	}

	up_read(&c->state_lock);

	return ret;
}

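/*
 * The main mount-time recovery loop: walk curr_recovery_pass forward, running
 * each pass that should_run_recovery_pass() selects, and stopping early at
 * opts.recovery_pass_last if set. If a pass rewinds curr_recovery_pass (via
 * bch2_run_explicit_recovery_pass()), the loop simply continues from the
 * rewound position. Passes that complete without the filesystem being marked
 * errored have their superblock "required" bit cleared.
 */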
int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
		if (c->opts.recovery_pass_last &&
		    c->curr_recovery_pass > c->opts.recovery_pass_last)
			break;

		if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
			unsigned pass = c->curr_recovery_pass;

			ret =   bch2_run_recovery_pass(c, c->curr_recovery_pass) ?:
				bch2_journal_flush(&c->journal);
			if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
			    (ret && c->curr_recovery_pass < pass))
				continue;
			if (ret)
				break;

			c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
		}

		c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);

		if (!test_bit(BCH_FS_error, &c->flags))
			bch2_clear_recovery_pass_required(c, c->curr_recovery_pass);

		c->curr_recovery_pass++;
	}

	return ret;
}