// SPDX-License-Identifier: GPL-2.0

/*
 * Superblock section that lists, per version, the recovery passes to run and
 * the errors to silence when downgrading past that version
 */

#include "bcachefs.h"
#include "darray.h"
#include "recovery_passes.h"
#include "sb-downgrade.h"
#include "sb-errors.h"
#include "super-io.h"

#define RECOVERY_PASS_ALL_FSCK		BIT_ULL(63)

/*
 * Upgrade, downgrade tables - run certain recovery passes, fix certain errors
 *
 * x(version, recovery_passes, errors...)
 */
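
/*
 * RECOVERY_PASS_ALL_FSCK is a pseudo pass bit: bch2_sb_set_upgrade() expands
 * it to the full set of fsck passes (bch2_fsck_recovery_passes()) before the
 * mask is written to the superblock.
 */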
#define UPGRADE_TABLE()						\
	x(backpointers,						\
	  RECOVERY_PASS_ALL_FSCK)				\
	x(inode_v3,						\
	  RECOVERY_PASS_ALL_FSCK)				\
	x(unwritten_extents,					\
	  RECOVERY_PASS_ALL_FSCK)				\
	x(bucket_gens,						\
	  BIT_ULL(BCH_RECOVERY_PASS_bucket_gens_init)|		\
	  RECOVERY_PASS_ALL_FSCK)				\
	x(lru_v2,						\
	  RECOVERY_PASS_ALL_FSCK)				\
	x(fragmentation_lru,					\
	  RECOVERY_PASS_ALL_FSCK)				\
	x(no_bps_in_alloc_keys,					\
	  RECOVERY_PASS_ALL_FSCK)				\
	x(snapshot_trees,					\
	  RECOVERY_PASS_ALL_FSCK)				\
	x(snapshot_skiplists,					\
	  BIT_ULL(BCH_RECOVERY_PASS_check_snapshots),		\
	  BCH_FSCK_ERR_snapshot_bad_depth,			\
	  BCH_FSCK_ERR_snapshot_bad_skiplist)			\
	x(deleted_inodes,					\
	  BIT_ULL(BCH_RECOVERY_PASS_check_inodes),		\
	  BCH_FSCK_ERR_unlinked_inode_not_on_deleted_list)	\
	x(rebalance_work,					\
	  BIT_ULL(BCH_RECOVERY_PASS_set_fs_needs_rebalance))	\
	x(subvolume_fs_parent,					\
	  BIT_ULL(BCH_RECOVERY_PASS_check_dirents),		\
	  BCH_FSCK_ERR_subvol_fs_path_parent_wrong)		\
	x(btree_subvolume_children,				\
	  BIT_ULL(BCH_RECOVERY_PASS_check_subvols),		\
	  BCH_FSCK_ERR_subvol_children_not_set)			\
	x(mi_btree_bitmap,					\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_btree_bitmap_not_marked)			\
	x(disk_accounting_v2,					\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_bkey_version_in_future,			\
	  BCH_FSCK_ERR_dev_usage_buckets_wrong,			\
	  BCH_FSCK_ERR_dev_usage_sectors_wrong,			\
	  BCH_FSCK_ERR_dev_usage_fragmented_wrong,		\
	  BCH_FSCK_ERR_accounting_mismatch)			\
	x(disk_accounting_v3,					\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_bkey_version_in_future,			\
	  BCH_FSCK_ERR_dev_usage_buckets_wrong,			\
	  BCH_FSCK_ERR_dev_usage_sectors_wrong,			\
	  BCH_FSCK_ERR_dev_usage_fragmented_wrong,		\
	  BCH_FSCK_ERR_accounting_mismatch,			\
	  BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0,	\
	  BCH_FSCK_ERR_accounting_key_replicas_nr_required_bad,	\
	  BCH_FSCK_ERR_accounting_key_replicas_devs_unsorted,	\
	  BCH_FSCK_ERR_accounting_key_junk_at_end)		\
	x(disk_accounting_inum,					\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_accounting_mismatch)			\
	x(rebalance_work_acct_fix,				\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_accounting_mismatch)			\
	x(inode_has_child_snapshots,				\
	  BIT_ULL(BCH_RECOVERY_PASS_check_inodes),		\
	  BCH_FSCK_ERR_inode_has_child_snapshots_wrong)		\
	x(backpointer_bucket_gen,				\
	  BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers),\
	  BCH_FSCK_ERR_backpointer_to_missing_ptr,		\
	  BCH_FSCK_ERR_ptr_to_missing_backpointer)		\
	x(disk_accounting_big_endian,				\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_accounting_mismatch,			\
	  BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0,	\
	  BCH_FSCK_ERR_accounting_key_junk_at_end)

#define DOWNGRADE_TABLE()					\
	x(bucket_stripe_sectors,				\
	  0)							\
	x(disk_accounting_v2,					\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_dev_usage_buckets_wrong,			\
	  BCH_FSCK_ERR_dev_usage_sectors_wrong,			\
	  BCH_FSCK_ERR_dev_usage_fragmented_wrong,		\
	  BCH_FSCK_ERR_fs_usage_hidden_wrong,			\
	  BCH_FSCK_ERR_fs_usage_btree_wrong,			\
	  BCH_FSCK_ERR_fs_usage_data_wrong,			\
	  BCH_FSCK_ERR_fs_usage_cached_wrong,			\
	  BCH_FSCK_ERR_fs_usage_reserved_wrong,			\
	  BCH_FSCK_ERR_fs_usage_nr_inodes_wrong,		\
	  BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong,	\
	  BCH_FSCK_ERR_fs_usage_replicas_wrong,			\
	  BCH_FSCK_ERR_bkey_version_in_future)			\
	x(disk_accounting_v3,					\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_dev_usage_buckets_wrong,			\
	  BCH_FSCK_ERR_dev_usage_sectors_wrong,			\
	  BCH_FSCK_ERR_dev_usage_fragmented_wrong,		\
	  BCH_FSCK_ERR_fs_usage_hidden_wrong,			\
	  BCH_FSCK_ERR_fs_usage_btree_wrong,			\
	  BCH_FSCK_ERR_fs_usage_data_wrong,			\
	  BCH_FSCK_ERR_fs_usage_cached_wrong,			\
	  BCH_FSCK_ERR_fs_usage_reserved_wrong,			\
	  BCH_FSCK_ERR_fs_usage_nr_inodes_wrong,		\
	  BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong,	\
	  BCH_FSCK_ERR_fs_usage_replicas_wrong,			\
	  BCH_FSCK_ERR_accounting_replicas_not_marked,		\
	  BCH_FSCK_ERR_bkey_version_in_future)			\
	x(rebalance_work_acct_fix,				\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_accounting_mismatch,			\
	  BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0,	\
	  BCH_FSCK_ERR_accounting_key_junk_at_end)		\
	x(backpointer_bucket_gen,				\
	  BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers),\
	  BCH_FSCK_ERR_backpointer_bucket_offset_wrong,		\
	  BCH_FSCK_ERR_backpointer_to_missing_ptr,		\
	  BCH_FSCK_ERR_ptr_to_missing_backpointer)		\
	x(disk_accounting_big_endian,				\
	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
	  BCH_FSCK_ERR_accounting_mismatch,			\
	  BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0,	\
	  BCH_FSCK_ERR_accounting_key_junk_at_end)

struct upgrade_downgrade_entry {
	u64		recovery_passes;
	u16		version;
	u16		nr_errors;
	const u16	*errors;
};

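/*
 * Expand UPGRADE_TABLE() into the per-version silent error lists and
 * upgrade_table[]:
 */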
#define x(ver, passes, ...) static const u16 upgrade_##ver##_errors[] = { __VA_ARGS__ };
UPGRADE_TABLE()
#undef x

static const struct upgrade_downgrade_entry upgrade_table[] = {
#define x(ver, passes, ...) {					\
	.recovery_passes	= passes,			\
	.version		= bcachefs_metadata_version_##ver,\
	.nr_errors		= ARRAY_SIZE(upgrade_##ver##_errors),	\
	.errors			= upgrade_##ver##_errors,	\
},
UPGRADE_TABLE()
#undef x
};

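/* Nonzero if the stripes btree has a real (non-fake) root: */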
static int have_stripes(struct bch_fs *c)
{
	if (IS_ERR_OR_NULL(c->btree_roots_known[BTREE_ID_stripes].b))
		return 0;

	return !btree_node_fake(c->btree_roots_known[BTREE_ID_stripes].b);
}

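/*
 * Extra upgrade work that depends on filesystem state, not just versions:
 * when crossing bucket_stripe_sectors and stripes exist, schedule
 * check_allocations and silence the alloc key dirty/stripe sector errors.
 */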
int bch2_sb_set_upgrade_extra(struct bch_fs *c)
{
	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
	unsigned new_version = c->sb.version;
	bool write_sb = false;
	int ret = 0;

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	if (old_version < bcachefs_metadata_version_bucket_stripe_sectors &&
	    new_version >= bcachefs_metadata_version_bucket_stripe_sectors &&
	    (ret = have_stripes(c) > 0)) {
		__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, ext->recovery_passes_required);
		__set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
		__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_sectors_wrong, ext->errors_silent);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return ret < 0 ? ret : 0;
}

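/*
 * For an upgrade from old_version to new_version, OR in the recovery passes
 * and silenced errors from every upgrade table entry in that range.
 * Caller must hold c->sb_lock.
 */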
void bch2_sb_set_upgrade(struct bch_fs *c,
			 unsigned old_version,
			 unsigned new_version)
{
	lockdep_assert_held(&c->sb_lock);

	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	for (const struct upgrade_downgrade_entry *i = upgrade_table;
	     i < upgrade_table + ARRAY_SIZE(upgrade_table);
	     i++)
		if (i->version > old_version && i->version <= new_version) {
			u64 passes = i->recovery_passes;

			if (passes & RECOVERY_PASS_ALL_FSCK)
				passes |= bch2_fsck_recovery_passes();
			passes &= ~RECOVERY_PASS_ALL_FSCK;

			ext->recovery_passes_required[0] |=
				cpu_to_le64(bch2_recovery_passes_to_stable(passes));

			for (const u16 *e = i->errors; e < i->errors + i->nr_errors; e++)
				__set_bit_le64(*e, ext->errors_silent);
		}
}

#define x(ver, passes, ...) static const u16 downgrade_##ver##_errors[] = { __VA_ARGS__ };
DOWNGRADE_TABLE()
#undef x

static const struct upgrade_downgrade_entry downgrade_table[] = {
#define x(ver, passes, ...) {					\
	.recovery_passes	= passes,			\
	.version		= bcachefs_metadata_version_##ver,\
	.nr_errors		= ARRAY_SIZE(downgrade_##ver##_errors),	\
	.errors			= downgrade_##ver##_errors,	\
},
DOWNGRADE_TABLE()
#undef x
};

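/*
 * Amend the downgrade entry currently being built at the end of @table with
 * extra passes/errors that depend on filesystem state - currently only for
 * bucket_stripe_sectors, when stripes exist. The caller bumps table->nr once
 * the entry is complete.
 */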
static int downgrade_table_extra(struct bch_fs *c, darray_char *table)
{
	struct bch_sb_field_downgrade_entry *dst = (void *) &darray_top(*table);
	unsigned bytes = sizeof(*dst) + sizeof(dst->errors[0]) * le16_to_cpu(dst->nr_errors);
	int ret = 0;

	unsigned nr_errors = le16_to_cpu(dst->nr_errors);

	switch (le16_to_cpu(dst->version)) {
	case bcachefs_metadata_version_bucket_stripe_sectors:
		if (have_stripes(c)) {
			bytes += sizeof(dst->errors[0]) * 2;

			ret = darray_make_room(table, bytes);
			if (ret)
				return ret;

			/* open coded __set_bit_le64, as dst is packed and
			 * dst->recovery_passes is misaligned */
			unsigned b = BCH_RECOVERY_PASS_STABLE_check_allocations;
			dst->recovery_passes[b / 64] |= cpu_to_le64(BIT_ULL(b % 64));

			dst->errors[nr_errors++] = cpu_to_le16(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong);
		}
		break;
	}

	dst->nr_errors = cpu_to_le16(nr_errors);
	return ret;
}

static inline const struct bch_sb_field_downgrade_entry *
downgrade_entry_next_c(const struct bch_sb_field_downgrade_entry *e)
{
	return (void *) &e->errors[le16_to_cpu(e->nr_errors)];
}

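/*
 * Iterate over downgrade entries in a superblock section; stop if an entry
 * would run past the end of the section (entries are 2 byte aligned, section
 * sizes are 8 byte aligned, so trailing padding is expected).
 */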
#define for_each_downgrade_entry(_d, _i)						\
	for (const struct bch_sb_field_downgrade_entry *_i = (_d)->entries;		\
	     (void *) _i	< vstruct_end(&(_d)->field) &&				\
	     (void *) &_i->errors[0] <= vstruct_end(&(_d)->field) &&			\
	     (void *) downgrade_entry_next_c(_i) <= vstruct_end(&(_d)->field);		\
	     _i = downgrade_entry_next_c(_i))

static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
				      enum bch_validate_flags flags, struct printbuf *err)
{
	struct bch_sb_field_downgrade *e = field_to_type(f, downgrade);

	for (const struct bch_sb_field_downgrade_entry *i = e->entries;
	     (void *) i < vstruct_end(&e->field);
	     i = downgrade_entry_next_c(i)) {
		/*
		 * Careful: sb_field_downgrade_entry is only 2 byte aligned, but
		 * section sizes are 8 byte aligned - an empty entry spanning
		 * the end of the section is allowed (and ignored):
		 */
		if ((void *) &i->errors[0] > vstruct_end(&e->field))
			break;

		if (flags & BCH_VALIDATE_write &&
		    (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field)) {
			prt_printf(err, "downgrade entry overruns end of superblock section");
			return -BCH_ERR_invalid_sb_downgrade;
		}

		if (BCH_VERSION_MAJOR(le16_to_cpu(i->version)) !=
		    BCH_VERSION_MAJOR(le16_to_cpu(sb->version))) {
			prt_printf(err, "downgrade entry with mismatched major version (%u != %u)",
				   BCH_VERSION_MAJOR(le16_to_cpu(i->version)),
				   BCH_VERSION_MAJOR(le16_to_cpu(sb->version)));
			return -BCH_ERR_invalid_sb_downgrade;
		}
	}

	return 0;
}

static void bch2_sb_downgrade_to_text(struct printbuf *out, struct bch_sb *sb,
				      struct bch_sb_field *f)
{
	struct bch_sb_field_downgrade *e = field_to_type(f, downgrade);

	if (out->nr_tabstops <= 1)
		printbuf_tabstop_push(out, 16);

	for_each_downgrade_entry(e, i) {
		prt_str(out, "version:\t");
		bch2_version_to_text(out, le16_to_cpu(i->version));
		prt_newline(out);

		prt_str(out, "recovery passes:\t");
		prt_bitflags(out, bch2_recovery_passes,
			     bch2_recovery_passes_from_stable(le64_to_cpu(i->recovery_passes[0])));
		prt_newline(out);

		prt_str(out, "errors:\t");
		bool first = true;
		for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
			if (!first)
				prt_char(out, ',');
			first = false;
			bch2_sb_error_id_to_text(out, le16_to_cpu(i->errors[j]));
		}
		prt_newline(out);
	}
}

const struct bch_sb_field_ops bch_sb_field_ops_downgrade = {
	.validate	= bch2_sb_downgrade_validate,
	.to_text	= bch2_sb_downgrade_to_text,
};

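/*
 * Rebuild the superblock downgrade section from downgrade_table[]: keep only
 * entries matching the current major version, add state-dependent extras via
 * downgrade_table_extra(), and drop entries with no passes and no errors.
 */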
int bch2_sb_downgrade_update(struct bch_fs *c)
{
	if (!test_bit(BCH_FS_btree_running, &c->flags))
		return 0;

	darray_char table = {};
	int ret = 0;

	for (const struct upgrade_downgrade_entry *src = downgrade_table;
	     src < downgrade_table + ARRAY_SIZE(downgrade_table);
	     src++) {
		if (BCH_VERSION_MAJOR(src->version) != BCH_VERSION_MAJOR(le16_to_cpu(c->disk_sb.sb->version)))
			continue;

		struct bch_sb_field_downgrade_entry *dst;
		unsigned bytes = sizeof(*dst) + sizeof(dst->errors[0]) * src->nr_errors;

		ret = darray_make_room(&table, bytes);
		if (ret)
			goto out;

		dst = (void *) &darray_top(table);
		dst->version = cpu_to_le16(src->version);
		dst->recovery_passes[0]	= cpu_to_le64(bch2_recovery_passes_to_stable(src->recovery_passes));
		dst->recovery_passes[1]	= 0;
		dst->nr_errors		= cpu_to_le16(src->nr_errors);
		for (unsigned i = 0; i < src->nr_errors; i++)
			dst->errors[i] = cpu_to_le16(src->errors[i]);

		ret = downgrade_table_extra(c, &table);
		if (ret)
			goto out;

		if (!dst->recovery_passes[0] &&
		    !dst->recovery_passes[1] &&
		    !dst->nr_errors)
			continue;

		table.nr += sizeof(*dst) + sizeof(dst->errors[0]) * le16_to_cpu(dst->nr_errors);
	}

	struct bch_sb_field_downgrade *d = bch2_sb_field_get(c->disk_sb.sb, downgrade);

	unsigned sb_u64s = DIV_ROUND_UP(sizeof(*d) + table.nr, sizeof(u64));

	if (d && le32_to_cpu(d->field.u64s) > sb_u64s)
		goto out;

	d = bch2_sb_field_resize(&c->disk_sb, downgrade, sb_u64s);
	if (!d) {
		ret = -BCH_ERR_ENOSPC_sb_downgrade;
		goto out;
	}

	memcpy(d->entries, table.data, table.nr);
	memset_u64s_tail(d->entries, 0, table.nr);
out:
	darray_exit(&table);
	return ret;
}

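/*
 * Downgrading from old_minor to new_minor: set the recovery passes required
 * and errors to silence from every downgrade entry with a minor version in
 * (new_minor, old_minor].
 */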
void bch2_sb_set_downgrade(struct bch_fs *c, unsigned new_minor, unsigned old_minor)
{
	struct bch_sb_field_downgrade *d = bch2_sb_field_get(c->disk_sb.sb, downgrade);
	if (!d)
		return;

	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	for_each_downgrade_entry(d, i) {
		unsigned minor = BCH_VERSION_MINOR(le16_to_cpu(i->version));
		if (new_minor < minor && minor <= old_minor) {
			ext->recovery_passes_required[0] |= i->recovery_passes[0];
			ext->recovery_passes_required[1] |= i->recovery_passes[1];

			for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
				unsigned e = le16_to_cpu(i->errors[j]);
				if (e < BCH_FSCK_ERR_MAX)
					__set_bit(e, c->sb.errors_silent);
				if (e < sizeof(ext->errors_silent) * 8)
					__set_bit_le64(e, ext->errors_silent);
			}
		}
	}
}