1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "eytzinger.h"
5 #include "journal.h"
6 #include "journal_seq_blacklist.h"
7 #include "super-io.h"
8
9 /*
10 * journal_seq_blacklist machinery:
11 *
12 * To guarantee order of btree updates after a crash, we need to detect when a
13 * btree node entry (bset) is newer than the newest journal entry that was
14 * successfully written, and ignore it - effectively ignoring any btree updates
15 * that didn't make it into the journal.
16 *
17 * If we didn't do this, we might have two btree nodes, a and b, both with
18 * updates that weren't written to the journal yet: if b was updated after a,
19 * but b was flushed and not a - oops; on recovery we'll find that the updates
20 * to b happened, but not the updates to a that happened before it.
21 *
22 * Ignoring bsets that are newer than the newest journal entry is always safe,
23 * because everything they contain will also have been journalled - and must
24 * still be present in the journal on disk until a journal entry has been
25 * written _after_ that bset was written.
26 *
27 * To accomplish this, bsets record the newest journal sequence number they
28 * contain updates for; then, on startup, the btree code queries the journal
29 * code to ask "Is this sequence number newer than the newest journal entry? If
30 * so, ignore it."
31 *
32 * When this happens, we must blacklist that journal sequence number: the
33 * journal must not write any entries with that sequence number, and it must
34 * record that it was blacklisted so that a) on recovery we don't think we have
35 * missing journal entries and b) so that the btree code continues to ignore
36 * that bset, until that btree node is rewritten.
37 */
38
sb_blacklist_u64s(unsigned nr)39 static unsigned sb_blacklist_u64s(unsigned nr)
40 {
41 struct bch_sb_field_journal_seq_blacklist *bl;
42
43 return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
44 }
45
/*
 * Blacklist the journal sequence number range [start, end]: record it in the
 * superblock blacklist field, merging with any existing entries it touches,
 * then rebuild the in-memory lookup table.
 *
 * Returns 0 on success, or an error from superblock resize/write or table
 * initialization.
 */
int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
{
	struct bch_sb_field_journal_seq_blacklist *bl;
	unsigned i = 0, nr;
	int ret = 0;

	/* Superblock fields are protected by sb_lock: */
	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	nr = blacklist_nr_entries(bl);

	/*
	 * Entries are kept sorted and non-overlapping; scan forward to find
	 * the insert position, absorbing any entries the new range overlaps:
	 */
	while (i < nr) {
		struct journal_seq_blacklist_entry *e =
			bl->start + i;

		/* All remaining entries start after the new range - done: */
		if (end < le64_to_cpu(e->start))
			break;

		/* Entry ends before the new range starts - skip it: */
		if (start > le64_to_cpu(e->end)) {
			i++;
			continue;
		}

		/*
		 * Entry is contiguous or overlapping with new entry: merge it
		 * with new entry, and delete:
		 */

		start	= min(start,	le64_to_cpu(e->start));
		end	= max(end,	le64_to_cpu(e->end));
		/* array_remove_item() decrements nr; i deliberately not advanced */
		array_remove_item(bl->start, nr, i);
	}

	/* Make room for the (possibly merged) new entry: */
	bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				  sb_blacklist_u64s(nr + 1));
	if (!bl) {
		ret = bch_err_throw(c, ENOSPC_sb_journal_seq_blacklist);
		goto out;
	}

	/* Insert at position i, keeping the array sorted by start: */
	array_insert_item(bl->start, nr, i, ((struct journal_seq_blacklist_entry) {
		.start	= cpu_to_le64(start),
		.end	= cpu_to_le64(end),
	}));
	c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);

	ret = bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	/* Rebuild the in-memory table to match the updated superblock field: */
	return ret ?: bch2_blacklist_table_initialize(c);
}
97
journal_seq_blacklist_table_cmp(const void * _l,const void * _r)98 static int journal_seq_blacklist_table_cmp(const void *_l, const void *_r)
99 {
100 const struct journal_seq_blacklist_table_entry *l = _l;
101 const struct journal_seq_blacklist_table_entry *r = _r;
102
103 return cmp_int(l->start, r->start);
104 }
105
/*
 * Test whether @seq falls within a blacklisted range; if so and @dirty is
 * set, mark the matching table entry dirty so GC won't discard it.
 */
bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
				     bool dirty)
{
	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;

	if (!t)
		return false;

	/* Find the last entry whose start is <= seq: */
	struct journal_seq_blacklist_table_entry search = { .start = seq };
	int i = eytzinger0_find_le(t->entries, t->nr,
				   sizeof(t->entries[0]),
				   journal_seq_blacklist_table_cmp,
				   &search);
	if (i < 0)
		return false;

	BUG_ON(t->entries[i].start > seq);

	/* Entry ends before seq - not blacklisted: */
	if (seq >= t->entries[i].end)
		return false;

	if (dirty)
		t->entries[i].dirty = true;
	return true;
}
132
bch2_journal_last_blacklisted_seq(struct bch_fs * c)133 u64 bch2_journal_last_blacklisted_seq(struct bch_fs *c)
134 {
135 struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
136
137 if (!t || !t->nr)
138 return 0;
139
140 return t->entries[eytzinger0_last(t->nr)].end - 1;
141 }
142
bch2_blacklist_table_initialize(struct bch_fs * c)143 int bch2_blacklist_table_initialize(struct bch_fs *c)
144 {
145 struct bch_sb_field_journal_seq_blacklist *bl =
146 bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
147 struct journal_seq_blacklist_table *t;
148 unsigned i, nr = blacklist_nr_entries(bl);
149
150 if (!bl)
151 return 0;
152
153 t = kzalloc(struct_size(t, entries, nr), GFP_KERNEL);
154 if (!t)
155 return bch_err_throw(c, ENOMEM_blacklist_table_init);
156
157 t->nr = nr;
158
159 for (i = 0; i < nr; i++) {
160 t->entries[i].start = le64_to_cpu(bl->start[i].start);
161 t->entries[i].end = le64_to_cpu(bl->start[i].end);
162 }
163
164 eytzinger0_sort(t->entries,
165 t->nr,
166 sizeof(t->entries[0]),
167 journal_seq_blacklist_table_cmp,
168 NULL);
169
170 kfree(c->journal_seq_blacklist_table);
171 c->journal_seq_blacklist_table = t;
172 return 0;
173 }
174
bch2_sb_journal_seq_blacklist_validate(struct bch_sb * sb,struct bch_sb_field * f,enum bch_validate_flags flags,struct printbuf * err)175 static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb, struct bch_sb_field *f,
176 enum bch_validate_flags flags, struct printbuf *err)
177 {
178 struct bch_sb_field_journal_seq_blacklist *bl =
179 field_to_type(f, journal_seq_blacklist);
180 unsigned i, nr = blacklist_nr_entries(bl);
181
182 for (i = 0; i < nr; i++) {
183 struct journal_seq_blacklist_entry *e = bl->start + i;
184
185 if (le64_to_cpu(e->start) >=
186 le64_to_cpu(e->end)) {
187 prt_printf(err, "entry %u start >= end (%llu >= %llu)",
188 i, le64_to_cpu(e->start), le64_to_cpu(e->end));
189 return -BCH_ERR_invalid_sb_journal_seq_blacklist;
190 }
191
192 if (i + 1 < nr &&
193 le64_to_cpu(e[0].end) >
194 le64_to_cpu(e[1].start)) {
195 prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
196 i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
197 return -BCH_ERR_invalid_sb_journal_seq_blacklist;
198 }
199 }
200
201 return 0;
202 }
203
bch2_sb_journal_seq_blacklist_to_text(struct printbuf * out,struct bch_sb * sb,struct bch_sb_field * f)204 static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
205 struct bch_sb *sb,
206 struct bch_sb_field *f)
207 {
208 struct bch_sb_field_journal_seq_blacklist *bl =
209 field_to_type(f, journal_seq_blacklist);
210 struct journal_seq_blacklist_entry *i;
211 unsigned nr = blacklist_nr_entries(bl);
212
213 for (i = bl->start; i < bl->start + nr; i++) {
214 if (i != bl->start)
215 prt_printf(out, " ");
216
217 prt_printf(out, "%llu-%llu",
218 le64_to_cpu(i->start),
219 le64_to_cpu(i->end));
220 }
221 prt_newline(out);
222 }
223
/* Superblock field ops for the journal_seq_blacklist section. */
const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
	.validate = bch2_sb_journal_seq_blacklist_validate,
	.to_text = bch2_sb_journal_seq_blacklist_to_text
};
228
/*
 * Garbage-collect the superblock blacklist field: drop entries that are not
 * dirty (no bset referenced them - see bch2_journal_seq_is_blacklisted())
 * and whose range lies entirely before the oldest journal entry still found
 * on disk, compacting the survivors in place.
 *
 * Returns true if any entries were dropped (the superblock needs writing),
 * false if nothing changed.
 */
bool bch2_blacklist_entries_gc(struct bch_fs *c)
{
	struct journal_seq_blacklist_entry *src, *dst;

	struct bch_sb_field_journal_seq_blacklist *bl =
		bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	if (!bl)
		return false;

	unsigned nr = blacklist_nr_entries(bl);
	dst = bl->start;

	/* In-memory table must be in sync with the superblock field: */
	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
	BUG_ON(nr != t->nr);

	src = bl->start;
	/*
	 * NOTE(review): relies on eytzinger0_for_each() visiting table
	 * entries in ascending order, matching the sorted sb field walked
	 * linearly by src - the BUG_ONs below enforce this invariant.
	 */
	eytzinger0_for_each(i, nr) {
		BUG_ON(t->entries[i].start	!= le64_to_cpu(src->start));
		BUG_ON(t->entries[i].end	!= le64_to_cpu(src->end));

		/* Keep entries still referenced or not yet past the journal: */
		if (t->entries[i].dirty || t->entries[i].end >= c->journal.oldest_seq_found_ondisk)
			*dst++ = *src;
		src++;
	}

	unsigned new_nr = dst - bl->start;
	if (new_nr == nr)
		return false;

	bch_verbose(c, "nr blacklist entries was %u, now %u", nr, new_nr);

	/* Shrink (or delete, if empty) the superblock field to fit: */
	bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				  new_nr ? sb_blacklist_u64s(new_nr) : 0);
	BUG_ON(new_nr && !bl);
	return true;
}
265