xref: /linux/fs/bcachefs/journal_seq_blacklist.c (revision f6154d8babbb8a98f0d3ea325aafae2e33bfd8be)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_iter.h"
#include "eytzinger.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

/*
 * journal_seq_blacklist machinery:
 *
 * To guarantee order of btree updates after a crash, we need to detect when a
 * btree node entry (bset) is newer than the newest journal entry that was
 * successfully written, and ignore it - effectively ignoring any btree updates
 * that didn't make it into the journal.
 *
 * If we didn't do this, we might have two btree nodes, a and b, both with
 * updates that weren't written to the journal yet: if b was updated after a,
 * but b was flushed and not a - oops; on recovery we'll find that the updates
 * to b happened, but not the updates to a that happened before it.
 *
 * Ignoring bsets that are newer than the newest journal entry is always safe,
 * because everything they contain will also have been journalled - and must
 * still be present in the journal on disk until a journal entry has been
 * written _after_ that bset was written.
 *
 * To accomplish this, bsets record the newest journal sequence number they
 * contain updates for; then, on startup, the btree code queries the journal
 * code to ask "Is this sequence number newer than the newest journal entry? If
 * so, ignore it."
 *
 * When this happens, we must blacklist that journal sequence number: the
 * journal must not write any entries with that sequence number, and it must
 * record that it was blacklisted so that a) on recovery we don't think we have
 * missing journal entries and b) so that the btree code continues to ignore
 * that bset, until that btree node is rewritten.
 */
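
/*
 * Illustrative example (numbers made up, not taken from any caller): suppose
 * the newest journal entry that made it to disk has sequence number 100, but
 * on startup we find a bset whose journal_seq is 102.  That bset is ignored,
 * and seqs 101 and 102 get blacklisted by adding an entry covering them to the
 * superblock.  An entry {start, end} covers start <= seq < end (see
 * bch2_journal_seq_is_blacklisted() below), so here that entry would be
 * {101, 103}.  The journal then never writes entries with those seqs, and the
 * bset keeps being ignored until its btree node is rewritten.
 */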

/* Size, in u64s, of the superblock field needed to hold @nr blacklist entries: */
static unsigned sb_blacklist_u64s(unsigned nr)
{
	struct bch_sb_field_journal_seq_blacklist *bl;

	return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
}

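/*
 * If entries i and i + 1 now touch or overlap, merge them into entry i and
 * shrink the superblock field by one entry:
 */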
static struct bch_sb_field_journal_seq_blacklist *
blacklist_entry_try_merge(struct bch_fs *c,
			  struct bch_sb_field_journal_seq_blacklist *bl,
			  unsigned i)
{
	unsigned nr = blacklist_nr_entries(bl);

	if (le64_to_cpu(bl->start[i].end) >=
	    le64_to_cpu(bl->start[i + 1].start)) {
		bl->start[i].end = bl->start[i + 1].end;
		--nr;
		/* close the gap left by entry i + 1, keeping entry i's start: */
		memmove(&bl->start[i + 1],
			&bl->start[i + 2],
			sizeof(bl->start[0]) * (nr - (i + 1)));

		bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
					  sb_blacklist_u64s(nr));
		BUG_ON(!bl);
	}

	return bl;
}

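/* Does [start, end] touch or overlap the existing entry @e? */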
static bool bl_entry_contig_or_overlaps(struct journal_seq_blacklist_entry *e,
					u64 start, u64 end)
{
	return !(end < le64_to_cpu(e->start) || le64_to_cpu(e->end) < start);
}

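/*
 * Record [start, end) as blacklisted in the superblock: extend and merge with
 * existing entries where possible, otherwise append a new entry, then write
 * the superblock and rebuild the in-memory lookup table:
 */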
int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
{
	struct bch_sb_field_journal_seq_blacklist *bl;
	unsigned i, nr;
	int ret = 0;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e =
			bl->start + i;

		if (bl_entry_contig_or_overlaps(e, start, end)) {
			e->start = cpu_to_le64(min(start, le64_to_cpu(e->start)));
			e->end	= cpu_to_le64(max(end, le64_to_cpu(e->end)));

			if (i + 1 < nr)
				bl = blacklist_entry_try_merge(c,
							bl, i);
			if (i)
				bl = blacklist_entry_try_merge(c,
							bl, i - 1);
			goto out_write_sb;
		}
	}

	bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				  sb_blacklist_u64s(nr + 1));
	if (!bl) {
		ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
		goto out;
	}

	bl->start[nr].start	= cpu_to_le64(start);
	bl->start[nr].end	= cpu_to_le64(end);
out_write_sb:
	c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);

	ret = bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret ?: bch2_blacklist_table_initialize(c);
}

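/* Order table entries by their start seq, for sorting and eytzinger search: */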
static int journal_seq_blacklist_table_cmp(const void *_l,
					   const void *_r, size_t size)
{
	const struct journal_seq_blacklist_table_entry *l = _l;
	const struct journal_seq_blacklist_table_entry *r = _r;

	return cmp_int(l->start, r->start);
}

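/*
 * Look up @seq in the in-memory table: find the entry with the greatest start
 * <= seq and check whether seq is below that entry's (exclusive) end.  If
 * @dirty, also mark the entry as still in use, for bch2_blacklist_entries_gc():
 */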
bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
				     bool dirty)
{
	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
	struct journal_seq_blacklist_table_entry search = { .start = seq };
	int idx;

	if (!t)
		return false;

	idx = eytzinger0_find_le(t->entries, t->nr,
				 sizeof(t->entries[0]),
				 journal_seq_blacklist_table_cmp,
				 &search);
	if (idx < 0)
		return false;

	BUG_ON(t->entries[idx].start > seq);

	if (seq >= t->entries[idx].end)
		return false;

	if (dirty)
		t->entries[idx].dirty = true;
	return true;
}

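/*
 * (Re)build the in-memory lookup table from the superblock field, sorted into
 * eytzinger order for fast lookups in bch2_journal_seq_is_blacklisted():
 */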
int bch2_blacklist_table_initialize(struct bch_fs *c)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	struct journal_seq_blacklist_table *t;
	unsigned i, nr = blacklist_nr_entries(bl);

	if (!bl)
		return 0;

	t = kzalloc(sizeof(*t) + sizeof(t->entries[0]) * nr,
		    GFP_KERNEL);
	if (!t)
		return -BCH_ERR_ENOMEM_blacklist_table_init;

	t->nr = nr;

	for (i = 0; i < nr; i++) {
		t->entries[i].start	= le64_to_cpu(bl->start[i].start);
		t->entries[i].end	= le64_to_cpu(bl->start[i].end);
	}

	eytzinger0_sort(t->entries,
			t->nr,
			sizeof(t->entries[0]),
			journal_seq_blacklist_table_cmp,
			NULL);

	kfree(c->journal_seq_blacklist_table);
	c->journal_seq_blacklist_table = t;
	return 0;
}

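/*
 * Superblock field validation: each entry must have start < end, and entries
 * must be sorted and non-overlapping:
 */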
static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
						  struct bch_sb_field *f,
						  struct printbuf *err)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	unsigned i, nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e = bl->start + i;

		if (le64_to_cpu(e->start) >=
		    le64_to_cpu(e->end)) {
			prt_printf(err, "entry %u start >= end (%llu >= %llu)",
			       i, le64_to_cpu(e->start), le64_to_cpu(e->end));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}

		if (i + 1 < nr &&
		    le64_to_cpu(e[0].end) >
		    le64_to_cpu(e[1].start)) {
			prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
			       i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}
	}

	return 0;
}

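/* Print the blacklist as a space-separated list of start-end ranges: */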
static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
						  struct bch_sb *sb,
						  struct bch_sb_field *f)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	struct journal_seq_blacklist_entry *i;
	unsigned nr = blacklist_nr_entries(bl);

	for (i = bl->start; i < bl->start + nr; i++) {
		if (i != bl->start)
			prt_printf(out, " ");

		prt_printf(out, "%llu-%llu",
		       le64_to_cpu(i->start),
		       le64_to_cpu(i->end));
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
	.validate	= bch2_sb_journal_seq_blacklist_validate,
	.to_text	= bch2_sb_journal_seq_blacklist_to_text
};

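/*
 * Garbage collect blacklist entries that are no longer needed: walk every
 * btree so that reading each node marks any blacklist entry it still
 * references as dirty (bch2_journal_seq_is_blacklisted() with @dirty set),
 * then drop the entries that were never marked and shrink the superblock
 * field:
 */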
void bch2_blacklist_entries_gc(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					journal_seq_blacklist_gc_work);
	struct journal_seq_blacklist_table *t;
	struct bch_sb_field_journal_seq_blacklist *bl;
	struct journal_seq_blacklist_entry *src, *dst;
	struct btree_trans *trans = bch2_trans_get(c);
	unsigned i, nr, new_nr;
	int ret;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_iter iter;
		struct btree *b;

		bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
					  0, 0, BTREE_ITER_PREFETCH);
retry:
		bch2_trans_begin(trans);

		b = bch2_btree_iter_peek_node(&iter);

		while (!(ret = PTR_ERR_OR_ZERO(b)) &&
		       b &&
		       !test_bit(BCH_FS_STOPPING, &c->flags))
			b = bch2_btree_iter_next_node(&iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;

		bch2_trans_iter_exit(trans, &iter);
	}

	bch2_trans_put(trans);
	if (ret)
		return;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	if (!bl)
		goto out;

	nr = blacklist_nr_entries(bl);
	dst = bl->start;

	t = c->journal_seq_blacklist_table;
	BUG_ON(nr != t->nr);

	for (src = bl->start, i = eytzinger0_first(t->nr);
	     src < bl->start + nr;
	     src++, i = eytzinger0_next(i, nr)) {
		BUG_ON(t->entries[i].start	!= le64_to_cpu(src->start));
		BUG_ON(t->entries[i].end	!= le64_to_cpu(src->end));

		if (t->entries[i].dirty)
			*dst++ = *src;
	}

	new_nr = dst - bl->start;

	bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);

	if (new_nr != nr) {
		bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				new_nr ? sb_blacklist_u64s(new_nr) : 0);
		BUG_ON(new_nr && !bl);

		if (!new_nr)
			c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));

		bch2_write_super(c);
	}
out:
	mutex_unlock(&c->sb_lock);
}
321