// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_iter.h"
#include "eytzinger.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

/*
 * journal_seq_blacklist machinery:
 *
 * To guarantee the order of btree updates after a crash, we need to detect
 * when a btree node entry (bset) is newer than the newest journal entry that
 * was successfully written, and ignore it - effectively ignoring any btree
 * updates that didn't make it into the journal.
 *
 * If we didn't do this, we might have two btree nodes, a and b, both with
 * updates that weren't written to the journal yet: if b was updated after a,
 * but b was flushed and not a - oops; on recovery we'll find that the updates
 * to b happened, but not the updates to a that happened before it.
 *
 * Ignoring bsets that are newer than the newest journal entry is always safe,
 * because everything they contain will also have been journalled - and must
 * still be present in the journal on disk until a journal entry has been
 * written _after_ that bset was written.
 *
 * To accomplish this, bsets record the newest journal sequence number they
 * contain updates for; then, on startup, the btree code queries the journal
 * code to ask "Is this sequence number newer than the newest journal entry?
 * If so, ignore it."
 *
 * When this happens, we must blacklist that journal sequence number: the
 * journal must not write any entries with that sequence number, and it must
 * record that the number was blacklisted so that a) on recovery we don't
 * think we have missing journal entries, and b) the btree code continues to
 * ignore that bset until the btree node containing it is rewritten.
 */
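
/*
 * Rough usage sketch - illustrative only; the real call sites are in the
 * btree node read and journal replay paths, and the variable names here are
 * made up:
 *
 *	// While validating bset i during a btree node read, drop it if its
 *	// newest journal seq falls in a blacklisted range - its updates
 *	// never made it into the journal:
 *	if (bch2_journal_seq_is_blacklisted(c, le64_to_cpu(i->journal_seq), true))
 *		goto drop_bset;
 *
 *	// During recovery, blacklist a range of seqs that must never be
 *	// written or trusted again (end is exclusive):
 *	ret = bch2_journal_seq_blacklist_add(c, blacklist_start, blacklist_end);
 */
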
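/* Size, in u64s, of a superblock blacklist field with @nr entries: */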
static unsigned sb_blacklist_u64s(unsigned nr)
{
	struct bch_sb_field_journal_seq_blacklist *bl;

	return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
}

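/*
 * Blacklist the half-open range [start, end) of journal sequence numbers:
 * any existing entries that overlap or are contiguous with the new range are
 * absorbed into it (e.g. adding [10, 20) when [5, 12) exists results in a
 * single [5, 20) entry), then the superblock is rewritten and the in-memory
 * table rebuilt.
 */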
int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
{
	struct bch_sb_field_journal_seq_blacklist *bl;
	unsigned i = 0, nr;
	int ret = 0;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	nr = blacklist_nr_entries(bl);

	while (i < nr) {
		struct journal_seq_blacklist_entry *e =
			bl->start + i;

		if (end < le64_to_cpu(e->start))
			break;

		if (start > le64_to_cpu(e->end)) {
			i++;
			continue;
		}

		/*
		 * Entry is contiguous with or overlaps the new range:
		 * absorb it into the new range and delete it:
		 */

		start	= min(start,	le64_to_cpu(e->start));
		end	= max(end,	le64_to_cpu(e->end));
		array_remove_item(bl->start, nr, i);
	}

	bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				  sb_blacklist_u64s(nr + 1));
	if (!bl) {
		ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
		goto out;
	}

	array_insert_item(bl->start, nr, i, ((struct journal_seq_blacklist_entry) {
		.start	= cpu_to_le64(start),
		.end	= cpu_to_le64(end),
	}));
	c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);

	ret = bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret ?: bch2_blacklist_table_initialize(c);
}

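/* Order table entries by start; used for both sorting and searching: */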
static int journal_seq_blacklist_table_cmp(const void *_l,
					   const void *_r, size_t size)
{
	const struct journal_seq_blacklist_table_entry *l = _l;
	const struct journal_seq_blacklist_table_entry *r = _r;

	return cmp_int(l->start, r->start);
}

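/*
 * Returns true if @seq is in a blacklisted range. If @dirty is set, also mark
 * the matching entry dirty, so that bch2_blacklist_entries_gc() knows it's
 * still in use.
 */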
bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
				     bool dirty)
{
	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
	struct journal_seq_blacklist_table_entry search = { .start = seq };
	int idx;

	if (!t)
		return false;

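	/* Find the last entry whose start is <= @seq: */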
	idx = eytzinger0_find_le(t->entries, t->nr,
				 sizeof(t->entries[0]),
				 journal_seq_blacklist_table_cmp,
				 &search);
	if (idx < 0)
		return false;

	BUG_ON(t->entries[idx].start > seq);

	if (seq >= t->entries[idx].end)
		return false;

	if (dirty)
		t->entries[idx].dirty = true;
	return true;
}

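/*
 * (Re)build the in-memory lookup table from the superblock field: entries are
 * copied out and then sorted into eytzinger order for cache-friendly binary
 * search.
 */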
int bch2_blacklist_table_initialize(struct bch_fs *c)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	struct journal_seq_blacklist_table *t;
	unsigned i, nr = blacklist_nr_entries(bl);

	if (!bl)
		return 0;

	t = kzalloc(struct_size(t, entries, nr), GFP_KERNEL);
	if (!t)
		return -BCH_ERR_ENOMEM_blacklist_table_init;

	t->nr = nr;

	for (i = 0; i < nr; i++) {
		t->entries[i].start	= le64_to_cpu(bl->start[i].start);
		t->entries[i].end	= le64_to_cpu(bl->start[i].end);
	}

	eytzinger0_sort(t->entries,
			t->nr,
			sizeof(t->entries[0]),
			journal_seq_blacklist_table_cmp,
			NULL);

	kfree(c->journal_seq_blacklist_table);
	c->journal_seq_blacklist_table = t;
	return 0;
}

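/*
 * Superblock validation: each entry must be a non-empty range, and entries
 * must be sorted and non-overlapping.
 */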
static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
						  struct bch_sb_field *f,
						  struct printbuf *err)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	unsigned i, nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e = bl->start + i;

		if (le64_to_cpu(e->start) >=
		    le64_to_cpu(e->end)) {
			prt_printf(err, "entry %u start >= end (%llu >= %llu)",
				   i, le64_to_cpu(e->start), le64_to_cpu(e->end));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}

		if (i + 1 < nr &&
		    le64_to_cpu(e[0].end) >
		    le64_to_cpu(e[1].start)) {
			prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
				   i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}
	}

	return 0;
}

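/* Print the blacklist as a space-separated list of "start-end" ranges: */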
static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
						  struct bch_sb *sb,
						  struct bch_sb_field *f)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	struct journal_seq_blacklist_entry *i;
	unsigned nr = blacklist_nr_entries(bl);

	for (i = bl->start; i < bl->start + nr; i++) {
		if (i != bl->start)
			prt_printf(out, " ");

		prt_printf(out, "%llu-%llu",
			   le64_to_cpu(i->start),
			   le64_to_cpu(i->end));
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
	.validate	= bch2_sb_journal_seq_blacklist_validate,
	.to_text	= bch2_sb_journal_seq_blacklist_to_text,
};

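/*
 * Garbage collect unused blacklist entries: walk every btree so that every
 * live btree node is read - reading a node with a blacklisted bset marks the
 * corresponding table entry dirty via bch2_journal_seq_is_blacklisted() -
 * then drop the entries that nothing marked dirty, shrinking or deleting the
 * superblock field to match.
 */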
void bch2_blacklist_entries_gc(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					journal_seq_blacklist_gc_work);
	struct journal_seq_blacklist_table *t;
	struct bch_sb_field_journal_seq_blacklist *bl;
	struct journal_seq_blacklist_entry *src, *dst;
	struct btree_trans *trans = bch2_trans_get(c);
	unsigned i, nr, new_nr;
	int ret;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_iter iter;
		struct btree *b;

		bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
					  0, 0, BTREE_ITER_PREFETCH);
retry:
		bch2_trans_begin(trans);

		b = bch2_btree_iter_peek_node(&iter);

		while (!(ret = PTR_ERR_OR_ZERO(b)) &&
		       b &&
		       !test_bit(BCH_FS_stopping, &c->flags))
			b = bch2_btree_iter_next_node(&iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;

		bch2_trans_iter_exit(trans, &iter);
	}

	bch2_trans_put(trans);
	if (ret)
		return;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	if (!bl)
		goto out;

	nr = blacklist_nr_entries(bl);
	dst = bl->start;

	t = c->journal_seq_blacklist_table;
	BUG_ON(nr != t->nr);

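	/*
	 * The superblock entries are in sorted order, and eytzinger0_first()/
	 * eytzinger0_next() visit the table in sorted order too, so walk both
	 * in lockstep and keep only the entries marked dirty:
	 */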
	for (src = bl->start, i = eytzinger0_first(t->nr);
	     src < bl->start + nr;
	     src++, i = eytzinger0_next(i, nr)) {
		BUG_ON(t->entries[i].start	!= le64_to_cpu(src->start));
		BUG_ON(t->entries[i].end	!= le64_to_cpu(src->end));

		if (t->entries[i].dirty)
			*dst++ = *src;
	}

	new_nr = dst - bl->start;

	bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);

	if (new_nr != nr) {
		bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
					  new_nr ? sb_blacklist_u64s(new_nr) : 0);
		BUG_ON(new_nr && !bl);

		if (!new_nr)
			c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));

		bch2_write_super(c);
	}
out:
	mutex_unlock(&c->sb_lock);
}