// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bset.h"
#include "btree_journal_iter.h"
#include "journal_io.h"

#include <linux/sort.h>

/*
 * For managing keys we read from the journal: until journal replay completes,
 * normal btree lookups need to be able to find and return keys from the
 * journal where they overwrite what's in the btree, so we have a special
 * iterator and operations for the regular btree iter code to use:
 */
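
/*
 * The keys are kept in one sorted array (struct journal_keys), organized as a
 * gap buffer: a hole of unused slots sits at the last insertion point, so
 * that inserting keys in roughly sorted order doesn't have to shift the whole
 * tail of the array every time.
 */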

static int __journal_key_cmp(enum btree_id	l_btree_id,
			     unsigned		l_level,
			     struct bpos	l_pos,
			     const struct journal_key *r)
{
	return (cmp_int(l_btree_id,	r->btree_id) ?:
		cmp_int(l_level,	r->level) ?:
		bpos_cmp(l_pos,	r->k->k.p));
}

static int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
{
	return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
}

static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
{
	size_t gap_size = keys->size - keys->nr;

	if (idx >= keys->gap)
		idx += gap_size;
	return idx;
}

static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
{
	return keys->d + idx_to_pos(keys, idx);
}

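/*
 * Example (hypothetical numbers): with size == 8, nr == 5 and gap == 2 the
 * array looks like
 *
 *	pos: 0    1    2    3    4    5    6    7
 *	d:  [k0] [k1] [ -- gap slots -- ] [k2] [k3] [k4]
 *
 * so idx_to_pos(keys, 3) == 3 + (8 - 5) == 6. The binary search below runs on
 * the dense indices 0..nr, converting to array positions via idx_to_key():
 */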
static size_t __bch2_journal_key_search(struct journal_keys *keys,
					enum btree_id id, unsigned level,
					struct bpos pos)
{
	size_t l = 0, r = keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < keys->nr &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);

	return l;
}

static size_t bch2_journal_key_search(struct journal_keys *keys,
				      enum btree_id id, unsigned level,
				      struct bpos pos)
{
	return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
}

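/*
 * Return the first non-overwritten key in [pos, end_pos], or NULL. *idx
 * caches the array index between calls; if we've scanned too far from the
 * cached index without finding anything (it may be stale, e.g. after
 * insertions), zero it and redo the binary search:
 */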
struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos,
					   struct bpos end_pos, size_t *idx)
{
	struct journal_keys *keys = &c->journal_keys;
	unsigned iters = 0;
	struct journal_key *k;
search:
	if (!*idx)
		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);

	while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
		if (__journal_key_cmp(btree_id, level, end_pos, k) < 0)
			return NULL;

		if (__journal_key_cmp(btree_id, level, pos, k) <= 0 &&
		    !k->overwritten)
			return k->k;

		(*idx)++;
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	return NULL;
}

struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos)
{
	size_t idx = 0;

	return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
}

static void journal_iters_fix(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	/* The key we just inserted is immediately before the gap: */
	size_t gap_end = keys->gap + (keys->size - keys->nr);
	struct btree_and_journal_iter *iter;

	/*
	 * If an iterator points one after the key we just inserted, decrement
	 * the iterator so it points at the key we just inserted - if the
	 * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
	 * handle that:
	 */
	list_for_each_entry(iter, &c->journal_iters, journal.list)
		if (iter->journal.idx == gap_end)
			iter->journal.idx = keys->gap - 1;
}

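/*
 * Iterator indices are positions in the gapped array; when the gap moves,
 * remap them: subtract the gap size if the index was past the old gap (giving
 * a dense index), then re-add it if that index lands at or after the new gap:
 */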
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	size_t gap_size = keys->size - keys->nr;

	list_for_each_entry(iter, &c->journal_iters, list) {
		if (iter->idx > old_gap)
			iter->idx -= gap_size;
		if (iter->idx >= new_gap)
			iter->idx += gap_size;
	}
}

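/*
 * Insert @k into the journal keys, taking ownership: on success it's freed
 * along with the journal keys, on failure the caller still owns it. A key at
 * the same position is replaced in place; otherwise we move the gap to the
 * insert position, growing the array first if it's full:
 */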
int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true,
		/*
		 * Ensure these keys are done last by journal replay, to unblock
		 * journal reclaim:
		 */
		.journal_seq	= U32_MAX,
	};
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);

	BUG_ON(test_bit(BCH_FS_RW, &c->flags));

	if (idx < keys->size &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (idx > keys->gap)
		idx -= keys->size - keys->nr;

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr			= keys->nr,
			.size			= max_t(size_t, keys->size, 8) * 2,
		};

		new_keys.d = kvmalloc_array(new_keys.size, sizeof(new_keys.d[0]), GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -BCH_ERR_ENOMEM_journal_key_insert;
		}

		/* Since @keys was full, there was no gap: */
		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		*keys = new_keys;

		/* And now the gap is at the end: */
		keys->gap = keys->nr;
	}

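	/*
	 * Move the gap to the insert position - updating live iterators
	 * first, then the array itself - and drop the new key into the first
	 * gap slot:
	 */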
	journal_iters_move_gap(c, keys->gap, idx);

	move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
	keys->gap = idx;

	keys->nr++;
	keys->d[keys->gap++] = n;

	journal_iters_fix(c);

	return 0;
}

/*
 * Can only be used from the recovery thread while we're still RO - can't be
 * used once we've gone RW, as journal_keys is used by multiple threads at
 * that point:
 */
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -BCH_ERR_ENOMEM_journal_key_insert;

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}

int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}

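/*
 * Mark the journal key at @pos, if present, as overwritten: peek operations
 * skip overwritten keys, so lookups will return what's in the btree instead:
 */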
void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (idx < keys->size &&
	    keys->d[idx].btree_id	== btree &&
	    keys->d[idx].level		== level &&
	    bpos_eq(keys->d[idx].k->k.p, pos))
		keys->d[idx].overwritten = true;
}

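/* Step to the next key, hopping over the gap if we land on it: */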
static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->size) {
		iter->idx++;
		if (iter->idx == iter->keys->gap)
			iter->idx += iter->keys->size - iter->keys->nr;
	}
}

static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->keys->d + iter->idx;

	while (k < iter->keys->d + iter->keys->size &&
	       k->btree_id	== iter->btree_id &&
	       k->level		== iter->level) {
		if (!k->overwritten)
			return bkey_i_to_s_c(k->k);

		bch2_journal_iter_advance(iter);
		k = iter->keys->d + iter->idx;
	}

	return bkey_s_c_null;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= bch2_journal_key_search(&c->journal_keys, id, level, pos);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	if (bpos_eq(iter->pos, SPOS_MAX))
		iter->at_end = true;
	else
		iter->pos = bpos_successor(iter->pos);
}

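/*
 * Peek the merged view: return whichever of the next btree key and the next
 * journal key sorts first. On a tie the journal key wins - it's the newer
 * version - and deleted keys (whiteouts) are skipped by advancing and
 * retrying:
 */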
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c btree_k, journal_k, ret;
again:
	if (iter->at_end)
		return bkey_s_c_null;

	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
	       bpos_lt(btree_k.k->p, iter->pos))
		bch2_journal_iter_advance_btree(iter);

	while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
	       bpos_lt(journal_k.k->p, iter->pos))
		bch2_journal_iter_advance(&iter->journal);

	ret = journal_k.k &&
		(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
		? journal_k
		: btree_k;

	if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
		ret = bkey_s_c_null;

	if (ret.k) {
		iter->pos = ret.k->p;
		if (bkey_deleted(ret.k)) {
			bch2_btree_and_journal_iter_advance(iter);
			goto again;
		}
	} else {
		iter->pos = SPOS_MAX;
		iter->at_end = true;
	}

	return ret;
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void __bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						  struct bch_fs *c,
						  struct btree *b,
						  struct btree_node_iter node_iter,
						  struct bpos pos)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	iter->node_iter = node_iter;
	bch2_journal_iter_init(c, &iter->journal, b->c.btree_id, b->c.level, pos);
	INIT_LIST_HEAD(&iter->journal.list);
	iter->pos = b->data->min_key;
	iter->at_end = false;
}

/*
 * this version is used by btree_gc before the filesystem has gone RW and
 * multithreaded, so it uses the journal_iters list:
 */
void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	struct btree_node_iter node_iter;

	bch2_btree_node_iter_init_from_start(&node_iter, b);
	__bch2_btree_and_journal_iter_init_node_iter(iter, c, b, node_iter, b->data->min_key);
	list_add(&iter->journal.list, &c->journal_iters);
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct bch_fs *c)
{
	struct journal_replay **i;
	struct genradix_iter iter;

	genradix_for_each(&c->journal_entries, iter, i)
		if (*i)
			kvpfree(*i, offsetof(struct journal_replay, j) +
				vstruct_bytes(&(*i)->j));
	genradix_free(&c->journal_entries);
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  journal_key_cmp(l, r) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

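/*
 * Free the journal keys: close the gap first so the keys are contiguous and
 * can be walked with a plain loop:
 */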
void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = keys->gap = keys->size = 0;
}

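/*
 * journal_sort_key_cmp() sorts the oldest of equal keys first, so the dedup
 * pass below keeps only the last - i.e. newest - version of each key:
 */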
static void __journal_keys_sort(struct journal_keys *keys)
{
	struct journal_key *src, *dst;

	sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);

	src = dst = keys->d;
	while (src < keys->d + keys->nr) {
		while (src + 1 < keys->d + keys->nr &&
		       src[0].btree_id	== src[1].btree_id &&
		       src[0].level	== src[1].level &&
		       bpos_eq(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys->nr = dst - keys->d;
}

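/*
 * Gather every key from the read journal entries into one sorted, deduped
 * array. If the initial allocation fails, retry with successively smaller
 * buffers, compacting (sort + dedup) whenever the buffer fills up; if
 * compacting no longer frees enough space, give up and return an error:
 */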
int bch2_journal_keys_sort(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct journal_replay *i, **_i;
	struct jset_entry *entry;
	struct bkey_i *k;
	struct journal_keys *keys = &c->journal_keys;
	size_t nr_keys = 0, nr_read = 0;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		for_each_jset_key(k, entry, &i->j)
			nr_keys++;
	}

	if (!nr_keys)
		return 0;

	keys->size = roundup_pow_of_two(nr_keys);

	keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
	if (!keys->d) {
		bch_err(c, "Failed to allocate buffer for sorted journal keys (%zu keys); trying slowpath",
			nr_keys);

		do {
			keys->size >>= 1;
			keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
		} while (!keys->d && keys->size > nr_keys / 8);

		if (!keys->d) {
			bch_err(c, "Failed to allocate %zu size buffer for sorted journal keys; exiting",
				keys->size);
			return -BCH_ERR_ENOMEM_journal_keys_sort;
		}
	}

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		cond_resched();

		for_each_jset_key(k, entry, &i->j) {
			if (keys->nr == keys->size) {
				__journal_keys_sort(keys);

				if (keys->nr > keys->size * 7 / 8) {
					bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu/%zu",
						keys->nr, keys->size, nr_read, nr_keys);
					return -BCH_ERR_ENOMEM_journal_keys_sort;
				}
			}

			keys->d[keys->nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq),
				.journal_offset	= k->_data - i->j._data,
			};

			nr_read++;
		}
	}

	__journal_keys_sort(keys);
	keys->gap = keys->nr;

	bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_keys, keys->nr);
	return 0;
}