Lines Matching full:keys
13 * For managing keys we read from the journal: until journal replay works, normal in
14 * btree lookups need to be able to find and return keys from the journal where
19 static inline size_t pos_to_idx(struct journal_keys *keys, size_t pos) in pos_to_idx() argument
21 size_t gap_size = keys->size - keys->nr; in pos_to_idx()
23 BUG_ON(pos >= keys->gap && pos < keys->gap + gap_size); in pos_to_idx()
25 if (pos >= keys->gap) in pos_to_idx()
30 static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx) in idx_to_pos() argument
32 size_t gap_size = keys->size - keys->nr; in idx_to_pos()
34 if (idx >= keys->gap) in idx_to_pos()
39 static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx) in idx_to_key() argument
41 return keys->data + idx_to_pos(keys, idx); in idx_to_key()
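
The helpers above implement the index math for a gap buffer: live keys occupy array positions [0, gap) and [gap + gap_size, size), while logical indices 0..nr-1 skip the hole. Below is a minimal, self-contained sketch of the same mapping, using hypothetical names (gap_buf, gb_idx_to_pos, gb_pos_to_idx) rather than the bcachefs types:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical gap buffer: nr live entries in a buffer of size slots,
 * with the unused hole of (size - nr) slots starting at array position gap. */
struct gap_buf {
        size_t nr, size, gap;
};

/* Logical index -> array position: indices at or past the gap are
 * shifted right by the hole size. */
size_t gb_idx_to_pos(const struct gap_buf *b, size_t idx)
{
        size_t gap_size = b->size - b->nr;

        return idx < b->gap ? idx : idx + gap_size;
}

/* Array position -> logical index: positions inside the hole are invalid. */
size_t gb_pos_to_idx(const struct gap_buf *b, size_t pos)
{
        size_t gap_size = b->size - b->nr;

        assert(pos < b->gap || pos >= b->gap + gap_size);

        return pos < b->gap ? pos : pos - gap_size;
}

int main(void)
{
        /* 6 live entries in 10 slots; the hole covers positions 4..7. */
        struct gap_buf b = { .nr = 6, .size = 10, .gap = 4 };

        for (size_t i = 0; i < b.nr; i++)
                printf("idx %zu -> pos %zu\n", i, gb_idx_to_pos(&b, i));

        assert(gb_pos_to_idx(&b, 9) == 5);
        return 0;
}
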
44 static size_t __bch2_journal_key_search(struct journal_keys *keys, in __bch2_journal_key_search() argument
48 size_t l = 0, r = keys->nr, m; in __bch2_journal_key_search()
52 if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0) in __bch2_journal_key_search()
58 BUG_ON(l < keys->nr && in __bch2_journal_key_search()
59 __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0); in __bch2_journal_key_search()
62 __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0); in __bch2_journal_key_search()
67 static size_t bch2_journal_key_search(struct journal_keys *keys, in bch2_journal_key_search() argument
71 return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos)); in bch2_journal_key_search()
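
__bch2_journal_key_search() is a lower-bound binary search over logical indices (the BUG_ON()s assert that the result is the first key >= the search key), and bch2_journal_key_search() converts that index into an array position with idx_to_pos(). A rough sketch of the same lower-bound pattern over a plain sorted int array, with a hypothetical lower_bound() helper:

#include <stddef.h>

/*
 * Lower-bound binary search: returns the first index whose element is
 * >= target, or nr if every element is smaller.  E.g. for {1, 3, 3, 7}
 * and target 3 it returns 1; for target 4 it returns 3.
 */
size_t lower_bound(const int *elems, size_t nr, int target)
{
        size_t l = 0, r = nr;

        while (l < r) {
                size_t m = l + (r - l) / 2;

                if (target > elems[m])  /* target is still to the right */
                        l = m + 1;
                else
                        r = m;
        }

        /*
         * Post-conditions, mirroring the BUG_ON()s above:
         *   l == nr || elems[l] >= target
         *   l == 0  || elems[l - 1] < target
         */
        return l;
}
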
79 struct journal_keys *keys = &c->journal_keys; in bch2_journal_keys_peek_max() local
83 BUG_ON(*idx > keys->nr); in bch2_journal_keys_peek_max()
86 *idx = __bch2_journal_key_search(keys, btree_id, level, pos); in bch2_journal_keys_peek_max()
89 __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) { in bch2_journal_keys_peek_max()
101 while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) { in bch2_journal_keys_peek_max()
135 struct journal_keys *keys = &c->journal_keys; in bch2_journal_keys_peek_prev_min() local
139 BUG_ON(*idx > keys->nr); in bch2_journal_keys_peek_prev_min()
142 *idx = __bch2_journal_key_search(keys, btree_id, level, pos); in bch2_journal_keys_peek_prev_min()
145 __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) { in bch2_journal_keys_peek_prev_min()
157 while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) { in bch2_journal_keys_peek_prev_min()
197 struct journal_keys *keys = iter->keys; in journal_iter_verify() local
198 size_t gap_size = keys->size - keys->nr; in journal_iter_verify()
200 BUG_ON(iter->idx >= keys->gap && in journal_iter_verify()
201 iter->idx < keys->gap + gap_size); in journal_iter_verify()
203 if (iter->idx < keys->size) { in journal_iter_verify()
204 struct journal_key *k = keys->data + iter->idx; in journal_iter_verify()
214 struct journal_keys *keys = &c->journal_keys; in journal_iters_fix() local
216 size_t gap_end = keys->gap + (keys->size - keys->nr); in journal_iters_fix()
217 struct journal_key *new_key = &keys->data[keys->gap - 1]; in journal_iters_fix()
231 iter->idx = keys->gap - 1; in journal_iters_fix()
238 struct journal_keys *keys = &c->journal_keys; in journal_iters_move_gap() local
240 size_t gap_size = keys->size - keys->nr; in journal_iters_move_gap()
259 * Ensure these keys are done last by journal replay, to unblock in bch2_journal_key_insert_take()
264 struct journal_keys *keys = &c->journal_keys; in bch2_journal_key_insert_take() local
265 size_t idx = bch2_journal_key_search(keys, id, level, k->k.p); in bch2_journal_key_insert_take()
269 if (idx < keys->size && in bch2_journal_key_insert_take()
270 journal_key_cmp(&n, &keys->data[idx]) == 0) { in bch2_journal_key_insert_take()
271 if (keys->data[idx].allocated) in bch2_journal_key_insert_take()
272 kfree(keys->data[idx].k); in bch2_journal_key_insert_take()
273 keys->data[idx] = n; in bch2_journal_key_insert_take()
277 if (idx > keys->gap) in bch2_journal_key_insert_take()
278 idx -= keys->size - keys->nr; in bch2_journal_key_insert_take()
280 size_t old_gap = keys->gap; in bch2_journal_key_insert_take()
282 if (keys->nr == keys->size) { in bch2_journal_key_insert_take()
283 journal_iters_move_gap(c, old_gap, keys->size); in bch2_journal_key_insert_take()
284 old_gap = keys->size; in bch2_journal_key_insert_take()
287 .nr = keys->nr, in bch2_journal_key_insert_take()
288 .size = max_t(size_t, keys->size, 8) * 2, in bch2_journal_key_insert_take()
298 /* Since @keys was full, there was no gap: */ in bch2_journal_key_insert_take()
299 memcpy(new_keys.data, keys->data, sizeof(keys->data[0]) * keys->nr); in bch2_journal_key_insert_take()
300 kvfree(keys->data); in bch2_journal_key_insert_take()
301 keys->data = new_keys.data; in bch2_journal_key_insert_take()
302 keys->nr = new_keys.nr; in bch2_journal_key_insert_take()
303 keys->size = new_keys.size; in bch2_journal_key_insert_take()
306 keys->gap = keys->nr; in bch2_journal_key_insert_take()
311 move_gap(keys, idx); in bch2_journal_key_insert_take()
313 keys->nr++; in bch2_journal_key_insert_take()
314 keys->data[keys->gap++] = n; in bch2_journal_key_insert_take()
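
bch2_journal_key_insert_take() either overwrites an existing key with the same (btree_id, level, pos) in place, or inserts: when the buffer is full it reallocates to max(size, 8) * 2 slots, copies the gap-free contents, and puts the gap at the end; otherwise it moves the gap to the insert index and stores the new key at keys->gap++, fixing up any live iterators along the way. A rough sketch of just the move-gap-then-insert step for a plain int gap buffer, with hypothetical gb_move_gap()/gb_insert() helpers and no resizing or iterator fixup:

#include <stddef.h>
#include <string.h>

/* Hypothetical int gap buffer; the hole of (size - nr) slots starts at gap. */
struct gap_buf {
        int     *data;
        size_t  nr, size, gap;
};

/* Slide the hole so it starts just before logical index idx, shifting the
 * entries in between with a single memmove(). */
void gb_move_gap(struct gap_buf *b, size_t idx)
{
        size_t gap_size = b->size - b->nr;

        if (idx < b->gap)
                memmove(b->data + idx + gap_size, b->data + idx,
                        (b->gap - idx) * sizeof(b->data[0]));
        else if (idx > b->gap)
                memmove(b->data + b->gap, b->data + b->gap + gap_size,
                        (idx - b->gap) * sizeof(b->data[0]));
        b->gap = idx;
}

/* Insert v at logical index idx; the caller must have ensured nr < size. */
void gb_insert(struct gap_buf *b, size_t idx, int v)
{
        gb_move_gap(b, idx);
        b->nr++;
        b->data[b->gap++] = v;
}
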
357 struct journal_keys *keys = &trans->c->journal_keys; in bch2_key_deleted_in_journal() local
358 size_t idx = bch2_journal_key_search(keys, btree, level, pos); in bch2_key_deleted_in_journal()
363 return (idx < keys->size && in bch2_key_deleted_in_journal()
364 keys->data[idx].btree_id == btree && in bch2_key_deleted_in_journal()
365 keys->data[idx].level == level && in bch2_key_deleted_in_journal()
366 bpos_eq(keys->data[idx].k->k.p, pos) && in bch2_key_deleted_in_journal()
367 bkey_deleted(&keys->data[idx].k->k)); in bch2_key_deleted_in_journal()
370 static void __bch2_journal_key_overwritten(struct journal_keys *keys, size_t pos) in __bch2_journal_key_overwritten() argument
372 struct journal_key *k = keys->data + pos; in __bch2_journal_key_overwritten()
373 size_t idx = pos_to_idx(keys, pos); in __bch2_journal_key_overwritten()
377 struct journal_key *prev = idx > 0 ? keys->data + idx_to_pos(keys, idx - 1) : NULL; in __bch2_journal_key_overwritten()
378 struct journal_key *next = idx + 1 < keys->nr ? keys->data + idx_to_pos(keys, idx + 1) : NULL; in __bch2_journal_key_overwritten()
394 keys->data[pos].overwritten_range = prev_range; in __bch2_journal_key_overwritten()
396 struct journal_key *ip = keys->data + idx_to_pos(keys, i); in __bch2_journal_key_overwritten()
435 struct journal_keys *keys = &c->journal_keys; in bch2_journal_key_overwritten() local
436 size_t idx = bch2_journal_key_search(keys, btree, level, pos); in bch2_journal_key_overwritten()
438 if (idx < keys->size && in bch2_journal_key_overwritten()
439 keys->data[idx].btree_id == btree && in bch2_journal_key_overwritten()
440 keys->data[idx].level == level && in bch2_journal_key_overwritten()
441 bpos_eq(keys->data[idx].k->k.p, pos) && in bch2_journal_key_overwritten()
442 !keys->data[idx].overwritten) { in bch2_journal_key_overwritten()
443 mutex_lock(&keys->overwrite_lock); in bch2_journal_key_overwritten()
444 __bch2_journal_key_overwritten(keys, idx); in bch2_journal_key_overwritten()
445 mutex_unlock(&keys->overwrite_lock); in bch2_journal_key_overwritten()
451 if (iter->idx < iter->keys->size) { in bch2_journal_iter_advance()
453 if (iter->idx == iter->keys->gap) in bch2_journal_iter_advance()
454 iter->idx += iter->keys->size - iter->keys->nr; in bch2_journal_iter_advance()
465 while (iter->idx < iter->keys->size) { in bch2_journal_iter_peek()
466 struct journal_key *k = iter->keys->data + iter->idx; in bch2_journal_iter_peek()
479 iter->idx = idx_to_pos(iter->keys, rcu_dereference(k->overwritten_range)->end); in bch2_journal_iter_peek()
500 iter->keys = &c->journal_keys; in bch2_journal_iter_init()
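
bch2_journal_iter_advance() steps through raw array positions and hops over the hole as soon as it reaches keys->gap, while bch2_journal_iter_peek() additionally skips keys marked overwritten, jumping straight to the end of an overwritten range. A minimal sketch of just the gap hop, with hypothetical types:

#include <stddef.h>

struct gap_buf {
        size_t nr, size, gap;   /* hole of (size - nr) slots starts at gap */
};

struct gb_iter {
        struct gap_buf  *b;
        size_t          pos;    /* raw array position, never inside the hole */
};

/* Step to the next array position, hopping over the hole the moment the
 * iterator lands on its first slot; pos == size means the iterator is done. */
void gb_iter_advance(struct gb_iter *it)
{
        if (it->pos < it->b->size) {
                it->pos++;
                if (it->pos == it->b->gap)
                        it->pos += it->b->size - it->b->nr;
        }
}
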
640 /* sort and dedup all keys in the journal: */
643 * When keys compare equal, oldest compares first:
657 struct journal_keys *keys = &c->journal_keys; in bch2_journal_keys_put() local
659 BUG_ON(atomic_read(&keys->ref) <= 0); in bch2_journal_keys_put()
661 if (!atomic_dec_and_test(&keys->ref)) in bch2_journal_keys_put()
664 move_gap(keys, keys->nr); in bch2_journal_keys_put()
666 darray_for_each(*keys, i) { in bch2_journal_keys_put()
668 (i == &darray_last(*keys) || in bch2_journal_keys_put()
676 kvfree(keys->data); in bch2_journal_keys_put()
677 keys->data = NULL; in bch2_journal_keys_put()
678 keys->nr = keys->gap = keys->size = 0; in bch2_journal_keys_put()
688 static void __journal_keys_sort(struct journal_keys *keys) in __journal_keys_sort() argument
690 sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL); in __journal_keys_sort()
694 struct journal_key *dst = keys->data; in __journal_keys_sort()
696 darray_for_each(*keys, src) { in __journal_keys_sort()
698 * We don't accumulate accounting keys here because we have to in __journal_keys_sort()
703 src + 1 < &darray_top(*keys) && in __journal_keys_sort()
710 keys->nr = dst - keys->data; in __journal_keys_sort()
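
__journal_keys_sort() sorts every key read from the journal and then compacts in place; because equal keys compare oldest-first, dropping each key that is followed by an equal one keeps only the newest version. A small sketch of that sort-then-keep-last pass over hypothetical (key, seq) entries, where a higher seq stands in for "newer in the journal":

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical journal entry: entries compare by key; when keys are equal,
 * the higher seq is the newer version and should win. */
struct entry {
        int             key;
        unsigned long   seq;
};

static int entry_cmp(const void *_l, const void *_r)
{
        const struct entry *l = _l, *r = _r;

        /* When keys compare equal, oldest (lowest seq) compares first. */
        return l->key != r->key
                ? (l->key > r->key) - (l->key < r->key)
                : (l->seq > r->seq) - (l->seq < r->seq);
}

/* Sort, then keep only the newest entry for each key; returns the new count. */
size_t sort_and_dedup(struct entry *e, size_t nr)
{
        struct entry *dst = e;

        qsort(e, nr, sizeof(e[0]), entry_cmp);

        for (size_t i = 0; i < nr; i++) {
                /* Drop this entry if the next one has the same key:
                 * the next one is newer and overwrites it. */
                if (i + 1 < nr && e[i + 1].key == e[i].key)
                        continue;
                *dst++ = e[i];
        }

        return dst - e;
}
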
717 struct journal_keys *keys = &c->journal_keys; in bch2_journal_keys_sort() local
737 if (darray_push(keys, n)) { in bch2_journal_keys_sort()
738 __journal_keys_sort(keys); in bch2_journal_keys_sort()
740 if (keys->nr * 8 > keys->size * 7) { in bch2_journal_keys_sort()
741 …bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu ke… in bch2_journal_keys_sort()
742 keys->nr, keys->size, nr_read, le64_to_cpu(i->j.seq)); in bch2_journal_keys_sort()
746 BUG_ON(darray_push(keys, n)); in bch2_journal_keys_sort()
753 __journal_keys_sort(keys); in bch2_journal_keys_sort()
754 keys->gap = keys->nr; in bch2_journal_keys_sort()
756 bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr); in bch2_journal_keys_sort()
764 struct journal_keys *keys = &c->journal_keys; in bch2_shoot_down_journal_keys() local
767 move_gap(keys, keys->nr); in bch2_shoot_down_journal_keys()
769 darray_for_each(*keys, i) in bch2_shoot_down_journal_keys()
775 keys->data[dst++] = *i; in bch2_shoot_down_journal_keys()
776 keys->nr = keys->gap = dst; in bch2_shoot_down_journal_keys()
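
bch2_shoot_down_journal_keys() first moves the gap to the end so the array is contiguous, then keeps only the keys that survive the filter, compacting in place and resetting nr and gap to the new count. The same compaction pattern over a plain array, with a hypothetical drop() predicate:

#include <stdbool.h>
#include <stddef.h>

/* Keep only the elements for which drop() returns false, compacting the
 * array in place; returns the new element count. */
size_t shoot_down(int *v, size_t nr, bool (*drop)(int))
{
        size_t dst = 0;

        for (size_t i = 0; i < nr; i++)
                if (!drop(v[i]))
                        v[dst++] = v[i];

        return dst;
}
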
781 struct journal_keys *keys = &c->journal_keys; in bch2_journal_keys_dump() local
784 pr_info("%zu keys:", keys->nr); in bch2_journal_keys_dump()
786 move_gap(keys, keys->nr); in bch2_journal_keys_dump()
788 darray_for_each(*keys, i) { in bch2_journal_keys_dump()
801 struct journal_keys *keys = &c->journal_keys; in bch2_fs_journal_keys_init() local
803 atomic_set(&keys->ref, 1); in bch2_fs_journal_keys_init()
804 keys->initial_ref_held = true; in bch2_fs_journal_keys_init()
805 mutex_init(&keys->overwrite_lock); in bch2_fs_journal_keys_init()