Lines Matching +full:a +full:- +full:b

1 /* SPDX-License-Identifier: GPL-2.0 */
14 * A bkey contains a key, a size field, a variable number of pointers, and some
25 * that it also filters out keys of size 0 - these are keys that have been
27 * them on disk, just unnecessary work - so we filter them out when resorting
31 * collection needs to find them to ensure bucket gens don't wrap around -
35 * front or the back of a bkey - this is mainly used for fixing overlapping
40 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
41 * along with a header. A btree node is made up of a number of these, written at
45 * 4 in memory - we lazily resort as needed.
49 * implement a btree iterator.
53 * Most of the code in bcache doesn't care about an individual bset - it needs
57 * in a btree node in sorted order, starting from either keys after a specific
58 * point (if you pass it a search key) or the start of the btree node.
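To make the layout described above concrete, here is a minimal sketch of walking one bset: bkeys sit back to back in memory, so visiting them in order is a plain linear step with bkey_next() until bset_bkey_last(). walk_bset() is a hypothetical helper, not part of bcache.

/* Sketch only: linear walk over the keys of a single bset. */
static void walk_bset(struct bset *i)
{
	struct bkey *k;

	for (k = i->start; k != bset_bkey_last(i); k = bkey_next(k))
		pr_debug("key %llu:%llu len %llu\n",
			 (unsigned long long) KEY_INODE(k),
			 (unsigned long long) KEY_OFFSET(k),
			 (unsigned long long) KEY_SIZE(k));
}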
62 * Since keys are variable length, we can't use a binary search on a bset - we
68 * into the last (unwritten) set, most of the keys within a given btree node are
73 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
77 * into, we construct a binary search tree in an array - traversing a binary
80 * (and their grandchildren, and great grandchildren...) - this means
83 * It's quite useful performance-wise to keep these nodes small - not just

85 * more nodes on a single cacheline and thus prefetch more iterations in advance
88 * Nodes in the auxiliary search tree must contain both a key to compare against
90 * and a pointer to the key. We use a few tricks to compress both of these.
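As an illustration of the array layout: with the tree stored in breadth-first (heap) order, the children of node j sit at 2*j and 2*j + 1, next to each other in memory, which is what makes the traversal cache- and prefetch-friendly. The sketch below is a lower-bound search over an array of plain 64-bit keys; it is illustrative only, not bcache's actual lookup routine.

/*
 * Illustrative only: lower-bound search over keys stored as a binary search
 * tree in breadth-first (heap) order, tree[1..size], tree[0] unused.
 * Returns the index of the smallest key >= search, or 0 if there is none.
 */
static unsigned int heap_order_lower_bound(const uint64_t *tree,
					   unsigned int size, uint64_t search)
{
	unsigned int j = 1, best = 0;

	while (j <= size) {
		if (search <= tree[j]) {
			best = j;	/* candidate; keep looking left */
			j = 2 * j;
		} else {
			j = 2 * j + 1;	/* everything here is too small; go right */
		}
	}

	return best;
}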
94 * a function (to_inorder()) that takes the index of a node in a binary tree and
98 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
101 * we're looking for lie within some range - bounded by our previous
102 * comparisons. (We special case the start of a search so that this is true even
105 * So we know the key we're looking for is between a and b, and a and b don't
110 * to partition the key range we're currently checking. Consider key n - the
115 * Note that this could be bit 0 - we might sometimes need all 80 bits to do the
119 * The solution is to make them fixed size, and when we're constructing a node
123 * happen a bit less than 1% of the time), we win - even on failures, that key
128 * point, with an exponent and a mantissa. The exponent needs to be big enough
133 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
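Put together, each auxiliary search tree node fits in 32 bits. Below is a sketch of such a node following the bit budget in the comment (7-bit exponent, 3-bit offset, 22-bit mantissa); the field names mirror the idea behind the kernel's struct bkey_float, but treat this as illustrative rather than the real definition.

/* Sketch of one auxiliary search tree node: 7 + 3 + 22 = 32 bits, 4 bytes. */
struct bkey_float_sketch {
	unsigned int	exponent:7;	/* bit position the mantissa was taken from */
	unsigned int	m:3;		/* low bits of the key's offset (keys are 8-byte aligned) */
	unsigned int	mantissa:22;	/* 22 bits of the key, starting at 'exponent' */
};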
139 * whenever we insert another key into it. For the unwritten set, we use a much
140 * simpler lookup table - it's just a flat array, so index i in the lookup table
144 * These are much easier to keep up to date when we insert a key - we do it
145 * somewhat lazily; when we shift a key up we usually just increment the pointer
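A sketch of that flat table (names here are hypothetical; bcache keeps the equivalent data inside struct bset_tree): entry i records the byte offset of the first key in the i-th BSET_CACHELINE-sized chunk, and keeping it current after an insert mostly means bumping the offsets of the chunks past the insertion point.

/* Illustrative flat lookup table for the unwritten set (hypothetical names). */
struct flat_table_sketch {
	uint16_t	first_key[128];	/* arbitrary size for this sketch */
	unsigned int	entries;
};

/*
 * After inserting 'len' bytes at byte offset 'where', every chunk past the
 * insertion point sees its first key shifted up, so the fixup is just an
 * increment of the later offsets.
 */
static void flat_table_fixup(struct flat_table_sketch *t,
			     unsigned int where, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < t->entries; i++)
		if (t->first_key[i] >= where)
			t->first_key[i] += len;
}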
159 * We construct a binary tree in an array as if the array
168 /* function of size - precalculated for to_inorder() */
176 * The nodes in the bset tree point to specific keys - this
179 * Conceptually it's a member of struct bkey_float, but we want
194 bool (*insert_fixup)(struct btree_keys *b,
212 * itself in a couple places
225 * Sets of sorted keys - the real btree node - plus a binary search tree
227 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
229 * set[0]->data points to the entire btree node as it exists on disk.
234 static inline struct bset_tree *bset_tree_last(struct btree_keys *b) in bset_tree_last() argument
236 return b->set + b->nsets; in bset_tree_last()
239 static inline bool bset_written(struct btree_keys *b, struct bset_tree *t) in bset_written() argument
241 return t <= b->set + b->nsets - b->last_set_unwritten; in bset_written()
244 static inline bool bkey_written(struct btree_keys *b, struct bkey *k) in bkey_written() argument
246 return !b->last_set_unwritten || k < b->set[b->nsets].data->start; in bkey_written()
249 static inline unsigned int bset_byte_offset(struct btree_keys *b, in bset_byte_offset() argument
252 return ((size_t) i) - ((size_t) b->set->data); in bset_byte_offset()
255 static inline unsigned int bset_sector_offset(struct btree_keys *b, in bset_sector_offset() argument
258 return bset_byte_offset(b, i) >> 9; in bset_sector_offset()
262 #define set_bytes(i) __set_bytes(i, i->keys)
267 __set_blocks(i, (i)->keys, block_bytes)
269 static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b) in bch_btree_keys_u64s_remaining() argument
271 struct bset_tree *t = bset_tree_last(b); in bch_btree_keys_u64s_remaining()
273 BUG_ON((PAGE_SIZE << b->page_order) < in bch_btree_keys_u64s_remaining()
274 (bset_byte_offset(b, t->data) + set_bytes(t->data))); in bch_btree_keys_u64s_remaining()
276 if (!b->last_set_unwritten) in bch_btree_keys_u64s_remaining()
279 return ((PAGE_SIZE << b->page_order) - in bch_btree_keys_u64s_remaining()
280 (bset_byte_offset(b, t->data) + set_bytes(t->data))) / in bch_btree_keys_u64s_remaining()
284 static inline struct bset *bset_next_set(struct btree_keys *b, in bset_next_set() argument
287 struct bset *i = bset_tree_last(b)->data; in bset_next_set()
292 void bch_btree_keys_free(struct btree_keys *b);
293 int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
295 void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
298 void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
299 void bch_bset_build_written_tree(struct btree_keys *b);
300 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
301 bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
302 void bch_bset_insert(struct btree_keys *b, struct bkey *where,
304 unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
320 struct btree_keys *b; member
327 /* Fixed-size btree_iter that can be allocated on the stack */
334 typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
338 struct btree_keys *b,
343 struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
347 struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
353 static inline struct bkey *bch_bset_search(struct btree_keys *b, in bch_bset_search() argument
357 return search ? __bch_bset_search(b, t, search) : t->data->start; in bch_bset_search()
360 #define for_each_key_filter(b, k, stack_iter, filter) \ argument
361 for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
362 ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
365 #define for_each_key(b, k, stack_iter) \ argument
366 for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
367 ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
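A usage sketch for the iteration macros above: walk every key of a btree node in sorted order, skipping keys that bch_ptr_bad() rejects. This assumes the fixed-size stack iterator type declared above (struct btree_iter_stack); count_good_keys() itself is hypothetical.

static unsigned int count_good_keys(struct btree_keys *b)
{
	struct btree_iter_stack iter;
	struct bkey *k;
	unsigned int n = 0;

	for_each_key_filter(b, k, &iter, bch_ptr_bad)
		n++;

	return n;
}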
383 void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
384 void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
386 void bch_btree_sort_and_fix_extents(struct btree_keys *b,
389 void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
392 static inline void bch_btree_sort(struct btree_keys *b, in bch_btree_sort() argument
395 bch_btree_sort_partial(b, 0, state); in bch_btree_sort()
404 void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
408 #define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, \
409 (unsigned int)(i)->keys)
413 return bkey_idx(i->start, idx); in bset_bkey_idx()
425 ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r) in bkey_cmp()
426 : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r); in bkey_cmp()
447 * Pointer '*preceding_key_p' points to a memory object to store preceding
452 * and it points to an on-stack variable, so the memory release is handled
459 if (!(*preceding_key_p)->low) in preceding_key()
460 (*preceding_key_p)->high--; in preceding_key()
461 (*preceding_key_p)->low--; in preceding_key()
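A usage sketch for preceding_key(), based on the comment above: the caller owns the storage, typically an on-stack struct bkey, and passes a pointer to its pointer. The argument order shown here is an assumption, since the full prototype is not part of this listing.

/* Hypothetical caller; storage for the preceding key lives on our stack. */
static void preceding_key_example(struct bkey *k)
{
	struct bkey on_stack;
	struct bkey *prev = &on_stack;

	preceding_key(k, &prev);	/* assumed argument order: key, then result */

	/* 'prev' now describes the key immediately preceding *k (or no key
	 * at all if *k is already the smallest possible key). */
}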
467 static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k) in bch_ptr_invalid() argument
469 return b->ops->key_invalid(b, k); in bch_ptr_invalid()
472 static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k) in bch_ptr_bad() argument
474 return b->ops->key_bad(b, k); in bch_ptr_bad()
477 static inline void bch_bkey_to_text(struct btree_keys *b, char *buf, in bch_bkey_to_text() argument
480 return b->ops->key_to_text(buf, size, k); in bch_bkey_to_text()
510 l->top_p = l->keys_p = l->inline_keys; in bch_keylist_init()
515 l->keys = k; in bch_keylist_init_single()
516 l->top = bkey_next(k); in bch_keylist_init_single()
521 l->top = bkey_next(l->top); in bch_keylist_push()
526 bkey_copy(l->top, k); in bch_keylist_add()
532 return l->top == l->keys; in bch_keylist_empty()
537 l->top = l->keys; in bch_keylist_reset()
542 if (l->keys_p != l->inline_keys) in bch_keylist_free()
543 kfree(l->keys_p); in bch_keylist_free()
548 return l->top_p - l->keys_p; in bch_keylist_nkeys()
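A usage sketch for the keylist helpers above: fill a list with a few keys, then hand it off. It assumes the list type these helpers operate on is struct keylist (declared in bcache.h) and that the keys fit in the inline buffer; queue_keys() is hypothetical.

static void queue_keys(struct bkey **keys, unsigned int n)
{
	struct keylist kl;
	unsigned int i;

	bch_keylist_init(&kl);

	for (i = 0; i < n; i++)
		bch_keylist_add(&kl, keys[i]);	/* copies *keys[i] to kl.top */

	if (!bch_keylist_empty(&kl))
		pr_debug("queued %zu u64s\n", bch_keylist_nkeys(&kl));

	bch_keylist_free(&kl);	/* no-op unless the list grew past inline_keys */
}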
564 int __bch_count_data(struct btree_keys *b);
565 void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
568 void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
569 void bch_dump_bucket(struct btree_keys *b);
573 static inline int __bch_count_data(struct btree_keys *b) { return -1; } in __bch_count_data() argument
575 __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {} in __bch_check_keys() argument
576 static inline void bch_dump_bucket(struct btree_keys *b) {} in bch_dump_bucket() argument
577 void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
581 static inline bool btree_keys_expensive_checks(struct btree_keys *b) in btree_keys_expensive_checks() argument
584 return *b->expensive_debug_checks; in btree_keys_expensive_checks()
590 static inline int bch_count_data(struct btree_keys *b) in bch_count_data() argument
592 return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1; in bch_count_data()
595 #define bch_check_keys(b, ...) \ argument
597 if (btree_keys_expensive_checks(b)) \
598 __bch_check_keys(b, __VA_ARGS__); \