/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H

#include <linux/kernel.h>
#include <linux/types.h>

#include "bcache_ondisk.h"
#include "util.h" /* for time_stats */

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bch_ptr_invalid() and
 * bch_ptr_bad().
 *
 * bch_ptr_invalid() primarily filters out keys and pointers that would be
 * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
 * pointers that occur in normal practice but don't point to real data.
 *
 * The one exception to the rule that bch_ptr_invalid() filters out invalid
 * keys is that it also filters out keys of size 0 - these are keys that have
 * been completely overwritten. It'd be safe to delete these in memory while
 * leaving them on disk, just unnecessary work - so we filter them out when
 * resorting instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from
 * the front or the back of a bkey - this is mainly used for fixing overlapping
 * extents, by removing the overlapping sectors from the older key.
 *
 * BSETS:
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
 * along with a header. A btree node is made up of a number of these, written
 * at different times.
 *
 * There could be many of them on disk, but we never allow there to be more
 * than 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE ITERATOR:
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting either from the keys after a
 * specific point (if you pass it a search key) or from the start of the btree
 * node.
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyway, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert keys
 * into the last (unwritten) set, most of the keys within a given btree node
 * are usually in sets that are mostly constant. We use two different types of
 * lookup tables to take advantage of this.
 *
 * Both lookup tables share in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
 * is used for the rest.
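 *
 * A sketch of the resulting lookup (illustrative only; the real helpers,
 * e.g. cacheline_to_bkey(), live in bset.c):
 *
 *	k = closest_indexed_key(t, search);	// one candidate key per
 *						// BSET_CACHELINE bytes
 *	while (bkey_cmp(k, search) < 0)		// then a short linear search
 *		k = bkey_next(k);
 *
 * where closest_indexed_key() is a hypothetical stand-in for the table
 * lookups described below.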
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect.
 *
 * It's quite useful performance-wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in
 * advance when traversing this tree.
 *
 * Nodes in the auxiliary search tree must contain both a key to compare
 * against (we don't want to fetch the key from the set, that would defeat the
 * purpose), and a pointer to the key. We use a few tricks to compress both of
 * these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We
 * have a function (to_inorder()) that takes the index of a node in a binary
 * tree and returns what its index would be in an inorder traversal, so we
 * only have to store the low bits of the offset.
 *
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing the
 * search tree at every iteration we know that both our search key and the key
 * we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true
 * even at the root of the tree).
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ above bit 50, we don't need to compare anything higher than
 * bit 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough
 * bits to partition the key range we're currently checking. Consider key n -
 * the key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 *
 * Note that this could be bit 0 - we might sometimes need all 84 bits to do
 * the comparison. But we'd really like our nodes in the auxiliary search tree
 * to be of fixed size.
 *
 * The solution is to make them fixed size, and when we're constructing a node
 * check if p and n differed in the bits we needed them to. If they don't we
 * flag that node, and when doing lookups we fall back to comparing against
 * the real key. As long as this doesn't happen too often (and it seems to
 * reliably happen a bit less than 1% of the time), we win - even on failures,
 * that key is then more likely to be in cache than if we were doing binary
 * searches all the way, since we're touching so much less memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big enough
 * to address all the bits in the original key, but the number of bits in the
 * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
 * We need one node per 128 bytes in the btree node, which means the auxiliary
 * search trees take up 3% as much memory as the btree itself.
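 *
 * As a rough sketch (the real layout is struct bkey_float in bset.c; the
 * field widths here are the ones described above, so take this as
 * illustrative rather than definitive):
 *
 *	struct bkey_float {
 *		unsigned int	exponent:7;	// shift applied to the key
 *		unsigned int	m:3;		// low bits of the key's offset
 *		unsigned int	mantissa:22;	// key bits at that shift
 *	};
 *
 * A lookup then compares the search key, shifted down by the stored
 * exponent, against the stored mantissa, falling back to a full bkey_cmp()
 * on the flagged nodes described above.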
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a
 * much simpler lookup table - it's just a flat array, so index i in the
 * lookup table corresponds to the i-th range of BSET_CACHELINE bytes in the
 * set. Indexing within each byte range works the same as with the auxiliary
 * search trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the
 * pointer to it, and only when it would overflow do we go to the trouble of
 * finding the first key in that range of bytes again.
 */

struct btree_keys;
struct btree_iter;
struct btree_iter_set;
struct bkey_float;

#define MAX_BSETS		4U

struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	unsigned int size;

	/* function of size - precalculated for to_inorder() */
	unsigned int extra;

	/* copy of the last key in the set */
	struct bkey end;
	struct bkey_float *tree;

	/*
	 * The nodes in the bset tree point to specific keys - this
	 * array holds the size of the key preceding each of them.
	 *
	 * Conceptually it's a member of struct bkey_float, but we want
	 * to keep bkey_float to 4 bytes and prev isn't used in the fast
	 * path.
	 */
	uint8_t *prev;

	/* The actual btree node, with pointers to each sorted set */
	struct bset *data;
};

struct btree_keys_ops {
	bool (*sort_cmp)(struct btree_iter_set l,
			 struct btree_iter_set r);
	struct bkey *(*sort_fixup)(struct btree_iter *iter,
				   struct bkey *tmp);
	bool (*insert_fixup)(struct btree_keys *b,
			     struct bkey *insert,
			     struct btree_iter *iter,
			     struct bkey *replace_key);
	bool (*key_invalid)(struct btree_keys *bk,
			    const struct bkey *k);
	bool (*key_bad)(struct btree_keys *bk,
			const struct bkey *k);
	bool (*key_merge)(struct btree_keys *bk,
			  struct bkey *l, struct bkey *r);
	void (*key_to_text)(char *buf,
			    size_t size,
			    const struct bkey *k);
	void (*key_dump)(struct btree_keys *keys,
			 const struct bkey *k);

	/*
	 * Only used for deciding whether to use START_KEY(k) or just the key
	 * itself in a couple places
	 */
	bool is_extents;
};

struct btree_keys {
	const struct btree_keys_ops *ops;
	uint8_t page_order;
	uint8_t nsets;
	unsigned int last_set_unwritten:1;
	bool *expensive_debug_checks;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search
	 * tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data
	 * point to the memory we have allocated for this btree node.
	 * Additionally, set[0]->data points to the entire btree node as it
	 * exists on disk.
	 */
	struct bset_tree set[MAX_BSETS];
};
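/*
 * Illustrative sketch only (bch_example_nkeys() is hypothetical, not part of
 * the bcache API): a node's sorted sets are set[0] through set[nsets], so
 * walking all of them looks like this.
 */
static inline size_t bch_example_nkeys(struct btree_keys *b)
{
	struct bset_tree *t;
	size_t keys = 0;

	/* b->set + b->nsets is the last (possibly unwritten) set */
	for (t = b->set; t <= b->set + b->nsets; t++)
		keys += t->data->keys;

	return keys;
}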
static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
{
	return b->set + b->nsets;
}

static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
{
	return t <= b->set + b->nsets - b->last_set_unwritten;
}

static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
{
	return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}

static inline unsigned int bset_byte_offset(struct btree_keys *b,
					    struct bset *i)
{
	return ((size_t) i) - ((size_t) b->set->data);
}

static inline unsigned int bset_sector_offset(struct btree_keys *b,
					      struct bset *i)
{
	return bset_byte_offset(b, i) >> 9;
}

#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
#define set_bytes(i)		__set_bytes(i, i->keys)

#define __set_blocks(i, k, block_bytes)				\
	DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
#define set_blocks(i, block_bytes)				\
	__set_blocks(i, (i)->keys, block_bytes)

static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON((PAGE_SIZE << b->page_order) <
	       (bset_byte_offset(b, t->data) + set_bytes(t->data)));

	if (!b->last_set_unwritten)
		return 0;

	return ((PAGE_SIZE << b->page_order) -
		(bset_byte_offset(b, t->data) + set_bytes(t->data))) /
		sizeof(u64);
}

static inline struct bset *bset_next_set(struct btree_keys *b,
					 unsigned int block_bytes)
{
	struct bset *i = bset_tree_last(b)->data;

	return ((void *) i) + roundup(set_bytes(i), block_bytes);
}

void bch_btree_keys_free(struct btree_keys *b);
int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
			 gfp_t gfp);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks);

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
void bch_bset_build_written_tree(struct btree_keys *b);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
		     struct bkey *insert);
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
				  struct bkey *replace_key);

enum {
	BTREE_INSERT_STATUS_NO_INSERT = 0,
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};

/* Btree key iteration */

struct btree_iter {
	size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
	struct btree_keys *b;
#endif
	struct btree_iter_set {
		struct bkey *k, *end;
	} data[];
};

/* Fixed-size btree_iter that can be allocated on the stack */

struct btree_iter_stack {
	struct btree_iter iter;
	struct btree_iter_set stack_data[MAX_BSETS];
};

typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);

struct bkey *bch_btree_iter_next(struct btree_iter *iter);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree_keys *b,
					ptr_filter_fn fn);

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end);
struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
				       struct btree_iter_stack *iter,
				       struct bkey *search);
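/*
 * Illustrative sketch only (bch_example_walk() is hypothetical, not part of
 * the bcache API): iterating over every key in a node in sorted order while
 * skipping keys the node's key_bad op rejects - essentially what the
 * for_each_key_filter() macro below expands to.
 */
static inline void bch_example_walk(struct btree_keys *b)
{
	struct btree_iter_stack iter;
	struct bkey *k;

	/* NULL search key: start from the beginning of the node */
	bch_btree_iter_stack_init(b, &iter, NULL);

	while ((k = bch_btree_iter_next_filter(&iter.iter, b,
					       b->ops->key_bad)))
		; /* operate on k */
}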
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
			       const struct bkey *search);

/*
 * Returns the first key that is strictly greater than search
 */
static inline struct bkey *bch_bset_search(struct btree_keys *b,
					   struct bset_tree *t,
					   const struct bkey *search)
{
	return search ? __bch_bset_search(b, t, search) : t->data->start;
}

#define for_each_key_filter(b, k, stack_iter, filter)                      \
	for (bch_btree_iter_stack_init((b), (stack_iter), NULL);           \
	     ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
					       filter));)

#define for_each_key(b, k, stack_iter)                           \
	for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
	     ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)

/* Sorting */

struct bset_sort_state {
	mempool_t pool;

	unsigned int page_order;
	unsigned int crit_factor;

	struct time_stats time;
};

void bch_bset_sort_state_free(struct bset_sort_state *state);
int bch_bset_sort_state_init(struct bset_sort_state *state,
			     unsigned int page_order);
void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state);
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
				    struct btree_iter *iter,
				    struct bset_sort_state *state);
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
			    struct bset_sort_state *state);

static inline void bch_btree_sort(struct btree_keys *b,
				  struct bset_sort_state *state)
{
	bch_btree_sort_partial(b, 0, state);
}

struct bset_stats {
	size_t sets_written, sets_unwritten;
	size_t bytes_written, bytes_unwritten;
	size_t floats, failed;
};

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);

/* Bkey utility code */

#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, \
					 (unsigned int)(i)->keys)

static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
	return bkey_idx(i->start, idx);
}

static inline void bkey_init(struct bkey *k)
{
	*k = ZERO_KEY;
}

static __always_inline int64_t bkey_cmp(const struct bkey *l,
					const struct bkey *r)
{
	return unlikely(KEY_INODE(l) != KEY_INODE(r))
		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned int i);
bool __bch_cut_front(const struct bkey *where, struct bkey *k);
bool __bch_cut_back(const struct bkey *where, struct bkey *k);

static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, k) > 0);
	return __bch_cut_front(where, k);
}

static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
	return __bch_cut_back(where, k);
}
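/*
 * Illustrative sketch only (bch_example_cut() is hypothetical and the KEY()
 * values are made up): KEY(inode, offset, size)'s offset is the *end* of the
 * extent, so trimming the first 8 sectors off a 16 sector extent ending at
 * offset 1024 means cutting at offset 1016.
 */
static inline void bch_example_cut(void)
{
	struct bkey k = KEY(1, 1024, 16);	/* covers sectors [1008, 1024) */

	bch_cut_front(&KEY(1, 1016, 0), &k);	/* now covers [1016, 1024) */
	BUG_ON(KEY_SIZE(&k) != 8);
}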
/*
 * '*preceding_key_p' points to a memory object in which to store the key
 * preceding k. If the preceding key does not exist, '*preceding_key_p' is
 * set to NULL, so the caller of preceding_key() is responsible for the
 * memory that '*preceding_key_p' points to before calling preceding_key().
 * Currently the only caller of preceding_key() is bch_btree_insert_key(),
 * and it points to an on-stack variable, so the memory release is handled
 * by the stack frame itself.
 */
static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
{
	if (KEY_INODE(k) || KEY_OFFSET(k)) {
		(**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
		if (!(*preceding_key_p)->low)
			(*preceding_key_p)->high--;
		(*preceding_key_p)->low--;
	} else {
		(*preceding_key_p) = NULL;
	}
}

static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
	return b->ops->key_invalid(b, k);
}

static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
{
	return b->ops->key_bad(b, k);
}

static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
				    size_t size, const struct bkey *k)
{
	return b->ops->key_to_text(buf, size, k);
}

static inline bool bch_bkey_equal_header(const struct bkey *l,
					 const struct bkey *r)
{
	return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
		KEY_PTRS(l) == KEY_PTRS(r) &&
		KEY_CSUM(l) == KEY_CSUM(r));
}

/* Keylists */

struct keylist {
	union {
		struct bkey *keys;
		uint64_t *keys_p;
	};
	union {
		struct bkey *top;
		uint64_t *top_p;
	};

	/* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE		16
	uint64_t inline_keys[KEYLIST_INLINE];
};

static inline void bch_keylist_init(struct keylist *l)
{
	l->top_p = l->keys_p = l->inline_keys;
}

static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k)
{
	l->keys = k;
	l->top = bkey_next(k);
}

static inline void bch_keylist_push(struct keylist *l)
{
	l->top = bkey_next(l->top);
}

static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
{
	bkey_copy(l->top, k);
	bch_keylist_push(l);
}

static inline bool bch_keylist_empty(struct keylist *l)
{
	return l->top == l->keys;
}

static inline void bch_keylist_reset(struct keylist *l)
{
	l->top = l->keys;
}

static inline void bch_keylist_free(struct keylist *l)
{
	if (l->keys_p != l->inline_keys)
		kfree(l->keys_p);
}

static inline size_t bch_keylist_nkeys(struct keylist *l)
{
	return l->top_p - l->keys_p;
}

static inline size_t bch_keylist_bytes(struct keylist *l)
{
	return bch_keylist_nkeys(l) * sizeof(uint64_t);
}

struct bkey *bch_keylist_pop(struct keylist *l);
void bch_keylist_pop_front(struct keylist *l);
int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);
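/*
 * Illustrative sketch only (bch_example_keylist() is hypothetical and the
 * KEY() value is made up): building up a keylist and draining it in FIFO
 * order.
 */
static inline void bch_example_keylist(void)
{
	struct keylist l;
	struct bkey k = KEY(1, 16, 8);

	bch_keylist_init(&l);		/* starts out using inline_keys */
	bch_keylist_add(&l, &k);	/* copies k to l.top, then pushes */

	while (!bch_keylist_empty(&l))
		bch_keylist_pop_front(&l);

	bch_keylist_free(&l);		/* no-op until __bch_keylist_realloc() */
}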
/* Debug stuff */

#ifdef CONFIG_BCACHE_DEBUG

int __bch_count_data(struct btree_keys *b);
void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
				     const char *fmt,
				     ...);
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
void bch_dump_bucket(struct btree_keys *b);

#else

static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __printf(2, 3)
	__bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);

#endif

static inline bool btree_keys_expensive_checks(struct btree_keys *b)
{
#ifdef CONFIG_BCACHE_DEBUG
	return *b->expensive_debug_checks;
#else
	return false;
#endif
}

static inline int bch_count_data(struct btree_keys *b)
{
	return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
}

#define bch_check_keys(b, ...)						\
do {									\
	if (btree_keys_expensive_checks(b))				\
		__bch_check_keys(b, __VA_ARGS__);			\
} while (0)

#endif