// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
{
	struct bkey *k, *next;

	for (k = i->start; k < bset_bkey_last(i); k = next) {
		next = bkey_next(k);

		printk(KERN_ERR "block %u key %u/%u: ", set,
		       (unsigned) ((u64 *) k - i->d), i->keys);

		if (b->ops->key_dump)
			b->ops->key_dump(b, k);
		else
			printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

		if (next < bset_bkey_last(i) &&
		    bkey_cmp(k, b->ops->is_extents ?
			     &START_KEY(next) : next) > 0)
			printk(KERN_ERR "Key skipped backwards\n");
	}
}

void bch_dump_bucket(struct btree_keys *b)
{
	unsigned i;

	console_lock();
	for (i = 0; i <= b->nsets; i++)
		bch_dump_bset(b, b->set[i].data,
			      bset_sector_offset(b, b->set[i].data));
	console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
	unsigned ret = 0;
	struct btree_iter iter;
	struct bkey *k;

	if (b->ops->is_extents)
		for_each_key(b, k, &iter)
			ret += KEY_SIZE(k);
	return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;
	const char *err;

	for_each_key(b, k, &iter) {
		if (b->ops->is_extents) {
			err = "Keys out of order";
			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
				goto bug;

			if (bch_ptr_invalid(b, k))
				continue;

			err = "Overlapping keys";
			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
				goto bug;
		} else {
			if (bch_ptr_bad(b, k))
				continue;

			err = "Duplicate keys";
			if (p && !bkey_cmp(p, k))
				goto bug;
		}
		p = k;
	}
#if 0
	err = "Key larger than btree node key";
	if (p && bkey_cmp(p, &b->key) > 0)
		goto bug;
#endif
	return;
bug:
	bch_dump_bucket(b);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	panic("bch_check_keys error: %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
	struct bkey *k = iter->data->k, *next = bkey_next(k);

	if (next < iter->data->end &&
	    bkey_cmp(k, iter->b->ops->is_extents ?
		     &START_KEY(next) : next) > 0) {
		bch_dump_bucket(iter->b);
		panic("Key skipped backwards\n");
	}
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif

/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;
	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
	uint64_t *new_keys;

	newsize = roundup_pow_of_two(newsize);

	if (newsize <= KEYLIST_INLINE ||
	    roundup_pow_of_two(oldsize) == newsize)
		return 0;

	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

	if (!new_keys)
		return -ENOMEM;

	if (!old_keys)
		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

	l->keys_p = new_keys;
	l->top_p = new_keys + oldsize;

	return 0;
}

struct bkey *bch_keylist_pop(struct keylist *l)
{
	struct bkey *k = l->keys;

	if (k == l->top)
		return NULL;

	while (bkey_next(k) != l->top)
		k = bkey_next(k);

	return l->top = k;
}

void bch_keylist_pop_front(struct keylist *l)
{
	l->top_p -= bkey_u64s(l->keys);

	memmove(l->keys,
		bkey_next(l->keys),
		bch_keylist_bytes(l));
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned i)
{
	BUG_ON(i > KEY_PTRS(src));

	/* Only copy the header, key, and one pointer. */
	memcpy(dest, src, 2 * sizeof(uint64_t));
	dest->ptr[0] = src->ptr[i];
	SET_KEY_PTRS(dest, 1);
	/* We didn't copy the checksum so clear that bit. */
	SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;

	if (bkey_cmp(where, k) < 0)
		len = KEY_OFFSET(k) - KEY_OFFSET(where);
	else
		bkey_copy_key(k, where);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;

	BUG_ON(KEY_INODE(where) != KEY_INODE(k));

	if (bkey_cmp(where, &START_KEY(k)) > 0)
		len = KEY_OFFSET(where) - KEY_START(k);

	bkey_copy_key(k, where);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

/* Auxiliary search trees */

/* 32 bits total: */
#define BKEY_MID_BITS		3
#define BKEY_EXPONENT_BITS	7
#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
	unsigned	exponent:BKEY_EXPONENT_BITS;
	unsigned	m:BKEY_MID_BITS;
	unsigned	mantissa:BKEY_MANTISSA_BITS;
} __packed;

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bset_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE		128
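
/*
 * Illustrative sizing (not from the original source), assuming 4 KiB pages
 * and page_order 0: a btree node buffer spans 4096 / 128 = 32 cachelines,
 * so the auxiliary tree needs 32 4-byte struct bkey_floats (128 bytes) plus
 * 32 one-byte prev entries - see btree_keys_cachelines(), bset_tree_bytes()
 * and bset_prev_bytes() below.
 */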
/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
	return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
	return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(uint8_t);
}

/* Memory allocation */

void bch_btree_keys_free(struct btree_keys *b)
{
	struct bset_tree *t = b->set;

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_free);

int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
{
	struct bset_tree *t = b->set;

	BUG_ON(t->data);

	b->page_order = page_order;

	t->data = (void *) __get_free_pages(gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	return 0;
err:
	bch_btree_keys_free(b);
	return -ENOMEM;
}
EXPORT_SYMBOL(bch_btree_keys_alloc);

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks)
{
	unsigned i;

	b->ops = ops;
	b->expensive_debug_checks = expensive_debug_checks;
	b->nsets = 0;
	b->last_set_unwritten = 0;

	/* XXX: shouldn't be needed */
	for (i = 0; i < MAX_BSETS; i++)
		b->set[i].size = 0;
	/*
	 * Second loop starts at 1 because b->keys[0]->data is the memory we
	 * allocated
	 */
	for (i = 1; i < MAX_BSETS; i++)
		b->set[i].data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_init);

/* Binary tree stuff for auxiliary search trees */

/*
 * Return the array index that comes after j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned inorder_next(unsigned j, unsigned size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}

/*
 * Return the array index that comes before j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned inorder_prev(unsigned j, unsigned size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}
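
/*
 * Worked example (illustrative only, not from the original source): a tree
 * of size 8 occupies array indices 1..7 and is visited in-order as
 * 4, 2, 5, 1, 6, 3, 7 - so inorder_next(5, 8) == 1 and
 * inorder_prev(1, 8) == 5.
 */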
/*
 * I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, for sizes up to somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
	unsigned b = fls(j);
	unsigned shift = fls(size - 1) - b;

	j ^= 1U << (b - 1);
	j <<= 1;
	j |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}

/*
 * Return the cacheline index in bset_tree->data, where j is the index of a
 * node in the linear array which stores the auxiliary binary tree
 */
static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}

static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
	unsigned shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);

	j >>= shift;
	j |= roundup_pow_of_two(size) >> shift;

	return j;
}

/*
 * Return the index of a node in the linear array which stores the auxiliary
 * binary tree; j is the cacheline index into t->data.
 */
static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}

#if 0
void inorder_test(void)
{
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned size = 2;
	     size < 65536000;
	     size++) {
		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
			       done / ktime_us_delta(ktime_get(), start));

		while (1) {
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			if (j == rounddown_pow_of_two(size) - 1)
				break;

			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
#endif
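
/*
 * Worked example for the mapping above (illustrative only): with size == 7
 * the tree occupies indices 1..6, extra == (7 - rounddown_pow_of_two(6)) << 1
 * == 6, and the in-order sequence is 4, 2, 5, 1, 6, 3.  Indeed
 * __to_inorder(1, 7, 6) == 4 and __inorder_to_tree(4, 7, 6) == 1.
 */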
/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
				      unsigned offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
					 unsigned cacheline,
					 struct bkey *k)
{
	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
	low >>= shift;
	low |= (high << 1) << (63U - shift);
	return low;
}

/*
 * Calculate the mantissa value for struct bkey_float.
 * If the most significant bit of f->exponent is not set, then
 *  - f->exponent >> 6 is 0
 *  - p[0] points to bkey->low
 *  - p[-1] borrows bits from KEY_INODE() of bkey->high
 * If the most significant bit of f->exponent is set, then
 *  - f->exponent >> 6 is 1
 *  - p[0] points to bits from KEY_INODE() of bkey->high
 *  - p[-1] points to other bits from KEY_INODE() of bkey->high too.
 * See make_bfloat() for when the most significant bit of f->exponent
 * is set or not.
 */
static inline unsigned bfloat_mantissa(const struct bkey *k,
				       struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);
	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}
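
/*
 * shrd128() above mirrors the x86 SHRD instruction: it returns the 64 bits
 * of the 128-bit quantity (high:low) starting at bit 'shift', for
 * 0 <= shift <= 63.  Illustrative only: shrd128(0x1, 1ULL << 63, 63) == 0x3,
 * i.e. bit 63 of low and bit 0 of high land in the two low bits of the
 * result - the window bfloat_mantissa() then masks down to
 * BKEY_MANTISSA_BITS.
 */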
static void make_bfloat(struct bset_tree *t, unsigned j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	/*
	 * If l and r have different KEY_INODE values (different backing
	 * devices), f->exponent records how many least significant bits
	 * are different in the KEY_INODE values and sets the most
	 * significant bit to 1 (by adding 64).
	 * If l and r have the same KEY_INODE value, f->exponent records
	 * how many bits differ in the least significant bits of bkey->low.
	 * See bfloat_mantissa() for how the most significant bit of
	 * f->exponent is used to calculate the bfloat mantissa value.
	 */
	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */

	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}
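
/*
 * Worked example for the exponent encoding above (illustrative only): with
 * BKEY_MANTISSA_BITS == 22, if l and r share KEY_INODE and their ->low
 * values first differ at bit 30 (fls64(r->low ^ l->low) == 31), then
 * f->exponent == 31 - 22 == 9 and the stored mantissa is bits 9..30 of a
 * key's ->low - the most significant 22 bits of the window in which keys
 * in this subtree can differ.
 */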
static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
	if (t != b->set) {
		unsigned j = roundup(t[-1].size,
				     64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->set + MAX_BSETS)
		t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(b->last_set_unwritten);
	b->last_set_unwritten = 1;

	bset_alloc_tree(b, t);

	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
		t->size = 1;
	}
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
	if (i != b->set->data) {
		b->set[++b->nsets].data = i;
		i->seq = b->set->data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic = magic;
	i->version = 0;
	i->keys = 0;

	bch_bset_build_unwritten_tree(b);
}
EXPORT_SYMBOL(bch_bset_init_next);

/*
 * Build the auxiliary binary tree 'struct bset_tree *t'; this tree is used to
 * accelerate bkey search in a btree node (pointed to by bset_tree->data in
 * memory). After searching the auxiliary tree by calling bset_search_tree(),
 * a struct bset_search_iter is returned which indicates the range [l, r] of
 * bset_tree->data that the bkey being searched for might be inside. A
 * subsequent linear comparison then does the exact search; see
 * __bch_bset_search() for how the auxiliary tree is used.
 */
void bch_bset_build_written_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct bkey *prev = NULL, *k = t->data->start;
	unsigned j, cacheline = 1;

	b->last_set_unwritten = 0;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned,
			bkey_to_cacheline(t, bset_bkey_last(t->data)),
			b->set->tree + btree_keys_cachelines(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) < cacheline)
			prev = k, k = bkey_next(k);

		t->prev[j] = bkey_u64s(prev);
		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
	}

	while (bkey_next(k) != bset_bkey_last(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}
EXPORT_SYMBOL(bch_bset_build_written_tree);

/* Insert */

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned inorder, j = 1;

	for (t = b->set; t <= bset_tree_last(b); t++)
		if (k < bset_bkey_last(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == bset_bkey_last(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}
EXPORT_SYMBOL(bch_bset_fix_invalidated_key);

static void bch_bset_fix_lookup_table(struct btree_keys *b,
				      struct bset_tree *t,
				      struct bkey *k)
{
	unsigned shift = bkey_u64s(k);
	unsigned j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/*
	 * k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	while (j < t->size &&
	       table_to_bkey(t, j) <= k)
		j++;

	/*
	 * Adjust all the lookup table entries, and find a new key for any
	 * that have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(t, j, k);
		}
	}

	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */

	for (k = table_to_bkey(t, t->size - 1);
	     k != bset_bkey_last(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] =
				bkey_to_cacheline_offset(t, t->size, k);
			t->size++;
		}
}
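
/*
 * Note on the write-set table above (editorial, not from the original
 * source): t->prev[cacheline] holds the offset, in u64s, from the start of
 * that cacheline to a key starting inside it.  With BSET_CACHELINE == 128 a
 * cacheline holds 16 u64s, so the offset fits comfortably in the uint8_t
 * entries sized by bset_prev_bytes().
 */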
/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
	if (!b->ops->key_merge)
		return false;

	/*
	 * Generic header checks
	 * Assumes left and right are in order
	 * Left and right must be exactly aligned
	 */
	if (!bch_bkey_equal_header(l, r) ||
	    bkey_cmp(l, &START_KEY(r)))
		return false;

	return b->ops->key_merge(b, l, r);
}
EXPORT_SYMBOL(bch_bkey_try_merge);

void bch_bset_insert(struct btree_keys *b, struct bkey *where,
		     struct bkey *insert)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(!b->last_set_unwritten);
	BUG_ON(bset_byte_offset(b, t->data) +
	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
	       PAGE_SIZE << b->page_order);

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) bset_bkey_last(t->data) - (void *) where);

	t->data->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, t, where);
}
EXPORT_SYMBOL(bch_bset_insert);

unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
			      struct bkey *replace_key)
{
	unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
	struct bset *i = bset_tree_last(b)->data;
	struct bkey *m, *prev = NULL;
	struct btree_iter iter;

	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

	m = bch_btree_iter_init(b, &iter, b->ops->is_extents
				? PRECEDING_KEY(&START_KEY(k))
				: PRECEDING_KEY(k));

	if (b->ops->insert_fixup(b, k, &iter, replace_key))
		return status;

	status = BTREE_INSERT_STATUS_INSERT;

	while (m != bset_bkey_last(i) &&
	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
		prev = m, m = bkey_next(m);

	/* prev is in the tree, if we merge we're done */
	status = BTREE_INSERT_STATUS_BACK_MERGE;
	if (prev &&
	    bch_bkey_try_merge(b, prev, k))
		goto merged;
#if 0
	status = BTREE_INSERT_STATUS_OVERWROTE;
	if (m != bset_bkey_last(i) &&
	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
		goto copy;
#endif
	status = BTREE_INSERT_STATUS_FRONT_MERGE;
	if (m != bset_bkey_last(i) &&
	    bch_bkey_try_merge(b, k, m))
		goto copy;

	bch_bset_insert(b, m, k);
copy:	bkey_copy(m, k);
merged:
	return status;
}
EXPORT_SYMBOL(bch_btree_insert_key);
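
/*
 * Illustrative only: with extents, inserting a key covering 5..10 when the
 * preceding key covers 0..5 takes the back-merge path above - if the ops'
 * key_merge() accepts it, bch_bkey_try_merge() extends the existing key to
 * 0..10, the status returned is BTREE_INSERT_STATUS_BACK_MERGE, and no
 * memmove of the bset is needed.
 */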
/* Lookup */

struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned li = 0, ri = t->size;

	while (li + 1 != ri) {
		unsigned m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
	};
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned inorder, j, n = 1;

	do {
		/*
		 * A bit trick here.
		 * If p < t->size, (int) (p - t->size) is negative and its
		 * most significant bit is set; arithmetic right shifting by
		 * 31 bits yields a mask of all ones. If p >= t->size, the
		 * most significant bit is not set and the shift yields 0.
		 * So the following 2 lines are equivalent to
		 *	if (p >= t->size)
		 *		p = 0;
		 * but a branch instruction is avoided.
		 */
		unsigned p = n << 4;
		p &= ((int) (p - t->size)) >> 31;

		prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		/*
		 * Similar bit trick, using a subtract operation to avoid a
		 * branch instruction.
		 *
		 * n = (f->mantissa > bfloat_mantissa())
		 *	? j * 2
		 *	: j * 2 + 1;
		 *
		 * We need to subtract 1 from f->mantissa for the sign bit
		 * trick to work - that's done in make_bfloat()
		 */
		if (likely(f->exponent != 127))
			n = j * 2 + (((unsigned)
				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
		else
			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				? j * 2
				: j * 2 + 1;
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = bset_bkey_last(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}
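
/*
 * Concrete numbers for the prefetch trick above (illustrative only): with
 * t->size == 40, p == 32 gives (int) (32 - 40) >> 31 == ~0 so p is kept,
 * while p == 48 gives (48 - 40) >> 31 == 0 so p becomes 0 - out-of-range
 * prefetches are harmlessly redirected to t->tree[0].
 */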
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
			       const struct bkey *search)
{
	struct bset_search_iter i;

	/*
	 * First we search for a cacheline, then we do a linear search within
	 * that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 * * The set is too small to have a search tree, so we just do a linear
	 *   search over the whole set.
	 * * The set is the one we're currently inserting into; keeping a full
	 *   auxiliary search tree up to date would be too expensive, so we
	 *   use a much simpler lookup table to do a binary search -
	 *   bset_search_write_set().
	 * * Or we use the auxiliary search tree we constructed earlier -
	 *   bset_search_tree()
	 */

	if (unlikely(!t->size)) {
		i.l = t->data->start;
		i.r = bset_bkey_last(t->data);
	} else if (bset_written(b, t)) {
		/*
		 * Each node in the auxiliary search tree covers a certain range
		 * of bits, and keys above and below the set it covers might
		 * differ outside those bits - so we have to special case the
		 * start and end - handle that here:
		 */

		if (unlikely(bkey_cmp(search, &t->end) >= 0))
			return bset_bkey_last(t->data);

		if (unlikely(bkey_cmp(search, t->data->start) < 0))
			return t->data->start;

		i = bset_search_tree(t, search);
	} else {
		BUG_ON(!b->nsets &&
		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

		i = bset_search_write_set(t, search);
	}

	if (btree_keys_expensive_checks(b)) {
		BUG_ON(bset_written(b, t) &&
		       i.l != t->data->start &&
		       bkey_cmp(tree_to_prev_bkey(t,
			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
				search) > 0);

		BUG_ON(i.r != bset_bkey_last(t->data) &&
		       bkey_cmp(i.r, search) <= 0);
	}

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
		i.l = bkey_next(i.l);

	return i.l;
}
EXPORT_SYMBOL(__bch_bset_search);

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
				 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
					  struct btree_iter *iter,
					  struct bkey *search,
					  struct bset_tree *start)
{
	struct bkey *ret = NULL;

	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	for (; start <= bset_tree_last(b); start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
	}

	return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
				 struct btree_iter *iter,
				 struct bkey *search)
{
	return __bch_btree_iter_init(b, iter, search, b->set);
}
EXPORT_SYMBOL(bch_btree_iter_init);

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
						 btree_iter_cmp_fn *cmp)
{
	struct btree_iter_set b __maybe_unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		bch_btree_iter_next_check(iter);

		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			WARN_ONCE(1, "bset was corrupt!\n");
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, b, cmp);
		else
			heap_sift(iter, 0, cmp);
	}

	return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	return __bch_btree_iter_next(iter, btree_iter_cmp);
}
EXPORT_SYMBOL(bch_btree_iter_next);
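
#if 0
/*
 * Minimal usage sketch, mirroring the for_each_key_filter() macro in bset.h
 * (illustrative only, not built; example_dump_keys() is a made-up name, not
 * part of bcache): walk all keys of a node in sorted order, skipping bad
 * pointers.
 */
static void example_dump_keys(struct btree_keys *b)
{
	struct btree_iter iter;
	struct bkey *k;

	bch_btree_iter_init(b, &iter, NULL);
	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
		pr_info("%llu:%llu", KEY_INODE(k), KEY_OFFSET(k));
}
#endif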
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree_keys *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}

/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
	mempool_exit(&state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
{
	spin_lock_init(&state->time.lock);

	state->page_order = page_order;
	state->crit_factor = int_sqrt(1 << page_order);

	return mempool_init_page_pool(&state->pool, 1, page_order);
}
EXPORT_SYMBOL(bch_bset_sort_state_init);
static void btree_mergesort(struct btree_keys *b, struct bset *out,
			    struct btree_iter *iter,
			    bool fixup, bool remove_stale)
{
	int i;
	struct bkey *k, *last = NULL;
	BKEY_PADDED(k) tmp;
	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
		? bch_ptr_bad
		: bch_ptr_invalid;

	/* Heapify the iterator, using our comparison function */
	for (i = iter->used / 2 - 1; i >= 0; --i)
		heap_sift(iter, i, b->ops->sort_cmp);

	while (!btree_iter_end(iter)) {
		if (b->ops->sort_fixup && fixup)
			k = b->ops->sort_fixup(iter, &tmp.k);
		else
			k = NULL;

		if (!k)
			k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

		if (bad(b, k))
			continue;

		if (!last) {
			last = out->start;
			bkey_copy(last, k);
		} else if (!bch_bkey_try_merge(b, last, k)) {
			last = bkey_next(last);
			bkey_copy(last, k);
		}
	}

	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys", out->keys);
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
			 unsigned start, unsigned order, bool fixup,
			 struct bset_sort_state *state)
{
	uint64_t start_time;
	bool used_mempool = false;
	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
						     order);
	if (!out) {
		struct page *outp;

		BUG_ON(order > state->page_order);

		outp = mempool_alloc(&state->pool, GFP_NOIO);
		out = page_address(outp);
		used_mempool = true;
		order = state->page_order;
	}

	start_time = local_clock();

	btree_mergesort(b, out, iter, fixup, false);
	b->nsets = start;

	if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */

		out->magic = b->set->data->magic;
		out->seq = b->set->data->seq;
		out->version = b->set->data->version;
		swap(out, b->set->data);
	} else {
		b->set[start].data->keys = out->keys;
		memcpy(b->set[start].data->start, out->start,
		       (void *) bset_bkey_last(out) - (void *) out->start);
	}

	if (used_mempool)
		mempool_free(virt_to_page(out), &state->pool);
	else
		free_pages((unsigned long) out, order);

	bch_bset_build_written_tree(b);

	if (!start)
		bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
			    struct bset_sort_state *state)
{
	size_t order = b->page_order, keys = 0;
	struct btree_iter iter;
	int oldsize = bch_count_data(b);

	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

	if (start) {
		unsigned i;

		for (i = start; i <= b->nsets; i++)
			keys += b->set[i].data->keys;

		order = get_order(__set_bytes(b->set->data, keys));
	}

	__btree_sort(b, &iter, start, order, false, state);

	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
EXPORT_SYMBOL(bch_btree_sort_partial);

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
				    struct btree_iter *iter,
				    struct bset_sort_state *state)
{
	__btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state)
{
	uint64_t start_time = local_clock();
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->set->data, &iter, false, true);

	bch_time_stats_update(&state->time, start_time);

	new->set->size = 0; // XXX: why?
}
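
/*
 * Illustrative numbers for the lazy sort heuristic below: SORT_CRIT is 512
 * u64s, and with page_order == 3 state->crit_factor == int_sqrt(8) == 2.
 * Scanning from the newest set backwards, sets i..nsets are merged into one
 * as soon as set i holds fewer than 512 * 2^(nsets - i) u64s of keys - an
 * older set must be proportionally larger before we pay to rewrite it.
 */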
#define SORT_CRIT	(4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
	unsigned crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
	if (!b->nsets)
		goto out;

	for (i = b->nsets - 1; i >= 0; --i) {
		crit *= state->crit_factor;

		if (b->set[i].data->keys < crit) {
			bch_btree_sort_partial(b, i, state);
			return;
		}
	}

	/* Sort if we'd overflow */
	if (b->nsets + 1 == MAX_BSETS) {
		bch_btree_sort(b, state);
		return;
	}

out:
	bch_bset_build_written_tree(b);
}
EXPORT_SYMBOL(bch_btree_sort_lazy);

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
	unsigned i;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->set[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}
}