// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72
#define MAX_GC_TIMES		100
#define MIN_GC_NODES		100
#define GC_SLEEP_MS		100

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)						\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

#define insert_lock(s, b)	((b)->level <= (s)->lock)

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}
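/*
 * Reading a node: the loop below walks the bsets laid out back to back in
 * the node's bucket, accepting them while their seq matches the first
 * bset's, and pushes each one onto a sorted iterator; everything is then
 * merge sorted back into the first set. Rough on-disk picture:
 *
 *	bucket:	| bset 0 | bset 1 | ... | unwritten space |
 *
 * The first block whose seq doesn't match marks the end of the valid
 * bsets; any matching seq found past that point means corruption.
 */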
void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
	 */
	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c->cache));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c->cache))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
	mempool_free(iter, &b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}
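/*
 * Two completion paths: btree_node_write_done() is used when the bio's
 * data pages were bounce buffers allocated in do_btree_node_write() and
 * frees them first; __btree_node_write_done() is the common tail that
 * releases the bio, finishes the btree_write, and reschedules the node's
 * delayed write if it went dirty again in the meantime.
 */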
static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}
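/*
 * Write path sketch: stamp the version and checksum on the last (dirty)
 * bset, point a new bbio at it, and try to copy the bset into freshly
 * allocated bounce pages, presumably so the write stays stable while the
 * in-memory copy keeps being appended to. If that allocation fails we
 * fall back to writing the bset's own pages and waiting for the IO
 * synchronously. The bounce buffer reasoning is an inference from the
 * two branches below, not something the original comments spell out.
 */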
static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c->cache));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		struct bio_vec *bv;
		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, b->bio, iter_all) {
			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
			addr += PAGE_SIZE;
		}

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		/*
		 * No problem for multipage bvec since the bio is
		 * just allocated
		 */
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty, &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c->cache));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned int nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}
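/*
 * Leaf nodes are written back lazily: bch_btree_leaf_dirty() below marks
 * the node dirty and arms a 30 second delayed work (btree_node_write_work()),
 * but forces an immediate write once the open bset outgrows roughly a page -
 * the PAGE_SIZE - 48 test presumably leaves slop for the bset header,
 * though the original doesn't document the constant.
 */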
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	set_btree_node_dirty(b);

	/*
	 * w->journal is always the oldest journal pin of all bkeys
	 * in the leaf node, to make sure the oldest jset seq won't
	 * be increased before this btree node is flushed.
	 */
	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

#define mca_reserve(c)	(((!IS_ERR_OR_NULL(c->root) && c->root->level) \
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned int btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned int,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	/*
	 * kzalloc() is necessary here for initialization,
	 * see code comments in bch_btree_keys_init().
	 */
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}
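/*
 * mca_reap() tries to evict one cached node: it returns 0 with b->lock
 * held for writing on success, -ENOMEM if the node can't be reaped right
 * now. With flush == false only clean, idle nodes qualify; with
 * flush == true dirty nodes are written out first and in-flight writes
 * are waited on.
 */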
static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

retry:
	/*
	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
	 * __bch_btree_node_write(). To avoid an extra flush, acquire
	 * b->write_lock before checking BTREE_NODE_dirty bit.
	 */
	mutex_lock(&b->write_lock);
	/*
	 * If this btree node is selected in btree_flush_write() by journal
	 * code, delay and retry until the node is flushed by journal code
	 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		pr_debug("bnode %p is flushing by journal, retry\n", b);
		mutex_unlock(&b->write_lock);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int btree_cache_used;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	if (nr == 0)
		nr = 1;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	btree_cache_used = c->btree_cache_used;
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
		if (nr <= 0)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
		nr--;
		i++;
	}

	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		if (nr <= 0 || i >= btree_cache_used)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}

		nr--;
		i++;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed * c->btree_pages;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}
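/*
 * Shrinker accounting is in pages, not nodes: bch_mca_count() reports
 * mca_can_free() * btree_pages, and bch_mca_scan() converts nr_to_scan
 * into nodes on the way in and reports freed * btree_pages on the way
 * out, so the two stay consistent.
 */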
void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		/*
		 * This function is called by cache_set_free(), no I/O
		 * request on cache now, it is unnecessary to acquire
		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
		 */
		if (btree_node_dirty(b)) {
			btree_complete_write(b, btree_current_write(b));
			clear_bit(BTREE_NODE_dirty, &b->flags);
		}
		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
	if (!c->verify_ondisk) {
		/*
		 * Don't worry about the mca_reserve() buckets
		 * allocated in previous for-loop, they will be
		 * handled properly in bch_cache_set_unregister().
		 */
		return -ENOMEM;
	}

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;

	if (register_shrinker(&c->shrink))
		pr_warn("bcache: %s: could not register shrinker\n",
			__func__);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (likely(c->btree_cache_alloc_lock == NULL)) {
		c->btree_cache_alloc_lock = current;
	} else if (c->btree_cache_alloc_lock != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock(&c->btree_cannibalize_lock);
		return -EINTR;
	}
	spin_unlock(&c->btree_cannibalize_lock);

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}
/*
 * We can only have one thread cannibalizing other cached btree nodes at a
 * time, or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize_lock() takes. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
	spin_unlock(&c->btree_cannibalize_lock);
}

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}
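/*
 * mca_alloc() return conventions, inferred from the code above: NULL if
 * the node is already in the cache (the caller should look it up again),
 * ERR_PTR(-ENOMEM)/ERR_PTR(-EINTR) if allocation and cannibalizing both
 * failed, otherwise a write locked node with b->parent poisoned to ~0.
 */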
/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	b->parent = parent;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	return b;
}

static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}
/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

retry:
	mutex_lock(&b->write_lock);
	/*
	 * If the btree node is selected and flushing in btree_flush_write(),
	 * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
	 * only then is it safe to free the btree node here. Otherwise freeing
	 * it would race with the journal flush.
	 */
	if (btree_node_journal_flush(b)) {
		mutex_unlock(&b->write_lock);
		pr_debug("bnode %p journal_flush set, retry\n", b);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b)) {
		btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);
	}

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			  "Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);

	if (!IS_ERR_OR_NULL(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned int i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}

static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca = c->cache;
	unsigned int reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		mutex_unlock(&c->bucket_lock);
		return -EINTR;
	}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}
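/*
 * The reserve demanded above, (root level - b->level) * 2 + 1, looks
 * sized for the worst case insertion: a split at every level between
 * this node and the root (two replacement nodes per level) plus one
 * bucket for a new root. The formula isn't explained in the original;
 * this reading is an inference.
 */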
/* Garbage collection */

static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned int i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->last_gc, PTR_GEN(k, i)))
			g->last_gc = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);
		else if (!GC_MARK(g))
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned int,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    !ptr_stale(c, k, i)) {
			struct bucket *b = PTR_BUCKET(c, k, i);

			b->gen = PTR_GEN(k, i);

			if (level && bkey_cmp(k, &ZERO_KEY))
				b->prio = BTREE_PRIO;
			else if (!level && b->prio == BTREE_PRIO)
				b->prio = INITIAL_PRIO;
		}

	__bch_btree_mark_key(c, level, k);
}

void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
{
	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
}

static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned int keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
		stale = max(stale, btree_mark_key(b, k));
		keys++;

		if (bch_ptr_bad(&b->keys, k))
			continue;

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;
		good_keys++;

		gc->data += KEY_SIZE(k);
	}

	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(&b->keys, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	if (b->c->gc_always_rewrite)
		return true;

	if (stale > 10)
		return true;

	if ((keys - good_keys) * 2 > keys)
		return true;

	return false;
}

#define GC_MERGE_NODES	4U

struct gc_merge_info {
	struct btree	*b;
	unsigned int	keys;
};

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key);
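/*
 * Try to coalesce up to GC_MERGE_NODES adjacent nodes: worthwhile only if
 * the combined keys would fit into one fewer node filled to at most 2/3
 * of the default node size. Returns -EINTR after a merge because the
 * caller's iterator has been invalidated, 0 when nothing was done.
 */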
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned int i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct keylist keylist;
	struct closure cl;
	struct bkey *k;

	bch_keylist_init(&keylist);

	if (btree_check_reserve(b, NULL))
		return 0;

	memset(new_nodes, 0, sizeof(new_nodes));
	closure_init_stack(&cl);

	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->keys.set[0].data, keys,
			 block_bytes(b->c->cache)) > blocks * (nodes - 1))
		return 0;

	for (i = 0; i < nodes; i++) {
		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
		if (IS_ERR_OR_NULL(new_nodes[i]))
			goto out_nocoalesce;
	}

	/*
	 * We have to check the reserve here, after we've allocated our new
	 * nodes, to make sure the insert below will succeed - we also check
	 * before as an optimization to potentially avoid a bunch of expensive
	 * allocs/sorts
	 */
	if (btree_check_reserve(b, NULL))
		goto out_nocoalesce;

	for (i = 0; i < nodes; i++)
		mutex_lock(&new_nodes[i]->write_lock);

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = btree_bset_first(new_nodes[i]);
		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i > 1) {
			for (k = n2->start;
			     k < bset_bkey_last(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k),
						 block_bytes(b->c->cache)) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}
		} else {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + n2->keys,
					 block_bytes(b->c->cache)) >
			    btree_blocks(new_nodes[i]))
				goto out_unlock_nocoalesce;

			keys = n2->keys;
			/* Take the key of the node we're getting rid of */
			last = &r->b->key;
		}

		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
		       btree_blocks(new_nodes[i]));

		if (last)
			bkey_copy_key(&new_nodes[i]->key, last);

		memcpy(bset_bkey_last(n1),
		       n2->start,
		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);

		n1->keys += keys;
		r[i].keys = n1->keys;

		memmove(n2->start,
			bset_bkey_idx(n2, keys),
			(void *) bset_bkey_last(n2) -
			(void *) bset_bkey_idx(n2, keys));

		n2->keys -= keys;

		if (__bch_keylist_realloc(&keylist,
					  bkey_u64s(&new_nodes[i]->key)))
			goto out_unlock_nocoalesce;

		bch_btree_node_write(new_nodes[i], &cl);
		bch_keylist_add(&keylist, &new_nodes[i]->key);
	}

	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

	closure_sync(&cl);

	/* We emptied out this node */
	BUG_ON(btree_bset_first(new_nodes[0])->keys);
	btree_node_free(new_nodes[0]);
	rw_unlock(true, new_nodes[0]);
	new_nodes[0] = NULL;

	for (i = 0; i < nodes; i++) {
		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
			goto out_nocoalesce;

		make_btree_freeing_key(r[i].b, keylist.top);
		bch_keylist_push(&keylist);
	}

	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keylist));

	for (i = 0; i < nodes; i++) {
		btree_node_free(r[i].b);
		rw_unlock(true, r[i].b);

		r[i].b = new_nodes[i];
	}
	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
	r[nodes - 1].b = ERR_PTR(-EINTR);

	trace_bcache_btree_gc_coalesce(nodes);
	gc->nodes--;

	bch_keylist_free(&keylist);

	/* Invalidated our iterator */
	return -EINTR;

out_unlock_nocoalesce:
	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

out_nocoalesce:
	closure_sync(&cl);

	while ((k = bch_keylist_pop(&keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);
	bch_keylist_free(&keylist);

	for (i = 0; i < nodes; i++)
		if (!IS_ERR_OR_NULL(new_nodes[i])) {
			btree_node_free(new_nodes[i]);
			rw_unlock(true, new_nodes[i]);
		}
	return 0;
}

static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
				 struct btree *replace)
{
	struct keylist keys;
	struct btree *n;

	if (btree_check_reserve(b, NULL))
		return 0;

	n = btree_node_alloc_replacement(replace, NULL);

	/* recheck reserve after allocating replacement node */
	if (btree_check_reserve(b, NULL)) {
		btree_node_free(n);
		rw_unlock(true, n);
		return 0;
	}

	bch_btree_node_write_sync(n);

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, &n->key);

	make_btree_freeing_key(replace, keys.top);
	bch_keylist_push(&keys);

	bch_btree_insert_node(b, op, &keys, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keys));

	btree_node_free(replace);
	rw_unlock(true, n);

	/* Invalidated our iterator */
	return -EINTR;
}

static unsigned int btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned int ret = 0;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);

	return ret;
}
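/*
 * Worked example for btree_gc_min_nodes() below: with 200000 btree nodes
 * and MAX_GC_TIMES == 100, each GC batch processes at least 2000 nodes;
 * with only 1000 nodes the MIN_GC_NODES floor of 100 applies instead.
 */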
static size_t btree_gc_min_nodes(struct cache_set *c)
{
	size_t min_nodes;

	/*
	 * Incremental GC pauses for 100ms whenever front side I/O arrives,
	 * so if GC only processed a constant number (100) of nodes per
	 * batch, a large btree would make GC take a very long time, and
	 * the front side I/Os would exhaust the free buckets (no new
	 * bucket can be allocated during GC) and block again. So instead
	 * of a constant batch size, GC divides the work into a constant
	 * number (MAX_GC_TIMES) of batches: the more btree nodes there
	 * are, the more nodes each batch processes, with MIN_GC_NODES as
	 * the lower bound.
	 */
	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
	if (min_nodes < MIN_GC_NODES)
		min_nodes = MIN_GC_NODES;

	return min_nodes;
}

static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	int ret = 0;
	bool should_rewrite;
	struct bkey *k;
	struct btree_iter iter;
	struct gc_merge_info r[GC_MERGE_NODES];
	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;

	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		i->b = ERR_PTR(-EINTR);

	while (1) {
		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
		if (k) {
			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
						  true, b);
			if (IS_ERR(r->b)) {
				ret = PTR_ERR(r->b);
				break;
			}

			r->keys = btree_gc_count_keys(r->b);

			ret = btree_gc_coalesce(b, op, gc, r);
			if (ret)
				break;
		}

		if (!last->b)
			break;

		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
			if (should_rewrite) {
				ret = btree_gc_rewrite_node(b, op, last->b);
				if (ret)
					break;
			}

			if (last->b->level) {
				ret = btree_gc_recurse(last->b, op, writes, gc);
				if (ret)
					break;
			}

			bkey_copy_key(&b->c->gc_done, &last->b->key);

			/*
			 * Must flush leaf nodes before gc ends, since replace
			 * operations aren't journalled
			 */
			mutex_lock(&last->b->write_lock);
			if (btree_node_dirty(last->b))
				bch_btree_node_write(last->b, writes);
			mutex_unlock(&last->b->write_lock);
			rw_unlock(true, last->b);
		}

		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
		r->b = NULL;

		if (atomic_read(&b->c->search_inflight) &&
		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
			gc->nodes_pre = gc->nodes;
			ret = -EAGAIN;
			break;
		}

		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
	}

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		if (!IS_ERR_OR_NULL(i->b)) {
			mutex_lock(&i->b->write_lock);
			if (btree_node_dirty(i->b))
				bch_btree_node_write(i->b, writes);
			mutex_unlock(&i->b->write_lock);
			rw_unlock(true, i->b);
		}

	return ret;
}

static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	int ret = 0;
	bool should_rewrite;

	should_rewrite = btree_gc_mark_node(b, gc);
	if (should_rewrite) {
		n = btree_node_alloc_replacement(b, NULL);

		if (!IS_ERR_OR_NULL(n)) {
			bch_btree_node_write_sync(n);

			bch_btree_set_root(n);
			btree_node_free(b);
			rw_unlock(true, n);

			return -EINTR;
		}
	}

	__bch_btree_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		ret = btree_gc_recurse(b, op, writes, gc);
		if (ret)
			return ret;
	}

	bkey_copy_key(&b->c->gc_done, &b->key);

	return ret;
}
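/*
 * GC progress is checkpointed in c->gc_done: btree_gc_start() resets it
 * to ZERO_KEY, btree_gc_recurse() advances it past each finished subtree,
 * and an -EAGAIN pass resumes iteration from it, so incremental GC never
 * revisits work it already completed.
 */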
static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	ca = c->cache;
	for_each_bucket(b, ca) {
		b->last_gc = b->gen;
		if (!atomic_read(&b->pin)) {
			SET_GC_MARK(b, 0);
			SET_GC_SECTORS_USED(b, 0);
		}
	}

	mutex_unlock(&c->bucket_lock);
}

static void bch_btree_gc_finish(struct cache_set *c)
{
	struct bucket *b;
	struct cache *ca;
	unsigned int i, j;
	uint64_t *k;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	/* don't reclaim buckets to which writeback keys point */
	rcu_read_lock();
	for (i = 0; i < c->devices_max_used; i++) {
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		dc = container_of(d, struct cached_dev, disk);

		spin_lock(&dc->writeback_keys.lock);
		rbtree_postorder_for_each_entry_safe(w, n,
					&dc->writeback_keys.keys, node)
			for (j = 0; j < KEY_PTRS(&w->key); j++)
				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
					    GC_MARK_DIRTY);
		spin_unlock(&dc->writeback_keys.lock);
	}
	rcu_read_unlock();

	c->avail_nbuckets = 0;

	ca = c->cache;
	ca->invalidate_needs_gc = 0;

	for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);

	for (k = ca->prio_buckets;
	     k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);

	for_each_bucket(b, ca) {
		c->need_gc = max(c->need_gc, bucket_gc_gen(b));

		if (atomic_read(&b->pin))
			continue;

		BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));

		if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
			c->avail_nbuckets++;
	}

	mutex_unlock(&c->bucket_lock);
}

static void bch_btree_gc(struct cache_set *c)
{
	int ret;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init(&op, SHRT_MAX);

	btree_gc_start(c);

	/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
	do {
		ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
		closure_sync(&writes);
		cond_resched();

		if (ret == -EAGAIN)
			schedule_timeout_interruptible(msecs_to_jiffies
						       (GC_SLEEP_MS));
		else if (ret)
			pr_warn("gc failed!\n");
	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	bch_btree_gc_finish(c);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.data	<<= 9;
	bch_update_bucket_in_use(c, &stats);
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}
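/*
 * GC runs when gc_should_run() says so: either the allocator has set
 * ca->invalidate_needs_gc (it can't safely invalidate more buckets until
 * stale pointers are collected - an inference from the flag's name and
 * use), or c->sectors_to_gc has counted down below zero.
 */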
static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca = c->cache;

	if (ca->invalidate_needs_gc)
		return true;

	if (atomic_read(&c->sectors_to_gc) < 0)
		return true;

	return false;
}

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;

	while (1) {
		wait_event_interruptible(c->gc_wait,
			   kthread_should_stop() ||
			   test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
			   gc_should_run(c));

		if (kthread_should_stop() ||
		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
			break;

		set_gc_sectors(c);
		bch_btree_gc(c);
	}

	wait_for_kthread_stop();
	return 0;
}

int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
	return PTR_ERR_OR_ZERO(c->gc_thread);
}

/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
	int ret = 0;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(b->c, b->level, k);

	bch_initial_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		bch_btree_iter_init(&b->keys, &iter, NULL);

		do {
			k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad);
			if (k) {
				btree_node_prefetch(b, k);
				/*
				 * initialize c->gc_stats.nodes
				 * for incremental GC
				 */
				b->c->gc_stats.nodes++;
			}

			if (p)
				ret = bcache_btree(check_recurse, p, b, op);

			p = k;
		} while (p && !ret);
	}

	return ret;
}
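/*
 * bch_btree_check_thread() is one of several workers started by
 * bch_btree_check(): each thread claims root node key indexes from the
 * shared check_state->key_idx counter, skips ahead to its claimed key,
 * and checks that subtree, so the threads partition the root's children
 * between them without any further coordination.
 */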
static int bch_btree_check_thread(void *arg)
{
	int ret;
	struct btree_check_info *info = arg;
	struct btree_check_state *check_state = info->state;
	struct cache_set *c = check_state->c;
	struct btree_iter iter;
	struct bkey *k, *p;
	int cur_idx, prev_idx, skip_nr;

	k = p = NULL;
	cur_idx = prev_idx = 0;
	ret = 0;

	/* root node keys are checked before thread created */
	bch_btree_iter_init(&c->root->keys, &iter, NULL);
	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
	BUG_ON(!k);

	p = k;
	while (k) {
		/*
		 * Fetch a root node key index, skip the keys which
		 * should be fetched by other threads, then check the
		 * sub-tree indexed by the fetched key.
		 */
		spin_lock(&check_state->idx_lock);
		cur_idx = check_state->key_idx;
		check_state->key_idx++;
		spin_unlock(&check_state->idx_lock);

		skip_nr = cur_idx - prev_idx;

		while (skip_nr) {
			k = bch_btree_iter_next_filter(&iter,
						       &c->root->keys,
						       bch_ptr_bad);
			if (k)
				p = k;
			else {
				/*
				 * No more keys to check in root node,
				 * current checking threads are enough,
				 * stop creating more.
				 */
				atomic_set(&check_state->enough, 1);
				/* Update check_state->enough earlier */
				smp_mb__after_atomic();
				goto out;
			}
			skip_nr--;
			cond_resched();
		}

		if (p) {
			struct btree_op op;

			btree_node_prefetch(c->root, p);
			c->gc_stats.nodes++;
			bch_btree_op_init(&op, 0);
			ret = bcache_btree(check_recurse, p, c->root, &op);
			if (ret)
				goto out;
		}
		p = NULL;
		prev_idx = cur_idx;
		cond_resched();
	}

out:
	info->result = ret;
	/* update check_state->started among all CPUs */
	smp_mb__before_atomic();
	if (atomic_dec_and_test(&check_state->started))
		wake_up(&check_state->wait);

	return ret;
}

static int bch_btree_chkthread_nr(void)
{
	int n = num_online_cpus()/2;

	if (n == 0)
		n = 1;
	else if (n > BCH_BTR_CHKTHREAD_MAX)
		n = BCH_BTR_CHKTHREAD_MAX;

	return n;
}
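/*
 * bch_btree_check() below drives the initial partial gc: root node keys
 * are marked inline, then (for a multi-level btree) up to
 * bch_btree_chkthread_nr() worker threads - half the online CPUs, clamped
 * to [1, BCH_BTR_CHKTHREAD_MAX] - walk the subtrees in parallel.
 */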
int bch_btree_check(struct cache_set *c)
{
	int ret = 0;
	int i;
	struct bkey *k = NULL;
	struct btree_iter iter;
	struct btree_check_state *check_state;
	char name[32];

	/* check and mark root node keys */
	for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(c, c->root->level, k);

	bch_initial_mark_key(c, c->root->level + 1, &c->root->key);

	if (c->root->level == 0)
		return 0;

	check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
	if (!check_state)
		return -ENOMEM;

	check_state->c = c;
	check_state->total_threads = bch_btree_chkthread_nr();
	check_state->key_idx = 0;
	spin_lock_init(&check_state->idx_lock);
	atomic_set(&check_state->started, 0);
	atomic_set(&check_state->enough, 0);
	init_waitqueue_head(&check_state->wait);

	/*
	 * Run multiple threads to check btree nodes in parallel;
	 * if check_state->enough is non-zero, it means the running
	 * check threads are already enough and it is unnecessary to
	 * create more.
	 */
	for (i = 0; i < check_state->total_threads; i++) {
		/* fetch latest check_state->enough earlier */
		smp_mb__before_atomic();
		if (atomic_read(&check_state->enough))
			break;

		check_state->infos[i].result = 0;
		check_state->infos[i].state = check_state;
		snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
		atomic_inc(&check_state->started);

		check_state->infos[i].thread =
			kthread_run(bch_btree_check_thread,
				    &check_state->infos[i],
				    name);
		if (IS_ERR(check_state->infos[i].thread)) {
			pr_err("failed to run thread bch_btrchk[%d]\n", i);
			for (--i; i >= 0; i--)
				kthread_stop(check_state->infos[i].thread);
			ret = -ENOMEM;
			goto out;
		}
	}

	wait_event_interruptible(check_state->wait,
				 atomic_read(&check_state->started) == 0 ||
				 test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	for (i = 0; i < check_state->total_threads; i++) {
		if (check_state->infos[i].result) {
			ret = check_state->infos[i].result;
			goto out;
		}
	}

out:
	kfree(check_state);
	return ret;
}

void bch_initial_gc_finish(struct cache_set *c)
{
	struct cache *ca = c->cache;
	struct bucket *b;

	bch_btree_gc_finish(c);

	mutex_lock(&c->bucket_lock);

	/*
	 * We need to put some unused buckets directly on the prio freelist in
	 * order to get the allocator thread started - it needs freed buckets in
	 * order to rewrite the prios and gens, and it needs to rewrite prios
	 * and gens in order to free buckets.
	 *
	 * This is only safe for buckets that have no live data in them, which
	 * there should always be some of.
	 */
	for_each_bucket(b, ca) {
		if (fifo_full(&ca->free[RESERVE_PRIO]) &&
		    fifo_full(&ca->free[RESERVE_BTREE]))
			break;

		if (bch_can_invalidate_bucket(ca, b) &&
		    !GC_MARK(b)) {
			__bch_invalidate_one_bucket(ca, b);
			if (!fifo_push(&ca->free[RESERVE_PRIO],
				       b - ca->buckets))
				fifo_push(&ca->free[RESERVE_BTREE],
					  b - ca->buckets);
		}
	}

	mutex_unlock(&c->bucket_lock);
}

/* Btree insertion */

static bool btree_insert_key(struct btree *b, struct bkey *k,
			     struct bkey *replace_key)
{
	unsigned int status;

	BUG_ON(bkey_cmp(k, &b->key) > 0);

	status = bch_btree_insert_key(&b->keys, k, replace_key);
	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
		bch_check_keys(&b->keys, "%u for %s", status,
			       replace_key ? "replace" : "insert");

		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
					      status);
		return true;
	} else
		return false;
}

static size_t insert_u64s_remaining(struct btree *b)
{
	long ret = bch_btree_keys_u64s_remaining(&b->keys);

	/*
	 * Might land in the middle of an existing extent and have to split it
	 */
	if (b->keys.ops->is_extents)
		ret -= KEY_MAX_U64S;

	return max(ret, 0L);
}

static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
				  struct keylist *insert_keys,
				  struct bkey *replace_key)
{
	bool ret = false;
	int oldsize = bch_count_data(&b->keys);

	while (!bch_keylist_empty(insert_keys)) {
		struct bkey *k = insert_keys->keys;

		if (bkey_u64s(k) > insert_u64s_remaining(b))
			break;

		if (bkey_cmp(k, &b->key) <= 0) {
			if (!b->level)
				bkey_put(b->c, k);

			ret |= btree_insert_key(b, k, replace_key);
			bch_keylist_pop_front(insert_keys);
		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
			BKEY_PADDED(key) temp;
			bkey_copy(&temp.key, insert_keys->keys);

			bch_cut_back(&b->key, &temp.key);
			bch_cut_front(&b->key, insert_keys->keys);

			ret |= btree_insert_key(b, &temp.key, replace_key);
			break;
		} else {
			break;
		}
	}

	if (!ret)
		op->insert_collision = true;

	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);

	BUG_ON(bch_count_data(&b->keys) < oldsize);
	return ret;
}
static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
		       struct bkey *replace_key)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	uint64_t start_time = local_clock();
	struct closure cl;
	struct keylist parent_keys;

	closure_init_stack(&cl);
	bch_keylist_init(&parent_keys);

	if (btree_check_reserve(b, op)) {
		if (!b->level)
			return -EINTR;
		else
			WARN(1, "insufficient reserve for split\n");
	}

	n1 = btree_node_alloc_replacement(b, op);
	if (IS_ERR(n1))
		goto err;

	split = set_blocks(btree_bset_first(n1),
			   block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned int keys = 0;

		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);

		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
		if (IS_ERR(n2))
			goto err_free1;

		if (!b->parent) {
			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
			if (IS_ERR(n3))
				goto err_free2;
		}

		mutex_lock(&n1->write_lock);
		mutex_lock(&n2->write_lock);

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);

		/*
		 * Has to be a linear search because we don't have an auxiliary
		 * search tree yet
		 */

		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
							keys));

		bkey_copy_key(&n1->key,
			      bset_bkey_idx(btree_bset_first(n1), keys));
		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));

		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
		btree_bset_first(n1)->keys = keys;

		memcpy(btree_bset_first(n2)->start,
		       bset_bkey_last(btree_bset_first(n1)),
		       btree_bset_first(n2)->keys * sizeof(uint64_t));

		bkey_copy_key(&n2->key, &b->key);

		bch_keylist_add(&parent_keys, &n2->key);
		bch_btree_node_write(n2, &cl);
		mutex_unlock(&n2->write_lock);
		rw_unlock(true, n2);
	} else {
		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);

		mutex_lock(&n1->write_lock);
		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
	}

	bch_keylist_add(&parent_keys, &n1->key);
	bch_btree_node_write(n1, &cl);
	mutex_unlock(&n1->write_lock);

	if (n3) {
		/* Depth increases, make a new root */
		mutex_lock(&n3->write_lock);
		bkey_copy_key(&n3->key, &MAX_KEY);
		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
		bch_btree_node_write(n3, &cl);
		mutex_unlock(&n3->write_lock);

		closure_sync(&cl);
		bch_btree_set_root(n3);
		rw_unlock(true, n3);
	} else if (!b->parent) {
		/* Root filled up but didn't need to be split */
		closure_sync(&cl);
		bch_btree_set_root(n1);
	} else {
		/* Split a non root node */
		closure_sync(&cl);
		make_btree_freeing_key(b, parent_keys.top);
		bch_keylist_push(&parent_keys);

		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
		BUG_ON(!bch_keylist_empty(&parent_keys));
	}

	btree_node_free(b);
	rw_unlock(true, n1);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	bkey_put(b->c, &n2->key);
	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	bkey_put(b->c, &n1->key);
	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	WARN(1, "bcache: btree split failed (level %u)", b->level);

	if (n3 == ERR_PTR(-EAGAIN) ||
	    n2 == ERR_PTR(-EAGAIN) ||
	    n1 == ERR_PTR(-EAGAIN))
		return -EAGAIN;

	return -ENOMEM;
}

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	struct closure cl;

	BUG_ON(b->level && replace_key);

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);

	if (write_block(b) != btree_bset_last(b) &&
	    b->keys.last_set_unwritten)
		bch_btree_init_next(b); /* just wrote a set */

	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
		mutex_unlock(&b->write_lock);
		goto split;
	}

	BUG_ON(write_block(b) != btree_bset_last(b));

	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
		if (!b->level)
			bch_btree_leaf_dirty(b, journal_ref);
		else
			bch_btree_node_write(b, &cl);
	}

	mutex_unlock(&b->write_lock);

	/* wait for btree node write if necessary, after unlock */
	closure_sync(&cl);

	return 0;
split:
	if (current->bio_list) {
		op->lock = b->c->root->level + 1;
		return -EAGAIN;
	} else if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;
	} else {
		/* Invalidated all iterators */
		int ret = btree_split(b, op, insert_keys, replace_key);

		if (bch_keylist_empty(insert_keys))
			return 0;
		else if (!ret)
			return -EINTR;
		return ret;
	}
}
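/*
 * Hedged note on the error contract above, with a sketch of the retry
 * loop a caller effectively runs (example_traverse() is hypothetical; in
 * bcache the bcache_btree_root() machinery plays this role):
 *
 *	do {
 *		ret = example_traverse(c, op, insert_keys);
 *	} while (ret == -EINTR);
 *
 * -EINTR means "op->lock has been raised, drop locks and retry from the
 * root"; -EAGAIN means we are on a bio submission path (current->bio_list
 * is set), cannot safely block, and must punt to a context that can.
 */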
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

struct btree_insert_op {
	struct btree_op	op;
	struct keylist	*keys;
	atomic_t	*journal_ref;
	struct bkey	*replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys = keys;
	op.journal_ref = journal_ref;
	op.replace_key = replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i inserting btree keys\n", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}

void bch_btree_set_root(struct btree *b)
{
	unsigned int i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = bcache_btree(map_nodes_recurse, k, b,
					   op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
}
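/*
 * Illustrative sketch, not part of bcache: a minimal btree_map_nodes_fn.
 * The callback returns MAP_CONTINUE to keep walking or MAP_DONE to stop
 * early; with MAP_ALL_NODES it is invoked for interior nodes as well as
 * leaves.  The struct, function and field names here are hypothetical.
 */
struct example_node_count {
	struct btree_op	op;
	unsigned int	nodes_seen;
};

static int __maybe_unused example_count_fn(struct btree_op *b_op,
					   struct btree *b)
{
	struct example_node_count *count =
		container_of(b_op, struct example_node_count, op);

	count->nodes_seen++;
	return MAP_CONTINUE;
}

/*
 * A caller would do, roughly:
 *
 *	struct example_node_count count = { .nodes_seen = 0 };
 *
 *	bch_btree_op_init(&count.op, SHRT_MAX);
 *	__bch_btree_map_nodes(&count.op, c, &ZERO_KEY,
 *			      example_count_fn, MAP_ALL_NODES);
 */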
int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
			       struct bkey *from, btree_map_keys_fn *fn,
			       int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: bcache_btree(map_keys_recurse, k,
				       b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned int	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) > 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}
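/*
 * Illustrative sketch, not part of bcache: the smallest useful
 * keybuf_pred_fn.  bch_refill_keybuf() above calls the predicate on each
 * candidate key and buffers only those it returns true for; the writeback
 * code's real predicate is essentially this check.
 */
static bool __maybe_unused example_dirty_pred(struct keybuf *buf,
					      struct bkey *k)
{
	/* buffer only extents whose dirty bit is set */
	return KEY_DIRTY(k);
}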
static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;

	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished\n");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}
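/*
 * Hedged usage sketch, not part of bcache: how a consumer drives a keybuf
 * end to end.  pred and example_process() are hypothetical stand-ins; the
 * real consumer (writeback) leaves w->private set while a key is in
 * flight and frees the entry with bch_keybuf_del() once it is done:
 *
 *	struct keybuf buf;
 *	struct keybuf_key *w;
 *
 *	bch_keybuf_init(&buf);
 *	while ((w = bch_keybuf_next_rescan(c, &buf, &MAX_KEY, pred))) {
 *		example_process(w);		// key is claimed via w->private
 *		bch_keybuf_del(&buf, w);	// release when finished
 *	}
 */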