1 /* 2 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com> 3 * 4 * Uses a block device as cache for other block devices; optimized for SSDs. 5 * All allocation is done in buckets, which should match the erase block size 6 * of the device. 7 * 8 * Buckets containing cached data are kept on a heap sorted by priority; 9 * bucket priority is increased on cache hit, and periodically all the buckets 10 * on the heap have their priority scaled down. This currently is just used as 11 * an LRU but in the future should allow for more intelligent heuristics. 12 * 13 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the 14 * counter. Garbage collection is used to remove stale pointers. 15 * 16 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather 17 * as keys are inserted we only sort the pages that have not yet been written. 18 * When garbage collection is run, we resort the entire node. 19 * 20 * All configuration is done via sysfs; see Documentation/bcache.txt. 21 */ 22 23 #include "bcache.h" 24 #include "btree.h" 25 #include "debug.h" 26 #include "extents.h" 27 28 #include <linux/slab.h> 29 #include <linux/bitops.h> 30 #include <linux/hash.h> 31 #include <linux/kthread.h> 32 #include <linux/prefetch.h> 33 #include <linux/random.h> 34 #include <linux/rcupdate.h> 35 #include <trace/events/bcache.h> 36 37 /* 38 * Todo: 39 * register_bcache: Return errors out to userspace correctly 40 * 41 * Writeback: don't undirty key until after a cache flush 42 * 43 * Create an iterator for key pointers 44 * 45 * On btree write error, mark bucket such that it won't be freed from the cache 46 * 47 * Journalling: 48 * Check for bad keys in replay 49 * Propagate barriers 50 * Refcount journal entries in journal_replay 51 * 52 * Garbage collection: 53 * Finish incremental gc 54 * Gc should free old UUIDs, data for invalid UUIDs 55 * 56 * Provide a way to list backing device UUIDs we have data cached for, and 57 * probably how long it's been since we've seen them, and a way to invalidate 58 * dirty data for devices that will never be attached again 59 * 60 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so 61 * that based on that and how much dirty data we have we can keep writeback 62 * from being starved 63 * 64 * Add a tracepoint or somesuch to watch for writeback starvation 65 * 66 * When btree depth > 1 and splitting an interior node, we have to make sure 67 * alloc_bucket() cannot fail. This should be true but is not completely 68 * obvious. 69 * 70 * Plugging? 71 * 72 * If data write is less than hard sector size of ssd, round up offset in open 73 * bucket to the next whole sector 74 * 75 * Superblock needs to be fleshed out for multiple cache devices 76 * 77 * Add a sysfs tunable for the number of writeback IOs in flight 78 * 79 * Add a sysfs tunable for the number of open data buckets 80 * 81 * IO tracking: Can we track when one process is doing io on behalf of another? 82 * IO tracking: Don't use just an average, weigh more recent stuff higher 83 * 84 * Test module load/unload 85 */ 86 87 #define MAX_NEED_GC 64 88 #define MAX_SAVE_PRIO 72 89 90 #define PTR_DIRTY_BIT (((uint64_t) 1 << 36)) 91 92 #define PTR_HASH(c, k) \ 93 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0)) 94 95 #define insert_lock(s, b) ((b)->level <= (s)->lock) 96 97 /* 98 * These macros are for recursing down the btree - they handle the details of 99 * locking and looking up nodes in the cache for you. They're best treated as 100 * mere syntax when reading code that uses them. 101 * 102 * op->lock determines whether we take a read or a write lock at a given depth. 103 * If you've got a read lock and find that you need a write lock (i.e. you're 104 * going to have to split), set op->lock and return -EINTR; btree_root() will 105 * call you again and you'll have the correct lock. 106 */ 107 108 /** 109 * btree - recurse down the btree on a specified key 110 * @fn: function to call, which will be passed the child node 111 * @key: key to recurse on 112 * @b: parent btree node 113 * @op: pointer to struct btree_op 114 */ 115 #define btree(fn, key, b, op, ...) \ 116 ({ \ 117 int _r, l = (b)->level - 1; \ 118 bool _w = l <= (op)->lock; \ 119 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ 120 _w, b); \ 121 if (!IS_ERR(_child)) { \ 122 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ 123 rw_unlock(_w, _child); \ 124 } else \ 125 _r = PTR_ERR(_child); \ 126 _r; \ 127 }) 128 129 /** 130 * btree_root - call a function on the root of the btree 131 * @fn: function to call, which will be passed the child node 132 * @c: cache set 133 * @op: pointer to struct btree_op 134 */ 135 #define btree_root(fn, c, op, ...) \ 136 ({ \ 137 int _r = -EINTR; \ 138 do { \ 139 struct btree *_b = (c)->root; \ 140 bool _w = insert_lock(op, _b); \ 141 rw_lock(_w, _b, _b->level); \ 142 if (_b == (c)->root && \ 143 _w == insert_lock(op, _b)) { \ 144 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ 145 } \ 146 rw_unlock(_w, _b); \ 147 bch_cannibalize_unlock(c); \ 148 if (_r == -EINTR) \ 149 schedule(); \ 150 } while (_r == -EINTR); \ 151 \ 152 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ 153 _r; \ 154 }) 155 156 static inline struct bset *write_block(struct btree *b) 157 { 158 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); 159 } 160 161 static void bch_btree_init_next(struct btree *b) 162 { 163 /* If not a leaf node, always sort */ 164 if (b->level && b->keys.nsets) 165 bch_btree_sort(&b->keys, &b->c->sort); 166 else 167 bch_btree_sort_lazy(&b->keys, &b->c->sort); 168 169 if (b->written < btree_blocks(b)) 170 bch_bset_init_next(&b->keys, write_block(b), 171 bset_magic(&b->c->sb)); 172 173 } 174 175 /* Btree key manipulation */ 176 177 void bkey_put(struct cache_set *c, struct bkey *k) 178 { 179 unsigned i; 180 181 for (i = 0; i < KEY_PTRS(k); i++) 182 if (ptr_available(c, k, i)) 183 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); 184 } 185 186 /* Btree IO */ 187 188 static uint64_t btree_csum_set(struct btree *b, struct bset *i) 189 { 190 uint64_t crc = b->key.ptr[0]; 191 void *data = (void *) i + 8, *end = bset_bkey_last(i); 192 193 crc = bch_crc64_update(crc, data, end - data); 194 return crc ^ 0xffffffffffffffffULL; 195 } 196 197 void bch_btree_node_read_done(struct btree *b) 198 { 199 const char *err = "bad btree header"; 200 struct bset *i = btree_bset_first(b); 201 struct btree_iter *iter; 202 203 iter = mempool_alloc(b->c->fill_iter, GFP_NOIO); 204 iter->size = b->c->sb.bucket_size / b->c->sb.block_size; 205 iter->used = 0; 206 207 #ifdef CONFIG_BCACHE_DEBUG 208 iter->b = &b->keys; 209 #endif 210 211 if (!i->seq) 212 goto err; 213 214 for (; 215 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; 216 i = write_block(b)) { 217 err = "unsupported bset version"; 218 if (i->version > BCACHE_BSET_VERSION) 219 goto err; 220 221 err = "bad btree header"; 222 if (b->written + set_blocks(i, block_bytes(b->c)) > 223 btree_blocks(b)) 224 goto err; 225 226 err = "bad magic"; 227 if (i->magic != bset_magic(&b->c->sb)) 228 goto err; 229 230 err = "bad checksum"; 231 switch (i->version) { 232 case 0: 233 if (i->csum != csum_set(i)) 234 goto err; 235 break; 236 case BCACHE_BSET_VERSION: 237 if (i->csum != btree_csum_set(b, i)) 238 goto err; 239 break; 240 } 241 242 err = "empty set"; 243 if (i != b->keys.set[0].data && !i->keys) 244 goto err; 245 246 bch_btree_iter_push(iter, i->start, bset_bkey_last(i)); 247 248 b->written += set_blocks(i, block_bytes(b->c)); 249 } 250 251 err = "corrupted btree"; 252 for (i = write_block(b); 253 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); 254 i = ((void *) i) + block_bytes(b->c)) 255 if (i->seq == b->keys.set[0].data->seq) 256 goto err; 257 258 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); 259 260 i = b->keys.set[0].data; 261 err = "short btree key"; 262 if (b->keys.set[0].size && 263 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) 264 goto err; 265 266 if (b->written < btree_blocks(b)) 267 bch_bset_init_next(&b->keys, write_block(b), 268 bset_magic(&b->c->sb)); 269 out: 270 mempool_free(iter, b->c->fill_iter); 271 return; 272 err: 273 set_btree_node_io_error(b); 274 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", 275 err, PTR_BUCKET_NR(b->c, &b->key, 0), 276 bset_block_offset(b, i), i->keys); 277 goto out; 278 } 279 280 static void btree_node_read_endio(struct bio *bio) 281 { 282 struct closure *cl = bio->bi_private; 283 closure_put(cl); 284 } 285 286 static void bch_btree_node_read(struct btree *b) 287 { 288 uint64_t start_time = local_clock(); 289 struct closure cl; 290 struct bio *bio; 291 292 trace_bcache_btree_read(b); 293 294 closure_init_stack(&cl); 295 296 bio = bch_bbio_alloc(b->c); 297 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; 298 bio->bi_end_io = btree_node_read_endio; 299 bio->bi_private = &cl; 300 bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); 301 302 bch_bio_map(bio, b->keys.set[0].data); 303 304 bch_submit_bbio(bio, b->c, &b->key, 0); 305 closure_sync(&cl); 306 307 if (bio->bi_error) 308 set_btree_node_io_error(b); 309 310 bch_bbio_free(bio, b->c); 311 312 if (btree_node_io_error(b)) 313 goto err; 314 315 bch_btree_node_read_done(b); 316 bch_time_stats_update(&b->c->btree_read_time, start_time); 317 318 return; 319 err: 320 bch_cache_set_error(b->c, "io error reading bucket %zu", 321 PTR_BUCKET_NR(b->c, &b->key, 0)); 322 } 323 324 static void btree_complete_write(struct btree *b, struct btree_write *w) 325 { 326 if (w->prio_blocked && 327 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) 328 wake_up_allocators(b->c); 329 330 if (w->journal) { 331 atomic_dec_bug(w->journal); 332 __closure_wake_up(&b->c->journal.wait); 333 } 334 335 w->prio_blocked = 0; 336 w->journal = NULL; 337 } 338 339 static void btree_node_write_unlock(struct closure *cl) 340 { 341 struct btree *b = container_of(cl, struct btree, io); 342 343 up(&b->io_mutex); 344 } 345 346 static void __btree_node_write_done(struct closure *cl) 347 { 348 struct btree *b = container_of(cl, struct btree, io); 349 struct btree_write *w = btree_prev_write(b); 350 351 bch_bbio_free(b->bio, b->c); 352 b->bio = NULL; 353 btree_complete_write(b, w); 354 355 if (btree_node_dirty(b)) 356 schedule_delayed_work(&b->work, 30 * HZ); 357 358 closure_return_with_destructor(cl, btree_node_write_unlock); 359 } 360 361 static void btree_node_write_done(struct closure *cl) 362 { 363 struct btree *b = container_of(cl, struct btree, io); 364 struct bio_vec *bv; 365 int n; 366 367 bio_for_each_segment_all(bv, b->bio, n) 368 __free_page(bv->bv_page); 369 370 __btree_node_write_done(cl); 371 } 372 373 static void btree_node_write_endio(struct bio *bio) 374 { 375 struct closure *cl = bio->bi_private; 376 struct btree *b = container_of(cl, struct btree, io); 377 378 if (bio->bi_error) 379 set_btree_node_io_error(b); 380 381 bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree"); 382 closure_put(cl); 383 } 384 385 static void do_btree_node_write(struct btree *b) 386 { 387 struct closure *cl = &b->io; 388 struct bset *i = btree_bset_last(b); 389 BKEY_PADDED(key) k; 390 391 i->version = BCACHE_BSET_VERSION; 392 i->csum = btree_csum_set(b, i); 393 394 BUG_ON(b->bio); 395 b->bio = bch_bbio_alloc(b->c); 396 397 b->bio->bi_end_io = btree_node_write_endio; 398 b->bio->bi_private = cl; 399 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); 400 bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA); 401 bch_bio_map(b->bio, i); 402 403 /* 404 * If we're appending to a leaf node, we don't technically need FUA - 405 * this write just needs to be persisted before the next journal write, 406 * which will be marked FLUSH|FUA. 407 * 408 * Similarly if we're writing a new btree root - the pointer is going to 409 * be in the next journal entry. 410 * 411 * But if we're writing a new btree node (that isn't a root) or 412 * appending to a non leaf btree node, we need either FUA or a flush 413 * when we write the parent with the new pointer. FUA is cheaper than a 414 * flush, and writes appending to leaf nodes aren't blocking anything so 415 * just make all btree node writes FUA to keep things sane. 416 */ 417 418 bkey_copy(&k.key, &b->key); 419 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + 420 bset_sector_offset(&b->keys, i)); 421 422 if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { 423 int j; 424 struct bio_vec *bv; 425 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 426 427 bio_for_each_segment_all(bv, b->bio, j) 428 memcpy(page_address(bv->bv_page), 429 base + j * PAGE_SIZE, PAGE_SIZE); 430 431 bch_submit_bbio(b->bio, b->c, &k.key, 0); 432 433 continue_at(cl, btree_node_write_done, NULL); 434 } else { 435 b->bio->bi_vcnt = 0; 436 bch_bio_map(b->bio, i); 437 438 bch_submit_bbio(b->bio, b->c, &k.key, 0); 439 440 closure_sync(cl); 441 continue_at_nobarrier(cl, __btree_node_write_done, NULL); 442 } 443 } 444 445 void __bch_btree_node_write(struct btree *b, struct closure *parent) 446 { 447 struct bset *i = btree_bset_last(b); 448 449 lockdep_assert_held(&b->write_lock); 450 451 trace_bcache_btree_write(b); 452 453 BUG_ON(current->bio_list); 454 BUG_ON(b->written >= btree_blocks(b)); 455 BUG_ON(b->written && !i->keys); 456 BUG_ON(btree_bset_first(b)->seq != i->seq); 457 bch_check_keys(&b->keys, "writing"); 458 459 cancel_delayed_work(&b->work); 460 461 /* If caller isn't waiting for write, parent refcount is cache set */ 462 down(&b->io_mutex); 463 closure_init(&b->io, parent ?: &b->c->cl); 464 465 clear_bit(BTREE_NODE_dirty, &b->flags); 466 change_bit(BTREE_NODE_write_idx, &b->flags); 467 468 do_btree_node_write(b); 469 470 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, 471 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); 472 473 b->written += set_blocks(i, block_bytes(b->c)); 474 } 475 476 void bch_btree_node_write(struct btree *b, struct closure *parent) 477 { 478 unsigned nsets = b->keys.nsets; 479 480 lockdep_assert_held(&b->lock); 481 482 __bch_btree_node_write(b, parent); 483 484 /* 485 * do verify if there was more than one set initially (i.e. we did a 486 * sort) and we sorted down to a single set: 487 */ 488 if (nsets && !b->keys.nsets) 489 bch_btree_verify(b); 490 491 bch_btree_init_next(b); 492 } 493 494 static void bch_btree_node_write_sync(struct btree *b) 495 { 496 struct closure cl; 497 498 closure_init_stack(&cl); 499 500 mutex_lock(&b->write_lock); 501 bch_btree_node_write(b, &cl); 502 mutex_unlock(&b->write_lock); 503 504 closure_sync(&cl); 505 } 506 507 static void btree_node_write_work(struct work_struct *w) 508 { 509 struct btree *b = container_of(to_delayed_work(w), struct btree, work); 510 511 mutex_lock(&b->write_lock); 512 if (btree_node_dirty(b)) 513 __bch_btree_node_write(b, NULL); 514 mutex_unlock(&b->write_lock); 515 } 516 517 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) 518 { 519 struct bset *i = btree_bset_last(b); 520 struct btree_write *w = btree_current_write(b); 521 522 lockdep_assert_held(&b->write_lock); 523 524 BUG_ON(!b->written); 525 BUG_ON(!i->keys); 526 527 if (!btree_node_dirty(b)) 528 schedule_delayed_work(&b->work, 30 * HZ); 529 530 set_btree_node_dirty(b); 531 532 if (journal_ref) { 533 if (w->journal && 534 journal_pin_cmp(b->c, w->journal, journal_ref)) { 535 atomic_dec_bug(w->journal); 536 w->journal = NULL; 537 } 538 539 if (!w->journal) { 540 w->journal = journal_ref; 541 atomic_inc(w->journal); 542 } 543 } 544 545 /* Force write if set is too big */ 546 if (set_bytes(i) > PAGE_SIZE - 48 && 547 !current->bio_list) 548 bch_btree_node_write(b, NULL); 549 } 550 551 /* 552 * Btree in memory cache - allocation/freeing 553 * mca -> memory cache 554 */ 555 556 #define mca_reserve(c) (((c->root && c->root->level) \ 557 ? c->root->level : 1) * 8 + 16) 558 #define mca_can_free(c) \ 559 max_t(int, 0, c->btree_cache_used - mca_reserve(c)) 560 561 static void mca_data_free(struct btree *b) 562 { 563 BUG_ON(b->io_mutex.count != 1); 564 565 bch_btree_keys_free(&b->keys); 566 567 b->c->btree_cache_used--; 568 list_move(&b->list, &b->c->btree_cache_freed); 569 } 570 571 static void mca_bucket_free(struct btree *b) 572 { 573 BUG_ON(btree_node_dirty(b)); 574 575 b->key.ptr[0] = 0; 576 hlist_del_init_rcu(&b->hash); 577 list_move(&b->list, &b->c->btree_cache_freeable); 578 } 579 580 static unsigned btree_order(struct bkey *k) 581 { 582 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1); 583 } 584 585 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) 586 { 587 if (!bch_btree_keys_alloc(&b->keys, 588 max_t(unsigned, 589 ilog2(b->c->btree_pages), 590 btree_order(k)), 591 gfp)) { 592 b->c->btree_cache_used++; 593 list_move(&b->list, &b->c->btree_cache); 594 } else { 595 list_move(&b->list, &b->c->btree_cache_freed); 596 } 597 } 598 599 static struct btree *mca_bucket_alloc(struct cache_set *c, 600 struct bkey *k, gfp_t gfp) 601 { 602 struct btree *b = kzalloc(sizeof(struct btree), gfp); 603 if (!b) 604 return NULL; 605 606 init_rwsem(&b->lock); 607 lockdep_set_novalidate_class(&b->lock); 608 mutex_init(&b->write_lock); 609 lockdep_set_novalidate_class(&b->write_lock); 610 INIT_LIST_HEAD(&b->list); 611 INIT_DELAYED_WORK(&b->work, btree_node_write_work); 612 b->c = c; 613 sema_init(&b->io_mutex, 1); 614 615 mca_data_alloc(b, k, gfp); 616 return b; 617 } 618 619 static int mca_reap(struct btree *b, unsigned min_order, bool flush) 620 { 621 struct closure cl; 622 623 closure_init_stack(&cl); 624 lockdep_assert_held(&b->c->bucket_lock); 625 626 if (!down_write_trylock(&b->lock)) 627 return -ENOMEM; 628 629 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); 630 631 if (b->keys.page_order < min_order) 632 goto out_unlock; 633 634 if (!flush) { 635 if (btree_node_dirty(b)) 636 goto out_unlock; 637 638 if (down_trylock(&b->io_mutex)) 639 goto out_unlock; 640 up(&b->io_mutex); 641 } 642 643 mutex_lock(&b->write_lock); 644 if (btree_node_dirty(b)) 645 __bch_btree_node_write(b, &cl); 646 mutex_unlock(&b->write_lock); 647 648 closure_sync(&cl); 649 650 /* wait for any in flight btree write */ 651 down(&b->io_mutex); 652 up(&b->io_mutex); 653 654 return 0; 655 out_unlock: 656 rw_unlock(true, b); 657 return -ENOMEM; 658 } 659 660 static unsigned long bch_mca_scan(struct shrinker *shrink, 661 struct shrink_control *sc) 662 { 663 struct cache_set *c = container_of(shrink, struct cache_set, shrink); 664 struct btree *b, *t; 665 unsigned long i, nr = sc->nr_to_scan; 666 unsigned long freed = 0; 667 668 if (c->shrinker_disabled) 669 return SHRINK_STOP; 670 671 if (c->btree_cache_alloc_lock) 672 return SHRINK_STOP; 673 674 /* Return -1 if we can't do anything right now */ 675 if (sc->gfp_mask & __GFP_IO) 676 mutex_lock(&c->bucket_lock); 677 else if (!mutex_trylock(&c->bucket_lock)) 678 return -1; 679 680 /* 681 * It's _really_ critical that we don't free too many btree nodes - we 682 * have to always leave ourselves a reserve. The reserve is how we 683 * guarantee that allocating memory for a new btree node can always 684 * succeed, so that inserting keys into the btree can always succeed and 685 * IO can always make forward progress: 686 */ 687 nr /= c->btree_pages; 688 nr = min_t(unsigned long, nr, mca_can_free(c)); 689 690 i = 0; 691 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { 692 if (freed >= nr) 693 break; 694 695 if (++i > 3 && 696 !mca_reap(b, 0, false)) { 697 mca_data_free(b); 698 rw_unlock(true, b); 699 freed++; 700 } 701 } 702 703 for (i = 0; (nr--) && i < c->btree_cache_used; i++) { 704 if (list_empty(&c->btree_cache)) 705 goto out; 706 707 b = list_first_entry(&c->btree_cache, struct btree, list); 708 list_rotate_left(&c->btree_cache); 709 710 if (!b->accessed && 711 !mca_reap(b, 0, false)) { 712 mca_bucket_free(b); 713 mca_data_free(b); 714 rw_unlock(true, b); 715 freed++; 716 } else 717 b->accessed = 0; 718 } 719 out: 720 mutex_unlock(&c->bucket_lock); 721 return freed; 722 } 723 724 static unsigned long bch_mca_count(struct shrinker *shrink, 725 struct shrink_control *sc) 726 { 727 struct cache_set *c = container_of(shrink, struct cache_set, shrink); 728 729 if (c->shrinker_disabled) 730 return 0; 731 732 if (c->btree_cache_alloc_lock) 733 return 0; 734 735 return mca_can_free(c) * c->btree_pages; 736 } 737 738 void bch_btree_cache_free(struct cache_set *c) 739 { 740 struct btree *b; 741 struct closure cl; 742 closure_init_stack(&cl); 743 744 if (c->shrink.list.next) 745 unregister_shrinker(&c->shrink); 746 747 mutex_lock(&c->bucket_lock); 748 749 #ifdef CONFIG_BCACHE_DEBUG 750 if (c->verify_data) 751 list_move(&c->verify_data->list, &c->btree_cache); 752 753 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c))); 754 #endif 755 756 list_splice(&c->btree_cache_freeable, 757 &c->btree_cache); 758 759 while (!list_empty(&c->btree_cache)) { 760 b = list_first_entry(&c->btree_cache, struct btree, list); 761 762 if (btree_node_dirty(b)) 763 btree_complete_write(b, btree_current_write(b)); 764 clear_bit(BTREE_NODE_dirty, &b->flags); 765 766 mca_data_free(b); 767 } 768 769 while (!list_empty(&c->btree_cache_freed)) { 770 b = list_first_entry(&c->btree_cache_freed, 771 struct btree, list); 772 list_del(&b->list); 773 cancel_delayed_work_sync(&b->work); 774 kfree(b); 775 } 776 777 mutex_unlock(&c->bucket_lock); 778 } 779 780 int bch_btree_cache_alloc(struct cache_set *c) 781 { 782 unsigned i; 783 784 for (i = 0; i < mca_reserve(c); i++) 785 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) 786 return -ENOMEM; 787 788 list_splice_init(&c->btree_cache, 789 &c->btree_cache_freeable); 790 791 #ifdef CONFIG_BCACHE_DEBUG 792 mutex_init(&c->verify_lock); 793 794 c->verify_ondisk = (void *) 795 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); 796 797 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); 798 799 if (c->verify_data && 800 c->verify_data->keys.set->data) 801 list_del_init(&c->verify_data->list); 802 else 803 c->verify_data = NULL; 804 #endif 805 806 c->shrink.count_objects = bch_mca_count; 807 c->shrink.scan_objects = bch_mca_scan; 808 c->shrink.seeks = 4; 809 c->shrink.batch = c->btree_pages * 2; 810 register_shrinker(&c->shrink); 811 812 return 0; 813 } 814 815 /* Btree in memory cache - hash table */ 816 817 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) 818 { 819 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; 820 } 821 822 static struct btree *mca_find(struct cache_set *c, struct bkey *k) 823 { 824 struct btree *b; 825 826 rcu_read_lock(); 827 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) 828 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) 829 goto out; 830 b = NULL; 831 out: 832 rcu_read_unlock(); 833 return b; 834 } 835 836 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) 837 { 838 struct task_struct *old; 839 840 old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); 841 if (old && old != current) { 842 if (op) 843 prepare_to_wait(&c->btree_cache_wait, &op->wait, 844 TASK_UNINTERRUPTIBLE); 845 return -EINTR; 846 } 847 848 return 0; 849 } 850 851 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, 852 struct bkey *k) 853 { 854 struct btree *b; 855 856 trace_bcache_btree_cache_cannibalize(c); 857 858 if (mca_cannibalize_lock(c, op)) 859 return ERR_PTR(-EINTR); 860 861 list_for_each_entry_reverse(b, &c->btree_cache, list) 862 if (!mca_reap(b, btree_order(k), false)) 863 return b; 864 865 list_for_each_entry_reverse(b, &c->btree_cache, list) 866 if (!mca_reap(b, btree_order(k), true)) 867 return b; 868 869 WARN(1, "btree cache cannibalize failed\n"); 870 return ERR_PTR(-ENOMEM); 871 } 872 873 /* 874 * We can only have one thread cannibalizing other cached btree nodes at a time, 875 * or we'll deadlock. We use an open coded mutex to ensure that, which a 876 * cannibalize_bucket() will take. This means every time we unlock the root of 877 * the btree, we need to release this lock if we have it held. 878 */ 879 static void bch_cannibalize_unlock(struct cache_set *c) 880 { 881 if (c->btree_cache_alloc_lock == current) { 882 c->btree_cache_alloc_lock = NULL; 883 wake_up(&c->btree_cache_wait); 884 } 885 } 886 887 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, 888 struct bkey *k, int level) 889 { 890 struct btree *b; 891 892 BUG_ON(current->bio_list); 893 894 lockdep_assert_held(&c->bucket_lock); 895 896 if (mca_find(c, k)) 897 return NULL; 898 899 /* btree_free() doesn't free memory; it sticks the node on the end of 900 * the list. Check if there's any freed nodes there: 901 */ 902 list_for_each_entry(b, &c->btree_cache_freeable, list) 903 if (!mca_reap(b, btree_order(k), false)) 904 goto out; 905 906 /* We never free struct btree itself, just the memory that holds the on 907 * disk node. Check the freed list before allocating a new one: 908 */ 909 list_for_each_entry(b, &c->btree_cache_freed, list) 910 if (!mca_reap(b, 0, false)) { 911 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); 912 if (!b->keys.set[0].data) 913 goto err; 914 else 915 goto out; 916 } 917 918 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); 919 if (!b) 920 goto err; 921 922 BUG_ON(!down_write_trylock(&b->lock)); 923 if (!b->keys.set->data) 924 goto err; 925 out: 926 BUG_ON(b->io_mutex.count != 1); 927 928 bkey_copy(&b->key, k); 929 list_move(&b->list, &c->btree_cache); 930 hlist_del_init_rcu(&b->hash); 931 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); 932 933 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); 934 b->parent = (void *) ~0UL; 935 b->flags = 0; 936 b->written = 0; 937 b->level = level; 938 939 if (!b->level) 940 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, 941 &b->c->expensive_debug_checks); 942 else 943 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, 944 &b->c->expensive_debug_checks); 945 946 return b; 947 err: 948 if (b) 949 rw_unlock(true, b); 950 951 b = mca_cannibalize(c, op, k); 952 if (!IS_ERR(b)) 953 goto out; 954 955 return b; 956 } 957 958 /** 959 * bch_btree_node_get - find a btree node in the cache and lock it, reading it 960 * in from disk if necessary. 961 * 962 * If IO is necessary and running under generic_make_request, returns -EAGAIN. 963 * 964 * The btree node will have either a read or a write lock held, depending on 965 * level and op->lock. 966 */ 967 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, 968 struct bkey *k, int level, bool write, 969 struct btree *parent) 970 { 971 int i = 0; 972 struct btree *b; 973 974 BUG_ON(level < 0); 975 retry: 976 b = mca_find(c, k); 977 978 if (!b) { 979 if (current->bio_list) 980 return ERR_PTR(-EAGAIN); 981 982 mutex_lock(&c->bucket_lock); 983 b = mca_alloc(c, op, k, level); 984 mutex_unlock(&c->bucket_lock); 985 986 if (!b) 987 goto retry; 988 if (IS_ERR(b)) 989 return b; 990 991 bch_btree_node_read(b); 992 993 if (!write) 994 downgrade_write(&b->lock); 995 } else { 996 rw_lock(write, b, level); 997 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { 998 rw_unlock(write, b); 999 goto retry; 1000 } 1001 BUG_ON(b->level != level); 1002 } 1003 1004 b->parent = parent; 1005 b->accessed = 1; 1006 1007 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { 1008 prefetch(b->keys.set[i].tree); 1009 prefetch(b->keys.set[i].data); 1010 } 1011 1012 for (; i <= b->keys.nsets; i++) 1013 prefetch(b->keys.set[i].data); 1014 1015 if (btree_node_io_error(b)) { 1016 rw_unlock(write, b); 1017 return ERR_PTR(-EIO); 1018 } 1019 1020 BUG_ON(!b->written); 1021 1022 return b; 1023 } 1024 1025 static void btree_node_prefetch(struct btree *parent, struct bkey *k) 1026 { 1027 struct btree *b; 1028 1029 mutex_lock(&parent->c->bucket_lock); 1030 b = mca_alloc(parent->c, NULL, k, parent->level - 1); 1031 mutex_unlock(&parent->c->bucket_lock); 1032 1033 if (!IS_ERR_OR_NULL(b)) { 1034 b->parent = parent; 1035 bch_btree_node_read(b); 1036 rw_unlock(true, b); 1037 } 1038 } 1039 1040 /* Btree alloc */ 1041 1042 static void btree_node_free(struct btree *b) 1043 { 1044 trace_bcache_btree_node_free(b); 1045 1046 BUG_ON(b == b->c->root); 1047 1048 mutex_lock(&b->write_lock); 1049 1050 if (btree_node_dirty(b)) 1051 btree_complete_write(b, btree_current_write(b)); 1052 clear_bit(BTREE_NODE_dirty, &b->flags); 1053 1054 mutex_unlock(&b->write_lock); 1055 1056 cancel_delayed_work(&b->work); 1057 1058 mutex_lock(&b->c->bucket_lock); 1059 bch_bucket_free(b->c, &b->key); 1060 mca_bucket_free(b); 1061 mutex_unlock(&b->c->bucket_lock); 1062 } 1063 1064 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, 1065 int level, bool wait, 1066 struct btree *parent) 1067 { 1068 BKEY_PADDED(key) k; 1069 struct btree *b = ERR_PTR(-EAGAIN); 1070 1071 mutex_lock(&c->bucket_lock); 1072 retry: 1073 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait)) 1074 goto err; 1075 1076 bkey_put(c, &k.key); 1077 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); 1078 1079 b = mca_alloc(c, op, &k.key, level); 1080 if (IS_ERR(b)) 1081 goto err_free; 1082 1083 if (!b) { 1084 cache_bug(c, 1085 "Tried to allocate bucket that was in btree cache"); 1086 goto retry; 1087 } 1088 1089 b->accessed = 1; 1090 b->parent = parent; 1091 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); 1092 1093 mutex_unlock(&c->bucket_lock); 1094 1095 trace_bcache_btree_node_alloc(b); 1096 return b; 1097 err_free: 1098 bch_bucket_free(c, &k.key); 1099 err: 1100 mutex_unlock(&c->bucket_lock); 1101 1102 trace_bcache_btree_node_alloc_fail(c); 1103 return b; 1104 } 1105 1106 static struct btree *bch_btree_node_alloc(struct cache_set *c, 1107 struct btree_op *op, int level, 1108 struct btree *parent) 1109 { 1110 return __bch_btree_node_alloc(c, op, level, op != NULL, parent); 1111 } 1112 1113 static struct btree *btree_node_alloc_replacement(struct btree *b, 1114 struct btree_op *op) 1115 { 1116 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); 1117 if (!IS_ERR_OR_NULL(n)) { 1118 mutex_lock(&n->write_lock); 1119 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); 1120 bkey_copy_key(&n->key, &b->key); 1121 mutex_unlock(&n->write_lock); 1122 } 1123 1124 return n; 1125 } 1126 1127 static void make_btree_freeing_key(struct btree *b, struct bkey *k) 1128 { 1129 unsigned i; 1130 1131 mutex_lock(&b->c->bucket_lock); 1132 1133 atomic_inc(&b->c->prio_blocked); 1134 1135 bkey_copy(k, &b->key); 1136 bkey_copy_key(k, &ZERO_KEY); 1137 1138 for (i = 0; i < KEY_PTRS(k); i++) 1139 SET_PTR_GEN(k, i, 1140 bch_inc_gen(PTR_CACHE(b->c, &b->key, i), 1141 PTR_BUCKET(b->c, &b->key, i))); 1142 1143 mutex_unlock(&b->c->bucket_lock); 1144 } 1145 1146 static int btree_check_reserve(struct btree *b, struct btree_op *op) 1147 { 1148 struct cache_set *c = b->c; 1149 struct cache *ca; 1150 unsigned i, reserve = (c->root->level - b->level) * 2 + 1; 1151 1152 mutex_lock(&c->bucket_lock); 1153 1154 for_each_cache(ca, c, i) 1155 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { 1156 if (op) 1157 prepare_to_wait(&c->btree_cache_wait, &op->wait, 1158 TASK_UNINTERRUPTIBLE); 1159 mutex_unlock(&c->bucket_lock); 1160 return -EINTR; 1161 } 1162 1163 mutex_unlock(&c->bucket_lock); 1164 1165 return mca_cannibalize_lock(b->c, op); 1166 } 1167 1168 /* Garbage collection */ 1169 1170 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, 1171 struct bkey *k) 1172 { 1173 uint8_t stale = 0; 1174 unsigned i; 1175 struct bucket *g; 1176 1177 /* 1178 * ptr_invalid() can't return true for the keys that mark btree nodes as 1179 * freed, but since ptr_bad() returns true we'll never actually use them 1180 * for anything and thus we don't want mark their pointers here 1181 */ 1182 if (!bkey_cmp(k, &ZERO_KEY)) 1183 return stale; 1184 1185 for (i = 0; i < KEY_PTRS(k); i++) { 1186 if (!ptr_available(c, k, i)) 1187 continue; 1188 1189 g = PTR_BUCKET(c, k, i); 1190 1191 if (gen_after(g->last_gc, PTR_GEN(k, i))) 1192 g->last_gc = PTR_GEN(k, i); 1193 1194 if (ptr_stale(c, k, i)) { 1195 stale = max(stale, ptr_stale(c, k, i)); 1196 continue; 1197 } 1198 1199 cache_bug_on(GC_MARK(g) && 1200 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0), 1201 c, "inconsistent ptrs: mark = %llu, level = %i", 1202 GC_MARK(g), level); 1203 1204 if (level) 1205 SET_GC_MARK(g, GC_MARK_METADATA); 1206 else if (KEY_DIRTY(k)) 1207 SET_GC_MARK(g, GC_MARK_DIRTY); 1208 else if (!GC_MARK(g)) 1209 SET_GC_MARK(g, GC_MARK_RECLAIMABLE); 1210 1211 /* guard against overflow */ 1212 SET_GC_SECTORS_USED(g, min_t(unsigned, 1213 GC_SECTORS_USED(g) + KEY_SIZE(k), 1214 MAX_GC_SECTORS_USED)); 1215 1216 BUG_ON(!GC_SECTORS_USED(g)); 1217 } 1218 1219 return stale; 1220 } 1221 1222 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) 1223 1224 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) 1225 { 1226 unsigned i; 1227 1228 for (i = 0; i < KEY_PTRS(k); i++) 1229 if (ptr_available(c, k, i) && 1230 !ptr_stale(c, k, i)) { 1231 struct bucket *b = PTR_BUCKET(c, k, i); 1232 1233 b->gen = PTR_GEN(k, i); 1234 1235 if (level && bkey_cmp(k, &ZERO_KEY)) 1236 b->prio = BTREE_PRIO; 1237 else if (!level && b->prio == BTREE_PRIO) 1238 b->prio = INITIAL_PRIO; 1239 } 1240 1241 __bch_btree_mark_key(c, level, k); 1242 } 1243 1244 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) 1245 { 1246 uint8_t stale = 0; 1247 unsigned keys = 0, good_keys = 0; 1248 struct bkey *k; 1249 struct btree_iter iter; 1250 struct bset_tree *t; 1251 1252 gc->nodes++; 1253 1254 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { 1255 stale = max(stale, btree_mark_key(b, k)); 1256 keys++; 1257 1258 if (bch_ptr_bad(&b->keys, k)) 1259 continue; 1260 1261 gc->key_bytes += bkey_u64s(k); 1262 gc->nkeys++; 1263 good_keys++; 1264 1265 gc->data += KEY_SIZE(k); 1266 } 1267 1268 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) 1269 btree_bug_on(t->size && 1270 bset_written(&b->keys, t) && 1271 bkey_cmp(&b->key, &t->end) < 0, 1272 b, "found short btree key in gc"); 1273 1274 if (b->c->gc_always_rewrite) 1275 return true; 1276 1277 if (stale > 10) 1278 return true; 1279 1280 if ((keys - good_keys) * 2 > keys) 1281 return true; 1282 1283 return false; 1284 } 1285 1286 #define GC_MERGE_NODES 4U 1287 1288 struct gc_merge_info { 1289 struct btree *b; 1290 unsigned keys; 1291 }; 1292 1293 static int bch_btree_insert_node(struct btree *, struct btree_op *, 1294 struct keylist *, atomic_t *, struct bkey *); 1295 1296 static int btree_gc_coalesce(struct btree *b, struct btree_op *op, 1297 struct gc_stat *gc, struct gc_merge_info *r) 1298 { 1299 unsigned i, nodes = 0, keys = 0, blocks; 1300 struct btree *new_nodes[GC_MERGE_NODES]; 1301 struct keylist keylist; 1302 struct closure cl; 1303 struct bkey *k; 1304 1305 bch_keylist_init(&keylist); 1306 1307 if (btree_check_reserve(b, NULL)) 1308 return 0; 1309 1310 memset(new_nodes, 0, sizeof(new_nodes)); 1311 closure_init_stack(&cl); 1312 1313 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) 1314 keys += r[nodes++].keys; 1315 1316 blocks = btree_default_blocks(b->c) * 2 / 3; 1317 1318 if (nodes < 2 || 1319 __set_blocks(b->keys.set[0].data, keys, 1320 block_bytes(b->c)) > blocks * (nodes - 1)) 1321 return 0; 1322 1323 for (i = 0; i < nodes; i++) { 1324 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); 1325 if (IS_ERR_OR_NULL(new_nodes[i])) 1326 goto out_nocoalesce; 1327 } 1328 1329 /* 1330 * We have to check the reserve here, after we've allocated our new 1331 * nodes, to make sure the insert below will succeed - we also check 1332 * before as an optimization to potentially avoid a bunch of expensive 1333 * allocs/sorts 1334 */ 1335 if (btree_check_reserve(b, NULL)) 1336 goto out_nocoalesce; 1337 1338 for (i = 0; i < nodes; i++) 1339 mutex_lock(&new_nodes[i]->write_lock); 1340 1341 for (i = nodes - 1; i > 0; --i) { 1342 struct bset *n1 = btree_bset_first(new_nodes[i]); 1343 struct bset *n2 = btree_bset_first(new_nodes[i - 1]); 1344 struct bkey *k, *last = NULL; 1345 1346 keys = 0; 1347 1348 if (i > 1) { 1349 for (k = n2->start; 1350 k < bset_bkey_last(n2); 1351 k = bkey_next(k)) { 1352 if (__set_blocks(n1, n1->keys + keys + 1353 bkey_u64s(k), 1354 block_bytes(b->c)) > blocks) 1355 break; 1356 1357 last = k; 1358 keys += bkey_u64s(k); 1359 } 1360 } else { 1361 /* 1362 * Last node we're not getting rid of - we're getting 1363 * rid of the node at r[0]. Have to try and fit all of 1364 * the remaining keys into this node; we can't ensure 1365 * they will always fit due to rounding and variable 1366 * length keys (shouldn't be possible in practice, 1367 * though) 1368 */ 1369 if (__set_blocks(n1, n1->keys + n2->keys, 1370 block_bytes(b->c)) > 1371 btree_blocks(new_nodes[i])) 1372 goto out_nocoalesce; 1373 1374 keys = n2->keys; 1375 /* Take the key of the node we're getting rid of */ 1376 last = &r->b->key; 1377 } 1378 1379 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > 1380 btree_blocks(new_nodes[i])); 1381 1382 if (last) 1383 bkey_copy_key(&new_nodes[i]->key, last); 1384 1385 memcpy(bset_bkey_last(n1), 1386 n2->start, 1387 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); 1388 1389 n1->keys += keys; 1390 r[i].keys = n1->keys; 1391 1392 memmove(n2->start, 1393 bset_bkey_idx(n2, keys), 1394 (void *) bset_bkey_last(n2) - 1395 (void *) bset_bkey_idx(n2, keys)); 1396 1397 n2->keys -= keys; 1398 1399 if (__bch_keylist_realloc(&keylist, 1400 bkey_u64s(&new_nodes[i]->key))) 1401 goto out_nocoalesce; 1402 1403 bch_btree_node_write(new_nodes[i], &cl); 1404 bch_keylist_add(&keylist, &new_nodes[i]->key); 1405 } 1406 1407 for (i = 0; i < nodes; i++) 1408 mutex_unlock(&new_nodes[i]->write_lock); 1409 1410 closure_sync(&cl); 1411 1412 /* We emptied out this node */ 1413 BUG_ON(btree_bset_first(new_nodes[0])->keys); 1414 btree_node_free(new_nodes[0]); 1415 rw_unlock(true, new_nodes[0]); 1416 new_nodes[0] = NULL; 1417 1418 for (i = 0; i < nodes; i++) { 1419 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) 1420 goto out_nocoalesce; 1421 1422 make_btree_freeing_key(r[i].b, keylist.top); 1423 bch_keylist_push(&keylist); 1424 } 1425 1426 bch_btree_insert_node(b, op, &keylist, NULL, NULL); 1427 BUG_ON(!bch_keylist_empty(&keylist)); 1428 1429 for (i = 0; i < nodes; i++) { 1430 btree_node_free(r[i].b); 1431 rw_unlock(true, r[i].b); 1432 1433 r[i].b = new_nodes[i]; 1434 } 1435 1436 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); 1437 r[nodes - 1].b = ERR_PTR(-EINTR); 1438 1439 trace_bcache_btree_gc_coalesce(nodes); 1440 gc->nodes--; 1441 1442 bch_keylist_free(&keylist); 1443 1444 /* Invalidated our iterator */ 1445 return -EINTR; 1446 1447 out_nocoalesce: 1448 closure_sync(&cl); 1449 bch_keylist_free(&keylist); 1450 1451 while ((k = bch_keylist_pop(&keylist))) 1452 if (!bkey_cmp(k, &ZERO_KEY)) 1453 atomic_dec(&b->c->prio_blocked); 1454 1455 for (i = 0; i < nodes; i++) 1456 if (!IS_ERR_OR_NULL(new_nodes[i])) { 1457 btree_node_free(new_nodes[i]); 1458 rw_unlock(true, new_nodes[i]); 1459 } 1460 return 0; 1461 } 1462 1463 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, 1464 struct btree *replace) 1465 { 1466 struct keylist keys; 1467 struct btree *n; 1468 1469 if (btree_check_reserve(b, NULL)) 1470 return 0; 1471 1472 n = btree_node_alloc_replacement(replace, NULL); 1473 1474 /* recheck reserve after allocating replacement node */ 1475 if (btree_check_reserve(b, NULL)) { 1476 btree_node_free(n); 1477 rw_unlock(true, n); 1478 return 0; 1479 } 1480 1481 bch_btree_node_write_sync(n); 1482 1483 bch_keylist_init(&keys); 1484 bch_keylist_add(&keys, &n->key); 1485 1486 make_btree_freeing_key(replace, keys.top); 1487 bch_keylist_push(&keys); 1488 1489 bch_btree_insert_node(b, op, &keys, NULL, NULL); 1490 BUG_ON(!bch_keylist_empty(&keys)); 1491 1492 btree_node_free(replace); 1493 rw_unlock(true, n); 1494 1495 /* Invalidated our iterator */ 1496 return -EINTR; 1497 } 1498 1499 static unsigned btree_gc_count_keys(struct btree *b) 1500 { 1501 struct bkey *k; 1502 struct btree_iter iter; 1503 unsigned ret = 0; 1504 1505 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) 1506 ret += bkey_u64s(k); 1507 1508 return ret; 1509 } 1510 1511 static int btree_gc_recurse(struct btree *b, struct btree_op *op, 1512 struct closure *writes, struct gc_stat *gc) 1513 { 1514 int ret = 0; 1515 bool should_rewrite; 1516 struct bkey *k; 1517 struct btree_iter iter; 1518 struct gc_merge_info r[GC_MERGE_NODES]; 1519 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; 1520 1521 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); 1522 1523 for (i = r; i < r + ARRAY_SIZE(r); i++) 1524 i->b = ERR_PTR(-EINTR); 1525 1526 while (1) { 1527 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); 1528 if (k) { 1529 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, 1530 true, b); 1531 if (IS_ERR(r->b)) { 1532 ret = PTR_ERR(r->b); 1533 break; 1534 } 1535 1536 r->keys = btree_gc_count_keys(r->b); 1537 1538 ret = btree_gc_coalesce(b, op, gc, r); 1539 if (ret) 1540 break; 1541 } 1542 1543 if (!last->b) 1544 break; 1545 1546 if (!IS_ERR(last->b)) { 1547 should_rewrite = btree_gc_mark_node(last->b, gc); 1548 if (should_rewrite) { 1549 ret = btree_gc_rewrite_node(b, op, last->b); 1550 if (ret) 1551 break; 1552 } 1553 1554 if (last->b->level) { 1555 ret = btree_gc_recurse(last->b, op, writes, gc); 1556 if (ret) 1557 break; 1558 } 1559 1560 bkey_copy_key(&b->c->gc_done, &last->b->key); 1561 1562 /* 1563 * Must flush leaf nodes before gc ends, since replace 1564 * operations aren't journalled 1565 */ 1566 mutex_lock(&last->b->write_lock); 1567 if (btree_node_dirty(last->b)) 1568 bch_btree_node_write(last->b, writes); 1569 mutex_unlock(&last->b->write_lock); 1570 rw_unlock(true, last->b); 1571 } 1572 1573 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); 1574 r->b = NULL; 1575 1576 if (need_resched()) { 1577 ret = -EAGAIN; 1578 break; 1579 } 1580 } 1581 1582 for (i = r; i < r + ARRAY_SIZE(r); i++) 1583 if (!IS_ERR_OR_NULL(i->b)) { 1584 mutex_lock(&i->b->write_lock); 1585 if (btree_node_dirty(i->b)) 1586 bch_btree_node_write(i->b, writes); 1587 mutex_unlock(&i->b->write_lock); 1588 rw_unlock(true, i->b); 1589 } 1590 1591 return ret; 1592 } 1593 1594 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, 1595 struct closure *writes, struct gc_stat *gc) 1596 { 1597 struct btree *n = NULL; 1598 int ret = 0; 1599 bool should_rewrite; 1600 1601 should_rewrite = btree_gc_mark_node(b, gc); 1602 if (should_rewrite) { 1603 n = btree_node_alloc_replacement(b, NULL); 1604 1605 if (!IS_ERR_OR_NULL(n)) { 1606 bch_btree_node_write_sync(n); 1607 1608 bch_btree_set_root(n); 1609 btree_node_free(b); 1610 rw_unlock(true, n); 1611 1612 return -EINTR; 1613 } 1614 } 1615 1616 __bch_btree_mark_key(b->c, b->level + 1, &b->key); 1617 1618 if (b->level) { 1619 ret = btree_gc_recurse(b, op, writes, gc); 1620 if (ret) 1621 return ret; 1622 } 1623 1624 bkey_copy_key(&b->c->gc_done, &b->key); 1625 1626 return ret; 1627 } 1628 1629 static void btree_gc_start(struct cache_set *c) 1630 { 1631 struct cache *ca; 1632 struct bucket *b; 1633 unsigned i; 1634 1635 if (!c->gc_mark_valid) 1636 return; 1637 1638 mutex_lock(&c->bucket_lock); 1639 1640 c->gc_mark_valid = 0; 1641 c->gc_done = ZERO_KEY; 1642 1643 for_each_cache(ca, c, i) 1644 for_each_bucket(b, ca) { 1645 b->last_gc = b->gen; 1646 if (!atomic_read(&b->pin)) { 1647 SET_GC_MARK(b, 0); 1648 SET_GC_SECTORS_USED(b, 0); 1649 } 1650 } 1651 1652 mutex_unlock(&c->bucket_lock); 1653 } 1654 1655 static size_t bch_btree_gc_finish(struct cache_set *c) 1656 { 1657 size_t available = 0; 1658 struct bucket *b; 1659 struct cache *ca; 1660 unsigned i; 1661 1662 mutex_lock(&c->bucket_lock); 1663 1664 set_gc_sectors(c); 1665 c->gc_mark_valid = 1; 1666 c->need_gc = 0; 1667 1668 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) 1669 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), 1670 GC_MARK_METADATA); 1671 1672 /* don't reclaim buckets to which writeback keys point */ 1673 rcu_read_lock(); 1674 for (i = 0; i < c->nr_uuids; i++) { 1675 struct bcache_device *d = c->devices[i]; 1676 struct cached_dev *dc; 1677 struct keybuf_key *w, *n; 1678 unsigned j; 1679 1680 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) 1681 continue; 1682 dc = container_of(d, struct cached_dev, disk); 1683 1684 spin_lock(&dc->writeback_keys.lock); 1685 rbtree_postorder_for_each_entry_safe(w, n, 1686 &dc->writeback_keys.keys, node) 1687 for (j = 0; j < KEY_PTRS(&w->key); j++) 1688 SET_GC_MARK(PTR_BUCKET(c, &w->key, j), 1689 GC_MARK_DIRTY); 1690 spin_unlock(&dc->writeback_keys.lock); 1691 } 1692 rcu_read_unlock(); 1693 1694 for_each_cache(ca, c, i) { 1695 uint64_t *i; 1696 1697 ca->invalidate_needs_gc = 0; 1698 1699 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++) 1700 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1701 1702 for (i = ca->prio_buckets; 1703 i < ca->prio_buckets + prio_buckets(ca) * 2; i++) 1704 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1705 1706 for_each_bucket(b, ca) { 1707 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); 1708 1709 if (atomic_read(&b->pin)) 1710 continue; 1711 1712 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); 1713 1714 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) 1715 available++; 1716 } 1717 } 1718 1719 mutex_unlock(&c->bucket_lock); 1720 return available; 1721 } 1722 1723 static void bch_btree_gc(struct cache_set *c) 1724 { 1725 int ret; 1726 unsigned long available; 1727 struct gc_stat stats; 1728 struct closure writes; 1729 struct btree_op op; 1730 uint64_t start_time = local_clock(); 1731 1732 trace_bcache_gc_start(c); 1733 1734 memset(&stats, 0, sizeof(struct gc_stat)); 1735 closure_init_stack(&writes); 1736 bch_btree_op_init(&op, SHRT_MAX); 1737 1738 btree_gc_start(c); 1739 1740 do { 1741 ret = btree_root(gc_root, c, &op, &writes, &stats); 1742 closure_sync(&writes); 1743 cond_resched(); 1744 1745 if (ret && ret != -EAGAIN) 1746 pr_warn("gc failed!"); 1747 } while (ret); 1748 1749 available = bch_btree_gc_finish(c); 1750 wake_up_allocators(c); 1751 1752 bch_time_stats_update(&c->btree_gc_time, start_time); 1753 1754 stats.key_bytes *= sizeof(uint64_t); 1755 stats.data <<= 9; 1756 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets; 1757 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); 1758 1759 trace_bcache_gc_end(c); 1760 1761 bch_moving_gc(c); 1762 } 1763 1764 static int bch_gc_thread(void *arg) 1765 { 1766 struct cache_set *c = arg; 1767 struct cache *ca; 1768 unsigned i; 1769 1770 while (1) { 1771 again: 1772 bch_btree_gc(c); 1773 1774 set_current_state(TASK_INTERRUPTIBLE); 1775 if (kthread_should_stop()) 1776 break; 1777 1778 mutex_lock(&c->bucket_lock); 1779 1780 for_each_cache(ca, c, i) 1781 if (ca->invalidate_needs_gc) { 1782 mutex_unlock(&c->bucket_lock); 1783 set_current_state(TASK_RUNNING); 1784 goto again; 1785 } 1786 1787 mutex_unlock(&c->bucket_lock); 1788 1789 schedule(); 1790 } 1791 1792 return 0; 1793 } 1794 1795 int bch_gc_thread_start(struct cache_set *c) 1796 { 1797 c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc"); 1798 if (IS_ERR(c->gc_thread)) 1799 return PTR_ERR(c->gc_thread); 1800 1801 set_task_state(c->gc_thread, TASK_INTERRUPTIBLE); 1802 return 0; 1803 } 1804 1805 /* Initial partial gc */ 1806 1807 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) 1808 { 1809 int ret = 0; 1810 struct bkey *k, *p = NULL; 1811 struct btree_iter iter; 1812 1813 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) 1814 bch_initial_mark_key(b->c, b->level, k); 1815 1816 bch_initial_mark_key(b->c, b->level + 1, &b->key); 1817 1818 if (b->level) { 1819 bch_btree_iter_init(&b->keys, &iter, NULL); 1820 1821 do { 1822 k = bch_btree_iter_next_filter(&iter, &b->keys, 1823 bch_ptr_bad); 1824 if (k) 1825 btree_node_prefetch(b, k); 1826 1827 if (p) 1828 ret = btree(check_recurse, p, b, op); 1829 1830 p = k; 1831 } while (p && !ret); 1832 } 1833 1834 return ret; 1835 } 1836 1837 int bch_btree_check(struct cache_set *c) 1838 { 1839 struct btree_op op; 1840 1841 bch_btree_op_init(&op, SHRT_MAX); 1842 1843 return btree_root(check_recurse, c, &op); 1844 } 1845 1846 void bch_initial_gc_finish(struct cache_set *c) 1847 { 1848 struct cache *ca; 1849 struct bucket *b; 1850 unsigned i; 1851 1852 bch_btree_gc_finish(c); 1853 1854 mutex_lock(&c->bucket_lock); 1855 1856 /* 1857 * We need to put some unused buckets directly on the prio freelist in 1858 * order to get the allocator thread started - it needs freed buckets in 1859 * order to rewrite the prios and gens, and it needs to rewrite prios 1860 * and gens in order to free buckets. 1861 * 1862 * This is only safe for buckets that have no live data in them, which 1863 * there should always be some of. 1864 */ 1865 for_each_cache(ca, c, i) { 1866 for_each_bucket(b, ca) { 1867 if (fifo_full(&ca->free[RESERVE_PRIO])) 1868 break; 1869 1870 if (bch_can_invalidate_bucket(ca, b) && 1871 !GC_MARK(b)) { 1872 __bch_invalidate_one_bucket(ca, b); 1873 fifo_push(&ca->free[RESERVE_PRIO], 1874 b - ca->buckets); 1875 } 1876 } 1877 } 1878 1879 mutex_unlock(&c->bucket_lock); 1880 } 1881 1882 /* Btree insertion */ 1883 1884 static bool btree_insert_key(struct btree *b, struct bkey *k, 1885 struct bkey *replace_key) 1886 { 1887 unsigned status; 1888 1889 BUG_ON(bkey_cmp(k, &b->key) > 0); 1890 1891 status = bch_btree_insert_key(&b->keys, k, replace_key); 1892 if (status != BTREE_INSERT_STATUS_NO_INSERT) { 1893 bch_check_keys(&b->keys, "%u for %s", status, 1894 replace_key ? "replace" : "insert"); 1895 1896 trace_bcache_btree_insert_key(b, k, replace_key != NULL, 1897 status); 1898 return true; 1899 } else 1900 return false; 1901 } 1902 1903 static size_t insert_u64s_remaining(struct btree *b) 1904 { 1905 long ret = bch_btree_keys_u64s_remaining(&b->keys); 1906 1907 /* 1908 * Might land in the middle of an existing extent and have to split it 1909 */ 1910 if (b->keys.ops->is_extents) 1911 ret -= KEY_MAX_U64S; 1912 1913 return max(ret, 0L); 1914 } 1915 1916 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, 1917 struct keylist *insert_keys, 1918 struct bkey *replace_key) 1919 { 1920 bool ret = false; 1921 int oldsize = bch_count_data(&b->keys); 1922 1923 while (!bch_keylist_empty(insert_keys)) { 1924 struct bkey *k = insert_keys->keys; 1925 1926 if (bkey_u64s(k) > insert_u64s_remaining(b)) 1927 break; 1928 1929 if (bkey_cmp(k, &b->key) <= 0) { 1930 if (!b->level) 1931 bkey_put(b->c, k); 1932 1933 ret |= btree_insert_key(b, k, replace_key); 1934 bch_keylist_pop_front(insert_keys); 1935 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { 1936 BKEY_PADDED(key) temp; 1937 bkey_copy(&temp.key, insert_keys->keys); 1938 1939 bch_cut_back(&b->key, &temp.key); 1940 bch_cut_front(&b->key, insert_keys->keys); 1941 1942 ret |= btree_insert_key(b, &temp.key, replace_key); 1943 break; 1944 } else { 1945 break; 1946 } 1947 } 1948 1949 if (!ret) 1950 op->insert_collision = true; 1951 1952 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); 1953 1954 BUG_ON(bch_count_data(&b->keys) < oldsize); 1955 return ret; 1956 } 1957 1958 static int btree_split(struct btree *b, struct btree_op *op, 1959 struct keylist *insert_keys, 1960 struct bkey *replace_key) 1961 { 1962 bool split; 1963 struct btree *n1, *n2 = NULL, *n3 = NULL; 1964 uint64_t start_time = local_clock(); 1965 struct closure cl; 1966 struct keylist parent_keys; 1967 1968 closure_init_stack(&cl); 1969 bch_keylist_init(&parent_keys); 1970 1971 if (btree_check_reserve(b, op)) { 1972 if (!b->level) 1973 return -EINTR; 1974 else 1975 WARN(1, "insufficient reserve for split\n"); 1976 } 1977 1978 n1 = btree_node_alloc_replacement(b, op); 1979 if (IS_ERR(n1)) 1980 goto err; 1981 1982 split = set_blocks(btree_bset_first(n1), 1983 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; 1984 1985 if (split) { 1986 unsigned keys = 0; 1987 1988 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); 1989 1990 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); 1991 if (IS_ERR(n2)) 1992 goto err_free1; 1993 1994 if (!b->parent) { 1995 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); 1996 if (IS_ERR(n3)) 1997 goto err_free2; 1998 } 1999 2000 mutex_lock(&n1->write_lock); 2001 mutex_lock(&n2->write_lock); 2002 2003 bch_btree_insert_keys(n1, op, insert_keys, replace_key); 2004 2005 /* 2006 * Has to be a linear search because we don't have an auxiliary 2007 * search tree yet 2008 */ 2009 2010 while (keys < (btree_bset_first(n1)->keys * 3) / 5) 2011 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), 2012 keys)); 2013 2014 bkey_copy_key(&n1->key, 2015 bset_bkey_idx(btree_bset_first(n1), keys)); 2016 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys)); 2017 2018 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; 2019 btree_bset_first(n1)->keys = keys; 2020 2021 memcpy(btree_bset_first(n2)->start, 2022 bset_bkey_last(btree_bset_first(n1)), 2023 btree_bset_first(n2)->keys * sizeof(uint64_t)); 2024 2025 bkey_copy_key(&n2->key, &b->key); 2026 2027 bch_keylist_add(&parent_keys, &n2->key); 2028 bch_btree_node_write(n2, &cl); 2029 mutex_unlock(&n2->write_lock); 2030 rw_unlock(true, n2); 2031 } else { 2032 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); 2033 2034 mutex_lock(&n1->write_lock); 2035 bch_btree_insert_keys(n1, op, insert_keys, replace_key); 2036 } 2037 2038 bch_keylist_add(&parent_keys, &n1->key); 2039 bch_btree_node_write(n1, &cl); 2040 mutex_unlock(&n1->write_lock); 2041 2042 if (n3) { 2043 /* Depth increases, make a new root */ 2044 mutex_lock(&n3->write_lock); 2045 bkey_copy_key(&n3->key, &MAX_KEY); 2046 bch_btree_insert_keys(n3, op, &parent_keys, NULL); 2047 bch_btree_node_write(n3, &cl); 2048 mutex_unlock(&n3->write_lock); 2049 2050 closure_sync(&cl); 2051 bch_btree_set_root(n3); 2052 rw_unlock(true, n3); 2053 } else if (!b->parent) { 2054 /* Root filled up but didn't need to be split */ 2055 closure_sync(&cl); 2056 bch_btree_set_root(n1); 2057 } else { 2058 /* Split a non root node */ 2059 closure_sync(&cl); 2060 make_btree_freeing_key(b, parent_keys.top); 2061 bch_keylist_push(&parent_keys); 2062 2063 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); 2064 BUG_ON(!bch_keylist_empty(&parent_keys)); 2065 } 2066 2067 btree_node_free(b); 2068 rw_unlock(true, n1); 2069 2070 bch_time_stats_update(&b->c->btree_split_time, start_time); 2071 2072 return 0; 2073 err_free2: 2074 bkey_put(b->c, &n2->key); 2075 btree_node_free(n2); 2076 rw_unlock(true, n2); 2077 err_free1: 2078 bkey_put(b->c, &n1->key); 2079 btree_node_free(n1); 2080 rw_unlock(true, n1); 2081 err: 2082 WARN(1, "bcache: btree split failed (level %u)", b->level); 2083 2084 if (n3 == ERR_PTR(-EAGAIN) || 2085 n2 == ERR_PTR(-EAGAIN) || 2086 n1 == ERR_PTR(-EAGAIN)) 2087 return -EAGAIN; 2088 2089 return -ENOMEM; 2090 } 2091 2092 static int bch_btree_insert_node(struct btree *b, struct btree_op *op, 2093 struct keylist *insert_keys, 2094 atomic_t *journal_ref, 2095 struct bkey *replace_key) 2096 { 2097 struct closure cl; 2098 2099 BUG_ON(b->level && replace_key); 2100 2101 closure_init_stack(&cl); 2102 2103 mutex_lock(&b->write_lock); 2104 2105 if (write_block(b) != btree_bset_last(b) && 2106 b->keys.last_set_unwritten) 2107 bch_btree_init_next(b); /* just wrote a set */ 2108 2109 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { 2110 mutex_unlock(&b->write_lock); 2111 goto split; 2112 } 2113 2114 BUG_ON(write_block(b) != btree_bset_last(b)); 2115 2116 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { 2117 if (!b->level) 2118 bch_btree_leaf_dirty(b, journal_ref); 2119 else 2120 bch_btree_node_write(b, &cl); 2121 } 2122 2123 mutex_unlock(&b->write_lock); 2124 2125 /* wait for btree node write if necessary, after unlock */ 2126 closure_sync(&cl); 2127 2128 return 0; 2129 split: 2130 if (current->bio_list) { 2131 op->lock = b->c->root->level + 1; 2132 return -EAGAIN; 2133 } else if (op->lock <= b->c->root->level) { 2134 op->lock = b->c->root->level + 1; 2135 return -EINTR; 2136 } else { 2137 /* Invalidated all iterators */ 2138 int ret = btree_split(b, op, insert_keys, replace_key); 2139 2140 if (bch_keylist_empty(insert_keys)) 2141 return 0; 2142 else if (!ret) 2143 return -EINTR; 2144 return ret; 2145 } 2146 } 2147 2148 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, 2149 struct bkey *check_key) 2150 { 2151 int ret = -EINTR; 2152 uint64_t btree_ptr = b->key.ptr[0]; 2153 unsigned long seq = b->seq; 2154 struct keylist insert; 2155 bool upgrade = op->lock == -1; 2156 2157 bch_keylist_init(&insert); 2158 2159 if (upgrade) { 2160 rw_unlock(false, b); 2161 rw_lock(true, b, b->level); 2162 2163 if (b->key.ptr[0] != btree_ptr || 2164 b->seq != seq + 1) { 2165 op->lock = b->level; 2166 goto out; 2167 } 2168 } 2169 2170 SET_KEY_PTRS(check_key, 1); 2171 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t)); 2172 2173 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV); 2174 2175 bch_keylist_add(&insert, check_key); 2176 2177 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); 2178 2179 BUG_ON(!ret && !bch_keylist_empty(&insert)); 2180 out: 2181 if (upgrade) 2182 downgrade_write(&b->lock); 2183 return ret; 2184 } 2185 2186 struct btree_insert_op { 2187 struct btree_op op; 2188 struct keylist *keys; 2189 atomic_t *journal_ref; 2190 struct bkey *replace_key; 2191 }; 2192 2193 static int btree_insert_fn(struct btree_op *b_op, struct btree *b) 2194 { 2195 struct btree_insert_op *op = container_of(b_op, 2196 struct btree_insert_op, op); 2197 2198 int ret = bch_btree_insert_node(b, &op->op, op->keys, 2199 op->journal_ref, op->replace_key); 2200 if (ret && !bch_keylist_empty(op->keys)) 2201 return ret; 2202 else 2203 return MAP_DONE; 2204 } 2205 2206 int bch_btree_insert(struct cache_set *c, struct keylist *keys, 2207 atomic_t *journal_ref, struct bkey *replace_key) 2208 { 2209 struct btree_insert_op op; 2210 int ret = 0; 2211 2212 BUG_ON(current->bio_list); 2213 BUG_ON(bch_keylist_empty(keys)); 2214 2215 bch_btree_op_init(&op.op, 0); 2216 op.keys = keys; 2217 op.journal_ref = journal_ref; 2218 op.replace_key = replace_key; 2219 2220 while (!ret && !bch_keylist_empty(keys)) { 2221 op.op.lock = 0; 2222 ret = bch_btree_map_leaf_nodes(&op.op, c, 2223 &START_KEY(keys->keys), 2224 btree_insert_fn); 2225 } 2226 2227 if (ret) { 2228 struct bkey *k; 2229 2230 pr_err("error %i", ret); 2231 2232 while ((k = bch_keylist_pop(keys))) 2233 bkey_put(c, k); 2234 } else if (op.op.insert_collision) 2235 ret = -ESRCH; 2236 2237 return ret; 2238 } 2239 2240 void bch_btree_set_root(struct btree *b) 2241 { 2242 unsigned i; 2243 struct closure cl; 2244 2245 closure_init_stack(&cl); 2246 2247 trace_bcache_btree_set_root(b); 2248 2249 BUG_ON(!b->written); 2250 2251 for (i = 0; i < KEY_PTRS(&b->key); i++) 2252 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); 2253 2254 mutex_lock(&b->c->bucket_lock); 2255 list_del_init(&b->list); 2256 mutex_unlock(&b->c->bucket_lock); 2257 2258 b->c->root = b; 2259 2260 bch_journal_meta(b->c, &cl); 2261 closure_sync(&cl); 2262 } 2263 2264 /* Map across nodes or keys */ 2265 2266 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, 2267 struct bkey *from, 2268 btree_map_nodes_fn *fn, int flags) 2269 { 2270 int ret = MAP_CONTINUE; 2271 2272 if (b->level) { 2273 struct bkey *k; 2274 struct btree_iter iter; 2275 2276 bch_btree_iter_init(&b->keys, &iter, from); 2277 2278 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, 2279 bch_ptr_bad))) { 2280 ret = btree(map_nodes_recurse, k, b, 2281 op, from, fn, flags); 2282 from = NULL; 2283 2284 if (ret != MAP_CONTINUE) 2285 return ret; 2286 } 2287 } 2288 2289 if (!b->level || flags == MAP_ALL_NODES) 2290 ret = fn(op, b); 2291 2292 return ret; 2293 } 2294 2295 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, 2296 struct bkey *from, btree_map_nodes_fn *fn, int flags) 2297 { 2298 return btree_root(map_nodes_recurse, c, op, from, fn, flags); 2299 } 2300 2301 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, 2302 struct bkey *from, btree_map_keys_fn *fn, 2303 int flags) 2304 { 2305 int ret = MAP_CONTINUE; 2306 struct bkey *k; 2307 struct btree_iter iter; 2308 2309 bch_btree_iter_init(&b->keys, &iter, from); 2310 2311 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { 2312 ret = !b->level 2313 ? fn(op, b, k) 2314 : btree(map_keys_recurse, k, b, op, from, fn, flags); 2315 from = NULL; 2316 2317 if (ret != MAP_CONTINUE) 2318 return ret; 2319 } 2320 2321 if (!b->level && (flags & MAP_END_KEY)) 2322 ret = fn(op, b, &KEY(KEY_INODE(&b->key), 2323 KEY_OFFSET(&b->key), 0)); 2324 2325 return ret; 2326 } 2327 2328 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, 2329 struct bkey *from, btree_map_keys_fn *fn, int flags) 2330 { 2331 return btree_root(map_keys_recurse, c, op, from, fn, flags); 2332 } 2333 2334 /* Keybuf code */ 2335 2336 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r) 2337 { 2338 /* Overlapping keys compare equal */ 2339 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0) 2340 return -1; 2341 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0) 2342 return 1; 2343 return 0; 2344 } 2345 2346 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, 2347 struct keybuf_key *r) 2348 { 2349 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1); 2350 } 2351 2352 struct refill { 2353 struct btree_op op; 2354 unsigned nr_found; 2355 struct keybuf *buf; 2356 struct bkey *end; 2357 keybuf_pred_fn *pred; 2358 }; 2359 2360 static int refill_keybuf_fn(struct btree_op *op, struct btree *b, 2361 struct bkey *k) 2362 { 2363 struct refill *refill = container_of(op, struct refill, op); 2364 struct keybuf *buf = refill->buf; 2365 int ret = MAP_CONTINUE; 2366 2367 if (bkey_cmp(k, refill->end) >= 0) { 2368 ret = MAP_DONE; 2369 goto out; 2370 } 2371 2372 if (!KEY_SIZE(k)) /* end key */ 2373 goto out; 2374 2375 if (refill->pred(buf, k)) { 2376 struct keybuf_key *w; 2377 2378 spin_lock(&buf->lock); 2379 2380 w = array_alloc(&buf->freelist); 2381 if (!w) { 2382 spin_unlock(&buf->lock); 2383 return MAP_DONE; 2384 } 2385 2386 w->private = NULL; 2387 bkey_copy(&w->key, k); 2388 2389 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) 2390 array_free(&buf->freelist, w); 2391 else 2392 refill->nr_found++; 2393 2394 if (array_freelist_empty(&buf->freelist)) 2395 ret = MAP_DONE; 2396 2397 spin_unlock(&buf->lock); 2398 } 2399 out: 2400 buf->last_scanned = *k; 2401 return ret; 2402 } 2403 2404 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, 2405 struct bkey *end, keybuf_pred_fn *pred) 2406 { 2407 struct bkey start = buf->last_scanned; 2408 struct refill refill; 2409 2410 cond_resched(); 2411 2412 bch_btree_op_init(&refill.op, -1); 2413 refill.nr_found = 0; 2414 refill.buf = buf; 2415 refill.end = end; 2416 refill.pred = pred; 2417 2418 bch_btree_map_keys(&refill.op, c, &buf->last_scanned, 2419 refill_keybuf_fn, MAP_END_KEY); 2420 2421 trace_bcache_keyscan(refill.nr_found, 2422 KEY_INODE(&start), KEY_OFFSET(&start), 2423 KEY_INODE(&buf->last_scanned), 2424 KEY_OFFSET(&buf->last_scanned)); 2425 2426 spin_lock(&buf->lock); 2427 2428 if (!RB_EMPTY_ROOT(&buf->keys)) { 2429 struct keybuf_key *w; 2430 w = RB_FIRST(&buf->keys, struct keybuf_key, node); 2431 buf->start = START_KEY(&w->key); 2432 2433 w = RB_LAST(&buf->keys, struct keybuf_key, node); 2434 buf->end = w->key; 2435 } else { 2436 buf->start = MAX_KEY; 2437 buf->end = MAX_KEY; 2438 } 2439 2440 spin_unlock(&buf->lock); 2441 } 2442 2443 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w) 2444 { 2445 rb_erase(&w->node, &buf->keys); 2446 array_free(&buf->freelist, w); 2447 } 2448 2449 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w) 2450 { 2451 spin_lock(&buf->lock); 2452 __bch_keybuf_del(buf, w); 2453 spin_unlock(&buf->lock); 2454 } 2455 2456 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, 2457 struct bkey *end) 2458 { 2459 bool ret = false; 2460 struct keybuf_key *p, *w, s; 2461 s.key = *start; 2462 2463 if (bkey_cmp(end, &buf->start) <= 0 || 2464 bkey_cmp(start, &buf->end) >= 0) 2465 return false; 2466 2467 spin_lock(&buf->lock); 2468 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); 2469 2470 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) { 2471 p = w; 2472 w = RB_NEXT(w, node); 2473 2474 if (p->private) 2475 ret = true; 2476 else 2477 __bch_keybuf_del(buf, p); 2478 } 2479 2480 spin_unlock(&buf->lock); 2481 return ret; 2482 } 2483 2484 struct keybuf_key *bch_keybuf_next(struct keybuf *buf) 2485 { 2486 struct keybuf_key *w; 2487 spin_lock(&buf->lock); 2488 2489 w = RB_FIRST(&buf->keys, struct keybuf_key, node); 2490 2491 while (w && w->private) 2492 w = RB_NEXT(w, node); 2493 2494 if (w) 2495 w->private = ERR_PTR(-EINTR); 2496 2497 spin_unlock(&buf->lock); 2498 return w; 2499 } 2500 2501 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, 2502 struct keybuf *buf, 2503 struct bkey *end, 2504 keybuf_pred_fn *pred) 2505 { 2506 struct keybuf_key *ret; 2507 2508 while (1) { 2509 ret = bch_keybuf_next(buf); 2510 if (ret) 2511 break; 2512 2513 if (bkey_cmp(&buf->last_scanned, end) >= 0) { 2514 pr_debug("scan finished"); 2515 break; 2516 } 2517 2518 bch_refill_keybuf(c, buf, end, pred); 2519 } 2520 2521 return ret; 2522 } 2523 2524 void bch_keybuf_init(struct keybuf *buf) 2525 { 2526 buf->last_scanned = MAX_KEY; 2527 buf->keys = RB_ROOT; 2528 2529 spin_lock_init(&buf->lock); 2530 array_allocator_init(&buf->freelist); 2531 } 2532