/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This is currently just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */
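
/*
 * Illustrative sketch (editorial, not part of the driver) of the gen
 * scheme described above: a pointer embeds the bucket generation it was
 * created with, so bumping the bucket's gen "frees" the bucket by making
 * every existing pointer to it stale:
 *
 *	suppose bucket->gen == 3	 -> new keys get PTR_GEN(k, i) == 3
 *	bucket->gen++			 -> bucket freed, pointers untouched
 *	now PTR_GEN(k, i) != bucket->gen -> ptr_stale() is true, and
 *	garbage collection will eventually drop the stale pointer
 */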

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the child node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})
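
/*
 * Usage sketch (illustrative; bch_btree_do_thing and SOME_KEY are made-up
 * names, not real callers): a recursive operation typically recurses with
 * btree() on interior nodes and is kicked off via btree_root():
 *
 *	static int bch_btree_do_thing(struct btree *b, struct btree_op *op)
 *	{
 *		if (b->level)
 *			return btree(do_thing, &SOME_KEY, b, op);
 *		return 0;	// leaf case does the real work
 *	}
 *	...
 *	ret = btree_root(do_thing, c, &op);
 *
 * A caller holding a read lock that discovers it needs a write lock sets
 * op->lock and returns -EINTR, and btree_root() restarts the descent.
 */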

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_rw = REQ_META|READ_SYNC;
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io = btree_node_read_endio;
	bio->bi_private = &cl;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_error)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct bio_vec *bv;
	int n;

	bio_for_each_segment_all(bv, b->bio, n)
		__free_page(bv->bv_page);

	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_error)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
	closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_rw		= REQ_META|WRITE_SYNC|REQ_FUA;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */
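
	/*
	 * Illustrative timeline for the leaf-append case above (editorial
	 * note): with REQ_FUA the bset is durable when this IO completes,
	 * which is strictly stronger than the actual requirement that it be
	 * persisted before the next FLUSH|FUA journal write makes its keys
	 * replayable - hence "keep things sane" by using FUA everywhere.
	 */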

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment_all(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
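
/*
 * Worked example (illustrative): with a btree of depth 2 (c->root->level
 * == 2), mca_reserve() is 2 * 8 + 16 = 32 nodes; with a single-node tree
 * (level 0) the ?: falls back to 1, giving 1 * 8 + 16 = 24. mca_can_free()
 * is then however many cached nodes we hold beyond that reserve.
 */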

static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (freed >= nr)
			break;

		if (++i > 3 &&
		    !mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
	}

	for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
		if (list_empty(&c->btree_cache))
			goto out;

		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		} else
			b->accessed = 0;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;
	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;
	register_shrinker(&c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	struct task_struct *old;

	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
	if (old && old != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		return -EINTR;
	}

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize_lock() takes. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
}
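
/*
 * Sketch of the protocol (illustrative): btree_cache_alloc_lock is an open
 * coded mutex keyed on the owning task:
 *
 *	cmpxchg(&c->btree_cache_alloc_lock, NULL, current)
 *		was NULL:	  we now own it and may cannibalize
 *		was current:	  we already own it; proceed
 *		was another task: queue ourselves on btree_cache_wait and
 *				  return -EINTR to restart from the root
 *
 * btree_root() calls bch_cannibalize_unlock() after every descent, which is
 * why the unlock above checks that current actually holds the lock.
 */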

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	b->parent = parent;
	b->accessed = 1;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	return b;
}

static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

	mutex_lock(&b->write_lock);

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->accessed = 1;
	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
	if (!IS_ERR_OR_NULL(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}

static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca;
	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i)
		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
			if (op)
				prepare_to_wait(&c->btree_cache_wait, &op->wait,
						TASK_UNINTERRUPTIBLE);
			mutex_unlock(&c->bucket_lock);
			return -EINTR;
		}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}

/* Garbage collection */
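
/*
 * Note on marking (summarizing the code below): for each available pointer
 * in a key we pull the bucket's last_gc back to the oldest gen still
 * referenced; stale pointers then only raise the returned stale count,
 * while live ones classify the bucket (GC_MARK_METADATA for interior
 * nodes, GC_MARK_DIRTY for dirty data, GC_MARK_RECLAIMABLE otherwise) and
 * accumulate GC_SECTORS_USED, capped at MAX_GC_SECTORS_USED.
 */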
1171487dded8SKent Overstreet static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, 1172487dded8SKent Overstreet struct bkey *k) 1173cafe5635SKent Overstreet { 1174cafe5635SKent Overstreet uint8_t stale = 0; 1175cafe5635SKent Overstreet unsigned i; 1176cafe5635SKent Overstreet struct bucket *g; 1177cafe5635SKent Overstreet 1178cafe5635SKent Overstreet /* 1179cafe5635SKent Overstreet * ptr_invalid() can't return true for the keys that mark btree nodes as 1180cafe5635SKent Overstreet * freed, but since ptr_bad() returns true we'll never actually use them 1181cafe5635SKent Overstreet * for anything and thus we don't want to mark their pointers here 1182cafe5635SKent Overstreet */ 1183cafe5635SKent Overstreet if (!bkey_cmp(k, &ZERO_KEY)) 1184cafe5635SKent Overstreet return stale; 1185cafe5635SKent Overstreet 1186cafe5635SKent Overstreet for (i = 0; i < KEY_PTRS(k); i++) { 1187cafe5635SKent Overstreet if (!ptr_available(c, k, i)) 1188cafe5635SKent Overstreet continue; 1189cafe5635SKent Overstreet 1190cafe5635SKent Overstreet g = PTR_BUCKET(c, k, i); 1191cafe5635SKent Overstreet 11923a2fd9d5SKent Overstreet if (gen_after(g->last_gc, PTR_GEN(k, i))) 11933a2fd9d5SKent Overstreet g->last_gc = PTR_GEN(k, i); 1194cafe5635SKent Overstreet 1195cafe5635SKent Overstreet if (ptr_stale(c, k, i)) { 1196cafe5635SKent Overstreet stale = max(stale, ptr_stale(c, k, i)); 1197cafe5635SKent Overstreet continue; 1198cafe5635SKent Overstreet } 1199cafe5635SKent Overstreet 1200cafe5635SKent Overstreet cache_bug_on(GC_MARK(g) && 1201cafe5635SKent Overstreet (GC_MARK(g) == GC_MARK_METADATA) != (level != 0), 1202cafe5635SKent Overstreet c, "inconsistent ptrs: mark = %llu, level = %i", 1203cafe5635SKent Overstreet GC_MARK(g), level); 1204cafe5635SKent Overstreet 1205cafe5635SKent Overstreet if (level) 1206cafe5635SKent Overstreet SET_GC_MARK(g, GC_MARK_METADATA); 1207cafe5635SKent Overstreet else if (KEY_DIRTY(k)) 1208cafe5635SKent Overstreet SET_GC_MARK(g, GC_MARK_DIRTY); 12094fe6a816SKent Overstreet else if (!GC_MARK(g)) 12104fe6a816SKent Overstreet SET_GC_MARK(g, GC_MARK_RECLAIMABLE); 1211cafe5635SKent Overstreet 1212cafe5635SKent Overstreet /* guard against overflow */ 1213cafe5635SKent Overstreet SET_GC_SECTORS_USED(g, min_t(unsigned, 1214cafe5635SKent Overstreet GC_SECTORS_USED(g) + KEY_SIZE(k), 121594717447SDarrick J.
Wong MAX_GC_SECTORS_USED)); 1216cafe5635SKent Overstreet 1217cafe5635SKent Overstreet BUG_ON(!GC_SECTORS_USED(g)); 1218cafe5635SKent Overstreet } 1219cafe5635SKent Overstreet 1220cafe5635SKent Overstreet return stale; 1221cafe5635SKent Overstreet } 1222cafe5635SKent Overstreet 1223cafe5635SKent Overstreet #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) 1224cafe5635SKent Overstreet 1225487dded8SKent Overstreet void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) 1226487dded8SKent Overstreet { 1227487dded8SKent Overstreet unsigned i; 1228487dded8SKent Overstreet 1229487dded8SKent Overstreet for (i = 0; i < KEY_PTRS(k); i++) 1230487dded8SKent Overstreet if (ptr_available(c, k, i) && 1231487dded8SKent Overstreet !ptr_stale(c, k, i)) { 1232487dded8SKent Overstreet struct bucket *b = PTR_BUCKET(c, k, i); 1233487dded8SKent Overstreet 1234487dded8SKent Overstreet b->gen = PTR_GEN(k, i); 1235487dded8SKent Overstreet 1236487dded8SKent Overstreet if (level && bkey_cmp(k, &ZERO_KEY)) 1237487dded8SKent Overstreet b->prio = BTREE_PRIO; 1238487dded8SKent Overstreet else if (!level && b->prio == BTREE_PRIO) 1239487dded8SKent Overstreet b->prio = INITIAL_PRIO; 1240487dded8SKent Overstreet } 1241487dded8SKent Overstreet 1242487dded8SKent Overstreet __bch_btree_mark_key(c, level, k); 1243487dded8SKent Overstreet } 1244487dded8SKent Overstreet 1245a1f0358bSKent Overstreet static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) 1246cafe5635SKent Overstreet { 1247cafe5635SKent Overstreet uint8_t stale = 0; 1248a1f0358bSKent Overstreet unsigned keys = 0, good_keys = 0; 1249cafe5635SKent Overstreet struct bkey *k; 1250cafe5635SKent Overstreet struct btree_iter iter; 1251cafe5635SKent Overstreet struct bset_tree *t; 1252cafe5635SKent Overstreet 1253cafe5635SKent Overstreet gc->nodes++; 1254cafe5635SKent Overstreet 1255c052dd9aSKent Overstreet for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { 1256cafe5635SKent Overstreet stale = max(stale, btree_mark_key(b, k)); 1257a1f0358bSKent Overstreet keys++; 1258cafe5635SKent Overstreet 1259a85e968eSKent Overstreet if (bch_ptr_bad(&b->keys, k)) 1260cafe5635SKent Overstreet continue; 1261cafe5635SKent Overstreet 1262cafe5635SKent Overstreet gc->key_bytes += bkey_u64s(k); 1263cafe5635SKent Overstreet gc->nkeys++; 1264a1f0358bSKent Overstreet good_keys++; 1265cafe5635SKent Overstreet 1266cafe5635SKent Overstreet gc->data += KEY_SIZE(k); 1267cafe5635SKent Overstreet } 1268cafe5635SKent Overstreet 1269a85e968eSKent Overstreet for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) 1270cafe5635SKent Overstreet btree_bug_on(t->size && 1271a85e968eSKent Overstreet bset_written(&b->keys, t) && 1272cafe5635SKent Overstreet bkey_cmp(&b->key, &t->end) < 0, 1273cafe5635SKent Overstreet b, "found short btree key in gc"); 1274cafe5635SKent Overstreet 1275a1f0358bSKent Overstreet if (b->c->gc_always_rewrite) 1276a1f0358bSKent Overstreet return true; 1277a1f0358bSKent Overstreet 1278a1f0358bSKent Overstreet if (stale > 10) 1279a1f0358bSKent Overstreet return true; 1280a1f0358bSKent Overstreet 1281a1f0358bSKent Overstreet if ((keys - good_keys) * 2 > keys) 1282a1f0358bSKent Overstreet return true; 1283a1f0358bSKent Overstreet 1284a1f0358bSKent Overstreet return false; 1285cafe5635SKent Overstreet } 1286cafe5635SKent Overstreet 1287a1f0358bSKent Overstreet #define GC_MERGE_NODES 4U 1288cafe5635SKent Overstreet 1289cafe5635SKent Overstreet struct gc_merge_info { 1290cafe5635SKent Overstreet struct btree *b; 1291cafe5635SKent 
Overstreet unsigned keys; 1292cafe5635SKent Overstreet }; 1293cafe5635SKent Overstreet 1294a1f0358bSKent Overstreet static int bch_btree_insert_node(struct btree *, struct btree_op *, 1295a1f0358bSKent Overstreet struct keylist *, atomic_t *, struct bkey *); 1296a1f0358bSKent Overstreet 1297a1f0358bSKent Overstreet static int btree_gc_coalesce(struct btree *b, struct btree_op *op, 12980a63b66dSKent Overstreet struct gc_stat *gc, struct gc_merge_info *r) 1299cafe5635SKent Overstreet { 1300a1f0358bSKent Overstreet unsigned i, nodes = 0, keys = 0, blocks; 1301a1f0358bSKent Overstreet struct btree *new_nodes[GC_MERGE_NODES]; 13020a63b66dSKent Overstreet struct keylist keylist; 1303b54d6934SKent Overstreet struct closure cl; 1304a1f0358bSKent Overstreet struct bkey *k; 1305b54d6934SKent Overstreet 13060a63b66dSKent Overstreet bch_keylist_init(&keylist); 13070a63b66dSKent Overstreet 13080a63b66dSKent Overstreet if (btree_check_reserve(b, NULL)) 13090a63b66dSKent Overstreet return 0; 13100a63b66dSKent Overstreet 1311a1f0358bSKent Overstreet memset(new_nodes, 0, sizeof(new_nodes)); 1312b54d6934SKent Overstreet closure_init_stack(&cl); 1313cafe5635SKent Overstreet 1314a1f0358bSKent Overstreet while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) 1315cafe5635SKent Overstreet keys += r[nodes++].keys; 1316cafe5635SKent Overstreet 1317cafe5635SKent Overstreet blocks = btree_default_blocks(b->c) * 2 / 3; 1318cafe5635SKent Overstreet 1319cafe5635SKent Overstreet if (nodes < 2 || 1320a85e968eSKent Overstreet __set_blocks(b->keys.set[0].data, keys, 1321ee811287SKent Overstreet block_bytes(b->c)) > blocks * (nodes - 1)) 1322a1f0358bSKent Overstreet return 0; 1323cafe5635SKent Overstreet 1324a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) { 13250a63b66dSKent Overstreet new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); 1326a1f0358bSKent Overstreet if (IS_ERR_OR_NULL(new_nodes[i])) 1327a1f0358bSKent Overstreet goto out_nocoalesce; 1328cafe5635SKent Overstreet } 1329cafe5635SKent Overstreet 13300a63b66dSKent Overstreet /* 13310a63b66dSKent Overstreet * We have to check the reserve here, after we've allocated our new 13320a63b66dSKent Overstreet * nodes, to make sure the insert below will succeed - we also check 13330a63b66dSKent Overstreet * before as an optimization to potentially avoid a bunch of expensive 13340a63b66dSKent Overstreet * allocs/sorts 13350a63b66dSKent Overstreet */ 13360a63b66dSKent Overstreet if (btree_check_reserve(b, NULL)) 13370a63b66dSKent Overstreet goto out_nocoalesce; 13380a63b66dSKent Overstreet 13392a285686SKent Overstreet for (i = 0; i < nodes; i++) 13402a285686SKent Overstreet mutex_lock(&new_nodes[i]->write_lock); 13412a285686SKent Overstreet 1342cafe5635SKent Overstreet for (i = nodes - 1; i > 0; --i) { 1343ee811287SKent Overstreet struct bset *n1 = btree_bset_first(new_nodes[i]); 1344ee811287SKent Overstreet struct bset *n2 = btree_bset_first(new_nodes[i - 1]); 1345cafe5635SKent Overstreet struct bkey *k, *last = NULL; 1346cafe5635SKent Overstreet 1347cafe5635SKent Overstreet keys = 0; 1348cafe5635SKent Overstreet 1349a1f0358bSKent Overstreet if (i > 1) { 1350cafe5635SKent Overstreet for (k = n2->start; 1351fafff81cSKent Overstreet k < bset_bkey_last(n2); 1352cafe5635SKent Overstreet k = bkey_next(k)) { 1353cafe5635SKent Overstreet if (__set_blocks(n1, n1->keys + keys + 1354ee811287SKent Overstreet bkey_u64s(k), 1355ee811287SKent Overstreet block_bytes(b->c)) > blocks) 1356cafe5635SKent Overstreet break; 1357cafe5635SKent Overstreet 1358cafe5635SKent 
Overstreet last = k; 1359cafe5635SKent Overstreet keys += bkey_u64s(k); 1360cafe5635SKent Overstreet } 1361a1f0358bSKent Overstreet } else { 1362a1f0358bSKent Overstreet /* 1363a1f0358bSKent Overstreet * Last node we're not getting rid of - we're getting 1364a1f0358bSKent Overstreet * rid of the node at r[0]. Have to try and fit all of 1365a1f0358bSKent Overstreet * the remaining keys into this node; we can't ensure 1366a1f0358bSKent Overstreet * they will always fit due to rounding and variable 1367a1f0358bSKent Overstreet * length keys (shouldn't be possible in practice, 1368a1f0358bSKent Overstreet * though) 1369a1f0358bSKent Overstreet */ 1370a1f0358bSKent Overstreet if (__set_blocks(n1, n1->keys + n2->keys, 1371ee811287SKent Overstreet block_bytes(b->c)) > 1372ee811287SKent Overstreet btree_blocks(new_nodes[i])) 1373a1f0358bSKent Overstreet goto out_nocoalesce; 1374a1f0358bSKent Overstreet 1375a1f0358bSKent Overstreet keys = n2->keys; 1376a1f0358bSKent Overstreet /* Take the key of the node we're getting rid of */ 1377a1f0358bSKent Overstreet last = &r->b->key; 1378a1f0358bSKent Overstreet } 1379cafe5635SKent Overstreet 1380ee811287SKent Overstreet BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > 1381ee811287SKent Overstreet btree_blocks(new_nodes[i])); 1382cafe5635SKent Overstreet 1383a1f0358bSKent Overstreet if (last) 1384a1f0358bSKent Overstreet bkey_copy_key(&new_nodes[i]->key, last); 1385cafe5635SKent Overstreet 1386fafff81cSKent Overstreet memcpy(bset_bkey_last(n1), 1387cafe5635SKent Overstreet n2->start, 1388fafff81cSKent Overstreet (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); 1389cafe5635SKent Overstreet 1390cafe5635SKent Overstreet n1->keys += keys; 1391a1f0358bSKent Overstreet r[i].keys = n1->keys; 1392cafe5635SKent Overstreet 1393cafe5635SKent Overstreet memmove(n2->start, 1394fafff81cSKent Overstreet bset_bkey_idx(n2, keys), 1395fafff81cSKent Overstreet (void *) bset_bkey_last(n2) - 1396fafff81cSKent Overstreet (void *) bset_bkey_idx(n2, keys)); 1397cafe5635SKent Overstreet 1398cafe5635SKent Overstreet n2->keys -= keys; 1399cafe5635SKent Overstreet 14000a63b66dSKent Overstreet if (__bch_keylist_realloc(&keylist, 1401085d2a3dSKent Overstreet bkey_u64s(&new_nodes[i]->key))) 1402a1f0358bSKent Overstreet goto out_nocoalesce; 1403a1f0358bSKent Overstreet 1404a1f0358bSKent Overstreet bch_btree_node_write(new_nodes[i], &cl); 14050a63b66dSKent Overstreet bch_keylist_add(&keylist, &new_nodes[i]->key); 1406cafe5635SKent Overstreet } 1407cafe5635SKent Overstreet 14082a285686SKent Overstreet for (i = 0; i < nodes; i++) 14092a285686SKent Overstreet mutex_unlock(&new_nodes[i]->write_lock); 14102a285686SKent Overstreet 141105335cffSKent Overstreet closure_sync(&cl); 141205335cffSKent Overstreet 141305335cffSKent Overstreet /* We emptied out this node */ 141405335cffSKent Overstreet BUG_ON(btree_bset_first(new_nodes[0])->keys); 141505335cffSKent Overstreet btree_node_free(new_nodes[0]); 141605335cffSKent Overstreet rw_unlock(true, new_nodes[0]); 1417400ffaa2SSlava Pestov new_nodes[0] = NULL; 141805335cffSKent Overstreet 1419a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) { 14200a63b66dSKent Overstreet if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) 1421a1f0358bSKent Overstreet goto out_nocoalesce; 1422a1f0358bSKent Overstreet 14230a63b66dSKent Overstreet make_btree_freeing_key(r[i].b, keylist.top); 14240a63b66dSKent Overstreet bch_keylist_push(&keylist); 1425a1f0358bSKent Overstreet } 1426a1f0358bSKent Overstreet 14270a63b66dSKent Overstreet 
bch_btree_insert_node(b, op, &keylist, NULL, NULL); 14280a63b66dSKent Overstreet BUG_ON(!bch_keylist_empty(&keylist)); 1429a1f0358bSKent Overstreet 1430a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) { 1431a1f0358bSKent Overstreet btree_node_free(r[i].b); 1432a1f0358bSKent Overstreet rw_unlock(true, r[i].b); 1433a1f0358bSKent Overstreet 1434a1f0358bSKent Overstreet r[i].b = new_nodes[i]; 1435a1f0358bSKent Overstreet } 1436a1f0358bSKent Overstreet 1437a1f0358bSKent Overstreet memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); 1438a1f0358bSKent Overstreet r[nodes - 1].b = ERR_PTR(-EINTR); 1439cafe5635SKent Overstreet 1440c37511b8SKent Overstreet trace_bcache_btree_gc_coalesce(nodes); 1441cafe5635SKent Overstreet gc->nodes--; 1442cafe5635SKent Overstreet 14430a63b66dSKent Overstreet bch_keylist_free(&keylist); 14440a63b66dSKent Overstreet 1445a1f0358bSKent Overstreet /* Invalidated our iterator */ 1446a1f0358bSKent Overstreet return -EINTR; 1447a1f0358bSKent Overstreet 1448a1f0358bSKent Overstreet out_nocoalesce: 1449a1f0358bSKent Overstreet closure_sync(&cl); 1451a1f0358bSKent Overstreet 14520a63b66dSKent Overstreet while ((k = bch_keylist_pop(&keylist))) 1453a1f0358bSKent Overstreet if (!bkey_cmp(k, &ZERO_KEY)) 1454a1f0358bSKent Overstreet atomic_dec(&b->c->prio_blocked); 14500a63b66dSKent Overstreet bch_keylist_free(&keylist); /* must come after the pop loop - popping from a freed keylist is a use-after-free */ 1455a1f0358bSKent Overstreet 1456a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) 1457a1f0358bSKent Overstreet if (!IS_ERR_OR_NULL(new_nodes[i])) { 1458a1f0358bSKent Overstreet btree_node_free(new_nodes[i]); 1459a1f0358bSKent Overstreet rw_unlock(true, new_nodes[i]); 1460a1f0358bSKent Overstreet } 1461a1f0358bSKent Overstreet return 0; 1462a1f0358bSKent Overstreet } 1463a1f0358bSKent Overstreet 14640a63b66dSKent Overstreet static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, 14650a63b66dSKent Overstreet struct btree *replace) 14660a63b66dSKent Overstreet { 14670a63b66dSKent Overstreet struct keylist keys; 14680a63b66dSKent Overstreet struct btree *n; 14690a63b66dSKent Overstreet 14700a63b66dSKent Overstreet if (btree_check_reserve(b, NULL)) 14710a63b66dSKent Overstreet return 0; 14720a63b66dSKent Overstreet 14730a63b66dSKent Overstreet n = btree_node_alloc_replacement(replace, NULL); if (IS_ERR_OR_NULL(n)) return 0; /* allocation failed - nothing to rewrite */ 14740a63b66dSKent Overstreet 14750a63b66dSKent Overstreet /* recheck reserve after allocating replacement node */ 14760a63b66dSKent Overstreet if (btree_check_reserve(b, NULL)) { 14770a63b66dSKent Overstreet btree_node_free(n); 14780a63b66dSKent Overstreet rw_unlock(true, n); 14790a63b66dSKent Overstreet return 0; 14800a63b66dSKent Overstreet } 14810a63b66dSKent Overstreet 14820a63b66dSKent Overstreet bch_btree_node_write_sync(n); 14830a63b66dSKent Overstreet 14840a63b66dSKent Overstreet bch_keylist_init(&keys); 14850a63b66dSKent Overstreet bch_keylist_add(&keys, &n->key); 14860a63b66dSKent Overstreet 14870a63b66dSKent Overstreet make_btree_freeing_key(replace, keys.top); 14880a63b66dSKent Overstreet bch_keylist_push(&keys); 14890a63b66dSKent Overstreet 14900a63b66dSKent Overstreet bch_btree_insert_node(b, op, &keys, NULL, NULL); 14910a63b66dSKent Overstreet BUG_ON(!bch_keylist_empty(&keys)); 14920a63b66dSKent Overstreet 14930a63b66dSKent Overstreet btree_node_free(replace); 14940a63b66dSKent Overstreet rw_unlock(true, n); 14950a63b66dSKent Overstreet 14960a63b66dSKent Overstreet /* Invalidated our iterator */ 14970a63b66dSKent Overstreet return -EINTR; 14980a63b66dSKent Overstreet } 14990a63b66dSKent Overstreet 1500a1f0358bSKent Overstreet static unsigned
btree_gc_count_keys(struct btree *b) 1501a1f0358bSKent Overstreet { 1502a1f0358bSKent Overstreet struct bkey *k; 1503a1f0358bSKent Overstreet struct btree_iter iter; 1504a1f0358bSKent Overstreet unsigned ret = 0; 1505a1f0358bSKent Overstreet 1506c052dd9aSKent Overstreet for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) 1507a1f0358bSKent Overstreet ret += bkey_u64s(k); 1508a1f0358bSKent Overstreet 1509a1f0358bSKent Overstreet return ret; 1510cafe5635SKent Overstreet } 1511cafe5635SKent Overstreet 1512cafe5635SKent Overstreet static int btree_gc_recurse(struct btree *b, struct btree_op *op, 1513cafe5635SKent Overstreet struct closure *writes, struct gc_stat *gc) 1514cafe5635SKent Overstreet { 1515a1f0358bSKent Overstreet int ret = 0; 1516a1f0358bSKent Overstreet bool should_rewrite; 1517a1f0358bSKent Overstreet struct bkey *k; 1518a1f0358bSKent Overstreet struct btree_iter iter; 1519cafe5635SKent Overstreet struct gc_merge_info r[GC_MERGE_NODES]; 15202a285686SKent Overstreet struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; 1521cafe5635SKent Overstreet 1522c052dd9aSKent Overstreet bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); 1523cafe5635SKent Overstreet 15242a285686SKent Overstreet for (i = r; i < r + ARRAY_SIZE(r); i++) 15252a285686SKent Overstreet i->b = ERR_PTR(-EINTR); 1526cafe5635SKent Overstreet 1527a1f0358bSKent Overstreet while (1) { 1528a85e968eSKent Overstreet k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); 1529a1f0358bSKent Overstreet if (k) { 15300a63b66dSKent Overstreet r->b = bch_btree_node_get(b->c, op, k, b->level - 1, 15312452cc89SSlava Pestov true, b); 1532cafe5635SKent Overstreet if (IS_ERR(r->b)) { 1533cafe5635SKent Overstreet ret = PTR_ERR(r->b); 1534cafe5635SKent Overstreet break; 1535cafe5635SKent Overstreet } 1536cafe5635SKent Overstreet 1537a1f0358bSKent Overstreet r->keys = btree_gc_count_keys(r->b); 1538cafe5635SKent Overstreet 15390a63b66dSKent Overstreet ret = btree_gc_coalesce(b, op, gc, r); 1540a1f0358bSKent Overstreet if (ret) 1541cafe5635SKent Overstreet break; 1542cafe5635SKent Overstreet } 1543cafe5635SKent Overstreet 1544a1f0358bSKent Overstreet if (!last->b) 1545a1f0358bSKent Overstreet break; 1546cafe5635SKent Overstreet 1547a1f0358bSKent Overstreet if (!IS_ERR(last->b)) { 1548a1f0358bSKent Overstreet should_rewrite = btree_gc_mark_node(last->b, gc); 15490a63b66dSKent Overstreet if (should_rewrite) { 15500a63b66dSKent Overstreet ret = btree_gc_rewrite_node(b, op, last->b); 15510a63b66dSKent Overstreet if (ret) 1552a1f0358bSKent Overstreet break; 1553a1f0358bSKent Overstreet } 1554a1f0358bSKent Overstreet 1555a1f0358bSKent Overstreet if (last->b->level) { 1556a1f0358bSKent Overstreet ret = btree_gc_recurse(last->b, op, writes, gc); 1557a1f0358bSKent Overstreet if (ret) 1558a1f0358bSKent Overstreet break; 1559a1f0358bSKent Overstreet } 1560a1f0358bSKent Overstreet 1561a1f0358bSKent Overstreet bkey_copy_key(&b->c->gc_done, &last->b->key); 1562a1f0358bSKent Overstreet 1563a1f0358bSKent Overstreet /* 1564a1f0358bSKent Overstreet * Must flush leaf nodes before gc ends, since replace 1565a1f0358bSKent Overstreet * operations aren't journalled 1566cafe5635SKent Overstreet */ 15672a285686SKent Overstreet mutex_lock(&last->b->write_lock); 1568a1f0358bSKent Overstreet if (btree_node_dirty(last->b)) 1569a1f0358bSKent Overstreet bch_btree_node_write(last->b, writes); 15702a285686SKent Overstreet mutex_unlock(&last->b->write_lock); 1571a1f0358bSKent Overstreet rw_unlock(true, last->b); 1572a1f0358bSKent Overstreet } 1573a1f0358bSKent 
Overstreet 1574a1f0358bSKent Overstreet memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); 1575a1f0358bSKent Overstreet r->b = NULL; 1576a1f0358bSKent Overstreet 1577cafe5635SKent Overstreet if (need_resched()) { 1578cafe5635SKent Overstreet ret = -EAGAIN; 1579cafe5635SKent Overstreet break; 1580cafe5635SKent Overstreet } 1581cafe5635SKent Overstreet } 1582cafe5635SKent Overstreet 15832a285686SKent Overstreet for (i = r; i < r + ARRAY_SIZE(r); i++) 15842a285686SKent Overstreet if (!IS_ERR_OR_NULL(i->b)) { 15852a285686SKent Overstreet mutex_lock(&i->b->write_lock); 15862a285686SKent Overstreet if (btree_node_dirty(i->b)) 15872a285686SKent Overstreet bch_btree_node_write(i->b, writes); 15882a285686SKent Overstreet mutex_unlock(&i->b->write_lock); 15892a285686SKent Overstreet rw_unlock(true, i->b); 1590a1f0358bSKent Overstreet } 1591cafe5635SKent Overstreet 1592cafe5635SKent Overstreet return ret; 1593cafe5635SKent Overstreet } 1594cafe5635SKent Overstreet 1595cafe5635SKent Overstreet static int bch_btree_gc_root(struct btree *b, struct btree_op *op, 1596cafe5635SKent Overstreet struct closure *writes, struct gc_stat *gc) 1597cafe5635SKent Overstreet { 1598cafe5635SKent Overstreet struct btree *n = NULL; 1599a1f0358bSKent Overstreet int ret = 0; 1600a1f0358bSKent Overstreet bool should_rewrite; 1601cafe5635SKent Overstreet 1602a1f0358bSKent Overstreet should_rewrite = btree_gc_mark_node(b, gc); 1603a1f0358bSKent Overstreet if (should_rewrite) { 16040a63b66dSKent Overstreet n = btree_node_alloc_replacement(b, NULL); 1605cafe5635SKent Overstreet 1606cafe5635SKent Overstreet if (!IS_ERR_OR_NULL(n)) { 1607a1f0358bSKent Overstreet bch_btree_node_write_sync(n); 16082a285686SKent Overstreet 1609a1f0358bSKent Overstreet bch_btree_set_root(n); 1610a1f0358bSKent Overstreet btree_node_free(b); 1611a1f0358bSKent Overstreet rw_unlock(true, n); 1612a1f0358bSKent Overstreet 1613a1f0358bSKent Overstreet return -EINTR; 1614cafe5635SKent Overstreet } 1615a1f0358bSKent Overstreet } 1616a1f0358bSKent Overstreet 1617487dded8SKent Overstreet __bch_btree_mark_key(b->c, b->level + 1, &b->key); 1618487dded8SKent Overstreet 1619a1f0358bSKent Overstreet if (b->level) { 1620a1f0358bSKent Overstreet ret = btree_gc_recurse(b, op, writes, gc); 1621a1f0358bSKent Overstreet if (ret) 1622a1f0358bSKent Overstreet return ret; 1623a1f0358bSKent Overstreet } 1624a1f0358bSKent Overstreet 1625a1f0358bSKent Overstreet bkey_copy_key(&b->c->gc_done, &b->key); 1626cafe5635SKent Overstreet 1627cafe5635SKent Overstreet return ret; 1628cafe5635SKent Overstreet } 1629cafe5635SKent Overstreet 1630cafe5635SKent Overstreet static void btree_gc_start(struct cache_set *c) 1631cafe5635SKent Overstreet { 1632cafe5635SKent Overstreet struct cache *ca; 1633cafe5635SKent Overstreet struct bucket *b; 1634cafe5635SKent Overstreet unsigned i; 1635cafe5635SKent Overstreet 1636cafe5635SKent Overstreet if (!c->gc_mark_valid) 1637cafe5635SKent Overstreet return; 1638cafe5635SKent Overstreet 1639cafe5635SKent Overstreet mutex_lock(&c->bucket_lock); 1640cafe5635SKent Overstreet 1641cafe5635SKent Overstreet c->gc_mark_valid = 0; 1642cafe5635SKent Overstreet c->gc_done = ZERO_KEY; 1643cafe5635SKent Overstreet 1644cafe5635SKent Overstreet for_each_cache(ca, c, i) 1645cafe5635SKent Overstreet for_each_bucket(b, ca) { 16463a2fd9d5SKent Overstreet b->last_gc = b->gen; 164729ebf465SKent Overstreet if (!atomic_read(&b->pin)) { 16484fe6a816SKent Overstreet SET_GC_MARK(b, 0); 164929ebf465SKent Overstreet SET_GC_SECTORS_USED(b, 0); 165029ebf465SKent Overstreet } 
1651cafe5635SKent Overstreet } 1652cafe5635SKent Overstreet 1653cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock); 1654cafe5635SKent Overstreet } 1655cafe5635SKent Overstreet 16562531d9eeSKent Overstreet static size_t bch_btree_gc_finish(struct cache_set *c) 1657cafe5635SKent Overstreet { 1658cafe5635SKent Overstreet size_t available = 0; 1659cafe5635SKent Overstreet struct bucket *b; 1660cafe5635SKent Overstreet struct cache *ca; 1661cafe5635SKent Overstreet unsigned i; 1662cafe5635SKent Overstreet 1663cafe5635SKent Overstreet mutex_lock(&c->bucket_lock); 1664cafe5635SKent Overstreet 1665cafe5635SKent Overstreet set_gc_sectors(c); 1666cafe5635SKent Overstreet c->gc_mark_valid = 1; 1667cafe5635SKent Overstreet c->need_gc = 0; 1668cafe5635SKent Overstreet 1669cafe5635SKent Overstreet for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) 1670cafe5635SKent Overstreet SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), 1671cafe5635SKent Overstreet GC_MARK_METADATA); 1672cafe5635SKent Overstreet 1673bf0a628aSNicholas Swenson /* don't reclaim buckets to which writeback keys point */ 1674bf0a628aSNicholas Swenson rcu_read_lock(); 1675bf0a628aSNicholas Swenson for (i = 0; i < c->nr_uuids; i++) { 1676bf0a628aSNicholas Swenson struct bcache_device *d = c->devices[i]; 1677bf0a628aSNicholas Swenson struct cached_dev *dc; 1678bf0a628aSNicholas Swenson struct keybuf_key *w, *n; 1679bf0a628aSNicholas Swenson unsigned j; 1680bf0a628aSNicholas Swenson 1681bf0a628aSNicholas Swenson if (!d || UUID_FLASH_ONLY(&c->uuids[i])) 1682bf0a628aSNicholas Swenson continue; 1683bf0a628aSNicholas Swenson dc = container_of(d, struct cached_dev, disk); 1684bf0a628aSNicholas Swenson 1685bf0a628aSNicholas Swenson spin_lock(&dc->writeback_keys.lock); 1686bf0a628aSNicholas Swenson rbtree_postorder_for_each_entry_safe(w, n, 1687bf0a628aSNicholas Swenson &dc->writeback_keys.keys, node) 1688bf0a628aSNicholas Swenson for (j = 0; j < KEY_PTRS(&w->key); j++) 1689bf0a628aSNicholas Swenson SET_GC_MARK(PTR_BUCKET(c, &w->key, j), 1690bf0a628aSNicholas Swenson GC_MARK_DIRTY); 1691bf0a628aSNicholas Swenson spin_unlock(&dc->writeback_keys.lock); 1692bf0a628aSNicholas Swenson } 1693bf0a628aSNicholas Swenson rcu_read_unlock(); 1694bf0a628aSNicholas Swenson 1695cafe5635SKent Overstreet for_each_cache(ca, c, i) { 1696cafe5635SKent Overstreet uint64_t *i; 1697cafe5635SKent Overstreet 1698cafe5635SKent Overstreet ca->invalidate_needs_gc = 0; 1699cafe5635SKent Overstreet 1700cafe5635SKent Overstreet for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++) 1701cafe5635SKent Overstreet SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1702cafe5635SKent Overstreet 1703cafe5635SKent Overstreet for (i = ca->prio_buckets; 1704cafe5635SKent Overstreet i < ca->prio_buckets + prio_buckets(ca) * 2; i++) 1705cafe5635SKent Overstreet SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1706cafe5635SKent Overstreet 1707cafe5635SKent Overstreet for_each_bucket(b, ca) { 1708cafe5635SKent Overstreet c->need_gc = max(c->need_gc, bucket_gc_gen(b)); 1709cafe5635SKent Overstreet 17104fe6a816SKent Overstreet if (atomic_read(&b->pin)) 17114fe6a816SKent Overstreet continue; 17124fe6a816SKent Overstreet 17134fe6a816SKent Overstreet BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); 17144fe6a816SKent Overstreet 17154fe6a816SKent Overstreet if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) 1716cafe5635SKent Overstreet available++; 1717cafe5635SKent Overstreet } 1718cafe5635SKent Overstreet } 1719cafe5635SKent Overstreet 1720cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock); 
1721cafe5635SKent Overstreet return available; 1722cafe5635SKent Overstreet } 1723cafe5635SKent Overstreet 172472a44517SKent Overstreet static void bch_btree_gc(struct cache_set *c) 1725cafe5635SKent Overstreet { 1726cafe5635SKent Overstreet int ret; 1727cafe5635SKent Overstreet unsigned long available; 1728cafe5635SKent Overstreet struct gc_stat stats; 1729cafe5635SKent Overstreet struct closure writes; 1730cafe5635SKent Overstreet struct btree_op op; 1731cafe5635SKent Overstreet uint64_t start_time = local_clock(); 173257943511SKent Overstreet 1733c37511b8SKent Overstreet trace_bcache_gc_start(c); 1734cafe5635SKent Overstreet 1735cafe5635SKent Overstreet memset(&stats, 0, sizeof(struct gc_stat)); 1736cafe5635SKent Overstreet closure_init_stack(&writes); 1737b54d6934SKent Overstreet bch_btree_op_init(&op, SHRT_MAX); 1738cafe5635SKent Overstreet 1739cafe5635SKent Overstreet btree_gc_start(c); 1740cafe5635SKent Overstreet 1741a1f0358bSKent Overstreet do { 1742cafe5635SKent Overstreet ret = btree_root(gc_root, c, &op, &writes, &stats); 1743cafe5635SKent Overstreet closure_sync(&writes); 1744cafe5635SKent Overstreet 1745a1f0358bSKent Overstreet if (ret && ret != -EAGAIN) 1746cafe5635SKent Overstreet pr_warn("gc failed!"); 1747a1f0358bSKent Overstreet } while (ret); 1748cafe5635SKent Overstreet 1749cafe5635SKent Overstreet available = bch_btree_gc_finish(c); 175057943511SKent Overstreet wake_up_allocators(c); 175157943511SKent Overstreet 1752169ef1cfSKent Overstreet bch_time_stats_update(&c->btree_gc_time, start_time); 1753cafe5635SKent Overstreet 1754cafe5635SKent Overstreet stats.key_bytes *= sizeof(uint64_t); 1755cafe5635SKent Overstreet stats.data <<= 9; 1756cafe5635SKent Overstreet stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets; 1757cafe5635SKent Overstreet memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); 1758cafe5635SKent Overstreet 1759c37511b8SKent Overstreet trace_bcache_gc_end(c); 1760cafe5635SKent Overstreet 176172a44517SKent Overstreet bch_moving_gc(c); 1762cafe5635SKent Overstreet } 1763cafe5635SKent Overstreet 176472a44517SKent Overstreet static int bch_gc_thread(void *arg) 1765cafe5635SKent Overstreet { 176672a44517SKent Overstreet struct cache_set *c = arg; 1767a1f0358bSKent Overstreet struct cache *ca; 1768a1f0358bSKent Overstreet unsigned i; 176972a44517SKent Overstreet 177072a44517SKent Overstreet while (1) { 1771a1f0358bSKent Overstreet again: 177272a44517SKent Overstreet bch_btree_gc(c); 177372a44517SKent Overstreet 177472a44517SKent Overstreet set_current_state(TASK_INTERRUPTIBLE); 177572a44517SKent Overstreet if (kthread_should_stop()) 177672a44517SKent Overstreet break; 177772a44517SKent Overstreet 1778a1f0358bSKent Overstreet mutex_lock(&c->bucket_lock); 1779a1f0358bSKent Overstreet 1780a1f0358bSKent Overstreet for_each_cache(ca, c, i) 1781a1f0358bSKent Overstreet if (ca->invalidate_needs_gc) { 1782a1f0358bSKent Overstreet mutex_unlock(&c->bucket_lock); 1783a1f0358bSKent Overstreet set_current_state(TASK_RUNNING); 1784a1f0358bSKent Overstreet goto again; 1785a1f0358bSKent Overstreet } 1786a1f0358bSKent Overstreet 1787a1f0358bSKent Overstreet mutex_unlock(&c->bucket_lock); 1788a1f0358bSKent Overstreet 178972a44517SKent Overstreet try_to_freeze(); 179072a44517SKent Overstreet schedule(); 179172a44517SKent Overstreet } 179272a44517SKent Overstreet 179372a44517SKent Overstreet return 0; 179472a44517SKent Overstreet } 179572a44517SKent Overstreet 179672a44517SKent Overstreet int bch_gc_thread_start(struct cache_set *c) 179772a44517SKent Overstreet { 
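/*
 * The gc thread is created asleep: set_task_state() below leaves it in
 * TASK_INTERRUPTIBLE, so garbage collection does no work until something
 * wakes the thread (normally wake_up_gc()).
 */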
179872a44517SKent Overstreet c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc"); 179972a44517SKent Overstreet if (IS_ERR(c->gc_thread)) 180072a44517SKent Overstreet return PTR_ERR(c->gc_thread); 180172a44517SKent Overstreet 180272a44517SKent Overstreet set_task_state(c->gc_thread, TASK_INTERRUPTIBLE); 180372a44517SKent Overstreet return 0; 1804cafe5635SKent Overstreet } 1805cafe5635SKent Overstreet 1806cafe5635SKent Overstreet /* Initial partial gc */ 1807cafe5635SKent Overstreet 1808487dded8SKent Overstreet static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) 1809cafe5635SKent Overstreet { 181050310164SKent Overstreet int ret = 0; 181150310164SKent Overstreet struct bkey *k, *p = NULL; 1812cafe5635SKent Overstreet struct btree_iter iter; 1813cafe5635SKent Overstreet 1814487dded8SKent Overstreet for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) 1815487dded8SKent Overstreet bch_initial_mark_key(b->c, b->level, k); 1816cafe5635SKent Overstreet 1817487dded8SKent Overstreet bch_initial_mark_key(b->c, b->level + 1, &b->key); 1818cafe5635SKent Overstreet 1819cafe5635SKent Overstreet if (b->level) { 1820c052dd9aSKent Overstreet bch_btree_iter_init(&b->keys, &iter, NULL); 1821cafe5635SKent Overstreet 182250310164SKent Overstreet do { 1823a85e968eSKent Overstreet k = bch_btree_iter_next_filter(&iter, &b->keys, 1824a85e968eSKent Overstreet bch_ptr_bad); 182550310164SKent Overstreet if (k) 18262452cc89SSlava Pestov btree_node_prefetch(b, k); 182750310164SKent Overstreet 1828cafe5635SKent Overstreet if (p) 1829487dded8SKent Overstreet ret = btree(check_recurse, p, b, op); 1830cafe5635SKent Overstreet 183150310164SKent Overstreet p = k; 183250310164SKent Overstreet } while (p && !ret); 1833cafe5635SKent Overstreet } 1834cafe5635SKent Overstreet 1835487dded8SKent Overstreet return ret; 1836cafe5635SKent Overstreet } 1837cafe5635SKent Overstreet 1838c18536a7SKent Overstreet int bch_btree_check(struct cache_set *c) 1839cafe5635SKent Overstreet { 1840c18536a7SKent Overstreet struct btree_op op; 1841cafe5635SKent Overstreet 1842b54d6934SKent Overstreet bch_btree_op_init(&op, SHRT_MAX); 1843cafe5635SKent Overstreet 1844487dded8SKent Overstreet return btree_root(check_recurse, c, &op); 1845cafe5635SKent Overstreet } 1846cafe5635SKent Overstreet 18472531d9eeSKent Overstreet void bch_initial_gc_finish(struct cache_set *c) 18482531d9eeSKent Overstreet { 18492531d9eeSKent Overstreet struct cache *ca; 18502531d9eeSKent Overstreet struct bucket *b; 18512531d9eeSKent Overstreet unsigned i; 18522531d9eeSKent Overstreet 18532531d9eeSKent Overstreet bch_btree_gc_finish(c); 18542531d9eeSKent Overstreet 18552531d9eeSKent Overstreet mutex_lock(&c->bucket_lock); 18562531d9eeSKent Overstreet 18572531d9eeSKent Overstreet /* 18582531d9eeSKent Overstreet * We need to put some unused buckets directly on the prio freelist in 18592531d9eeSKent Overstreet * order to get the allocator thread started - it needs freed buckets in 18602531d9eeSKent Overstreet * order to rewrite the prios and gens, and it needs to rewrite prios 18612531d9eeSKent Overstreet * and gens in order to free buckets. 18622531d9eeSKent Overstreet * 18632531d9eeSKent Overstreet * This is only safe for buckets that have no live data in them, which 18642531d9eeSKent Overstreet * there should always be some of. 
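 * (The !GC_MARK(b) check below is what enforces this: any bucket still
 * holding btree or cached data was marked during the initial gc pass.)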
18652531d9eeSKent Overstreet */ 18662531d9eeSKent Overstreet for_each_cache(ca, c, i) { 18672531d9eeSKent Overstreet for_each_bucket(b, ca) { 18682531d9eeSKent Overstreet if (fifo_full(&ca->free[RESERVE_PRIO])) 18692531d9eeSKent Overstreet break; 18702531d9eeSKent Overstreet 18712531d9eeSKent Overstreet if (bch_can_invalidate_bucket(ca, b) && 18722531d9eeSKent Overstreet !GC_MARK(b)) { 18732531d9eeSKent Overstreet __bch_invalidate_one_bucket(ca, b); 18742531d9eeSKent Overstreet fifo_push(&ca->free[RESERVE_PRIO], 18752531d9eeSKent Overstreet b - ca->buckets); 18762531d9eeSKent Overstreet } 18772531d9eeSKent Overstreet } 18782531d9eeSKent Overstreet } 18792531d9eeSKent Overstreet 18802531d9eeSKent Overstreet mutex_unlock(&c->bucket_lock); 18812531d9eeSKent Overstreet } 18822531d9eeSKent Overstreet 1883cafe5635SKent Overstreet /* Btree insertion */ 1884cafe5635SKent Overstreet 1885829a60b9SKent Overstreet static bool btree_insert_key(struct btree *b, struct bkey *k, 18861b207d80SKent Overstreet struct bkey *replace_key) 1887cafe5635SKent Overstreet { 1888829a60b9SKent Overstreet unsigned status; 1889cafe5635SKent Overstreet 1890cafe5635SKent Overstreet BUG_ON(bkey_cmp(k, &b->key) > 0); 1891cafe5635SKent Overstreet 1892829a60b9SKent Overstreet status = bch_btree_insert_key(&b->keys, k, replace_key); 1893829a60b9SKent Overstreet if (status != BTREE_INSERT_STATUS_NO_INSERT) { 1894dc9d98d6SKent Overstreet bch_check_keys(&b->keys, "%u for %s", status, 18951b207d80SKent Overstreet replace_key ? "replace" : "insert"); 1896cafe5635SKent Overstreet 1897829a60b9SKent Overstreet trace_bcache_btree_insert_key(b, k, replace_key != NULL, 1898829a60b9SKent Overstreet status); 1899cafe5635SKent Overstreet return true; 1900829a60b9SKent Overstreet } else 1901829a60b9SKent Overstreet return false; 1902cafe5635SKent Overstreet } 1903cafe5635SKent Overstreet 190459158fdeSKent Overstreet static size_t insert_u64s_remaining(struct btree *b) 190559158fdeSKent Overstreet { 19063572324aSKent Overstreet long ret = bch_btree_keys_u64s_remaining(&b->keys); 190759158fdeSKent Overstreet 190859158fdeSKent Overstreet /* 190959158fdeSKent Overstreet * Might land in the middle of an existing extent and have to split it 191059158fdeSKent Overstreet */ 191159158fdeSKent Overstreet if (b->keys.ops->is_extents) 191259158fdeSKent Overstreet ret -= KEY_MAX_U64S; 191359158fdeSKent Overstreet 191459158fdeSKent Overstreet return max(ret, 0L); 191559158fdeSKent Overstreet } 191659158fdeSKent Overstreet 191726c949f8SKent Overstreet static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, 19181b207d80SKent Overstreet struct keylist *insert_keys, 19191b207d80SKent Overstreet struct bkey *replace_key) 1920cafe5635SKent Overstreet { 1921cafe5635SKent Overstreet bool ret = false; 1922dc9d98d6SKent Overstreet int oldsize = bch_count_data(&b->keys); 1923cafe5635SKent Overstreet 192426c949f8SKent Overstreet while (!bch_keylist_empty(insert_keys)) { 1925c2f95ae2SKent Overstreet struct bkey *k = insert_keys->keys; 192626c949f8SKent Overstreet 192759158fdeSKent Overstreet if (bkey_u64s(k) > insert_u64s_remaining(b)) 1928403b6cdeSKent Overstreet break; 1929403b6cdeSKent Overstreet 1930403b6cdeSKent Overstreet if (bkey_cmp(k, &b->key) <= 0) { 19313a3b6a4eSKent Overstreet if (!b->level) 19323a3b6a4eSKent Overstreet bkey_put(b->c, k); 193326c949f8SKent Overstreet 1934829a60b9SKent Overstreet ret |= btree_insert_key(b, k, replace_key); 193526c949f8SKent Overstreet bch_keylist_pop_front(insert_keys); 193626c949f8SKent Overstreet } else if 
(bkey_cmp(&START_KEY(k), &b->key) < 0) { 193726c949f8SKent Overstreet BKEY_PADDED(key) temp; 1938c2f95ae2SKent Overstreet bkey_copy(&temp.key, insert_keys->keys); 193926c949f8SKent Overstreet 194026c949f8SKent Overstreet bch_cut_back(&b->key, &temp.key); 1941c2f95ae2SKent Overstreet bch_cut_front(&b->key, insert_keys->keys); 194226c949f8SKent Overstreet 1943829a60b9SKent Overstreet ret |= btree_insert_key(b, &temp.key, replace_key); 194426c949f8SKent Overstreet break; 194526c949f8SKent Overstreet } else { 194626c949f8SKent Overstreet break; 194726c949f8SKent Overstreet } 1948cafe5635SKent Overstreet } 1949cafe5635SKent Overstreet 1950829a60b9SKent Overstreet if (!ret) 1951829a60b9SKent Overstreet op->insert_collision = true; 1952829a60b9SKent Overstreet 1953403b6cdeSKent Overstreet BUG_ON(!bch_keylist_empty(insert_keys) && b->level); 1954403b6cdeSKent Overstreet 1955dc9d98d6SKent Overstreet BUG_ON(bch_count_data(&b->keys) < oldsize); 1956cafe5635SKent Overstreet return ret; 1957cafe5635SKent Overstreet } 1958cafe5635SKent Overstreet 195926c949f8SKent Overstreet static int btree_split(struct btree *b, struct btree_op *op, 196026c949f8SKent Overstreet struct keylist *insert_keys, 19611b207d80SKent Overstreet struct bkey *replace_key) 1962cafe5635SKent Overstreet { 1963d6fd3b11SKent Overstreet bool split; 1964cafe5635SKent Overstreet struct btree *n1, *n2 = NULL, *n3 = NULL; 1965cafe5635SKent Overstreet uint64_t start_time = local_clock(); 1966b54d6934SKent Overstreet struct closure cl; 196717e21a9fSKent Overstreet struct keylist parent_keys; 1968b54d6934SKent Overstreet 1969b54d6934SKent Overstreet closure_init_stack(&cl); 197017e21a9fSKent Overstreet bch_keylist_init(&parent_keys); 1971cafe5635SKent Overstreet 19720a63b66dSKent Overstreet if (btree_check_reserve(b, op)) { 19730a63b66dSKent Overstreet if (!b->level) 197478365411SKent Overstreet return -EINTR; 19750a63b66dSKent Overstreet else 19760a63b66dSKent Overstreet WARN(1, "insufficient reserve for split\n"); 19770a63b66dSKent Overstreet } 197878365411SKent Overstreet 19790a63b66dSKent Overstreet n1 = btree_node_alloc_replacement(b, op); 1980cafe5635SKent Overstreet if (IS_ERR(n1)) 1981cafe5635SKent Overstreet goto err; 1982cafe5635SKent Overstreet 1983ee811287SKent Overstreet split = set_blocks(btree_bset_first(n1), 1984ee811287SKent Overstreet block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; 1985cafe5635SKent Overstreet 1986cafe5635SKent Overstreet if (split) { 1987cafe5635SKent Overstreet unsigned keys = 0; 1988cafe5635SKent Overstreet 1989ee811287SKent Overstreet trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); 1990c37511b8SKent Overstreet 19912452cc89SSlava Pestov n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); 1992cafe5635SKent Overstreet if (IS_ERR(n2)) 1993cafe5635SKent Overstreet goto err_free1; 1994cafe5635SKent Overstreet 1995d6fd3b11SKent Overstreet if (!b->parent) { 19962452cc89SSlava Pestov n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); 1997cafe5635SKent Overstreet if (IS_ERR(n3)) 1998cafe5635SKent Overstreet goto err_free2; 1999cafe5635SKent Overstreet } 2000cafe5635SKent Overstreet 20012a285686SKent Overstreet mutex_lock(&n1->write_lock); 20022a285686SKent Overstreet mutex_lock(&n2->write_lock); 20032a285686SKent Overstreet 20041b207d80SKent Overstreet bch_btree_insert_keys(n1, op, insert_keys, replace_key); 2005cafe5635SKent Overstreet 2006d6fd3b11SKent Overstreet /* 2007d6fd3b11SKent Overstreet * Has to be a linear search because we don't have an auxiliary 2008cafe5635SKent Overstreet 
* search tree yet 2009cafe5635SKent Overstreet */ 2010cafe5635SKent Overstreet 2011ee811287SKent Overstreet while (keys < (btree_bset_first(n1)->keys * 3) / 5) 2012ee811287SKent Overstreet keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), 2013fafff81cSKent Overstreet keys)); 2014cafe5635SKent Overstreet 2015fafff81cSKent Overstreet bkey_copy_key(&n1->key, 2016ee811287SKent Overstreet bset_bkey_idx(btree_bset_first(n1), keys)); 2017ee811287SKent Overstreet keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys)); 2018cafe5635SKent Overstreet 2019ee811287SKent Overstreet btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; 2020ee811287SKent Overstreet btree_bset_first(n1)->keys = keys; 2021cafe5635SKent Overstreet 2022ee811287SKent Overstreet memcpy(btree_bset_first(n2)->start, 2023ee811287SKent Overstreet bset_bkey_last(btree_bset_first(n1)), 2024ee811287SKent Overstreet btree_bset_first(n2)->keys * sizeof(uint64_t)); 2025cafe5635SKent Overstreet 2026cafe5635SKent Overstreet bkey_copy_key(&n2->key, &b->key); 2027cafe5635SKent Overstreet 202817e21a9fSKent Overstreet bch_keylist_add(&parent_keys, &n2->key); 2029b54d6934SKent Overstreet bch_btree_node_write(n2, &cl); 20302a285686SKent Overstreet mutex_unlock(&n2->write_lock); 2031cafe5635SKent Overstreet rw_unlock(true, n2); 2032c37511b8SKent Overstreet } else { 2033ee811287SKent Overstreet trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); 2034c37511b8SKent Overstreet 20352a285686SKent Overstreet mutex_lock(&n1->write_lock); 20361b207d80SKent Overstreet bch_btree_insert_keys(n1, op, insert_keys, replace_key); 2037c37511b8SKent Overstreet } 2038cafe5635SKent Overstreet 203917e21a9fSKent Overstreet bch_keylist_add(&parent_keys, &n1->key); 2040b54d6934SKent Overstreet bch_btree_node_write(n1, &cl); 20412a285686SKent Overstreet mutex_unlock(&n1->write_lock); 2042cafe5635SKent Overstreet 2043cafe5635SKent Overstreet if (n3) { 2044d6fd3b11SKent Overstreet /* Depth increases, make a new root */ 20452a285686SKent Overstreet mutex_lock(&n3->write_lock); 2046cafe5635SKent Overstreet bkey_copy_key(&n3->key, &MAX_KEY); 204717e21a9fSKent Overstreet bch_btree_insert_keys(n3, op, &parent_keys, NULL); 2048b54d6934SKent Overstreet bch_btree_node_write(n3, &cl); 20492a285686SKent Overstreet mutex_unlock(&n3->write_lock); 2050cafe5635SKent Overstreet 2051b54d6934SKent Overstreet closure_sync(&cl); 2052cafe5635SKent Overstreet bch_btree_set_root(n3); 2053cafe5635SKent Overstreet rw_unlock(true, n3); 2054d6fd3b11SKent Overstreet } else if (!b->parent) { 2055d6fd3b11SKent Overstreet /* Root filled up but didn't need to be split */ 2056b54d6934SKent Overstreet closure_sync(&cl); 2057cafe5635SKent Overstreet bch_btree_set_root(n1); 2058cafe5635SKent Overstreet } else { 205917e21a9fSKent Overstreet /* Split a non root node */ 2060b54d6934SKent Overstreet closure_sync(&cl); 206117e21a9fSKent Overstreet make_btree_freeing_key(b, parent_keys.top); 206217e21a9fSKent Overstreet bch_keylist_push(&parent_keys); 206317e21a9fSKent Overstreet 206417e21a9fSKent Overstreet bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); 206517e21a9fSKent Overstreet BUG_ON(!bch_keylist_empty(&parent_keys)); 2066cafe5635SKent Overstreet } 2067cafe5635SKent Overstreet 206805335cffSKent Overstreet btree_node_free(b); 2069cafe5635SKent Overstreet rw_unlock(true, n1); 2070cafe5635SKent Overstreet 2071169ef1cfSKent Overstreet bch_time_stats_update(&b->c->btree_split_time, start_time); 2072cafe5635SKent Overstreet 2073cafe5635SKent Overstreet return 0; 
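/*
 * Error paths: tear down in reverse order of allocation - each new node
 * is freed and its lock dropped before falling through to the next label.
 */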
2074cafe5635SKent Overstreet err_free2: 20755f5837d2SKent Overstreet bkey_put(b->c, &n2->key); 2076e8e1d468SKent Overstreet btree_node_free(n2); 2077cafe5635SKent Overstreet rw_unlock(true, n2); 2078cafe5635SKent Overstreet err_free1: 20795f5837d2SKent Overstreet bkey_put(b->c, &n1->key); 2080e8e1d468SKent Overstreet btree_node_free(n1); 2081cafe5635SKent Overstreet rw_unlock(true, n1); 2082cafe5635SKent Overstreet err: 20830a63b66dSKent Overstreet WARN(1, "bcache: btree split failed (level %u)", b->level); 20845f5837d2SKent Overstreet 2085cafe5635SKent Overstreet if (n3 == ERR_PTR(-EAGAIN) || 2086cafe5635SKent Overstreet n2 == ERR_PTR(-EAGAIN) || 2087cafe5635SKent Overstreet n1 == ERR_PTR(-EAGAIN)) 2088cafe5635SKent Overstreet return -EAGAIN; 2089cafe5635SKent Overstreet 2090cafe5635SKent Overstreet return -ENOMEM; 2091cafe5635SKent Overstreet } 2092cafe5635SKent Overstreet 209326c949f8SKent Overstreet static int bch_btree_insert_node(struct btree *b, struct btree_op *op, 2094c18536a7SKent Overstreet struct keylist *insert_keys, 20951b207d80SKent Overstreet atomic_t *journal_ref, 20961b207d80SKent Overstreet struct bkey *replace_key) 209726c949f8SKent Overstreet { 20982a285686SKent Overstreet struct closure cl; 20992a285686SKent Overstreet 21001b207d80SKent Overstreet BUG_ON(b->level && replace_key); 21011b207d80SKent Overstreet 21022a285686SKent Overstreet closure_init_stack(&cl); 21032a285686SKent Overstreet 21042a285686SKent Overstreet mutex_lock(&b->write_lock); 21052a285686SKent Overstreet 21062a285686SKent Overstreet if (write_block(b) != btree_bset_last(b) && 21072a285686SKent Overstreet b->keys.last_set_unwritten) 21082a285686SKent Overstreet bch_btree_init_next(b); /* just wrote a set */ 21092a285686SKent Overstreet 211059158fdeSKent Overstreet if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { 21112a285686SKent Overstreet mutex_unlock(&b->write_lock); 21122a285686SKent Overstreet goto split; 21132a285686SKent Overstreet } 21142a285686SKent Overstreet 21152a285686SKent Overstreet BUG_ON(write_block(b) != btree_bset_last(b)); 21162a285686SKent Overstreet 21172a285686SKent Overstreet if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { 21182a285686SKent Overstreet if (!b->level) 21192a285686SKent Overstreet bch_btree_leaf_dirty(b, journal_ref); 21202a285686SKent Overstreet else 21212a285686SKent Overstreet bch_btree_node_write(b, &cl); 21222a285686SKent Overstreet } 21232a285686SKent Overstreet 21242a285686SKent Overstreet mutex_unlock(&b->write_lock); 21252a285686SKent Overstreet 21262a285686SKent Overstreet /* wait for btree node write if necessary, after unlock */ 21272a285686SKent Overstreet closure_sync(&cl); 21282a285686SKent Overstreet 21292a285686SKent Overstreet return 0; 21302a285686SKent Overstreet split: 213126c949f8SKent Overstreet if (current->bio_list) { 213226c949f8SKent Overstreet op->lock = b->c->root->level + 1; 213317e21a9fSKent Overstreet return -EAGAIN; 213426c949f8SKent Overstreet } else if (op->lock <= b->c->root->level) { 213526c949f8SKent Overstreet op->lock = b->c->root->level + 1; 213617e21a9fSKent Overstreet return -EINTR; 213726c949f8SKent Overstreet } else { 213817e21a9fSKent Overstreet /* Invalidated all iterators */ 21393b3e9e50SKent Overstreet int ret = btree_split(b, op, insert_keys, replace_key); 21403b3e9e50SKent Overstreet 21412a285686SKent Overstreet if (bch_keylist_empty(insert_keys)) 214217e21a9fSKent Overstreet return 0; 21432a285686SKent Overstreet else if (!ret) 21442a285686SKent Overstreet return -EINTR; 
21452a285686SKent Overstreet return ret; 214617e21a9fSKent Overstreet } 214726c949f8SKent Overstreet } 214826c949f8SKent Overstreet 2149e7c590ebSKent Overstreet int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, 2150e7c590ebSKent Overstreet struct bkey *check_key) 2151e7c590ebSKent Overstreet { 2152e7c590ebSKent Overstreet int ret = -EINTR; 2153e7c590ebSKent Overstreet uint64_t btree_ptr = b->key.ptr[0]; 2154e7c590ebSKent Overstreet unsigned long seq = b->seq; 2155e7c590ebSKent Overstreet struct keylist insert; 2156e7c590ebSKent Overstreet bool upgrade = op->lock == -1; 2157e7c590ebSKent Overstreet 2158e7c590ebSKent Overstreet bch_keylist_init(&insert); 2159e7c590ebSKent Overstreet 2160e7c590ebSKent Overstreet if (upgrade) { 2161e7c590ebSKent Overstreet rw_unlock(false, b); 2162e7c590ebSKent Overstreet rw_lock(true, b, b->level); 2163e7c590ebSKent Overstreet 2164e7c590ebSKent Overstreet if (b->key.ptr[0] != btree_ptr || 2165e7c590ebSKent Overstreet b->seq != seq + 1) 2166e7c590ebSKent Overstreet goto out; 2167e7c590ebSKent Overstreet } 2168e7c590ebSKent Overstreet 2169e7c590ebSKent Overstreet SET_KEY_PTRS(check_key, 1); 2170e7c590ebSKent Overstreet get_random_bytes(&check_key->ptr[0], sizeof(uint64_t)); 2171e7c590ebSKent Overstreet 2172e7c590ebSKent Overstreet SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV); 2173e7c590ebSKent Overstreet 2174e7c590ebSKent Overstreet bch_keylist_add(&insert, check_key); 2175e7c590ebSKent Overstreet 21761b207d80SKent Overstreet ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); 2177e7c590ebSKent Overstreet 2178e7c590ebSKent Overstreet BUG_ON(!ret && !bch_keylist_empty(&insert)); 2179e7c590ebSKent Overstreet out: 2180e7c590ebSKent Overstreet if (upgrade) 2181e7c590ebSKent Overstreet downgrade_write(&b->lock); 2182e7c590ebSKent Overstreet return ret; 2183e7c590ebSKent Overstreet } 2184e7c590ebSKent Overstreet 2185cc7b8819SKent Overstreet struct btree_insert_op { 2186cc7b8819SKent Overstreet struct btree_op op; 2187cc7b8819SKent Overstreet struct keylist *keys; 2188cc7b8819SKent Overstreet atomic_t *journal_ref; 2189cc7b8819SKent Overstreet struct bkey *replace_key; 2190cc7b8819SKent Overstreet }; 2191cc7b8819SKent Overstreet 219208239ca2SWei Yongjun static int btree_insert_fn(struct btree_op *b_op, struct btree *b) 2193cafe5635SKent Overstreet { 2194cc7b8819SKent Overstreet struct btree_insert_op *op = container_of(b_op, 2195cc7b8819SKent Overstreet struct btree_insert_op, op); 2196403b6cdeSKent Overstreet 2197cc7b8819SKent Overstreet int ret = bch_btree_insert_node(b, &op->op, op->keys, 2198cc7b8819SKent Overstreet op->journal_ref, op->replace_key); 2199cc7b8819SKent Overstreet if (ret && !bch_keylist_empty(op->keys)) 2200cc7b8819SKent Overstreet return ret; 2201cc7b8819SKent Overstreet else 2202cc7b8819SKent Overstreet return MAP_DONE; 2203cafe5635SKent Overstreet } 2204cafe5635SKent Overstreet 2205cc7b8819SKent Overstreet int bch_btree_insert(struct cache_set *c, struct keylist *keys, 2206cc7b8819SKent Overstreet atomic_t *journal_ref, struct bkey *replace_key) 2207cafe5635SKent Overstreet { 2208cc7b8819SKent Overstreet struct btree_insert_op op; 2209cafe5635SKent Overstreet int ret = 0; 2210cafe5635SKent Overstreet 2211cc7b8819SKent Overstreet BUG_ON(current->bio_list); 22124f3d4014SKent Overstreet BUG_ON(bch_keylist_empty(keys)); 2213cafe5635SKent Overstreet 2214cc7b8819SKent Overstreet bch_btree_op_init(&op.op, 0); 2215cc7b8819SKent Overstreet op.keys = keys; 2216cc7b8819SKent Overstreet op.journal_ref = journal_ref; 2217cc7b8819SKent 
Overstreet op.replace_key = replace_key; 2218cafe5635SKent Overstreet 2219cc7b8819SKent Overstreet while (!ret && !bch_keylist_empty(keys)) { 2220cc7b8819SKent Overstreet op.op.lock = 0; 2221cc7b8819SKent Overstreet ret = bch_btree_map_leaf_nodes(&op.op, c, 2222cc7b8819SKent Overstreet &START_KEY(keys->keys), 2223cc7b8819SKent Overstreet btree_insert_fn); 2224cc7b8819SKent Overstreet } 2225cc7b8819SKent Overstreet 2226cc7b8819SKent Overstreet if (ret) { 2227cafe5635SKent Overstreet struct bkey *k; 2228cafe5635SKent Overstreet 22291b207d80SKent Overstreet pr_err("error %i", ret); 2230cafe5635SKent Overstreet 22314f3d4014SKent Overstreet while ((k = bch_keylist_pop(keys))) 22323a3b6a4eSKent Overstreet bkey_put(c, k); 2233cc7b8819SKent Overstreet } else if (op.op.insert_collision) 2234cc7b8819SKent Overstreet ret = -ESRCH; 22356054c6d4SKent Overstreet 2236cafe5635SKent Overstreet return ret; 2237cafe5635SKent Overstreet } 2238cafe5635SKent Overstreet 2239cafe5635SKent Overstreet void bch_btree_set_root(struct btree *b) 2240cafe5635SKent Overstreet { 2241cafe5635SKent Overstreet unsigned i; 2242e49c7c37SKent Overstreet struct closure cl; 2243e49c7c37SKent Overstreet 2244e49c7c37SKent Overstreet closure_init_stack(&cl); 2245cafe5635SKent Overstreet 2246c37511b8SKent Overstreet trace_bcache_btree_set_root(b); 2247c37511b8SKent Overstreet 2248cafe5635SKent Overstreet BUG_ON(!b->written); 2249cafe5635SKent Overstreet 2250cafe5635SKent Overstreet for (i = 0; i < KEY_PTRS(&b->key); i++) 2251cafe5635SKent Overstreet BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); 2252cafe5635SKent Overstreet 2253cafe5635SKent Overstreet mutex_lock(&b->c->bucket_lock); 2254cafe5635SKent Overstreet list_del_init(&b->list); 2255cafe5635SKent Overstreet mutex_unlock(&b->c->bucket_lock); 2256cafe5635SKent Overstreet 2257cafe5635SKent Overstreet b->c->root = b; 2258cafe5635SKent Overstreet 2259e49c7c37SKent Overstreet bch_journal_meta(b->c, &cl); 2260e49c7c37SKent Overstreet closure_sync(&cl); 2261cafe5635SKent Overstreet } 2262cafe5635SKent Overstreet 226348dad8baSKent Overstreet /* Map across nodes or keys */ 226448dad8baSKent Overstreet 226548dad8baSKent Overstreet static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, 226648dad8baSKent Overstreet struct bkey *from, 226748dad8baSKent Overstreet btree_map_nodes_fn *fn, int flags) 226848dad8baSKent Overstreet { 226948dad8baSKent Overstreet int ret = MAP_CONTINUE; 227048dad8baSKent Overstreet 227148dad8baSKent Overstreet if (b->level) { 227248dad8baSKent Overstreet struct bkey *k; 227348dad8baSKent Overstreet struct btree_iter iter; 227448dad8baSKent Overstreet 2275c052dd9aSKent Overstreet bch_btree_iter_init(&b->keys, &iter, from); 227648dad8baSKent Overstreet 2277a85e968eSKent Overstreet while ((k = bch_btree_iter_next_filter(&iter, &b->keys, 227848dad8baSKent Overstreet bch_ptr_bad))) { 227948dad8baSKent Overstreet ret = btree(map_nodes_recurse, k, b, 228048dad8baSKent Overstreet op, from, fn, flags); 228148dad8baSKent Overstreet from = NULL; 228248dad8baSKent Overstreet 228348dad8baSKent Overstreet if (ret != MAP_CONTINUE) 228448dad8baSKent Overstreet return ret; 228548dad8baSKent Overstreet } 228648dad8baSKent Overstreet } 228748dad8baSKent Overstreet 228848dad8baSKent Overstreet if (!b->level || flags == MAP_ALL_NODES) 228948dad8baSKent Overstreet ret = fn(op, b); 229048dad8baSKent Overstreet 229148dad8baSKent Overstreet return ret; 229248dad8baSKent Overstreet } 229348dad8baSKent Overstreet 229448dad8baSKent Overstreet int 
__bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, 229548dad8baSKent Overstreet struct bkey *from, btree_map_nodes_fn *fn, int flags) 229648dad8baSKent Overstreet { 2297b54d6934SKent Overstreet return btree_root(map_nodes_recurse, c, op, from, fn, flags); 229848dad8baSKent Overstreet } 229948dad8baSKent Overstreet 230048dad8baSKent Overstreet static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, 230148dad8baSKent Overstreet struct bkey *from, btree_map_keys_fn *fn, 230248dad8baSKent Overstreet int flags) 230348dad8baSKent Overstreet { 230448dad8baSKent Overstreet int ret = MAP_CONTINUE; 230548dad8baSKent Overstreet struct bkey *k; 230648dad8baSKent Overstreet struct btree_iter iter; 230748dad8baSKent Overstreet 2308c052dd9aSKent Overstreet bch_btree_iter_init(&b->keys, &iter, from); 230948dad8baSKent Overstreet 2310a85e968eSKent Overstreet while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { 231148dad8baSKent Overstreet ret = !b->level 231248dad8baSKent Overstreet ? fn(op, b, k) 231348dad8baSKent Overstreet : btree(map_keys_recurse, k, b, op, from, fn, flags); 231448dad8baSKent Overstreet from = NULL; 231548dad8baSKent Overstreet 231648dad8baSKent Overstreet if (ret != MAP_CONTINUE) 231748dad8baSKent Overstreet return ret; 231848dad8baSKent Overstreet } 231948dad8baSKent Overstreet 232048dad8baSKent Overstreet if (!b->level && (flags & MAP_END_KEY)) 232148dad8baSKent Overstreet ret = fn(op, b, &KEY(KEY_INODE(&b->key), 232248dad8baSKent Overstreet KEY_OFFSET(&b->key), 0)); 232348dad8baSKent Overstreet 232448dad8baSKent Overstreet return ret; 232548dad8baSKent Overstreet } 232648dad8baSKent Overstreet 232748dad8baSKent Overstreet int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, 232848dad8baSKent Overstreet struct bkey *from, btree_map_keys_fn *fn, int flags) 232948dad8baSKent Overstreet { 2330b54d6934SKent Overstreet return btree_root(map_keys_recurse, c, op, from, fn, flags); 233148dad8baSKent Overstreet } 233248dad8baSKent Overstreet 2333cafe5635SKent Overstreet /* Keybuf code */ 2334cafe5635SKent Overstreet 2335cafe5635SKent Overstreet static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r) 2336cafe5635SKent Overstreet { 2337cafe5635SKent Overstreet /* Overlapping keys compare equal */ 2338cafe5635SKent Overstreet if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0) 2339cafe5635SKent Overstreet return -1; 2340cafe5635SKent Overstreet if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0) 2341cafe5635SKent Overstreet return 1; 2342cafe5635SKent Overstreet return 0; 2343cafe5635SKent Overstreet } 2344cafe5635SKent Overstreet 2345cafe5635SKent Overstreet static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, 2346cafe5635SKent Overstreet struct keybuf_key *r) 2347cafe5635SKent Overstreet { 2348cafe5635SKent Overstreet return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1); 2349cafe5635SKent Overstreet } 2350cafe5635SKent Overstreet 235148dad8baSKent Overstreet struct refill { 235248dad8baSKent Overstreet struct btree_op op; 235348a915a8SKent Overstreet unsigned nr_found; 235448dad8baSKent Overstreet struct keybuf *buf; 235548dad8baSKent Overstreet struct bkey *end; 235648dad8baSKent Overstreet keybuf_pred_fn *pred; 235748dad8baSKent Overstreet }; 235848dad8baSKent Overstreet 235948dad8baSKent Overstreet static int refill_keybuf_fn(struct btree_op *op, struct btree *b, 236048dad8baSKent Overstreet struct bkey *k) 2361cafe5635SKent Overstreet { 236248dad8baSKent Overstreet struct refill *refill = 

/* Keybuf code */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) >= 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}
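
/*
 * Usage sketch (illustrative only): a keybuf_pred_fn selects which
 * keys get buffered; picking dirty keys, as the writeback predicate
 * does, looks like this. example_* names are hypothetical.
 */
static bool example_dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}

static void example_refill(struct cache_set *c, struct keybuf *buf)
{
	/* Buffer dirty keys from buf->last_scanned to the end of the keyspace */
	bch_refill_keybuf(c, buf, &MAX_KEY, example_dirty_pred);
}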

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

/*
 * Drop buffered keys overlapping [start, end). Returns true if any
 * overlapping key was already claimed by a consumer (w->private set)
 * and so couldn't be dropped.
 */
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;

	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

/* Claim the first unclaimed buffered key, marking it in flight via w->private */
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}
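
/*
 * Usage sketch (illustrative only): before servicing a write, the
 * request path uses bch_keybuf_check_overlapping() to drop buffered
 * keys the write is about to make stale; a true return means an
 * overlapping key was already claimed and is in flight. example_*
 * names are hypothetical.
 */
static void example_invalidate_range(struct cache_set *c, struct bkey *k)
{
	if (bch_keybuf_check_overlapping(&c->moving_gc_keys,
					 &START_KEY(k), k))
		pr_debug("overlapping key already in flight");
}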

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}
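
/*
 * Usage sketch (illustrative only): the canonical consumer loop, in
 * the style of writeback and moving gc. bch_keybuf_init() leaves
 * last_scanned at MAX_KEY, so it is rewound before scanning;
 * example_dirty_pred() is the hypothetical predicate sketched above.
 */
static void example_scan_all(struct cache_set *c, struct keybuf *buf)
{
	struct keybuf_key *w;

	bch_keybuf_init(buf);
	buf->last_scanned = ZERO_KEY;

	while (1) {
		w = bch_keybuf_next_rescan(c, buf, &MAX_KEY,
					   example_dirty_pred);
		if (!w)
			break;

		/* w is now claimed (w->private != NULL): do I/O on w->key here */

		bch_keybuf_del(buf, w);
	}
}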