1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2cafe5635SKent Overstreet /* 3cafe5635SKent Overstreet * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com> 4cafe5635SKent Overstreet * 5cafe5635SKent Overstreet * Uses a block device as cache for other block devices; optimized for SSDs. 6cafe5635SKent Overstreet * All allocation is done in buckets, which should match the erase block size 7cafe5635SKent Overstreet * of the device. 8cafe5635SKent Overstreet * 9cafe5635SKent Overstreet * Buckets containing cached data are kept on a heap sorted by priority; 10cafe5635SKent Overstreet * bucket priority is increased on cache hit, and periodically all the buckets 11cafe5635SKent Overstreet * on the heap have their priority scaled down. This currently is just used as 12cafe5635SKent Overstreet * an LRU but in the future should allow for more intelligent heuristics. 13cafe5635SKent Overstreet * 14cafe5635SKent Overstreet * Buckets have an 8 bit counter; freeing is accomplished by incrementing the 15cafe5635SKent Overstreet * counter. Garbage collection is used to remove stale pointers. 16cafe5635SKent Overstreet * 17cafe5635SKent Overstreet * Indexing is done via a btree; nodes are not necessarily fully sorted, rather 18cafe5635SKent Overstreet * as keys are inserted we only sort the pages that have not yet been written. 19cafe5635SKent Overstreet * When garbage collection is run, we resort the entire node. 20cafe5635SKent Overstreet * 215fb94e9cSMauro Carvalho Chehab * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst. 22cafe5635SKent Overstreet */ 23cafe5635SKent Overstreet 24cafe5635SKent Overstreet #include "bcache.h" 25cafe5635SKent Overstreet #include "btree.h" 26cafe5635SKent Overstreet #include "debug.h" 2765d45231SKent Overstreet #include "extents.h" 28cafe5635SKent Overstreet 29cafe5635SKent Overstreet #include <linux/slab.h> 30cafe5635SKent Overstreet #include <linux/bitops.h> 31cafe5635SKent Overstreet #include <linux/hash.h> 3272a44517SKent Overstreet #include <linux/kthread.h> 33cd953ed0SGeert Uytterhoeven #include <linux/prefetch.h> 34cafe5635SKent Overstreet #include <linux/random.h> 35cafe5635SKent Overstreet #include <linux/rcupdate.h> 36e6017571SIngo Molnar #include <linux/sched/clock.h> 37b2d09103SIngo Molnar #include <linux/rculist.h> 38b2d09103SIngo Molnar 39cafe5635SKent Overstreet #include <trace/events/bcache.h> 40cafe5635SKent Overstreet 41cafe5635SKent Overstreet /* 42cafe5635SKent Overstreet * Todo: 43cafe5635SKent Overstreet * register_bcache: Return errors out to userspace correctly 44cafe5635SKent Overstreet * 45cafe5635SKent Overstreet * Writeback: don't undirty key until after a cache flush 46cafe5635SKent Overstreet * 47cafe5635SKent Overstreet * Create an iterator for key pointers 48cafe5635SKent Overstreet * 49cafe5635SKent Overstreet * On btree write error, mark bucket such that it won't be freed from the cache 50cafe5635SKent Overstreet * 51cafe5635SKent Overstreet * Journalling: 52cafe5635SKent Overstreet * Check for bad keys in replay 53cafe5635SKent Overstreet * Propagate barriers 54cafe5635SKent Overstreet * Refcount journal entries in journal_replay 55cafe5635SKent Overstreet * 56cafe5635SKent Overstreet * Garbage collection: 57cafe5635SKent Overstreet * Finish incremental gc 58cafe5635SKent Overstreet * Gc should free old UUIDs, data for invalid UUIDs 59cafe5635SKent Overstreet * 60cafe5635SKent Overstreet * Provide a way to list backing device UUIDs we have data cached for, and 61cafe5635SKent Overstreet * probably how long it's been since we've seen them, and a way to invalidate 62cafe5635SKent Overstreet * dirty data for devices that will never be attached again 63cafe5635SKent Overstreet * 64cafe5635SKent Overstreet * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so 65cafe5635SKent Overstreet * that based on that and how much dirty data we have we can keep writeback 66cafe5635SKent Overstreet * from being starved 67cafe5635SKent Overstreet * 68cafe5635SKent Overstreet * Add a tracepoint or somesuch to watch for writeback starvation 69cafe5635SKent Overstreet * 70cafe5635SKent Overstreet * When btree depth > 1 and splitting an interior node, we have to make sure 71cafe5635SKent Overstreet * alloc_bucket() cannot fail. This should be true but is not completely 72cafe5635SKent Overstreet * obvious. 73cafe5635SKent Overstreet * 74cafe5635SKent Overstreet * Plugging? 75cafe5635SKent Overstreet * 76cafe5635SKent Overstreet * If data write is less than hard sector size of ssd, round up offset in open 77cafe5635SKent Overstreet * bucket to the next whole sector 78cafe5635SKent Overstreet * 79cafe5635SKent Overstreet * Superblock needs to be fleshed out for multiple cache devices 80cafe5635SKent Overstreet * 81cafe5635SKent Overstreet * Add a sysfs tunable for the number of writeback IOs in flight 82cafe5635SKent Overstreet * 83cafe5635SKent Overstreet * Add a sysfs tunable for the number of open data buckets 84cafe5635SKent Overstreet * 85cafe5635SKent Overstreet * IO tracking: Can we track when one process is doing io on behalf of another? 86cafe5635SKent Overstreet * IO tracking: Don't use just an average, weigh more recent stuff higher 87cafe5635SKent Overstreet * 88cafe5635SKent Overstreet * Test module load/unload 89cafe5635SKent Overstreet */ 90cafe5635SKent Overstreet 91cafe5635SKent Overstreet #define MAX_NEED_GC 64 92cafe5635SKent Overstreet #define MAX_SAVE_PRIO 72 937f4a59deSTang Junhui #define MAX_GC_TIMES 100 945c25c4fcSTang Junhui #define MIN_GC_NODES 100 955c25c4fcSTang Junhui #define GC_SLEEP_MS 100 96cafe5635SKent Overstreet 97cafe5635SKent Overstreet #define PTR_DIRTY_BIT (((uint64_t) 1 << 36)) 98cafe5635SKent Overstreet 99cafe5635SKent Overstreet #define PTR_HASH(c, k) \ 100cafe5635SKent Overstreet (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0)) 101cafe5635SKent Overstreet 102df8e8970SKent Overstreet #define insert_lock(s, b) ((b)->level <= (s)->lock) 103df8e8970SKent Overstreet 104df8e8970SKent Overstreet /* 105df8e8970SKent Overstreet * These macros are for recursing down the btree - they handle the details of 106df8e8970SKent Overstreet * locking and looking up nodes in the cache for you. They're best treated as 107df8e8970SKent Overstreet * mere syntax when reading code that uses them. 108df8e8970SKent Overstreet * 109df8e8970SKent Overstreet * op->lock determines whether we take a read or a write lock at a given depth. 110df8e8970SKent Overstreet * If you've got a read lock and find that you need a write lock (i.e. you're 111df8e8970SKent Overstreet * going to have to split), set op->lock and return -EINTR; btree_root() will 112df8e8970SKent Overstreet * call you again and you'll have the correct lock. 113df8e8970SKent Overstreet */ 114df8e8970SKent Overstreet 115df8e8970SKent Overstreet /** 116df8e8970SKent Overstreet * btree - recurse down the btree on a specified key 117df8e8970SKent Overstreet * @fn: function to call, which will be passed the child node 118df8e8970SKent Overstreet * @key: key to recurse on 119df8e8970SKent Overstreet * @b: parent btree node 120df8e8970SKent Overstreet * @op: pointer to struct btree_op 121df8e8970SKent Overstreet */ 122df8e8970SKent Overstreet #define btree(fn, key, b, op, ...) \ 123df8e8970SKent Overstreet ({ \ 124df8e8970SKent Overstreet int _r, l = (b)->level - 1; \ 125df8e8970SKent Overstreet bool _w = l <= (op)->lock; \ 1262452cc89SSlava Pestov struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ 1272452cc89SSlava Pestov _w, b); \ 128df8e8970SKent Overstreet if (!IS_ERR(_child)) { \ 129df8e8970SKent Overstreet _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ 130df8e8970SKent Overstreet rw_unlock(_w, _child); \ 131df8e8970SKent Overstreet } else \ 132df8e8970SKent Overstreet _r = PTR_ERR(_child); \ 133df8e8970SKent Overstreet _r; \ 134df8e8970SKent Overstreet }) 135df8e8970SKent Overstreet 136df8e8970SKent Overstreet /** 137df8e8970SKent Overstreet * btree_root - call a function on the root of the btree 138df8e8970SKent Overstreet * @fn: function to call, which will be passed the child node 139df8e8970SKent Overstreet * @c: cache set 140df8e8970SKent Overstreet * @op: pointer to struct btree_op 141df8e8970SKent Overstreet */ 142df8e8970SKent Overstreet #define btree_root(fn, c, op, ...) \ 143df8e8970SKent Overstreet ({ \ 144df8e8970SKent Overstreet int _r = -EINTR; \ 145df8e8970SKent Overstreet do { \ 146df8e8970SKent Overstreet struct btree *_b = (c)->root; \ 147df8e8970SKent Overstreet bool _w = insert_lock(op, _b); \ 148df8e8970SKent Overstreet rw_lock(_w, _b, _b->level); \ 149df8e8970SKent Overstreet if (_b == (c)->root && \ 150df8e8970SKent Overstreet _w == insert_lock(op, _b)) { \ 151df8e8970SKent Overstreet _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ 152df8e8970SKent Overstreet } \ 153df8e8970SKent Overstreet rw_unlock(_w, _b); \ 1540a63b66dSKent Overstreet bch_cannibalize_unlock(c); \ 15578365411SKent Overstreet if (_r == -EINTR) \ 15678365411SKent Overstreet schedule(); \ 157df8e8970SKent Overstreet } while (_r == -EINTR); \ 158df8e8970SKent Overstreet \ 1590a63b66dSKent Overstreet finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ 160df8e8970SKent Overstreet _r; \ 161df8e8970SKent Overstreet }) 162df8e8970SKent Overstreet 163a85e968eSKent Overstreet static inline struct bset *write_block(struct btree *b) 164a85e968eSKent Overstreet { 165a85e968eSKent Overstreet return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); 166a85e968eSKent Overstreet } 167a85e968eSKent Overstreet 1682a285686SKent Overstreet static void bch_btree_init_next(struct btree *b) 1692a285686SKent Overstreet { 1702a285686SKent Overstreet /* If not a leaf node, always sort */ 1712a285686SKent Overstreet if (b->level && b->keys.nsets) 1722a285686SKent Overstreet bch_btree_sort(&b->keys, &b->c->sort); 1732a285686SKent Overstreet else 1742a285686SKent Overstreet bch_btree_sort_lazy(&b->keys, &b->c->sort); 1752a285686SKent Overstreet 1762a285686SKent Overstreet if (b->written < btree_blocks(b)) 1772a285686SKent Overstreet bch_bset_init_next(&b->keys, write_block(b), 1782a285686SKent Overstreet bset_magic(&b->c->sb)); 1792a285686SKent Overstreet 1802a285686SKent Overstreet } 1812a285686SKent Overstreet 182cafe5635SKent Overstreet /* Btree key manipulation */ 183cafe5635SKent Overstreet 1843a3b6a4eSKent Overstreet void bkey_put(struct cache_set *c, struct bkey *k) 185e7c590ebSKent Overstreet { 1866f10f7d1SColy Li unsigned int i; 187e7c590ebSKent Overstreet 188e7c590ebSKent Overstreet for (i = 0; i < KEY_PTRS(k); i++) 189e7c590ebSKent Overstreet if (ptr_available(c, k, i)) 190e7c590ebSKent Overstreet atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); 191e7c590ebSKent Overstreet } 192e7c590ebSKent Overstreet 193cafe5635SKent Overstreet /* Btree IO */ 194cafe5635SKent Overstreet 195cafe5635SKent Overstreet static uint64_t btree_csum_set(struct btree *b, struct bset *i) 196cafe5635SKent Overstreet { 197cafe5635SKent Overstreet uint64_t crc = b->key.ptr[0]; 198fafff81cSKent Overstreet void *data = (void *) i + 8, *end = bset_bkey_last(i); 199cafe5635SKent Overstreet 200169ef1cfSKent Overstreet crc = bch_crc64_update(crc, data, end - data); 201c19ed23aSKent Overstreet return crc ^ 0xffffffffffffffffULL; 202cafe5635SKent Overstreet } 203cafe5635SKent Overstreet 20478b77bf8SKent Overstreet void bch_btree_node_read_done(struct btree *b) 205cafe5635SKent Overstreet { 206cafe5635SKent Overstreet const char *err = "bad btree header"; 207ee811287SKent Overstreet struct bset *i = btree_bset_first(b); 20857943511SKent Overstreet struct btree_iter *iter; 209cafe5635SKent Overstreet 210d2f96f48SShenghui Wang /* 211d2f96f48SShenghui Wang * c->fill_iter can allocate an iterator with more memory space 212d2f96f48SShenghui Wang * than static MAX_BSETS. 213d2f96f48SShenghui Wang * See the comment arount cache_set->fill_iter. 214d2f96f48SShenghui Wang */ 215d19936a2SKent Overstreet iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); 21657943511SKent Overstreet iter->size = b->c->sb.bucket_size / b->c->sb.block_size; 217cafe5635SKent Overstreet iter->used = 0; 218cafe5635SKent Overstreet 219280481d0SKent Overstreet #ifdef CONFIG_BCACHE_DEBUG 220c052dd9aSKent Overstreet iter->b = &b->keys; 221280481d0SKent Overstreet #endif 222280481d0SKent Overstreet 22357943511SKent Overstreet if (!i->seq) 224cafe5635SKent Overstreet goto err; 225cafe5635SKent Overstreet 226cafe5635SKent Overstreet for (; 227a85e968eSKent Overstreet b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; 228cafe5635SKent Overstreet i = write_block(b)) { 229cafe5635SKent Overstreet err = "unsupported bset version"; 230cafe5635SKent Overstreet if (i->version > BCACHE_BSET_VERSION) 231cafe5635SKent Overstreet goto err; 232cafe5635SKent Overstreet 233cafe5635SKent Overstreet err = "bad btree header"; 234ee811287SKent Overstreet if (b->written + set_blocks(i, block_bytes(b->c)) > 235ee811287SKent Overstreet btree_blocks(b)) 236cafe5635SKent Overstreet goto err; 237cafe5635SKent Overstreet 238cafe5635SKent Overstreet err = "bad magic"; 23981ab4190SKent Overstreet if (i->magic != bset_magic(&b->c->sb)) 240cafe5635SKent Overstreet goto err; 241cafe5635SKent Overstreet 242cafe5635SKent Overstreet err = "bad checksum"; 243cafe5635SKent Overstreet switch (i->version) { 244cafe5635SKent Overstreet case 0: 245cafe5635SKent Overstreet if (i->csum != csum_set(i)) 246cafe5635SKent Overstreet goto err; 247cafe5635SKent Overstreet break; 248cafe5635SKent Overstreet case BCACHE_BSET_VERSION: 249cafe5635SKent Overstreet if (i->csum != btree_csum_set(b, i)) 250cafe5635SKent Overstreet goto err; 251cafe5635SKent Overstreet break; 252cafe5635SKent Overstreet } 253cafe5635SKent Overstreet 254cafe5635SKent Overstreet err = "empty set"; 255a85e968eSKent Overstreet if (i != b->keys.set[0].data && !i->keys) 256cafe5635SKent Overstreet goto err; 257cafe5635SKent Overstreet 258fafff81cSKent Overstreet bch_btree_iter_push(iter, i->start, bset_bkey_last(i)); 259cafe5635SKent Overstreet 260ee811287SKent Overstreet b->written += set_blocks(i, block_bytes(b->c)); 261cafe5635SKent Overstreet } 262cafe5635SKent Overstreet 263cafe5635SKent Overstreet err = "corrupted btree"; 264cafe5635SKent Overstreet for (i = write_block(b); 265a85e968eSKent Overstreet bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); 266cafe5635SKent Overstreet i = ((void *) i) + block_bytes(b->c)) 267a85e968eSKent Overstreet if (i->seq == b->keys.set[0].data->seq) 268cafe5635SKent Overstreet goto err; 269cafe5635SKent Overstreet 270a85e968eSKent Overstreet bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); 271cafe5635SKent Overstreet 272a85e968eSKent Overstreet i = b->keys.set[0].data; 273cafe5635SKent Overstreet err = "short btree key"; 274a85e968eSKent Overstreet if (b->keys.set[0].size && 275a85e968eSKent Overstreet bkey_cmp(&b->key, &b->keys.set[0].end) < 0) 276cafe5635SKent Overstreet goto err; 277cafe5635SKent Overstreet 278cafe5635SKent Overstreet if (b->written < btree_blocks(b)) 279a85e968eSKent Overstreet bch_bset_init_next(&b->keys, write_block(b), 280a85e968eSKent Overstreet bset_magic(&b->c->sb)); 281cafe5635SKent Overstreet out: 282d19936a2SKent Overstreet mempool_free(iter, &b->c->fill_iter); 28357943511SKent Overstreet return; 284cafe5635SKent Overstreet err: 285cafe5635SKent Overstreet set_btree_node_io_error(b); 28688b9f8c4SKent Overstreet bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", 287cafe5635SKent Overstreet err, PTR_BUCKET_NR(b->c, &b->key, 0), 28888b9f8c4SKent Overstreet bset_block_offset(b, i), i->keys); 289cafe5635SKent Overstreet goto out; 290cafe5635SKent Overstreet } 291cafe5635SKent Overstreet 2924246a0b6SChristoph Hellwig static void btree_node_read_endio(struct bio *bio) 293cafe5635SKent Overstreet { 29457943511SKent Overstreet struct closure *cl = bio->bi_private; 2951fae7cf0SColy Li 29657943511SKent Overstreet closure_put(cl); 29757943511SKent Overstreet } 298cafe5635SKent Overstreet 29978b77bf8SKent Overstreet static void bch_btree_node_read(struct btree *b) 30057943511SKent Overstreet { 30157943511SKent Overstreet uint64_t start_time = local_clock(); 30257943511SKent Overstreet struct closure cl; 30357943511SKent Overstreet struct bio *bio; 304cafe5635SKent Overstreet 305c37511b8SKent Overstreet trace_bcache_btree_read(b); 306c37511b8SKent Overstreet 30757943511SKent Overstreet closure_init_stack(&cl); 308cafe5635SKent Overstreet 30957943511SKent Overstreet bio = bch_bbio_alloc(b->c); 3104f024f37SKent Overstreet bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; 31157943511SKent Overstreet bio->bi_end_io = btree_node_read_endio; 31257943511SKent Overstreet bio->bi_private = &cl; 31370fd7614SChristoph Hellwig bio->bi_opf = REQ_OP_READ | REQ_META; 31457943511SKent Overstreet 315a85e968eSKent Overstreet bch_bio_map(bio, b->keys.set[0].data); 31657943511SKent Overstreet 31757943511SKent Overstreet bch_submit_bbio(bio, b->c, &b->key, 0); 31857943511SKent Overstreet closure_sync(&cl); 31957943511SKent Overstreet 3204e4cbee9SChristoph Hellwig if (bio->bi_status) 32157943511SKent Overstreet set_btree_node_io_error(b); 32257943511SKent Overstreet 32357943511SKent Overstreet bch_bbio_free(bio, b->c); 32457943511SKent Overstreet 32557943511SKent Overstreet if (btree_node_io_error(b)) 32657943511SKent Overstreet goto err; 32757943511SKent Overstreet 32857943511SKent Overstreet bch_btree_node_read_done(b); 32957943511SKent Overstreet bch_time_stats_update(&b->c->btree_read_time, start_time); 33057943511SKent Overstreet 33157943511SKent Overstreet return; 33257943511SKent Overstreet err: 33361cbd250SGeert Uytterhoeven bch_cache_set_error(b->c, "io error reading bucket %zu", 33457943511SKent Overstreet PTR_BUCKET_NR(b->c, &b->key, 0)); 335cafe5635SKent Overstreet } 336cafe5635SKent Overstreet 337cafe5635SKent Overstreet static void btree_complete_write(struct btree *b, struct btree_write *w) 338cafe5635SKent Overstreet { 339cafe5635SKent Overstreet if (w->prio_blocked && 340cafe5635SKent Overstreet !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) 341119ba0f8SKent Overstreet wake_up_allocators(b->c); 342cafe5635SKent Overstreet 343cafe5635SKent Overstreet if (w->journal) { 344cafe5635SKent Overstreet atomic_dec_bug(w->journal); 345cafe5635SKent Overstreet __closure_wake_up(&b->c->journal.wait); 346cafe5635SKent Overstreet } 347cafe5635SKent Overstreet 348cafe5635SKent Overstreet w->prio_blocked = 0; 349cafe5635SKent Overstreet w->journal = NULL; 350cafe5635SKent Overstreet } 351cafe5635SKent Overstreet 352cb7a583eSKent Overstreet static void btree_node_write_unlock(struct closure *cl) 353cb7a583eSKent Overstreet { 354cb7a583eSKent Overstreet struct btree *b = container_of(cl, struct btree, io); 355cb7a583eSKent Overstreet 356cb7a583eSKent Overstreet up(&b->io_mutex); 357cb7a583eSKent Overstreet } 358cb7a583eSKent Overstreet 35957943511SKent Overstreet static void __btree_node_write_done(struct closure *cl) 360cafe5635SKent Overstreet { 361cb7a583eSKent Overstreet struct btree *b = container_of(cl, struct btree, io); 362cafe5635SKent Overstreet struct btree_write *w = btree_prev_write(b); 363cafe5635SKent Overstreet 364cafe5635SKent Overstreet bch_bbio_free(b->bio, b->c); 365cafe5635SKent Overstreet b->bio = NULL; 366cafe5635SKent Overstreet btree_complete_write(b, w); 367cafe5635SKent Overstreet 368cafe5635SKent Overstreet if (btree_node_dirty(b)) 36956b30770SKent Overstreet schedule_delayed_work(&b->work, 30 * HZ); 370cafe5635SKent Overstreet 371cb7a583eSKent Overstreet closure_return_with_destructor(cl, btree_node_write_unlock); 372cafe5635SKent Overstreet } 373cafe5635SKent Overstreet 37457943511SKent Overstreet static void btree_node_write_done(struct closure *cl) 375cafe5635SKent Overstreet { 376cb7a583eSKent Overstreet struct btree *b = container_of(cl, struct btree, io); 377cafe5635SKent Overstreet 378491221f8SGuoqing Jiang bio_free_pages(b->bio); 37957943511SKent Overstreet __btree_node_write_done(cl); 380cafe5635SKent Overstreet } 381cafe5635SKent Overstreet 3824246a0b6SChristoph Hellwig static void btree_node_write_endio(struct bio *bio) 38357943511SKent Overstreet { 38457943511SKent Overstreet struct closure *cl = bio->bi_private; 385cb7a583eSKent Overstreet struct btree *b = container_of(cl, struct btree, io); 38657943511SKent Overstreet 3874e4cbee9SChristoph Hellwig if (bio->bi_status) 38857943511SKent Overstreet set_btree_node_io_error(b); 38957943511SKent Overstreet 3904e4cbee9SChristoph Hellwig bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); 39157943511SKent Overstreet closure_put(cl); 39257943511SKent Overstreet } 39357943511SKent Overstreet 39457943511SKent Overstreet static void do_btree_node_write(struct btree *b) 395cafe5635SKent Overstreet { 396cb7a583eSKent Overstreet struct closure *cl = &b->io; 397ee811287SKent Overstreet struct bset *i = btree_bset_last(b); 398cafe5635SKent Overstreet BKEY_PADDED(key) k; 399cafe5635SKent Overstreet 400cafe5635SKent Overstreet i->version = BCACHE_BSET_VERSION; 401cafe5635SKent Overstreet i->csum = btree_csum_set(b, i); 402cafe5635SKent Overstreet 40357943511SKent Overstreet BUG_ON(b->bio); 40457943511SKent Overstreet b->bio = bch_bbio_alloc(b->c); 40557943511SKent Overstreet 40657943511SKent Overstreet b->bio->bi_end_io = btree_node_write_endio; 407faadf0c9SKent Overstreet b->bio->bi_private = cl; 408ee811287SKent Overstreet b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); 40970fd7614SChristoph Hellwig b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; 410169ef1cfSKent Overstreet bch_bio_map(b->bio, i); 411cafe5635SKent Overstreet 412e49c7c37SKent Overstreet /* 413e49c7c37SKent Overstreet * If we're appending to a leaf node, we don't technically need FUA - 414e49c7c37SKent Overstreet * this write just needs to be persisted before the next journal write, 415e49c7c37SKent Overstreet * which will be marked FLUSH|FUA. 416e49c7c37SKent Overstreet * 417e49c7c37SKent Overstreet * Similarly if we're writing a new btree root - the pointer is going to 418e49c7c37SKent Overstreet * be in the next journal entry. 419e49c7c37SKent Overstreet * 420e49c7c37SKent Overstreet * But if we're writing a new btree node (that isn't a root) or 421e49c7c37SKent Overstreet * appending to a non leaf btree node, we need either FUA or a flush 422e49c7c37SKent Overstreet * when we write the parent with the new pointer. FUA is cheaper than a 423e49c7c37SKent Overstreet * flush, and writes appending to leaf nodes aren't blocking anything so 424e49c7c37SKent Overstreet * just make all btree node writes FUA to keep things sane. 425e49c7c37SKent Overstreet */ 426e49c7c37SKent Overstreet 427cafe5635SKent Overstreet bkey_copy(&k.key, &b->key); 428ee811287SKent Overstreet SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + 429a85e968eSKent Overstreet bset_sector_offset(&b->keys, i)); 430cafe5635SKent Overstreet 43125d8be77SMing Lei if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { 432cafe5635SKent Overstreet struct bio_vec *bv; 433f936b06aSChristoph Hellwig void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 4346dc4f100SMing Lei struct bvec_iter_all iter_all; 435cafe5635SKent Overstreet 4362b070cfeSChristoph Hellwig bio_for_each_segment_all(bv, b->bio, iter_all) { 437f936b06aSChristoph Hellwig memcpy(page_address(bv->bv_page), addr, PAGE_SIZE); 438f936b06aSChristoph Hellwig addr += PAGE_SIZE; 439f936b06aSChristoph Hellwig } 440cafe5635SKent Overstreet 441cafe5635SKent Overstreet bch_submit_bbio(b->bio, b->c, &k.key, 0); 442cafe5635SKent Overstreet 44357943511SKent Overstreet continue_at(cl, btree_node_write_done, NULL); 444cafe5635SKent Overstreet } else { 445b0d30981SColy Li /* 446b0d30981SColy Li * No problem for multipage bvec since the bio is 447b0d30981SColy Li * just allocated 448b0d30981SColy Li */ 449cafe5635SKent Overstreet b->bio->bi_vcnt = 0; 450169ef1cfSKent Overstreet bch_bio_map(b->bio, i); 451cafe5635SKent Overstreet 452cafe5635SKent Overstreet bch_submit_bbio(b->bio, b->c, &k.key, 0); 453cafe5635SKent Overstreet 454cafe5635SKent Overstreet closure_sync(cl); 455cb7a583eSKent Overstreet continue_at_nobarrier(cl, __btree_node_write_done, NULL); 456cafe5635SKent Overstreet } 457cafe5635SKent Overstreet } 458cafe5635SKent Overstreet 4592a285686SKent Overstreet void __bch_btree_node_write(struct btree *b, struct closure *parent) 460cafe5635SKent Overstreet { 461ee811287SKent Overstreet struct bset *i = btree_bset_last(b); 462cafe5635SKent Overstreet 4632a285686SKent Overstreet lockdep_assert_held(&b->write_lock); 4642a285686SKent Overstreet 465c37511b8SKent Overstreet trace_bcache_btree_write(b); 466c37511b8SKent Overstreet 467cafe5635SKent Overstreet BUG_ON(current->bio_list); 46857943511SKent Overstreet BUG_ON(b->written >= btree_blocks(b)); 46957943511SKent Overstreet BUG_ON(b->written && !i->keys); 470ee811287SKent Overstreet BUG_ON(btree_bset_first(b)->seq != i->seq); 471dc9d98d6SKent Overstreet bch_check_keys(&b->keys, "writing"); 472cafe5635SKent Overstreet 473cafe5635SKent Overstreet cancel_delayed_work(&b->work); 474cafe5635SKent Overstreet 47557943511SKent Overstreet /* If caller isn't waiting for write, parent refcount is cache set */ 476cb7a583eSKent Overstreet down(&b->io_mutex); 477cb7a583eSKent Overstreet closure_init(&b->io, parent ?: &b->c->cl); 47857943511SKent Overstreet 479cafe5635SKent Overstreet clear_bit(BTREE_NODE_dirty, &b->flags); 480cafe5635SKent Overstreet change_bit(BTREE_NODE_write_idx, &b->flags); 481cafe5635SKent Overstreet 48257943511SKent Overstreet do_btree_node_write(b); 483cafe5635SKent Overstreet 484ee811287SKent Overstreet atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, 485cafe5635SKent Overstreet &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); 486cafe5635SKent Overstreet 487a85e968eSKent Overstreet b->written += set_blocks(i, block_bytes(b->c)); 4882a285686SKent Overstreet } 489a85e968eSKent Overstreet 4902a285686SKent Overstreet void bch_btree_node_write(struct btree *b, struct closure *parent) 4912a285686SKent Overstreet { 4926f10f7d1SColy Li unsigned int nsets = b->keys.nsets; 4932a285686SKent Overstreet 4942a285686SKent Overstreet lockdep_assert_held(&b->lock); 4952a285686SKent Overstreet 4962a285686SKent Overstreet __bch_btree_node_write(b, parent); 497cafe5635SKent Overstreet 49878b77bf8SKent Overstreet /* 49978b77bf8SKent Overstreet * do verify if there was more than one set initially (i.e. we did a 50078b77bf8SKent Overstreet * sort) and we sorted down to a single set: 50178b77bf8SKent Overstreet */ 5022a285686SKent Overstreet if (nsets && !b->keys.nsets) 50378b77bf8SKent Overstreet bch_btree_verify(b); 50478b77bf8SKent Overstreet 5052a285686SKent Overstreet bch_btree_init_next(b); 506cafe5635SKent Overstreet } 507cafe5635SKent Overstreet 508f269af5aSKent Overstreet static void bch_btree_node_write_sync(struct btree *b) 509f269af5aSKent Overstreet { 510f269af5aSKent Overstreet struct closure cl; 511f269af5aSKent Overstreet 512f269af5aSKent Overstreet closure_init_stack(&cl); 5132a285686SKent Overstreet 5142a285686SKent Overstreet mutex_lock(&b->write_lock); 515f269af5aSKent Overstreet bch_btree_node_write(b, &cl); 5162a285686SKent Overstreet mutex_unlock(&b->write_lock); 5172a285686SKent Overstreet 518f269af5aSKent Overstreet closure_sync(&cl); 519f269af5aSKent Overstreet } 520f269af5aSKent Overstreet 52157943511SKent Overstreet static void btree_node_write_work(struct work_struct *w) 522cafe5635SKent Overstreet { 523cafe5635SKent Overstreet struct btree *b = container_of(to_delayed_work(w), struct btree, work); 524cafe5635SKent Overstreet 5252a285686SKent Overstreet mutex_lock(&b->write_lock); 526cafe5635SKent Overstreet if (btree_node_dirty(b)) 5272a285686SKent Overstreet __bch_btree_node_write(b, NULL); 5282a285686SKent Overstreet mutex_unlock(&b->write_lock); 529cafe5635SKent Overstreet } 530cafe5635SKent Overstreet 531c18536a7SKent Overstreet static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) 532cafe5635SKent Overstreet { 533ee811287SKent Overstreet struct bset *i = btree_bset_last(b); 534cafe5635SKent Overstreet struct btree_write *w = btree_current_write(b); 535cafe5635SKent Overstreet 5362a285686SKent Overstreet lockdep_assert_held(&b->write_lock); 5372a285686SKent Overstreet 53857943511SKent Overstreet BUG_ON(!b->written); 53957943511SKent Overstreet BUG_ON(!i->keys); 540cafe5635SKent Overstreet 54157943511SKent Overstreet if (!btree_node_dirty(b)) 54256b30770SKent Overstreet schedule_delayed_work(&b->work, 30 * HZ); 54357943511SKent Overstreet 544cafe5635SKent Overstreet set_btree_node_dirty(b); 545cafe5635SKent Overstreet 546c18536a7SKent Overstreet if (journal_ref) { 547cafe5635SKent Overstreet if (w->journal && 548c18536a7SKent Overstreet journal_pin_cmp(b->c, w->journal, journal_ref)) { 549cafe5635SKent Overstreet atomic_dec_bug(w->journal); 550cafe5635SKent Overstreet w->journal = NULL; 551cafe5635SKent Overstreet } 552cafe5635SKent Overstreet 553cafe5635SKent Overstreet if (!w->journal) { 554c18536a7SKent Overstreet w->journal = journal_ref; 555cafe5635SKent Overstreet atomic_inc(w->journal); 556cafe5635SKent Overstreet } 557cafe5635SKent Overstreet } 558cafe5635SKent Overstreet 559cafe5635SKent Overstreet /* Force write if set is too big */ 56057943511SKent Overstreet if (set_bytes(i) > PAGE_SIZE - 48 && 56157943511SKent Overstreet !current->bio_list) 56257943511SKent Overstreet bch_btree_node_write(b, NULL); 563cafe5635SKent Overstreet } 564cafe5635SKent Overstreet 565cafe5635SKent Overstreet /* 566cafe5635SKent Overstreet * Btree in memory cache - allocation/freeing 567cafe5635SKent Overstreet * mca -> memory cache 568cafe5635SKent Overstreet */ 569cafe5635SKent Overstreet 570cafe5635SKent Overstreet #define mca_reserve(c) (((c->root && c->root->level) \ 571cafe5635SKent Overstreet ? c->root->level : 1) * 8 + 16) 572cafe5635SKent Overstreet #define mca_can_free(c) \ 5730a63b66dSKent Overstreet max_t(int, 0, c->btree_cache_used - mca_reserve(c)) 574cafe5635SKent Overstreet 575cafe5635SKent Overstreet static void mca_data_free(struct btree *b) 576cafe5635SKent Overstreet { 577cb7a583eSKent Overstreet BUG_ON(b->io_mutex.count != 1); 578cafe5635SKent Overstreet 579a85e968eSKent Overstreet bch_btree_keys_free(&b->keys); 580cafe5635SKent Overstreet 5810a63b66dSKent Overstreet b->c->btree_cache_used--; 582ee811287SKent Overstreet list_move(&b->list, &b->c->btree_cache_freed); 583cafe5635SKent Overstreet } 584cafe5635SKent Overstreet 585cafe5635SKent Overstreet static void mca_bucket_free(struct btree *b) 586cafe5635SKent Overstreet { 587cafe5635SKent Overstreet BUG_ON(btree_node_dirty(b)); 588cafe5635SKent Overstreet 589cafe5635SKent Overstreet b->key.ptr[0] = 0; 590cafe5635SKent Overstreet hlist_del_init_rcu(&b->hash); 591cafe5635SKent Overstreet list_move(&b->list, &b->c->btree_cache_freeable); 592cafe5635SKent Overstreet } 593cafe5635SKent Overstreet 5946f10f7d1SColy Li static unsigned int btree_order(struct bkey *k) 595cafe5635SKent Overstreet { 596cafe5635SKent Overstreet return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1); 597cafe5635SKent Overstreet } 598cafe5635SKent Overstreet 599cafe5635SKent Overstreet static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) 600cafe5635SKent Overstreet { 601a85e968eSKent Overstreet if (!bch_btree_keys_alloc(&b->keys, 6026f10f7d1SColy Li max_t(unsigned int, 603cafe5635SKent Overstreet ilog2(b->c->btree_pages), 604ee811287SKent Overstreet btree_order(k)), 605ee811287SKent Overstreet gfp)) { 6060a63b66dSKent Overstreet b->c->btree_cache_used++; 607ee811287SKent Overstreet list_move(&b->list, &b->c->btree_cache); 608ee811287SKent Overstreet } else { 609ee811287SKent Overstreet list_move(&b->list, &b->c->btree_cache_freed); 610ee811287SKent Overstreet } 611cafe5635SKent Overstreet } 612cafe5635SKent Overstreet 613cafe5635SKent Overstreet static struct btree *mca_bucket_alloc(struct cache_set *c, 614cafe5635SKent Overstreet struct bkey *k, gfp_t gfp) 615cafe5635SKent Overstreet { 616bd9026c8SColy Li /* 617bd9026c8SColy Li * kzalloc() is necessary here for initialization, 618bd9026c8SColy Li * see code comments in bch_btree_keys_init(). 619bd9026c8SColy Li */ 620cafe5635SKent Overstreet struct btree *b = kzalloc(sizeof(struct btree), gfp); 6211fae7cf0SColy Li 622cafe5635SKent Overstreet if (!b) 623cafe5635SKent Overstreet return NULL; 624cafe5635SKent Overstreet 625cafe5635SKent Overstreet init_rwsem(&b->lock); 626cafe5635SKent Overstreet lockdep_set_novalidate_class(&b->lock); 6272a285686SKent Overstreet mutex_init(&b->write_lock); 6282a285686SKent Overstreet lockdep_set_novalidate_class(&b->write_lock); 629cafe5635SKent Overstreet INIT_LIST_HEAD(&b->list); 63057943511SKent Overstreet INIT_DELAYED_WORK(&b->work, btree_node_write_work); 631cafe5635SKent Overstreet b->c = c; 632cb7a583eSKent Overstreet sema_init(&b->io_mutex, 1); 633cafe5635SKent Overstreet 634cafe5635SKent Overstreet mca_data_alloc(b, k, gfp); 635cafe5635SKent Overstreet return b; 636cafe5635SKent Overstreet } 637cafe5635SKent Overstreet 6386f10f7d1SColy Li static int mca_reap(struct btree *b, unsigned int min_order, bool flush) 639cafe5635SKent Overstreet { 640e8e1d468SKent Overstreet struct closure cl; 641e8e1d468SKent Overstreet 642e8e1d468SKent Overstreet closure_init_stack(&cl); 643cafe5635SKent Overstreet lockdep_assert_held(&b->c->bucket_lock); 644cafe5635SKent Overstreet 645cafe5635SKent Overstreet if (!down_write_trylock(&b->lock)) 646cafe5635SKent Overstreet return -ENOMEM; 647cafe5635SKent Overstreet 648a85e968eSKent Overstreet BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); 649e8e1d468SKent Overstreet 650a85e968eSKent Overstreet if (b->keys.page_order < min_order) 651cb7a583eSKent Overstreet goto out_unlock; 652cb7a583eSKent Overstreet 653cb7a583eSKent Overstreet if (!flush) { 654cb7a583eSKent Overstreet if (btree_node_dirty(b)) 655cb7a583eSKent Overstreet goto out_unlock; 656cb7a583eSKent Overstreet 657cb7a583eSKent Overstreet if (down_trylock(&b->io_mutex)) 658cb7a583eSKent Overstreet goto out_unlock; 659cb7a583eSKent Overstreet up(&b->io_mutex); 660cafe5635SKent Overstreet } 661cafe5635SKent Overstreet 662*41508bb7SColy Li /* 663*41508bb7SColy Li * BTREE_NODE_dirty might be cleared in btree_flush_btree() by 664*41508bb7SColy Li * __bch_btree_node_write(). To avoid an extra flush, acquire 665*41508bb7SColy Li * b->write_lock before checking BTREE_NODE_dirty bit. 666*41508bb7SColy Li */ 6672a285686SKent Overstreet mutex_lock(&b->write_lock); 668f269af5aSKent Overstreet if (btree_node_dirty(b)) 6692a285686SKent Overstreet __bch_btree_node_write(b, &cl); 6702a285686SKent Overstreet mutex_unlock(&b->write_lock); 6712a285686SKent Overstreet 6722a285686SKent Overstreet closure_sync(&cl); 673cafe5635SKent Overstreet 674e8e1d468SKent Overstreet /* wait for any in flight btree write */ 675cb7a583eSKent Overstreet down(&b->io_mutex); 676cb7a583eSKent Overstreet up(&b->io_mutex); 677e8e1d468SKent Overstreet 678cafe5635SKent Overstreet return 0; 679cb7a583eSKent Overstreet out_unlock: 680cb7a583eSKent Overstreet rw_unlock(true, b); 681cb7a583eSKent Overstreet return -ENOMEM; 682cafe5635SKent Overstreet } 683cafe5635SKent Overstreet 6847dc19d5aSDave Chinner static unsigned long bch_mca_scan(struct shrinker *shrink, 6857dc19d5aSDave Chinner struct shrink_control *sc) 686cafe5635SKent Overstreet { 687cafe5635SKent Overstreet struct cache_set *c = container_of(shrink, struct cache_set, shrink); 688cafe5635SKent Overstreet struct btree *b, *t; 689cafe5635SKent Overstreet unsigned long i, nr = sc->nr_to_scan; 6907dc19d5aSDave Chinner unsigned long freed = 0; 691ca71df31STang Junhui unsigned int btree_cache_used; 692cafe5635SKent Overstreet 693cafe5635SKent Overstreet if (c->shrinker_disabled) 6947dc19d5aSDave Chinner return SHRINK_STOP; 695cafe5635SKent Overstreet 6960a63b66dSKent Overstreet if (c->btree_cache_alloc_lock) 6977dc19d5aSDave Chinner return SHRINK_STOP; 698cafe5635SKent Overstreet 699cafe5635SKent Overstreet /* Return -1 if we can't do anything right now */ 700a698e08cSKent Overstreet if (sc->gfp_mask & __GFP_IO) 701cafe5635SKent Overstreet mutex_lock(&c->bucket_lock); 702cafe5635SKent Overstreet else if (!mutex_trylock(&c->bucket_lock)) 703cafe5635SKent Overstreet return -1; 704cafe5635SKent Overstreet 70536c9ea98SKent Overstreet /* 70636c9ea98SKent Overstreet * It's _really_ critical that we don't free too many btree nodes - we 70736c9ea98SKent Overstreet * have to always leave ourselves a reserve. The reserve is how we 70836c9ea98SKent Overstreet * guarantee that allocating memory for a new btree node can always 70936c9ea98SKent Overstreet * succeed, so that inserting keys into the btree can always succeed and 71036c9ea98SKent Overstreet * IO can always make forward progress: 71136c9ea98SKent Overstreet */ 712cafe5635SKent Overstreet nr /= c->btree_pages; 713cafe5635SKent Overstreet nr = min_t(unsigned long, nr, mca_can_free(c)); 714cafe5635SKent Overstreet 715cafe5635SKent Overstreet i = 0; 716ca71df31STang Junhui btree_cache_used = c->btree_cache_used; 717cafe5635SKent Overstreet list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { 718ca71df31STang Junhui if (nr <= 0) 719ca71df31STang Junhui goto out; 720cafe5635SKent Overstreet 721cafe5635SKent Overstreet if (++i > 3 && 722e8e1d468SKent Overstreet !mca_reap(b, 0, false)) { 723cafe5635SKent Overstreet mca_data_free(b); 724cafe5635SKent Overstreet rw_unlock(true, b); 7257dc19d5aSDave Chinner freed++; 726cafe5635SKent Overstreet } 727ca71df31STang Junhui nr--; 728cafe5635SKent Overstreet } 729cafe5635SKent Overstreet 730ca71df31STang Junhui for (; (nr--) && i < btree_cache_used; i++) { 731cafe5635SKent Overstreet if (list_empty(&c->btree_cache)) 732cafe5635SKent Overstreet goto out; 733cafe5635SKent Overstreet 734cafe5635SKent Overstreet b = list_first_entry(&c->btree_cache, struct btree, list); 735cafe5635SKent Overstreet list_rotate_left(&c->btree_cache); 736cafe5635SKent Overstreet 737cafe5635SKent Overstreet if (!b->accessed && 738e8e1d468SKent Overstreet !mca_reap(b, 0, false)) { 739cafe5635SKent Overstreet mca_bucket_free(b); 740cafe5635SKent Overstreet mca_data_free(b); 741cafe5635SKent Overstreet rw_unlock(true, b); 7427dc19d5aSDave Chinner freed++; 743cafe5635SKent Overstreet } else 744cafe5635SKent Overstreet b->accessed = 0; 745cafe5635SKent Overstreet } 746cafe5635SKent Overstreet out: 747cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock); 748f3641c3aSTang Junhui return freed * c->btree_pages; 7497dc19d5aSDave Chinner } 7507dc19d5aSDave Chinner 7517dc19d5aSDave Chinner static unsigned long bch_mca_count(struct shrinker *shrink, 7527dc19d5aSDave Chinner struct shrink_control *sc) 7537dc19d5aSDave Chinner { 7547dc19d5aSDave Chinner struct cache_set *c = container_of(shrink, struct cache_set, shrink); 7557dc19d5aSDave Chinner 7567dc19d5aSDave Chinner if (c->shrinker_disabled) 7577dc19d5aSDave Chinner return 0; 7587dc19d5aSDave Chinner 7590a63b66dSKent Overstreet if (c->btree_cache_alloc_lock) 7607dc19d5aSDave Chinner return 0; 7617dc19d5aSDave Chinner 7627dc19d5aSDave Chinner return mca_can_free(c) * c->btree_pages; 763cafe5635SKent Overstreet } 764cafe5635SKent Overstreet 765cafe5635SKent Overstreet void bch_btree_cache_free(struct cache_set *c) 766cafe5635SKent Overstreet { 767cafe5635SKent Overstreet struct btree *b; 768cafe5635SKent Overstreet struct closure cl; 7691fae7cf0SColy Li 770cafe5635SKent Overstreet closure_init_stack(&cl); 771cafe5635SKent Overstreet 772cafe5635SKent Overstreet if (c->shrink.list.next) 773cafe5635SKent Overstreet unregister_shrinker(&c->shrink); 774cafe5635SKent Overstreet 775cafe5635SKent Overstreet mutex_lock(&c->bucket_lock); 776cafe5635SKent Overstreet 777cafe5635SKent Overstreet #ifdef CONFIG_BCACHE_DEBUG 778cafe5635SKent Overstreet if (c->verify_data) 779cafe5635SKent Overstreet list_move(&c->verify_data->list, &c->btree_cache); 78078b77bf8SKent Overstreet 78178b77bf8SKent Overstreet free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c))); 782cafe5635SKent Overstreet #endif 783cafe5635SKent Overstreet 784cafe5635SKent Overstreet list_splice(&c->btree_cache_freeable, 785cafe5635SKent Overstreet &c->btree_cache); 786cafe5635SKent Overstreet 787cafe5635SKent Overstreet while (!list_empty(&c->btree_cache)) { 788cafe5635SKent Overstreet b = list_first_entry(&c->btree_cache, struct btree, list); 789cafe5635SKent Overstreet 790*41508bb7SColy Li /* 791*41508bb7SColy Li * This function is called by cache_set_free(), no I/O 792*41508bb7SColy Li * request on cache now, it is unnecessary to acquire 793*41508bb7SColy Li * b->write_lock before clearing BTREE_NODE_dirty anymore. 794*41508bb7SColy Li */ 795e5ec5f47SColy Li if (btree_node_dirty(b)) { 796cafe5635SKent Overstreet btree_complete_write(b, btree_current_write(b)); 797cafe5635SKent Overstreet clear_bit(BTREE_NODE_dirty, &b->flags); 798e5ec5f47SColy Li } 799cafe5635SKent Overstreet mca_data_free(b); 800cafe5635SKent Overstreet } 801cafe5635SKent Overstreet 802cafe5635SKent Overstreet while (!list_empty(&c->btree_cache_freed)) { 803cafe5635SKent Overstreet b = list_first_entry(&c->btree_cache_freed, 804cafe5635SKent Overstreet struct btree, list); 805cafe5635SKent Overstreet list_del(&b->list); 806cafe5635SKent Overstreet cancel_delayed_work_sync(&b->work); 807cafe5635SKent Overstreet kfree(b); 808cafe5635SKent Overstreet } 809cafe5635SKent Overstreet 810cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock); 811cafe5635SKent Overstreet } 812cafe5635SKent Overstreet 813cafe5635SKent Overstreet int bch_btree_cache_alloc(struct cache_set *c) 814cafe5635SKent Overstreet { 8156f10f7d1SColy Li unsigned int i; 816cafe5635SKent Overstreet 817cafe5635SKent Overstreet for (i = 0; i < mca_reserve(c); i++) 81872a44517SKent Overstreet if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) 81972a44517SKent Overstreet return -ENOMEM; 820cafe5635SKent Overstreet 821cafe5635SKent Overstreet list_splice_init(&c->btree_cache, 822cafe5635SKent Overstreet &c->btree_cache_freeable); 823cafe5635SKent Overstreet 824cafe5635SKent Overstreet #ifdef CONFIG_BCACHE_DEBUG 825cafe5635SKent Overstreet mutex_init(&c->verify_lock); 826cafe5635SKent Overstreet 82778b77bf8SKent Overstreet c->verify_ondisk = (void *) 82878b77bf8SKent Overstreet __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); 82978b77bf8SKent Overstreet 830cafe5635SKent Overstreet c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); 831cafe5635SKent Overstreet 832cafe5635SKent Overstreet if (c->verify_data && 833a85e968eSKent Overstreet c->verify_data->keys.set->data) 834cafe5635SKent Overstreet list_del_init(&c->verify_data->list); 835cafe5635SKent Overstreet else 836cafe5635SKent Overstreet c->verify_data = NULL; 837cafe5635SKent Overstreet #endif 838cafe5635SKent Overstreet 8397dc19d5aSDave Chinner c->shrink.count_objects = bch_mca_count; 8407dc19d5aSDave Chinner c->shrink.scan_objects = bch_mca_scan; 841cafe5635SKent Overstreet c->shrink.seeks = 4; 842cafe5635SKent Overstreet c->shrink.batch = c->btree_pages * 2; 8436c4ca1e3SMichael Lyle 8446c4ca1e3SMichael Lyle if (register_shrinker(&c->shrink)) 8456c4ca1e3SMichael Lyle pr_warn("bcache: %s: could not register shrinker", 8466c4ca1e3SMichael Lyle __func__); 847cafe5635SKent Overstreet 848cafe5635SKent Overstreet return 0; 849cafe5635SKent Overstreet } 850cafe5635SKent Overstreet 851cafe5635SKent Overstreet /* Btree in memory cache - hash table */ 852cafe5635SKent Overstreet 853cafe5635SKent Overstreet static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) 854cafe5635SKent Overstreet { 855cafe5635SKent Overstreet return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; 856cafe5635SKent Overstreet } 857cafe5635SKent Overstreet 858cafe5635SKent Overstreet static struct btree *mca_find(struct cache_set *c, struct bkey *k) 859cafe5635SKent Overstreet { 860cafe5635SKent Overstreet struct btree *b; 861cafe5635SKent Overstreet 862cafe5635SKent Overstreet rcu_read_lock(); 863cafe5635SKent Overstreet hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) 864cafe5635SKent Overstreet if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) 865cafe5635SKent Overstreet goto out; 866cafe5635SKent Overstreet b = NULL; 867cafe5635SKent Overstreet out: 868cafe5635SKent Overstreet rcu_read_unlock(); 869cafe5635SKent Overstreet return b; 870cafe5635SKent Overstreet } 871cafe5635SKent Overstreet 8720a63b66dSKent Overstreet static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) 8730a63b66dSKent Overstreet { 8740a63b66dSKent Overstreet struct task_struct *old; 8750a63b66dSKent Overstreet 8760a63b66dSKent Overstreet old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); 8770a63b66dSKent Overstreet if (old && old != current) { 8780a63b66dSKent Overstreet if (op) 8790a63b66dSKent Overstreet prepare_to_wait(&c->btree_cache_wait, &op->wait, 8800a63b66dSKent Overstreet TASK_UNINTERRUPTIBLE); 8810a63b66dSKent Overstreet return -EINTR; 8820a63b66dSKent Overstreet } 8830a63b66dSKent Overstreet 8840a63b66dSKent Overstreet return 0; 8850a63b66dSKent Overstreet } 8860a63b66dSKent Overstreet 8870a63b66dSKent Overstreet static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, 8880a63b66dSKent Overstreet struct bkey *k) 889cafe5635SKent Overstreet { 890e8e1d468SKent Overstreet struct btree *b; 891cafe5635SKent Overstreet 892c37511b8SKent Overstreet trace_bcache_btree_cache_cannibalize(c); 893c37511b8SKent Overstreet 8940a63b66dSKent Overstreet if (mca_cannibalize_lock(c, op)) 8950a63b66dSKent Overstreet return ERR_PTR(-EINTR); 896cafe5635SKent Overstreet 897e8e1d468SKent Overstreet list_for_each_entry_reverse(b, &c->btree_cache, list) 898e8e1d468SKent Overstreet if (!mca_reap(b, btree_order(k), false)) 899e8e1d468SKent Overstreet return b; 900cafe5635SKent Overstreet 901e8e1d468SKent Overstreet list_for_each_entry_reverse(b, &c->btree_cache, list) 902e8e1d468SKent Overstreet if (!mca_reap(b, btree_order(k), true)) 903e8e1d468SKent Overstreet return b; 904e8e1d468SKent Overstreet 9050a63b66dSKent Overstreet WARN(1, "btree cache cannibalize failed\n"); 906e8e1d468SKent Overstreet return ERR_PTR(-ENOMEM); 907cafe5635SKent Overstreet } 908cafe5635SKent Overstreet 909cafe5635SKent Overstreet /* 910cafe5635SKent Overstreet * We can only have one thread cannibalizing other cached btree nodes at a time, 911cafe5635SKent Overstreet * or we'll deadlock. We use an open coded mutex to ensure that, which a 912cafe5635SKent Overstreet * cannibalize_bucket() will take. This means every time we unlock the root of 913cafe5635SKent Overstreet * the btree, we need to release this lock if we have it held. 914cafe5635SKent Overstreet */ 915df8e8970SKent Overstreet static void bch_cannibalize_unlock(struct cache_set *c) 916cafe5635SKent Overstreet { 9170a63b66dSKent Overstreet if (c->btree_cache_alloc_lock == current) { 9180a63b66dSKent Overstreet c->btree_cache_alloc_lock = NULL; 9190a63b66dSKent Overstreet wake_up(&c->btree_cache_wait); 920cafe5635SKent Overstreet } 921cafe5635SKent Overstreet } 922cafe5635SKent Overstreet 9230a63b66dSKent Overstreet static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, 9240a63b66dSKent Overstreet struct bkey *k, int level) 925cafe5635SKent Overstreet { 926cafe5635SKent Overstreet struct btree *b; 927cafe5635SKent Overstreet 928e8e1d468SKent Overstreet BUG_ON(current->bio_list); 929e8e1d468SKent Overstreet 930cafe5635SKent Overstreet lockdep_assert_held(&c->bucket_lock); 931cafe5635SKent Overstreet 932cafe5635SKent Overstreet if (mca_find(c, k)) 933cafe5635SKent Overstreet return NULL; 934cafe5635SKent Overstreet 935cafe5635SKent Overstreet /* btree_free() doesn't free memory; it sticks the node on the end of 936cafe5635SKent Overstreet * the list. Check if there's any freed nodes there: 937cafe5635SKent Overstreet */ 938cafe5635SKent Overstreet list_for_each_entry(b, &c->btree_cache_freeable, list) 939e8e1d468SKent Overstreet if (!mca_reap(b, btree_order(k), false)) 940cafe5635SKent Overstreet goto out; 941cafe5635SKent Overstreet 942cafe5635SKent Overstreet /* We never free struct btree itself, just the memory that holds the on 943cafe5635SKent Overstreet * disk node. Check the freed list before allocating a new one: 944cafe5635SKent Overstreet */ 945cafe5635SKent Overstreet list_for_each_entry(b, &c->btree_cache_freed, list) 946e8e1d468SKent Overstreet if (!mca_reap(b, 0, false)) { 947cafe5635SKent Overstreet mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); 948a85e968eSKent Overstreet if (!b->keys.set[0].data) 949cafe5635SKent Overstreet goto err; 950cafe5635SKent Overstreet else 951cafe5635SKent Overstreet goto out; 952cafe5635SKent Overstreet } 953cafe5635SKent Overstreet 954cafe5635SKent Overstreet b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); 955cafe5635SKent Overstreet if (!b) 956cafe5635SKent Overstreet goto err; 957cafe5635SKent Overstreet 958cafe5635SKent Overstreet BUG_ON(!down_write_trylock(&b->lock)); 959a85e968eSKent Overstreet if (!b->keys.set->data) 960cafe5635SKent Overstreet goto err; 961cafe5635SKent Overstreet out: 962cb7a583eSKent Overstreet BUG_ON(b->io_mutex.count != 1); 963cafe5635SKent Overstreet 964cafe5635SKent Overstreet bkey_copy(&b->key, k); 965cafe5635SKent Overstreet list_move(&b->list, &c->btree_cache); 966cafe5635SKent Overstreet hlist_del_init_rcu(&b->hash); 967cafe5635SKent Overstreet hlist_add_head_rcu(&b->hash, mca_hash(c, k)); 968cafe5635SKent Overstreet 969cafe5635SKent Overstreet lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); 970d6fd3b11SKent Overstreet b->parent = (void *) ~0UL; 971a85e968eSKent Overstreet b->flags = 0; 972a85e968eSKent Overstreet b->written = 0; 973a85e968eSKent Overstreet b->level = level; 974cafe5635SKent Overstreet 97565d45231SKent Overstreet if (!b->level) 976a85e968eSKent Overstreet bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, 977a85e968eSKent Overstreet &b->c->expensive_debug_checks); 97865d45231SKent Overstreet else 979a85e968eSKent Overstreet bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, 980a85e968eSKent Overstreet &b->c->expensive_debug_checks); 981cafe5635SKent Overstreet 982cafe5635SKent Overstreet return b; 983cafe5635SKent Overstreet err: 984cafe5635SKent Overstreet if (b) 985cafe5635SKent Overstreet rw_unlock(true, b); 986cafe5635SKent Overstreet 9870a63b66dSKent Overstreet b = mca_cannibalize(c, op, k); 988cafe5635SKent Overstreet if (!IS_ERR(b)) 989cafe5635SKent Overstreet goto out; 990cafe5635SKent Overstreet 991cafe5635SKent Overstreet return b; 992cafe5635SKent Overstreet } 993cafe5635SKent Overstreet 99447344e33SBart Van Assche /* 995cafe5635SKent Overstreet * bch_btree_node_get - find a btree node in the cache and lock it, reading it 996cafe5635SKent Overstreet * in from disk if necessary. 997cafe5635SKent Overstreet * 998b54d6934SKent Overstreet * If IO is necessary and running under generic_make_request, returns -EAGAIN. 999cafe5635SKent Overstreet * 1000cafe5635SKent Overstreet * The btree node will have either a read or a write lock held, depending on 1001cafe5635SKent Overstreet * level and op->lock. 1002cafe5635SKent Overstreet */ 10030a63b66dSKent Overstreet struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, 10042452cc89SSlava Pestov struct bkey *k, int level, bool write, 10052452cc89SSlava Pestov struct btree *parent) 1006cafe5635SKent Overstreet { 1007cafe5635SKent Overstreet int i = 0; 1008cafe5635SKent Overstreet struct btree *b; 1009cafe5635SKent Overstreet 1010cafe5635SKent Overstreet BUG_ON(level < 0); 1011cafe5635SKent Overstreet retry: 1012cafe5635SKent Overstreet b = mca_find(c, k); 1013cafe5635SKent Overstreet 1014cafe5635SKent Overstreet if (!b) { 101557943511SKent Overstreet if (current->bio_list) 101657943511SKent Overstreet return ERR_PTR(-EAGAIN); 101757943511SKent Overstreet 1018cafe5635SKent Overstreet mutex_lock(&c->bucket_lock); 10190a63b66dSKent Overstreet b = mca_alloc(c, op, k, level); 1020cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock); 1021cafe5635SKent Overstreet 1022cafe5635SKent Overstreet if (!b) 1023cafe5635SKent Overstreet goto retry; 1024cafe5635SKent Overstreet if (IS_ERR(b)) 1025cafe5635SKent Overstreet return b; 1026cafe5635SKent Overstreet 102757943511SKent Overstreet bch_btree_node_read(b); 1028cafe5635SKent Overstreet 1029cafe5635SKent Overstreet if (!write) 1030cafe5635SKent Overstreet downgrade_write(&b->lock); 1031cafe5635SKent Overstreet } else { 1032cafe5635SKent Overstreet rw_lock(write, b, level); 1033cafe5635SKent Overstreet if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { 1034cafe5635SKent Overstreet rw_unlock(write, b); 1035cafe5635SKent Overstreet goto retry; 1036cafe5635SKent Overstreet } 1037cafe5635SKent Overstreet BUG_ON(b->level != level); 1038cafe5635SKent Overstreet } 1039cafe5635SKent Overstreet 1040c2e8dcf7SColy Li if (btree_node_io_error(b)) { 1041c2e8dcf7SColy Li rw_unlock(write, b); 1042c2e8dcf7SColy Li return ERR_PTR(-EIO); 1043c2e8dcf7SColy Li } 1044c2e8dcf7SColy Li 1045c2e8dcf7SColy Li BUG_ON(!b->written); 1046c2e8dcf7SColy Li 10472452cc89SSlava Pestov b->parent = parent; 1048cafe5635SKent Overstreet b->accessed = 1; 1049cafe5635SKent Overstreet 1050a85e968eSKent Overstreet for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { 1051a85e968eSKent Overstreet prefetch(b->keys.set[i].tree); 1052a85e968eSKent Overstreet prefetch(b->keys.set[i].data); 1053cafe5635SKent Overstreet } 1054cafe5635SKent Overstreet 1055a85e968eSKent Overstreet for (; i <= b->keys.nsets; i++) 1056a85e968eSKent Overstreet prefetch(b->keys.set[i].data); 1057cafe5635SKent Overstreet 1058cafe5635SKent Overstreet return b; 1059cafe5635SKent Overstreet } 1060cafe5635SKent Overstreet 10612452cc89SSlava Pestov static void btree_node_prefetch(struct btree *parent, struct bkey *k) 1062cafe5635SKent Overstreet { 1063cafe5635SKent Overstreet struct btree *b; 1064cafe5635SKent Overstreet 10652452cc89SSlava Pestov mutex_lock(&parent->c->bucket_lock); 10662452cc89SSlava Pestov b = mca_alloc(parent->c, NULL, k, parent->level - 1); 10672452cc89SSlava Pestov mutex_unlock(&parent->c->bucket_lock); 1068cafe5635SKent Overstreet 1069cafe5635SKent Overstreet if (!IS_ERR_OR_NULL(b)) { 10702452cc89SSlava Pestov b->parent = parent; 107157943511SKent Overstreet bch_btree_node_read(b); 1072cafe5635SKent Overstreet rw_unlock(true, b); 1073cafe5635SKent Overstreet } 1074cafe5635SKent Overstreet } 1075cafe5635SKent Overstreet 1076cafe5635SKent Overstreet /* Btree alloc */ 1077cafe5635SKent Overstreet 1078e8e1d468SKent Overstreet static void btree_node_free(struct btree *b) 1079cafe5635SKent Overstreet { 1080c37511b8SKent Overstreet trace_bcache_btree_node_free(b); 1081c37511b8SKent Overstreet 1082cafe5635SKent Overstreet BUG_ON(b == b->c->root); 1083cafe5635SKent Overstreet 10842a285686SKent Overstreet mutex_lock(&b->write_lock); 10852a285686SKent Overstreet 1086e5ec5f47SColy Li if (btree_node_dirty(b)) { 1087cafe5635SKent Overstreet btree_complete_write(b, btree_current_write(b)); 1088cafe5635SKent Overstreet clear_bit(BTREE_NODE_dirty, &b->flags); 1089e5ec5f47SColy Li } 1090cafe5635SKent Overstreet 10912a285686SKent Overstreet mutex_unlock(&b->write_lock); 10922a285686SKent Overstreet 1093cafe5635SKent Overstreet cancel_delayed_work(&b->work); 1094cafe5635SKent Overstreet 1095cafe5635SKent Overstreet mutex_lock(&b->c->bucket_lock); 1096cafe5635SKent Overstreet bch_bucket_free(b->c, &b->key); 1097cafe5635SKent Overstreet mca_bucket_free(b); 1098cafe5635SKent Overstreet mutex_unlock(&b->c->bucket_lock); 1099cafe5635SKent Overstreet } 1100cafe5635SKent Overstreet 1101c5aa4a31SSlava Pestov struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, 11022452cc89SSlava Pestov int level, bool wait, 11032452cc89SSlava Pestov struct btree *parent) 1104cafe5635SKent Overstreet { 1105cafe5635SKent Overstreet BKEY_PADDED(key) k; 1106cafe5635SKent Overstreet struct btree *b = ERR_PTR(-EAGAIN); 1107cafe5635SKent Overstreet 1108cafe5635SKent Overstreet mutex_lock(&c->bucket_lock); 1109cafe5635SKent Overstreet retry: 1110c5aa4a31SSlava Pestov if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait)) 1111cafe5635SKent Overstreet goto err; 1112cafe5635SKent Overstreet 11133a3b6a4eSKent Overstreet bkey_put(c, &k.key); 1114cafe5635SKent Overstreet SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); 1115cafe5635SKent Overstreet 11160a63b66dSKent Overstreet b = mca_alloc(c, op, &k.key, level); 1117cafe5635SKent Overstreet if (IS_ERR(b)) 1118cafe5635SKent Overstreet goto err_free; 1119cafe5635SKent Overstreet 1120cafe5635SKent Overstreet if (!b) { 1121b1a67b0fSKent Overstreet cache_bug(c, 1122b1a67b0fSKent Overstreet "Tried to allocate bucket that was in btree cache"); 1123cafe5635SKent Overstreet goto retry; 1124cafe5635SKent Overstreet } 1125cafe5635SKent Overstreet 1126cafe5635SKent Overstreet b->accessed = 1; 11272452cc89SSlava Pestov b->parent = parent; 1128a85e968eSKent Overstreet bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); 1129cafe5635SKent Overstreet 1130cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock); 1131c37511b8SKent Overstreet 1132c37511b8SKent Overstreet trace_bcache_btree_node_alloc(b); 1133cafe5635SKent Overstreet return b; 1134cafe5635SKent Overstreet err_free: 1135cafe5635SKent Overstreet bch_bucket_free(c, &k.key); 1136cafe5635SKent Overstreet err: 1137cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock); 1138c37511b8SKent Overstreet 1139913dc33fSSlava Pestov trace_bcache_btree_node_alloc_fail(c); 1140cafe5635SKent Overstreet return b; 1141cafe5635SKent Overstreet } 1142cafe5635SKent Overstreet 1143c5aa4a31SSlava Pestov static struct btree *bch_btree_node_alloc(struct cache_set *c, 11442452cc89SSlava Pestov struct btree_op *op, int level, 11452452cc89SSlava Pestov struct btree *parent) 1146c5aa4a31SSlava Pestov { 11472452cc89SSlava Pestov return __bch_btree_node_alloc(c, op, level, op != NULL, parent); 1148c5aa4a31SSlava Pestov } 1149c5aa4a31SSlava Pestov 11500a63b66dSKent Overstreet static struct btree *btree_node_alloc_replacement(struct btree *b, 11510a63b66dSKent Overstreet struct btree_op *op) 1152cafe5635SKent Overstreet { 11532452cc89SSlava Pestov struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); 11541fae7cf0SColy Li 115567539e85SKent Overstreet if (!IS_ERR_OR_NULL(n)) { 11562a285686SKent Overstreet mutex_lock(&n->write_lock); 115789ebb4a2SKent Overstreet bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); 115867539e85SKent Overstreet bkey_copy_key(&n->key, &b->key); 11592a285686SKent Overstreet mutex_unlock(&n->write_lock); 116067539e85SKent Overstreet } 1161cafe5635SKent Overstreet 1162cafe5635SKent Overstreet return n; 1163cafe5635SKent Overstreet } 1164cafe5635SKent Overstreet 11658835c123SKent Overstreet static void make_btree_freeing_key(struct btree *b, struct bkey *k) 11668835c123SKent Overstreet { 11676f10f7d1SColy Li unsigned int i; 11688835c123SKent Overstreet 116905335cffSKent Overstreet mutex_lock(&b->c->bucket_lock); 117005335cffSKent Overstreet 117105335cffSKent Overstreet atomic_inc(&b->c->prio_blocked); 117205335cffSKent Overstreet 11738835c123SKent Overstreet bkey_copy(k, &b->key); 11748835c123SKent Overstreet bkey_copy_key(k, &ZERO_KEY); 11758835c123SKent Overstreet 117605335cffSKent Overstreet for (i = 0; i < KEY_PTRS(k); i++) 117705335cffSKent Overstreet SET_PTR_GEN(k, i, 117805335cffSKent Overstreet bch_inc_gen(PTR_CACHE(b->c, &b->key, i), 117905335cffSKent Overstreet PTR_BUCKET(b->c, &b->key, i))); 11808835c123SKent Overstreet 118105335cffSKent Overstreet mutex_unlock(&b->c->bucket_lock); 11828835c123SKent Overstreet } 11838835c123SKent Overstreet 118478365411SKent Overstreet static int btree_check_reserve(struct btree *b, struct btree_op *op) 118578365411SKent Overstreet { 118678365411SKent Overstreet struct cache_set *c = b->c; 118778365411SKent Overstreet struct cache *ca; 11886f10f7d1SColy Li unsigned int i, reserve = (c->root->level - b->level) * 2 + 1; 118978365411SKent Overstreet 119078365411SKent Overstreet mutex_lock(&c->bucket_lock); 119178365411SKent Overstreet 119278365411SKent Overstreet for_each_cache(ca, c, i) 119378365411SKent Overstreet if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { 119478365411SKent Overstreet if (op) 11950a63b66dSKent Overstreet prepare_to_wait(&c->btree_cache_wait, &op->wait, 119678365411SKent Overstreet TASK_UNINTERRUPTIBLE); 11970a63b66dSKent Overstreet mutex_unlock(&c->bucket_lock); 11980a63b66dSKent Overstreet return -EINTR; 119978365411SKent Overstreet } 120078365411SKent Overstreet 120178365411SKent Overstreet mutex_unlock(&c->bucket_lock); 12020a63b66dSKent Overstreet 12030a63b66dSKent Overstreet return mca_cannibalize_lock(b->c, op); 120478365411SKent Overstreet } 120578365411SKent Overstreet 1206cafe5635SKent Overstreet /* Garbage collection */ 1207cafe5635SKent Overstreet 1208487dded8SKent Overstreet static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, 1209487dded8SKent Overstreet struct bkey *k) 1210cafe5635SKent Overstreet { 1211cafe5635SKent Overstreet uint8_t stale = 0; 12126f10f7d1SColy Li unsigned int i; 1213cafe5635SKent Overstreet struct bucket *g; 1214cafe5635SKent Overstreet 1215cafe5635SKent Overstreet /* 1216cafe5635SKent Overstreet * ptr_invalid() can't return true for the keys that mark btree nodes as 1217cafe5635SKent Overstreet * freed, but since ptr_bad() returns true we'll never actually use them 1218cafe5635SKent Overstreet * for anything and thus we don't want mark their pointers here 1219cafe5635SKent Overstreet */ 1220cafe5635SKent Overstreet if (!bkey_cmp(k, &ZERO_KEY)) 1221cafe5635SKent Overstreet return stale; 1222cafe5635SKent Overstreet 1223cafe5635SKent Overstreet for (i = 0; i < KEY_PTRS(k); i++) { 1224cafe5635SKent Overstreet if (!ptr_available(c, k, i)) 1225cafe5635SKent Overstreet continue; 1226cafe5635SKent Overstreet 1227cafe5635SKent Overstreet g = PTR_BUCKET(c, k, i); 1228cafe5635SKent Overstreet 12293a2fd9d5SKent Overstreet if (gen_after(g->last_gc, PTR_GEN(k, i))) 12303a2fd9d5SKent Overstreet g->last_gc = PTR_GEN(k, i); 1231cafe5635SKent Overstreet 1232cafe5635SKent Overstreet if (ptr_stale(c, k, i)) { 1233cafe5635SKent Overstreet stale = max(stale, ptr_stale(c, k, i)); 1234cafe5635SKent Overstreet continue; 1235cafe5635SKent Overstreet } 1236cafe5635SKent Overstreet 1237cafe5635SKent Overstreet cache_bug_on(GC_MARK(g) && 1238cafe5635SKent Overstreet (GC_MARK(g) == GC_MARK_METADATA) != (level != 0), 1239cafe5635SKent Overstreet c, "inconsistent ptrs: mark = %llu, level = %i", 1240cafe5635SKent Overstreet GC_MARK(g), level); 1241cafe5635SKent Overstreet 1242cafe5635SKent Overstreet if (level) 1243cafe5635SKent Overstreet SET_GC_MARK(g, GC_MARK_METADATA); 1244cafe5635SKent Overstreet else if (KEY_DIRTY(k)) 1245cafe5635SKent Overstreet SET_GC_MARK(g, GC_MARK_DIRTY); 12464fe6a816SKent Overstreet else if (!GC_MARK(g)) 12474fe6a816SKent Overstreet SET_GC_MARK(g, GC_MARK_RECLAIMABLE); 1248cafe5635SKent Overstreet 1249cafe5635SKent Overstreet /* guard against overflow */ 12506f10f7d1SColy Li SET_GC_SECTORS_USED(g, min_t(unsigned int, 1251cafe5635SKent Overstreet GC_SECTORS_USED(g) + KEY_SIZE(k), 125294717447SDarrick J. Wong MAX_GC_SECTORS_USED)); 1253cafe5635SKent Overstreet 1254cafe5635SKent Overstreet BUG_ON(!GC_SECTORS_USED(g)); 1255cafe5635SKent Overstreet } 1256cafe5635SKent Overstreet 1257cafe5635SKent Overstreet return stale; 1258cafe5635SKent Overstreet } 1259cafe5635SKent Overstreet 1260cafe5635SKent Overstreet #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) 1261cafe5635SKent Overstreet 1262487dded8SKent Overstreet void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) 1263487dded8SKent Overstreet { 12646f10f7d1SColy Li unsigned int i; 1265487dded8SKent Overstreet 1266487dded8SKent Overstreet for (i = 0; i < KEY_PTRS(k); i++) 1267487dded8SKent Overstreet if (ptr_available(c, k, i) && 1268487dded8SKent Overstreet !ptr_stale(c, k, i)) { 1269487dded8SKent Overstreet struct bucket *b = PTR_BUCKET(c, k, i); 1270487dded8SKent Overstreet 1271487dded8SKent Overstreet b->gen = PTR_GEN(k, i); 1272487dded8SKent Overstreet 1273487dded8SKent Overstreet if (level && bkey_cmp(k, &ZERO_KEY)) 1274487dded8SKent Overstreet b->prio = BTREE_PRIO; 1275487dded8SKent Overstreet else if (!level && b->prio == BTREE_PRIO) 1276487dded8SKent Overstreet b->prio = INITIAL_PRIO; 1277487dded8SKent Overstreet } 1278487dded8SKent Overstreet 1279487dded8SKent Overstreet __bch_btree_mark_key(c, level, k); 1280487dded8SKent Overstreet } 1281487dded8SKent Overstreet 1282d44c2f9eSTang Junhui void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats) 1283d44c2f9eSTang Junhui { 1284d44c2f9eSTang Junhui stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets; 1285d44c2f9eSTang Junhui } 1286d44c2f9eSTang Junhui 1287a1f0358bSKent Overstreet static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) 1288cafe5635SKent Overstreet { 1289cafe5635SKent Overstreet uint8_t stale = 0; 12906f10f7d1SColy Li unsigned int keys = 0, good_keys = 0; 1291cafe5635SKent Overstreet struct bkey *k; 1292cafe5635SKent Overstreet struct btree_iter iter; 1293cafe5635SKent Overstreet struct bset_tree *t; 1294cafe5635SKent Overstreet 1295cafe5635SKent Overstreet gc->nodes++; 1296cafe5635SKent Overstreet 1297c052dd9aSKent Overstreet for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { 1298cafe5635SKent Overstreet stale = max(stale, btree_mark_key(b, k)); 1299a1f0358bSKent Overstreet keys++; 1300cafe5635SKent Overstreet 1301a85e968eSKent Overstreet if (bch_ptr_bad(&b->keys, k)) 1302cafe5635SKent Overstreet continue; 1303cafe5635SKent Overstreet 1304cafe5635SKent Overstreet gc->key_bytes += bkey_u64s(k); 1305cafe5635SKent Overstreet gc->nkeys++; 1306a1f0358bSKent Overstreet good_keys++; 1307cafe5635SKent Overstreet 1308cafe5635SKent Overstreet gc->data += KEY_SIZE(k); 1309cafe5635SKent Overstreet } 1310cafe5635SKent Overstreet 1311a85e968eSKent Overstreet for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) 1312cafe5635SKent Overstreet btree_bug_on(t->size && 1313a85e968eSKent Overstreet bset_written(&b->keys, t) && 1314cafe5635SKent Overstreet bkey_cmp(&b->key, &t->end) < 0, 1315cafe5635SKent Overstreet b, "found short btree key in gc"); 1316cafe5635SKent Overstreet 1317a1f0358bSKent Overstreet if (b->c->gc_always_rewrite) 1318a1f0358bSKent Overstreet return true; 1319a1f0358bSKent Overstreet 1320a1f0358bSKent Overstreet if (stale > 10) 1321a1f0358bSKent Overstreet return true; 1322a1f0358bSKent Overstreet 1323a1f0358bSKent Overstreet if ((keys - good_keys) * 2 > keys) 1324a1f0358bSKent Overstreet return true; 1325a1f0358bSKent Overstreet 1326a1f0358bSKent Overstreet return false; 1327cafe5635SKent Overstreet } 1328cafe5635SKent Overstreet 1329a1f0358bSKent Overstreet #define GC_MERGE_NODES 4U 1330cafe5635SKent Overstreet 1331cafe5635SKent Overstreet struct gc_merge_info { 1332cafe5635SKent Overstreet struct btree *b; 13336f10f7d1SColy Li unsigned int keys; 1334cafe5635SKent Overstreet }; 1335cafe5635SKent Overstreet 1336fc2d5988SColy Li static int bch_btree_insert_node(struct btree *b, struct btree_op *op, 1337fc2d5988SColy Li struct keylist *insert_keys, 1338fc2d5988SColy Li atomic_t *journal_ref, 1339fc2d5988SColy Li struct bkey *replace_key); 1340a1f0358bSKent Overstreet 1341a1f0358bSKent Overstreet static int btree_gc_coalesce(struct btree *b, struct btree_op *op, 13420a63b66dSKent Overstreet struct gc_stat *gc, struct gc_merge_info *r) 1343cafe5635SKent Overstreet { 13446f10f7d1SColy Li unsigned int i, nodes = 0, keys = 0, blocks; 1345a1f0358bSKent Overstreet struct btree *new_nodes[GC_MERGE_NODES]; 13460a63b66dSKent Overstreet struct keylist keylist; 1347b54d6934SKent Overstreet struct closure cl; 1348a1f0358bSKent Overstreet struct bkey *k; 1349b54d6934SKent Overstreet 13500a63b66dSKent Overstreet bch_keylist_init(&keylist); 13510a63b66dSKent Overstreet 13520a63b66dSKent Overstreet if (btree_check_reserve(b, NULL)) 13530a63b66dSKent Overstreet return 0; 13540a63b66dSKent Overstreet 1355a1f0358bSKent Overstreet memset(new_nodes, 0, sizeof(new_nodes)); 1356b54d6934SKent Overstreet closure_init_stack(&cl); 1357cafe5635SKent Overstreet 1358a1f0358bSKent Overstreet while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) 1359cafe5635SKent Overstreet keys += r[nodes++].keys; 1360cafe5635SKent Overstreet 1361cafe5635SKent Overstreet blocks = btree_default_blocks(b->c) * 2 / 3; 1362cafe5635SKent Overstreet 1363cafe5635SKent Overstreet if (nodes < 2 || 1364a85e968eSKent Overstreet __set_blocks(b->keys.set[0].data, keys, 1365ee811287SKent Overstreet block_bytes(b->c)) > blocks * (nodes - 1)) 1366a1f0358bSKent Overstreet return 0; 1367cafe5635SKent Overstreet 1368a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) { 13690a63b66dSKent Overstreet new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); 1370a1f0358bSKent Overstreet if (IS_ERR_OR_NULL(new_nodes[i])) 1371a1f0358bSKent Overstreet goto out_nocoalesce; 1372cafe5635SKent Overstreet } 1373cafe5635SKent Overstreet 13740a63b66dSKent Overstreet /* 13750a63b66dSKent Overstreet * We have to check the reserve here, after we've allocated our new 13760a63b66dSKent Overstreet * nodes, to make sure the insert below will succeed - we also check 13770a63b66dSKent Overstreet * before as an optimization to potentially avoid a bunch of expensive 13780a63b66dSKent Overstreet * allocs/sorts 13790a63b66dSKent Overstreet */ 13800a63b66dSKent Overstreet if (btree_check_reserve(b, NULL)) 13810a63b66dSKent Overstreet goto out_nocoalesce; 13820a63b66dSKent Overstreet 13832a285686SKent Overstreet for (i = 0; i < nodes; i++) 13842a285686SKent Overstreet mutex_lock(&new_nodes[i]->write_lock); 13852a285686SKent Overstreet 1386cafe5635SKent Overstreet for (i = nodes - 1; i > 0; --i) { 1387ee811287SKent Overstreet struct bset *n1 = btree_bset_first(new_nodes[i]); 1388ee811287SKent Overstreet struct bset *n2 = btree_bset_first(new_nodes[i - 1]); 1389cafe5635SKent Overstreet struct bkey *k, *last = NULL; 1390cafe5635SKent Overstreet 1391cafe5635SKent Overstreet keys = 0; 1392cafe5635SKent Overstreet 1393a1f0358bSKent Overstreet if (i > 1) { 1394cafe5635SKent Overstreet for (k = n2->start; 1395fafff81cSKent Overstreet k < bset_bkey_last(n2); 1396cafe5635SKent Overstreet k = bkey_next(k)) { 1397cafe5635SKent Overstreet if (__set_blocks(n1, n1->keys + keys + 1398ee811287SKent Overstreet bkey_u64s(k), 1399ee811287SKent Overstreet block_bytes(b->c)) > blocks) 1400cafe5635SKent Overstreet break; 1401cafe5635SKent Overstreet 1402cafe5635SKent Overstreet last = k; 1403cafe5635SKent Overstreet keys += bkey_u64s(k); 1404cafe5635SKent Overstreet } 1405a1f0358bSKent Overstreet } else { 1406a1f0358bSKent Overstreet /* 1407a1f0358bSKent Overstreet * Last node we're not getting rid of - we're getting 1408a1f0358bSKent Overstreet * rid of the node at r[0]. Have to try and fit all of 1409a1f0358bSKent Overstreet * the remaining keys into this node; we can't ensure 1410a1f0358bSKent Overstreet * they will always fit due to rounding and variable 1411a1f0358bSKent Overstreet * length keys (shouldn't be possible in practice, 1412a1f0358bSKent Overstreet * though) 1413a1f0358bSKent Overstreet */ 1414a1f0358bSKent Overstreet if (__set_blocks(n1, n1->keys + n2->keys, 1415ee811287SKent Overstreet block_bytes(b->c)) > 1416ee811287SKent Overstreet btree_blocks(new_nodes[i])) 1417a1f0358bSKent Overstreet goto out_nocoalesce; 1418a1f0358bSKent Overstreet 1419a1f0358bSKent Overstreet keys = n2->keys; 1420a1f0358bSKent Overstreet /* Take the key of the node we're getting rid of */ 1421a1f0358bSKent Overstreet last = &r->b->key; 1422a1f0358bSKent Overstreet } 1423cafe5635SKent Overstreet 1424ee811287SKent Overstreet BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > 1425ee811287SKent Overstreet btree_blocks(new_nodes[i])); 1426cafe5635SKent Overstreet 1427a1f0358bSKent Overstreet if (last) 1428a1f0358bSKent Overstreet bkey_copy_key(&new_nodes[i]->key, last); 1429cafe5635SKent Overstreet 1430fafff81cSKent Overstreet memcpy(bset_bkey_last(n1), 1431cafe5635SKent Overstreet n2->start, 1432fafff81cSKent Overstreet (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); 1433cafe5635SKent Overstreet 1434cafe5635SKent Overstreet n1->keys += keys; 1435a1f0358bSKent Overstreet r[i].keys = n1->keys; 1436cafe5635SKent Overstreet 1437cafe5635SKent Overstreet memmove(n2->start, 1438fafff81cSKent Overstreet bset_bkey_idx(n2, keys), 1439fafff81cSKent Overstreet (void *) bset_bkey_last(n2) - 1440fafff81cSKent Overstreet (void *) bset_bkey_idx(n2, keys)); 1441cafe5635SKent Overstreet 1442cafe5635SKent Overstreet n2->keys -= keys; 1443cafe5635SKent Overstreet 14440a63b66dSKent Overstreet if (__bch_keylist_realloc(&keylist, 1445085d2a3dSKent Overstreet bkey_u64s(&new_nodes[i]->key))) 1446a1f0358bSKent Overstreet goto out_nocoalesce; 1447a1f0358bSKent Overstreet 1448a1f0358bSKent Overstreet bch_btree_node_write(new_nodes[i], &cl); 14490a63b66dSKent Overstreet bch_keylist_add(&keylist, &new_nodes[i]->key); 1450cafe5635SKent Overstreet } 1451cafe5635SKent Overstreet 14522a285686SKent Overstreet for (i = 0; i < nodes; i++) 14532a285686SKent Overstreet mutex_unlock(&new_nodes[i]->write_lock); 14542a285686SKent Overstreet 145505335cffSKent Overstreet closure_sync(&cl); 145605335cffSKent Overstreet 145705335cffSKent Overstreet /* We emptied out this node */ 145805335cffSKent Overstreet BUG_ON(btree_bset_first(new_nodes[0])->keys); 145905335cffSKent Overstreet btree_node_free(new_nodes[0]); 146005335cffSKent Overstreet rw_unlock(true, new_nodes[0]); 1461400ffaa2SSlava Pestov new_nodes[0] = NULL; 146205335cffSKent Overstreet 1463a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) { 14640a63b66dSKent Overstreet if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) 1465a1f0358bSKent Overstreet goto out_nocoalesce; 1466a1f0358bSKent Overstreet 14670a63b66dSKent Overstreet make_btree_freeing_key(r[i].b, keylist.top); 14680a63b66dSKent Overstreet bch_keylist_push(&keylist); 1469a1f0358bSKent Overstreet } 1470a1f0358bSKent Overstreet 14710a63b66dSKent Overstreet bch_btree_insert_node(b, op, &keylist, NULL, NULL); 14720a63b66dSKent Overstreet BUG_ON(!bch_keylist_empty(&keylist)); 1473a1f0358bSKent Overstreet 1474a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) { 1475a1f0358bSKent Overstreet btree_node_free(r[i].b); 1476a1f0358bSKent Overstreet rw_unlock(true, r[i].b); 1477a1f0358bSKent Overstreet 1478a1f0358bSKent Overstreet r[i].b = new_nodes[i]; 1479a1f0358bSKent Overstreet } 1480a1f0358bSKent Overstreet 1481a1f0358bSKent Overstreet memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); 1482a1f0358bSKent Overstreet r[nodes - 1].b = ERR_PTR(-EINTR); 1483cafe5635SKent Overstreet 1484c37511b8SKent Overstreet trace_bcache_btree_gc_coalesce(nodes); 1485cafe5635SKent Overstreet gc->nodes--; 1486cafe5635SKent Overstreet 14870a63b66dSKent Overstreet bch_keylist_free(&keylist); 14880a63b66dSKent Overstreet 1489a1f0358bSKent Overstreet /* Invalidated our iterator */ 1490a1f0358bSKent Overstreet return -EINTR; 1491a1f0358bSKent Overstreet 1492a1f0358bSKent Overstreet out_nocoalesce: 1493a1f0358bSKent Overstreet closure_sync(&cl); 1494a1f0358bSKent Overstreet 14950a63b66dSKent Overstreet while ((k = bch_keylist_pop(&keylist))) 1496a1f0358bSKent Overstreet if (!bkey_cmp(k, &ZERO_KEY)) 1497a1f0358bSKent Overstreet atomic_dec(&b->c->prio_blocked); 1498f16277caSShenghui Wang bch_keylist_free(&keylist); 1499a1f0358bSKent Overstreet 1500a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) 1501a1f0358bSKent Overstreet if (!IS_ERR_OR_NULL(new_nodes[i])) { 1502a1f0358bSKent Overstreet btree_node_free(new_nodes[i]); 1503a1f0358bSKent Overstreet rw_unlock(true, new_nodes[i]); 1504a1f0358bSKent Overstreet } 1505a1f0358bSKent Overstreet return 0; 1506a1f0358bSKent Overstreet } 1507a1f0358bSKent Overstreet 15080a63b66dSKent Overstreet static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, 15090a63b66dSKent Overstreet struct btree *replace) 15100a63b66dSKent Overstreet { 15110a63b66dSKent Overstreet struct keylist keys; 15120a63b66dSKent Overstreet struct btree *n; 15130a63b66dSKent Overstreet 15140a63b66dSKent Overstreet if (btree_check_reserve(b, NULL)) 15150a63b66dSKent Overstreet return 0; 15160a63b66dSKent Overstreet 15170a63b66dSKent Overstreet n = btree_node_alloc_replacement(replace, NULL); 15180a63b66dSKent Overstreet 15190a63b66dSKent Overstreet /* recheck reserve after allocating replacement node */ 15200a63b66dSKent Overstreet if (btree_check_reserve(b, NULL)) { 15210a63b66dSKent Overstreet btree_node_free(n); 15220a63b66dSKent Overstreet rw_unlock(true, n); 15230a63b66dSKent Overstreet return 0; 15240a63b66dSKent Overstreet } 15250a63b66dSKent Overstreet 15260a63b66dSKent Overstreet bch_btree_node_write_sync(n); 15270a63b66dSKent Overstreet 15280a63b66dSKent Overstreet bch_keylist_init(&keys); 15290a63b66dSKent Overstreet bch_keylist_add(&keys, &n->key); 15300a63b66dSKent Overstreet 15310a63b66dSKent Overstreet make_btree_freeing_key(replace, keys.top); 15320a63b66dSKent Overstreet bch_keylist_push(&keys); 15330a63b66dSKent Overstreet 15340a63b66dSKent Overstreet bch_btree_insert_node(b, op, &keys, NULL, NULL); 15350a63b66dSKent Overstreet BUG_ON(!bch_keylist_empty(&keys)); 15360a63b66dSKent Overstreet 15370a63b66dSKent Overstreet btree_node_free(replace); 15380a63b66dSKent Overstreet rw_unlock(true, n); 15390a63b66dSKent Overstreet 15400a63b66dSKent Overstreet /* Invalidated our iterator */ 15410a63b66dSKent Overstreet return -EINTR; 15420a63b66dSKent Overstreet } 15430a63b66dSKent Overstreet 15446f10f7d1SColy Li static unsigned int btree_gc_count_keys(struct btree *b) 1545a1f0358bSKent Overstreet { 1546a1f0358bSKent Overstreet struct bkey *k; 1547a1f0358bSKent Overstreet struct btree_iter iter; 15486f10f7d1SColy Li unsigned int ret = 0; 1549a1f0358bSKent Overstreet 1550c052dd9aSKent Overstreet for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) 1551a1f0358bSKent Overstreet ret += bkey_u64s(k); 1552a1f0358bSKent Overstreet 1553a1f0358bSKent Overstreet return ret; 1554cafe5635SKent Overstreet } 1555cafe5635SKent Overstreet 15567f4a59deSTang Junhui static size_t btree_gc_min_nodes(struct cache_set *c) 15577f4a59deSTang Junhui { 15587f4a59deSTang Junhui size_t min_nodes; 15597f4a59deSTang Junhui 15607f4a59deSTang Junhui /* 15617f4a59deSTang Junhui * Since incremental GC would stop 100ms when front 15627f4a59deSTang Junhui * side I/O comes, so when there are many btree nodes, 15637f4a59deSTang Junhui * if GC only processes constant (100) nodes each time, 15647f4a59deSTang Junhui * GC would last a long time, and the front side I/Os 15657f4a59deSTang Junhui * would run out of the buckets (since no new bucket 15667f4a59deSTang Junhui * can be allocated during GC), and be blocked again. 15677f4a59deSTang Junhui * So GC should not process constant nodes, but varied 15687f4a59deSTang Junhui * nodes according to the number of btree nodes, which 15697f4a59deSTang Junhui * realized by dividing GC into constant(100) times, 15707f4a59deSTang Junhui * so when there are many btree nodes, GC can process 15717f4a59deSTang Junhui * more nodes each time, otherwise, GC will process less 15727f4a59deSTang Junhui * nodes each time (but no less than MIN_GC_NODES) 15737f4a59deSTang Junhui */ 15747f4a59deSTang Junhui min_nodes = c->gc_stats.nodes / MAX_GC_TIMES; 15757f4a59deSTang Junhui if (min_nodes < MIN_GC_NODES) 15767f4a59deSTang Junhui min_nodes = MIN_GC_NODES; 15777f4a59deSTang Junhui 15787f4a59deSTang Junhui return min_nodes; 15797f4a59deSTang Junhui } 15807f4a59deSTang Junhui 15817f4a59deSTang Junhui 1582cafe5635SKent Overstreet static int btree_gc_recurse(struct btree *b, struct btree_op *op, 1583cafe5635SKent Overstreet struct closure *writes, struct gc_stat *gc) 1584cafe5635SKent Overstreet { 1585a1f0358bSKent Overstreet int ret = 0; 1586a1f0358bSKent Overstreet bool should_rewrite; 1587a1f0358bSKent Overstreet struct bkey *k; 1588a1f0358bSKent Overstreet struct btree_iter iter; 1589cafe5635SKent Overstreet struct gc_merge_info r[GC_MERGE_NODES]; 15902a285686SKent Overstreet struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; 1591cafe5635SKent Overstreet 1592c052dd9aSKent Overstreet bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); 1593cafe5635SKent Overstreet 15942a285686SKent Overstreet for (i = r; i < r + ARRAY_SIZE(r); i++) 15952a285686SKent Overstreet i->b = ERR_PTR(-EINTR); 1596cafe5635SKent Overstreet 1597a1f0358bSKent Overstreet while (1) { 1598a85e968eSKent Overstreet k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); 1599a1f0358bSKent Overstreet if (k) { 16000a63b66dSKent Overstreet r->b = bch_btree_node_get(b->c, op, k, b->level - 1, 16012452cc89SSlava Pestov true, b); 1602cafe5635SKent Overstreet if (IS_ERR(r->b)) { 1603cafe5635SKent Overstreet ret = PTR_ERR(r->b); 1604cafe5635SKent Overstreet break; 1605cafe5635SKent Overstreet } 1606cafe5635SKent Overstreet 1607a1f0358bSKent Overstreet r->keys = btree_gc_count_keys(r->b); 1608cafe5635SKent Overstreet 16090a63b66dSKent Overstreet ret = btree_gc_coalesce(b, op, gc, r); 1610a1f0358bSKent Overstreet if (ret) 1611cafe5635SKent Overstreet break; 1612cafe5635SKent Overstreet } 1613cafe5635SKent Overstreet 1614a1f0358bSKent Overstreet if (!last->b) 1615a1f0358bSKent Overstreet break; 1616cafe5635SKent Overstreet 1617a1f0358bSKent Overstreet if (!IS_ERR(last->b)) { 1618a1f0358bSKent Overstreet should_rewrite = btree_gc_mark_node(last->b, gc); 16190a63b66dSKent Overstreet if (should_rewrite) { 16200a63b66dSKent Overstreet ret = btree_gc_rewrite_node(b, op, last->b); 16210a63b66dSKent Overstreet if (ret) 1622a1f0358bSKent Overstreet break; 1623a1f0358bSKent Overstreet } 1624a1f0358bSKent Overstreet 1625a1f0358bSKent Overstreet if (last->b->level) { 1626a1f0358bSKent Overstreet ret = btree_gc_recurse(last->b, op, writes, gc); 1627a1f0358bSKent Overstreet if (ret) 1628a1f0358bSKent Overstreet break; 1629a1f0358bSKent Overstreet } 1630a1f0358bSKent Overstreet 1631a1f0358bSKent Overstreet bkey_copy_key(&b->c->gc_done, &last->b->key); 1632a1f0358bSKent Overstreet 1633a1f0358bSKent Overstreet /* 1634a1f0358bSKent Overstreet * Must flush leaf nodes before gc ends, since replace 1635a1f0358bSKent Overstreet * operations aren't journalled 1636cafe5635SKent Overstreet */ 16372a285686SKent Overstreet mutex_lock(&last->b->write_lock); 1638a1f0358bSKent Overstreet if (btree_node_dirty(last->b)) 1639a1f0358bSKent Overstreet bch_btree_node_write(last->b, writes); 16402a285686SKent Overstreet mutex_unlock(&last->b->write_lock); 1641a1f0358bSKent Overstreet rw_unlock(true, last->b); 1642a1f0358bSKent Overstreet } 1643a1f0358bSKent Overstreet 1644a1f0358bSKent Overstreet memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); 1645a1f0358bSKent Overstreet r->b = NULL; 1646a1f0358bSKent Overstreet 16475c25c4fcSTang Junhui if (atomic_read(&b->c->search_inflight) && 16487f4a59deSTang Junhui gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { 16495c25c4fcSTang Junhui gc->nodes_pre = gc->nodes; 16505c25c4fcSTang Junhui ret = -EAGAIN; 16515c25c4fcSTang Junhui break; 16525c25c4fcSTang Junhui } 16535c25c4fcSTang Junhui 1654cafe5635SKent Overstreet if (need_resched()) { 1655cafe5635SKent Overstreet ret = -EAGAIN; 1656cafe5635SKent Overstreet break; 1657cafe5635SKent Overstreet } 1658cafe5635SKent Overstreet } 1659cafe5635SKent Overstreet 16602a285686SKent Overstreet for (i = r; i < r + ARRAY_SIZE(r); i++) 16612a285686SKent Overstreet if (!IS_ERR_OR_NULL(i->b)) { 16622a285686SKent Overstreet mutex_lock(&i->b->write_lock); 16632a285686SKent Overstreet if (btree_node_dirty(i->b)) 16642a285686SKent Overstreet bch_btree_node_write(i->b, writes); 16652a285686SKent Overstreet mutex_unlock(&i->b->write_lock); 16662a285686SKent Overstreet rw_unlock(true, i->b); 1667a1f0358bSKent Overstreet } 1668cafe5635SKent Overstreet 1669cafe5635SKent Overstreet return ret; 1670cafe5635SKent Overstreet } 1671cafe5635SKent Overstreet 1672cafe5635SKent Overstreet static int bch_btree_gc_root(struct btree *b, struct btree_op *op, 1673cafe5635SKent Overstreet struct closure *writes, struct gc_stat *gc) 1674cafe5635SKent Overstreet { 1675cafe5635SKent Overstreet struct btree *n = NULL; 1676a1f0358bSKent Overstreet int ret = 0; 1677a1f0358bSKent Overstreet bool should_rewrite; 1678cafe5635SKent Overstreet 1679a1f0358bSKent Overstreet should_rewrite = btree_gc_mark_node(b, gc); 1680a1f0358bSKent Overstreet if (should_rewrite) { 16810a63b66dSKent Overstreet n = btree_node_alloc_replacement(b, NULL); 1682cafe5635SKent Overstreet 1683cafe5635SKent Overstreet if (!IS_ERR_OR_NULL(n)) { 1684a1f0358bSKent Overstreet bch_btree_node_write_sync(n); 16852a285686SKent Overstreet 1686a1f0358bSKent Overstreet bch_btree_set_root(n); 1687a1f0358bSKent Overstreet btree_node_free(b); 1688a1f0358bSKent Overstreet rw_unlock(true, n); 1689a1f0358bSKent Overstreet 1690a1f0358bSKent Overstreet return -EINTR; 1691cafe5635SKent Overstreet } 1692a1f0358bSKent Overstreet } 1693a1f0358bSKent Overstreet 1694487dded8SKent Overstreet __bch_btree_mark_key(b->c, b->level + 1, &b->key); 1695487dded8SKent Overstreet 1696a1f0358bSKent Overstreet if (b->level) { 1697a1f0358bSKent Overstreet ret = btree_gc_recurse(b, op, writes, gc); 1698a1f0358bSKent Overstreet if (ret) 1699a1f0358bSKent Overstreet return ret; 1700a1f0358bSKent Overstreet } 1701a1f0358bSKent Overstreet 1702a1f0358bSKent Overstreet bkey_copy_key(&b->c->gc_done, &b->key); 1703cafe5635SKent Overstreet 1704cafe5635SKent Overstreet return ret; 1705cafe5635SKent Overstreet } 1706cafe5635SKent Overstreet 1707cafe5635SKent Overstreet static void btree_gc_start(struct cache_set *c) 1708cafe5635SKent Overstreet { 1709cafe5635SKent Overstreet struct cache *ca; 1710cafe5635SKent Overstreet struct bucket *b; 17116f10f7d1SColy Li unsigned int i; 1712cafe5635SKent Overstreet 1713cafe5635SKent Overstreet if (!c->gc_mark_valid) 1714cafe5635SKent Overstreet return; 1715cafe5635SKent Overstreet 1716cafe5635SKent Overstreet mutex_lock(&c->bucket_lock); 1717cafe5635SKent Overstreet 1718cafe5635SKent Overstreet c->gc_mark_valid = 0; 1719cafe5635SKent Overstreet c->gc_done = ZERO_KEY; 1720cafe5635SKent Overstreet 1721cafe5635SKent Overstreet for_each_cache(ca, c, i) 1722cafe5635SKent Overstreet for_each_bucket(b, ca) { 17233a2fd9d5SKent Overstreet b->last_gc = b->gen; 172429ebf465SKent Overstreet if (!atomic_read(&b->pin)) { 17254fe6a816SKent Overstreet SET_GC_MARK(b, 0); 172629ebf465SKent Overstreet SET_GC_SECTORS_USED(b, 0); 172729ebf465SKent Overstreet } 1728cafe5635SKent Overstreet } 1729cafe5635SKent Overstreet 1730cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock); 1731cafe5635SKent Overstreet } 1732cafe5635SKent Overstreet 1733d44c2f9eSTang Junhui static void bch_btree_gc_finish(struct cache_set *c) 1734cafe5635SKent Overstreet { 1735cafe5635SKent Overstreet struct bucket *b; 1736cafe5635SKent Overstreet struct cache *ca; 17376f10f7d1SColy Li unsigned int i; 1738cafe5635SKent Overstreet 1739cafe5635SKent Overstreet mutex_lock(&c->bucket_lock); 1740cafe5635SKent Overstreet 1741cafe5635SKent Overstreet set_gc_sectors(c); 1742cafe5635SKent Overstreet c->gc_mark_valid = 1; 1743cafe5635SKent Overstreet c->need_gc = 0; 1744cafe5635SKent Overstreet 1745cafe5635SKent Overstreet for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) 1746cafe5635SKent Overstreet SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), 1747cafe5635SKent Overstreet GC_MARK_METADATA); 1748cafe5635SKent Overstreet 1749bf0a628aSNicholas Swenson /* don't reclaim buckets to which writeback keys point */ 1750bf0a628aSNicholas Swenson rcu_read_lock(); 17512831231dSColy Li for (i = 0; i < c->devices_max_used; i++) { 1752bf0a628aSNicholas Swenson struct bcache_device *d = c->devices[i]; 1753bf0a628aSNicholas Swenson struct cached_dev *dc; 1754bf0a628aSNicholas Swenson struct keybuf_key *w, *n; 17556f10f7d1SColy Li unsigned int j; 1756bf0a628aSNicholas Swenson 1757bf0a628aSNicholas Swenson if (!d || UUID_FLASH_ONLY(&c->uuids[i])) 1758bf0a628aSNicholas Swenson continue; 1759bf0a628aSNicholas Swenson dc = container_of(d, struct cached_dev, disk); 1760bf0a628aSNicholas Swenson 1761bf0a628aSNicholas Swenson spin_lock(&dc->writeback_keys.lock); 1762bf0a628aSNicholas Swenson rbtree_postorder_for_each_entry_safe(w, n, 1763bf0a628aSNicholas Swenson &dc->writeback_keys.keys, node) 1764bf0a628aSNicholas Swenson for (j = 0; j < KEY_PTRS(&w->key); j++) 1765bf0a628aSNicholas Swenson SET_GC_MARK(PTR_BUCKET(c, &w->key, j), 1766bf0a628aSNicholas Swenson GC_MARK_DIRTY); 1767bf0a628aSNicholas Swenson spin_unlock(&dc->writeback_keys.lock); 1768bf0a628aSNicholas Swenson } 1769bf0a628aSNicholas Swenson rcu_read_unlock(); 1770bf0a628aSNicholas Swenson 1771d44c2f9eSTang Junhui c->avail_nbuckets = 0; 1772cafe5635SKent Overstreet for_each_cache(ca, c, i) { 1773cafe5635SKent Overstreet uint64_t *i; 1774cafe5635SKent Overstreet 1775cafe5635SKent Overstreet ca->invalidate_needs_gc = 0; 1776cafe5635SKent Overstreet 1777cafe5635SKent Overstreet for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++) 1778cafe5635SKent Overstreet SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1779cafe5635SKent Overstreet 1780cafe5635SKent Overstreet for (i = ca->prio_buckets; 1781cafe5635SKent Overstreet i < ca->prio_buckets + prio_buckets(ca) * 2; i++) 1782cafe5635SKent Overstreet SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1783cafe5635SKent Overstreet 1784cafe5635SKent Overstreet for_each_bucket(b, ca) { 1785cafe5635SKent Overstreet c->need_gc = max(c->need_gc, bucket_gc_gen(b)); 1786cafe5635SKent Overstreet 17874fe6a816SKent Overstreet if (atomic_read(&b->pin)) 17884fe6a816SKent Overstreet continue; 17894fe6a816SKent Overstreet 17904fe6a816SKent Overstreet BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); 17914fe6a816SKent Overstreet 17924fe6a816SKent Overstreet if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) 1793d44c2f9eSTang Junhui c->avail_nbuckets++; 1794cafe5635SKent Overstreet } 1795cafe5635SKent Overstreet } 1796cafe5635SKent Overstreet 1797cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock); 1798cafe5635SKent Overstreet } 1799cafe5635SKent Overstreet 180072a44517SKent Overstreet static void bch_btree_gc(struct cache_set *c) 1801cafe5635SKent Overstreet { 1802cafe5635SKent Overstreet int ret; 1803cafe5635SKent Overstreet struct gc_stat stats; 1804cafe5635SKent Overstreet struct closure writes; 1805cafe5635SKent Overstreet struct btree_op op; 1806cafe5635SKent Overstreet uint64_t start_time = local_clock(); 180757943511SKent Overstreet 1808c37511b8SKent Overstreet trace_bcache_gc_start(c); 1809cafe5635SKent Overstreet 1810cafe5635SKent Overstreet memset(&stats, 0, sizeof(struct gc_stat)); 1811cafe5635SKent Overstreet closure_init_stack(&writes); 1812b54d6934SKent Overstreet bch_btree_op_init(&op, SHRT_MAX); 1813cafe5635SKent Overstreet 1814cafe5635SKent Overstreet btree_gc_start(c); 1815cafe5635SKent Overstreet 1816771f393eSColy Li /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */ 1817a1f0358bSKent Overstreet do { 1818cafe5635SKent Overstreet ret = btree_root(gc_root, c, &op, &writes, &stats); 1819cafe5635SKent Overstreet closure_sync(&writes); 1820c5f1e5adSKent Overstreet cond_resched(); 1821cafe5635SKent Overstreet 18225c25c4fcSTang Junhui if (ret == -EAGAIN) 18235c25c4fcSTang Junhui schedule_timeout_interruptible(msecs_to_jiffies 18245c25c4fcSTang Junhui (GC_SLEEP_MS)); 18255c25c4fcSTang Junhui else if (ret) 1826cafe5635SKent Overstreet pr_warn("gc failed!"); 1827771f393eSColy Li } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); 1828cafe5635SKent Overstreet 1829d44c2f9eSTang Junhui bch_btree_gc_finish(c); 183057943511SKent Overstreet wake_up_allocators(c); 183157943511SKent Overstreet 1832169ef1cfSKent Overstreet bch_time_stats_update(&c->btree_gc_time, start_time); 1833cafe5635SKent Overstreet 1834cafe5635SKent Overstreet stats.key_bytes *= sizeof(uint64_t); 1835cafe5635SKent Overstreet stats.data <<= 9; 1836d44c2f9eSTang Junhui bch_update_bucket_in_use(c, &stats); 1837cafe5635SKent Overstreet memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); 1838cafe5635SKent Overstreet 1839c37511b8SKent Overstreet trace_bcache_gc_end(c); 1840cafe5635SKent Overstreet 184172a44517SKent Overstreet bch_moving_gc(c); 1842cafe5635SKent Overstreet } 1843cafe5635SKent Overstreet 1844be628be0SKent Overstreet static bool gc_should_run(struct cache_set *c) 1845cafe5635SKent Overstreet { 1846a1f0358bSKent Overstreet struct cache *ca; 18476f10f7d1SColy Li unsigned int i; 184872a44517SKent Overstreet 1849be628be0SKent Overstreet for_each_cache(ca, c, i) 1850be628be0SKent Overstreet if (ca->invalidate_needs_gc) 1851be628be0SKent Overstreet return true; 185272a44517SKent Overstreet 1853be628be0SKent Overstreet if (atomic_read(&c->sectors_to_gc) < 0) 1854be628be0SKent Overstreet return true; 1855be628be0SKent Overstreet 1856be628be0SKent Overstreet return false; 1857be628be0SKent Overstreet } 1858be628be0SKent Overstreet 1859be628be0SKent Overstreet static int bch_gc_thread(void *arg) 1860be628be0SKent Overstreet { 1861be628be0SKent Overstreet struct cache_set *c = arg; 1862be628be0SKent Overstreet 1863be628be0SKent Overstreet while (1) { 1864be628be0SKent Overstreet wait_event_interruptible(c->gc_wait, 1865771f393eSColy Li kthread_should_stop() || 1866771f393eSColy Li test_bit(CACHE_SET_IO_DISABLE, &c->flags) || 1867771f393eSColy Li gc_should_run(c)); 1868be628be0SKent Overstreet 1869771f393eSColy Li if (kthread_should_stop() || 1870771f393eSColy Li test_bit(CACHE_SET_IO_DISABLE, &c->flags)) 187172a44517SKent Overstreet break; 187272a44517SKent Overstreet 1873be628be0SKent Overstreet set_gc_sectors(c); 1874be628be0SKent Overstreet bch_btree_gc(c); 187572a44517SKent Overstreet } 187672a44517SKent Overstreet 1877771f393eSColy Li wait_for_kthread_stop(); 187872a44517SKent Overstreet return 0; 187972a44517SKent Overstreet } 188072a44517SKent Overstreet 188172a44517SKent Overstreet int bch_gc_thread_start(struct cache_set *c) 188272a44517SKent Overstreet { 1883be628be0SKent Overstreet c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); 18849d134117SVasyl Gomonovych return PTR_ERR_OR_ZERO(c->gc_thread); 1885cafe5635SKent Overstreet } 1886cafe5635SKent Overstreet 1887cafe5635SKent Overstreet /* Initial partial gc */ 1888cafe5635SKent Overstreet 1889487dded8SKent Overstreet static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) 1890cafe5635SKent Overstreet { 189150310164SKent Overstreet int ret = 0; 189250310164SKent Overstreet struct bkey *k, *p = NULL; 1893cafe5635SKent Overstreet struct btree_iter iter; 1894cafe5635SKent Overstreet 1895487dded8SKent Overstreet for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) 1896487dded8SKent Overstreet bch_initial_mark_key(b->c, b->level, k); 1897cafe5635SKent Overstreet 1898487dded8SKent Overstreet bch_initial_mark_key(b->c, b->level + 1, &b->key); 1899cafe5635SKent Overstreet 1900cafe5635SKent Overstreet if (b->level) { 1901c052dd9aSKent Overstreet bch_btree_iter_init(&b->keys, &iter, NULL); 1902cafe5635SKent Overstreet 190350310164SKent Overstreet do { 1904a85e968eSKent Overstreet k = bch_btree_iter_next_filter(&iter, &b->keys, 1905a85e968eSKent Overstreet bch_ptr_bad); 19067f4a59deSTang Junhui if (k) { 19072452cc89SSlava Pestov btree_node_prefetch(b, k); 19087f4a59deSTang Junhui /* 19097f4a59deSTang Junhui * initiallize c->gc_stats.nodes 19107f4a59deSTang Junhui * for incremental GC 19117f4a59deSTang Junhui */ 19127f4a59deSTang Junhui b->c->gc_stats.nodes++; 19137f4a59deSTang Junhui } 191450310164SKent Overstreet 1915cafe5635SKent Overstreet if (p) 1916487dded8SKent Overstreet ret = btree(check_recurse, p, b, op); 1917cafe5635SKent Overstreet 191850310164SKent Overstreet p = k; 191950310164SKent Overstreet } while (p && !ret); 1920cafe5635SKent Overstreet } 1921cafe5635SKent Overstreet 1922487dded8SKent Overstreet return ret; 1923cafe5635SKent Overstreet } 1924cafe5635SKent Overstreet 1925c18536a7SKent Overstreet int bch_btree_check(struct cache_set *c) 1926cafe5635SKent Overstreet { 1927c18536a7SKent Overstreet struct btree_op op; 1928cafe5635SKent Overstreet 1929b54d6934SKent Overstreet bch_btree_op_init(&op, SHRT_MAX); 1930cafe5635SKent Overstreet 1931487dded8SKent Overstreet return btree_root(check_recurse, c, &op); 1932cafe5635SKent Overstreet } 1933cafe5635SKent Overstreet 19342531d9eeSKent Overstreet void bch_initial_gc_finish(struct cache_set *c) 19352531d9eeSKent Overstreet { 19362531d9eeSKent Overstreet struct cache *ca; 19372531d9eeSKent Overstreet struct bucket *b; 19386f10f7d1SColy Li unsigned int i; 19392531d9eeSKent Overstreet 19402531d9eeSKent Overstreet bch_btree_gc_finish(c); 19412531d9eeSKent Overstreet 19422531d9eeSKent Overstreet mutex_lock(&c->bucket_lock); 19432531d9eeSKent Overstreet 19442531d9eeSKent Overstreet /* 19452531d9eeSKent Overstreet * We need to put some unused buckets directly on the prio freelist in 19462531d9eeSKent Overstreet * order to get the allocator thread started - it needs freed buckets in 19472531d9eeSKent Overstreet * order to rewrite the prios and gens, and it needs to rewrite prios 19482531d9eeSKent Overstreet * and gens in order to free buckets. 19492531d9eeSKent Overstreet * 19502531d9eeSKent Overstreet * This is only safe for buckets that have no live data in them, which 19512531d9eeSKent Overstreet * there should always be some of. 19522531d9eeSKent Overstreet */ 19532531d9eeSKent Overstreet for_each_cache(ca, c, i) { 19542531d9eeSKent Overstreet for_each_bucket(b, ca) { 1955682811b3STang Junhui if (fifo_full(&ca->free[RESERVE_PRIO]) && 1956682811b3STang Junhui fifo_full(&ca->free[RESERVE_BTREE])) 19572531d9eeSKent Overstreet break; 19582531d9eeSKent Overstreet 19592531d9eeSKent Overstreet if (bch_can_invalidate_bucket(ca, b) && 19602531d9eeSKent Overstreet !GC_MARK(b)) { 19612531d9eeSKent Overstreet __bch_invalidate_one_bucket(ca, b); 1962682811b3STang Junhui if (!fifo_push(&ca->free[RESERVE_PRIO], 1963682811b3STang Junhui b - ca->buckets)) 1964682811b3STang Junhui fifo_push(&ca->free[RESERVE_BTREE], 19652531d9eeSKent Overstreet b - ca->buckets); 19662531d9eeSKent Overstreet } 19672531d9eeSKent Overstreet } 19682531d9eeSKent Overstreet } 19692531d9eeSKent Overstreet 19702531d9eeSKent Overstreet mutex_unlock(&c->bucket_lock); 19712531d9eeSKent Overstreet } 19722531d9eeSKent Overstreet 1973cafe5635SKent Overstreet /* Btree insertion */ 1974cafe5635SKent Overstreet 1975829a60b9SKent Overstreet static bool btree_insert_key(struct btree *b, struct bkey *k, 19761b207d80SKent Overstreet struct bkey *replace_key) 1977cafe5635SKent Overstreet { 19786f10f7d1SColy Li unsigned int status; 1979cafe5635SKent Overstreet 1980cafe5635SKent Overstreet BUG_ON(bkey_cmp(k, &b->key) > 0); 1981cafe5635SKent Overstreet 1982829a60b9SKent Overstreet status = bch_btree_insert_key(&b->keys, k, replace_key); 1983829a60b9SKent Overstreet if (status != BTREE_INSERT_STATUS_NO_INSERT) { 1984dc9d98d6SKent Overstreet bch_check_keys(&b->keys, "%u for %s", status, 19851b207d80SKent Overstreet replace_key ? "replace" : "insert"); 1986cafe5635SKent Overstreet 1987829a60b9SKent Overstreet trace_bcache_btree_insert_key(b, k, replace_key != NULL, 1988829a60b9SKent Overstreet status); 1989cafe5635SKent Overstreet return true; 1990829a60b9SKent Overstreet } else 1991829a60b9SKent Overstreet return false; 1992cafe5635SKent Overstreet } 1993cafe5635SKent Overstreet 199459158fdeSKent Overstreet static size_t insert_u64s_remaining(struct btree *b) 199559158fdeSKent Overstreet { 19963572324aSKent Overstreet long ret = bch_btree_keys_u64s_remaining(&b->keys); 199759158fdeSKent Overstreet 199859158fdeSKent Overstreet /* 199959158fdeSKent Overstreet * Might land in the middle of an existing extent and have to split it 200059158fdeSKent Overstreet */ 200159158fdeSKent Overstreet if (b->keys.ops->is_extents) 200259158fdeSKent Overstreet ret -= KEY_MAX_U64S; 200359158fdeSKent Overstreet 200459158fdeSKent Overstreet return max(ret, 0L); 200559158fdeSKent Overstreet } 200659158fdeSKent Overstreet 200726c949f8SKent Overstreet static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, 20081b207d80SKent Overstreet struct keylist *insert_keys, 20091b207d80SKent Overstreet struct bkey *replace_key) 2010cafe5635SKent Overstreet { 2011cafe5635SKent Overstreet bool ret = false; 2012dc9d98d6SKent Overstreet int oldsize = bch_count_data(&b->keys); 2013cafe5635SKent Overstreet 201426c949f8SKent Overstreet while (!bch_keylist_empty(insert_keys)) { 2015c2f95ae2SKent Overstreet struct bkey *k = insert_keys->keys; 201626c949f8SKent Overstreet 201759158fdeSKent Overstreet if (bkey_u64s(k) > insert_u64s_remaining(b)) 2018403b6cdeSKent Overstreet break; 2019403b6cdeSKent Overstreet 2020403b6cdeSKent Overstreet if (bkey_cmp(k, &b->key) <= 0) { 20213a3b6a4eSKent Overstreet if (!b->level) 20223a3b6a4eSKent Overstreet bkey_put(b->c, k); 202326c949f8SKent Overstreet 2024829a60b9SKent Overstreet ret |= btree_insert_key(b, k, replace_key); 202526c949f8SKent Overstreet bch_keylist_pop_front(insert_keys); 202626c949f8SKent Overstreet } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { 202726c949f8SKent Overstreet BKEY_PADDED(key) temp; 2028c2f95ae2SKent Overstreet bkey_copy(&temp.key, insert_keys->keys); 202926c949f8SKent Overstreet 203026c949f8SKent Overstreet bch_cut_back(&b->key, &temp.key); 2031c2f95ae2SKent Overstreet bch_cut_front(&b->key, insert_keys->keys); 203226c949f8SKent Overstreet 2033829a60b9SKent Overstreet ret |= btree_insert_key(b, &temp.key, replace_key); 203426c949f8SKent Overstreet break; 203526c949f8SKent Overstreet } else { 203626c949f8SKent Overstreet break; 203726c949f8SKent Overstreet } 2038cafe5635SKent Overstreet } 2039cafe5635SKent Overstreet 2040829a60b9SKent Overstreet if (!ret) 2041829a60b9SKent Overstreet op->insert_collision = true; 2042829a60b9SKent Overstreet 2043403b6cdeSKent Overstreet BUG_ON(!bch_keylist_empty(insert_keys) && b->level); 2044403b6cdeSKent Overstreet 2045dc9d98d6SKent Overstreet BUG_ON(bch_count_data(&b->keys) < oldsize); 2046cafe5635SKent Overstreet return ret; 2047cafe5635SKent Overstreet } 2048cafe5635SKent Overstreet 204926c949f8SKent Overstreet static int btree_split(struct btree *b, struct btree_op *op, 205026c949f8SKent Overstreet struct keylist *insert_keys, 20511b207d80SKent Overstreet struct bkey *replace_key) 2052cafe5635SKent Overstreet { 2053d6fd3b11SKent Overstreet bool split; 2054cafe5635SKent Overstreet struct btree *n1, *n2 = NULL, *n3 = NULL; 2055cafe5635SKent Overstreet uint64_t start_time = local_clock(); 2056b54d6934SKent Overstreet struct closure cl; 205717e21a9fSKent Overstreet struct keylist parent_keys; 2058b54d6934SKent Overstreet 2059b54d6934SKent Overstreet closure_init_stack(&cl); 206017e21a9fSKent Overstreet bch_keylist_init(&parent_keys); 2061cafe5635SKent Overstreet 20620a63b66dSKent Overstreet if (btree_check_reserve(b, op)) { 20630a63b66dSKent Overstreet if (!b->level) 206478365411SKent Overstreet return -EINTR; 20650a63b66dSKent Overstreet else 20660a63b66dSKent Overstreet WARN(1, "insufficient reserve for split\n"); 20670a63b66dSKent Overstreet } 206878365411SKent Overstreet 20690a63b66dSKent Overstreet n1 = btree_node_alloc_replacement(b, op); 2070cafe5635SKent Overstreet if (IS_ERR(n1)) 2071cafe5635SKent Overstreet goto err; 2072cafe5635SKent Overstreet 2073ee811287SKent Overstreet split = set_blocks(btree_bset_first(n1), 2074ee811287SKent Overstreet block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; 2075cafe5635SKent Overstreet 2076cafe5635SKent Overstreet if (split) { 20776f10f7d1SColy Li unsigned int keys = 0; 2078cafe5635SKent Overstreet 2079ee811287SKent Overstreet trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); 2080c37511b8SKent Overstreet 20812452cc89SSlava Pestov n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); 2082cafe5635SKent Overstreet if (IS_ERR(n2)) 2083cafe5635SKent Overstreet goto err_free1; 2084cafe5635SKent Overstreet 2085d6fd3b11SKent Overstreet if (!b->parent) { 20862452cc89SSlava Pestov n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); 2087cafe5635SKent Overstreet if (IS_ERR(n3)) 2088cafe5635SKent Overstreet goto err_free2; 2089cafe5635SKent Overstreet } 2090cafe5635SKent Overstreet 20912a285686SKent Overstreet mutex_lock(&n1->write_lock); 20922a285686SKent Overstreet mutex_lock(&n2->write_lock); 20932a285686SKent Overstreet 20941b207d80SKent Overstreet bch_btree_insert_keys(n1, op, insert_keys, replace_key); 2095cafe5635SKent Overstreet 2096d6fd3b11SKent Overstreet /* 2097d6fd3b11SKent Overstreet * Has to be a linear search because we don't have an auxiliary 2098cafe5635SKent Overstreet * search tree yet 2099cafe5635SKent Overstreet */ 2100cafe5635SKent Overstreet 2101ee811287SKent Overstreet while (keys < (btree_bset_first(n1)->keys * 3) / 5) 2102ee811287SKent Overstreet keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), 2103fafff81cSKent Overstreet keys)); 2104cafe5635SKent Overstreet 2105fafff81cSKent Overstreet bkey_copy_key(&n1->key, 2106ee811287SKent Overstreet bset_bkey_idx(btree_bset_first(n1), keys)); 2107ee811287SKent Overstreet keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys)); 2108cafe5635SKent Overstreet 2109ee811287SKent Overstreet btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; 2110ee811287SKent Overstreet btree_bset_first(n1)->keys = keys; 2111cafe5635SKent Overstreet 2112ee811287SKent Overstreet memcpy(btree_bset_first(n2)->start, 2113ee811287SKent Overstreet bset_bkey_last(btree_bset_first(n1)), 2114ee811287SKent Overstreet btree_bset_first(n2)->keys * sizeof(uint64_t)); 2115cafe5635SKent Overstreet 2116cafe5635SKent Overstreet bkey_copy_key(&n2->key, &b->key); 2117cafe5635SKent Overstreet 211817e21a9fSKent Overstreet bch_keylist_add(&parent_keys, &n2->key); 2119b54d6934SKent Overstreet bch_btree_node_write(n2, &cl); 21202a285686SKent Overstreet mutex_unlock(&n2->write_lock); 2121cafe5635SKent Overstreet rw_unlock(true, n2); 2122c37511b8SKent Overstreet } else { 2123ee811287SKent Overstreet trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); 2124c37511b8SKent Overstreet 21252a285686SKent Overstreet mutex_lock(&n1->write_lock); 21261b207d80SKent Overstreet bch_btree_insert_keys(n1, op, insert_keys, replace_key); 2127c37511b8SKent Overstreet } 2128cafe5635SKent Overstreet 212917e21a9fSKent Overstreet bch_keylist_add(&parent_keys, &n1->key); 2130b54d6934SKent Overstreet bch_btree_node_write(n1, &cl); 21312a285686SKent Overstreet mutex_unlock(&n1->write_lock); 2132cafe5635SKent Overstreet 2133cafe5635SKent Overstreet if (n3) { 2134d6fd3b11SKent Overstreet /* Depth increases, make a new root */ 21352a285686SKent Overstreet mutex_lock(&n3->write_lock); 2136cafe5635SKent Overstreet bkey_copy_key(&n3->key, &MAX_KEY); 213717e21a9fSKent Overstreet bch_btree_insert_keys(n3, op, &parent_keys, NULL); 2138b54d6934SKent Overstreet bch_btree_node_write(n3, &cl); 21392a285686SKent Overstreet mutex_unlock(&n3->write_lock); 2140cafe5635SKent Overstreet 2141b54d6934SKent Overstreet closure_sync(&cl); 2142cafe5635SKent Overstreet bch_btree_set_root(n3); 2143cafe5635SKent Overstreet rw_unlock(true, n3); 2144d6fd3b11SKent Overstreet } else if (!b->parent) { 2145d6fd3b11SKent Overstreet /* Root filled up but didn't need to be split */ 2146b54d6934SKent Overstreet closure_sync(&cl); 2147cafe5635SKent Overstreet bch_btree_set_root(n1); 2148cafe5635SKent Overstreet } else { 214917e21a9fSKent Overstreet /* Split a non root node */ 2150b54d6934SKent Overstreet closure_sync(&cl); 215117e21a9fSKent Overstreet make_btree_freeing_key(b, parent_keys.top); 215217e21a9fSKent Overstreet bch_keylist_push(&parent_keys); 215317e21a9fSKent Overstreet 215417e21a9fSKent Overstreet bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); 215517e21a9fSKent Overstreet BUG_ON(!bch_keylist_empty(&parent_keys)); 2156cafe5635SKent Overstreet } 2157cafe5635SKent Overstreet 215805335cffSKent Overstreet btree_node_free(b); 2159cafe5635SKent Overstreet rw_unlock(true, n1); 2160cafe5635SKent Overstreet 2161169ef1cfSKent Overstreet bch_time_stats_update(&b->c->btree_split_time, start_time); 2162cafe5635SKent Overstreet 2163cafe5635SKent Overstreet return 0; 2164cafe5635SKent Overstreet err_free2: 21655f5837d2SKent Overstreet bkey_put(b->c, &n2->key); 2166e8e1d468SKent Overstreet btree_node_free(n2); 2167cafe5635SKent Overstreet rw_unlock(true, n2); 2168cafe5635SKent Overstreet err_free1: 21695f5837d2SKent Overstreet bkey_put(b->c, &n1->key); 2170e8e1d468SKent Overstreet btree_node_free(n1); 2171cafe5635SKent Overstreet rw_unlock(true, n1); 2172cafe5635SKent Overstreet err: 21730a63b66dSKent Overstreet WARN(1, "bcache: btree split failed (level %u)", b->level); 21745f5837d2SKent Overstreet 2175cafe5635SKent Overstreet if (n3 == ERR_PTR(-EAGAIN) || 2176cafe5635SKent Overstreet n2 == ERR_PTR(-EAGAIN) || 2177cafe5635SKent Overstreet n1 == ERR_PTR(-EAGAIN)) 2178cafe5635SKent Overstreet return -EAGAIN; 2179cafe5635SKent Overstreet 2180cafe5635SKent Overstreet return -ENOMEM; 2181cafe5635SKent Overstreet } 2182cafe5635SKent Overstreet 218326c949f8SKent Overstreet static int bch_btree_insert_node(struct btree *b, struct btree_op *op, 2184c18536a7SKent Overstreet struct keylist *insert_keys, 21851b207d80SKent Overstreet atomic_t *journal_ref, 21861b207d80SKent Overstreet struct bkey *replace_key) 218726c949f8SKent Overstreet { 21882a285686SKent Overstreet struct closure cl; 21892a285686SKent Overstreet 21901b207d80SKent Overstreet BUG_ON(b->level && replace_key); 21911b207d80SKent Overstreet 21922a285686SKent Overstreet closure_init_stack(&cl); 21932a285686SKent Overstreet 21942a285686SKent Overstreet mutex_lock(&b->write_lock); 21952a285686SKent Overstreet 21962a285686SKent Overstreet if (write_block(b) != btree_bset_last(b) && 21972a285686SKent Overstreet b->keys.last_set_unwritten) 21982a285686SKent Overstreet bch_btree_init_next(b); /* just wrote a set */ 21992a285686SKent Overstreet 220059158fdeSKent Overstreet if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { 22012a285686SKent Overstreet mutex_unlock(&b->write_lock); 22022a285686SKent Overstreet goto split; 22032a285686SKent Overstreet } 22042a285686SKent Overstreet 22052a285686SKent Overstreet BUG_ON(write_block(b) != btree_bset_last(b)); 22062a285686SKent Overstreet 22072a285686SKent Overstreet if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { 22082a285686SKent Overstreet if (!b->level) 22092a285686SKent Overstreet bch_btree_leaf_dirty(b, journal_ref); 22102a285686SKent Overstreet else 22112a285686SKent Overstreet bch_btree_node_write(b, &cl); 22122a285686SKent Overstreet } 22132a285686SKent Overstreet 22142a285686SKent Overstreet mutex_unlock(&b->write_lock); 22152a285686SKent Overstreet 22162a285686SKent Overstreet /* wait for btree node write if necessary, after unlock */ 22172a285686SKent Overstreet closure_sync(&cl); 22182a285686SKent Overstreet 22192a285686SKent Overstreet return 0; 22202a285686SKent Overstreet split: 222126c949f8SKent Overstreet if (current->bio_list) { 222226c949f8SKent Overstreet op->lock = b->c->root->level + 1; 222317e21a9fSKent Overstreet return -EAGAIN; 222426c949f8SKent Overstreet } else if (op->lock <= b->c->root->level) { 222526c949f8SKent Overstreet op->lock = b->c->root->level + 1; 222617e21a9fSKent Overstreet return -EINTR; 222726c949f8SKent Overstreet } else { 222817e21a9fSKent Overstreet /* Invalidated all iterators */ 22293b3e9e50SKent Overstreet int ret = btree_split(b, op, insert_keys, replace_key); 22303b3e9e50SKent Overstreet 22312a285686SKent Overstreet if (bch_keylist_empty(insert_keys)) 223217e21a9fSKent Overstreet return 0; 22332a285686SKent Overstreet else if (!ret) 22342a285686SKent Overstreet return -EINTR; 22352a285686SKent Overstreet return ret; 223617e21a9fSKent Overstreet } 223726c949f8SKent Overstreet } 223826c949f8SKent Overstreet 2239e7c590ebSKent Overstreet int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, 2240e7c590ebSKent Overstreet struct bkey *check_key) 2241e7c590ebSKent Overstreet { 2242e7c590ebSKent Overstreet int ret = -EINTR; 2243e7c590ebSKent Overstreet uint64_t btree_ptr = b->key.ptr[0]; 2244e7c590ebSKent Overstreet unsigned long seq = b->seq; 2245e7c590ebSKent Overstreet struct keylist insert; 2246e7c590ebSKent Overstreet bool upgrade = op->lock == -1; 2247e7c590ebSKent Overstreet 2248e7c590ebSKent Overstreet bch_keylist_init(&insert); 2249e7c590ebSKent Overstreet 2250e7c590ebSKent Overstreet if (upgrade) { 2251e7c590ebSKent Overstreet rw_unlock(false, b); 2252e7c590ebSKent Overstreet rw_lock(true, b, b->level); 2253e7c590ebSKent Overstreet 2254e7c590ebSKent Overstreet if (b->key.ptr[0] != btree_ptr || 22552ef9ccbfSZheng Liu b->seq != seq + 1) { 22562ef9ccbfSZheng Liu op->lock = b->level; 2257e7c590ebSKent Overstreet goto out; 2258e7c590ebSKent Overstreet } 22592ef9ccbfSZheng Liu } 2260e7c590ebSKent Overstreet 2261e7c590ebSKent Overstreet SET_KEY_PTRS(check_key, 1); 2262e7c590ebSKent Overstreet get_random_bytes(&check_key->ptr[0], sizeof(uint64_t)); 2263e7c590ebSKent Overstreet 2264e7c590ebSKent Overstreet SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV); 2265e7c590ebSKent Overstreet 2266e7c590ebSKent Overstreet bch_keylist_add(&insert, check_key); 2267e7c590ebSKent Overstreet 22681b207d80SKent Overstreet ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); 2269e7c590ebSKent Overstreet 2270e7c590ebSKent Overstreet BUG_ON(!ret && !bch_keylist_empty(&insert)); 2271e7c590ebSKent Overstreet out: 2272e7c590ebSKent Overstreet if (upgrade) 2273e7c590ebSKent Overstreet downgrade_write(&b->lock); 2274e7c590ebSKent Overstreet return ret; 2275e7c590ebSKent Overstreet } 2276e7c590ebSKent Overstreet 2277cc7b8819SKent Overstreet struct btree_insert_op { 2278cc7b8819SKent Overstreet struct btree_op op; 2279cc7b8819SKent Overstreet struct keylist *keys; 2280cc7b8819SKent Overstreet atomic_t *journal_ref; 2281cc7b8819SKent Overstreet struct bkey *replace_key; 2282cc7b8819SKent Overstreet }; 2283cc7b8819SKent Overstreet 228408239ca2SWei Yongjun static int btree_insert_fn(struct btree_op *b_op, struct btree *b) 2285cafe5635SKent Overstreet { 2286cc7b8819SKent Overstreet struct btree_insert_op *op = container_of(b_op, 2287cc7b8819SKent Overstreet struct btree_insert_op, op); 2288403b6cdeSKent Overstreet 2289cc7b8819SKent Overstreet int ret = bch_btree_insert_node(b, &op->op, op->keys, 2290cc7b8819SKent Overstreet op->journal_ref, op->replace_key); 2291cc7b8819SKent Overstreet if (ret && !bch_keylist_empty(op->keys)) 2292cc7b8819SKent Overstreet return ret; 2293cc7b8819SKent Overstreet else 2294cc7b8819SKent Overstreet return MAP_DONE; 2295cafe5635SKent Overstreet } 2296cafe5635SKent Overstreet 2297cc7b8819SKent Overstreet int bch_btree_insert(struct cache_set *c, struct keylist *keys, 2298cc7b8819SKent Overstreet atomic_t *journal_ref, struct bkey *replace_key) 2299cafe5635SKent Overstreet { 2300cc7b8819SKent Overstreet struct btree_insert_op op; 2301cafe5635SKent Overstreet int ret = 0; 2302cafe5635SKent Overstreet 2303cc7b8819SKent Overstreet BUG_ON(current->bio_list); 23044f3d4014SKent Overstreet BUG_ON(bch_keylist_empty(keys)); 2305cafe5635SKent Overstreet 2306cc7b8819SKent Overstreet bch_btree_op_init(&op.op, 0); 2307cc7b8819SKent Overstreet op.keys = keys; 2308cc7b8819SKent Overstreet op.journal_ref = journal_ref; 2309cc7b8819SKent Overstreet op.replace_key = replace_key; 2310cafe5635SKent Overstreet 2311cc7b8819SKent Overstreet while (!ret && !bch_keylist_empty(keys)) { 2312cc7b8819SKent Overstreet op.op.lock = 0; 2313cc7b8819SKent Overstreet ret = bch_btree_map_leaf_nodes(&op.op, c, 2314cc7b8819SKent Overstreet &START_KEY(keys->keys), 2315cc7b8819SKent Overstreet btree_insert_fn); 2316cc7b8819SKent Overstreet } 2317cc7b8819SKent Overstreet 2318cc7b8819SKent Overstreet if (ret) { 2319cafe5635SKent Overstreet struct bkey *k; 2320cafe5635SKent Overstreet 23211b207d80SKent Overstreet pr_err("error %i", ret); 2322cafe5635SKent Overstreet 23234f3d4014SKent Overstreet while ((k = bch_keylist_pop(keys))) 23243a3b6a4eSKent Overstreet bkey_put(c, k); 2325cc7b8819SKent Overstreet } else if (op.op.insert_collision) 2326cc7b8819SKent Overstreet ret = -ESRCH; 23276054c6d4SKent Overstreet 2328cafe5635SKent Overstreet return ret; 2329cafe5635SKent Overstreet } 2330cafe5635SKent Overstreet 2331cafe5635SKent Overstreet void bch_btree_set_root(struct btree *b) 2332cafe5635SKent Overstreet { 23336f10f7d1SColy Li unsigned int i; 2334e49c7c37SKent Overstreet struct closure cl; 2335e49c7c37SKent Overstreet 2336e49c7c37SKent Overstreet closure_init_stack(&cl); 2337cafe5635SKent Overstreet 2338c37511b8SKent Overstreet trace_bcache_btree_set_root(b); 2339c37511b8SKent Overstreet 2340cafe5635SKent Overstreet BUG_ON(!b->written); 2341cafe5635SKent Overstreet 2342cafe5635SKent Overstreet for (i = 0; i < KEY_PTRS(&b->key); i++) 2343cafe5635SKent Overstreet BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); 2344cafe5635SKent Overstreet 2345cafe5635SKent Overstreet mutex_lock(&b->c->bucket_lock); 2346cafe5635SKent Overstreet list_del_init(&b->list); 2347cafe5635SKent Overstreet mutex_unlock(&b->c->bucket_lock); 2348cafe5635SKent Overstreet 2349cafe5635SKent Overstreet b->c->root = b; 2350cafe5635SKent Overstreet 2351e49c7c37SKent Overstreet bch_journal_meta(b->c, &cl); 2352e49c7c37SKent Overstreet closure_sync(&cl); 2353cafe5635SKent Overstreet } 2354cafe5635SKent Overstreet 235548dad8baSKent Overstreet /* Map across nodes or keys */ 235648dad8baSKent Overstreet 235748dad8baSKent Overstreet static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, 235848dad8baSKent Overstreet struct bkey *from, 235948dad8baSKent Overstreet btree_map_nodes_fn *fn, int flags) 236048dad8baSKent Overstreet { 236148dad8baSKent Overstreet int ret = MAP_CONTINUE; 236248dad8baSKent Overstreet 236348dad8baSKent Overstreet if (b->level) { 236448dad8baSKent Overstreet struct bkey *k; 236548dad8baSKent Overstreet struct btree_iter iter; 236648dad8baSKent Overstreet 2367c052dd9aSKent Overstreet bch_btree_iter_init(&b->keys, &iter, from); 236848dad8baSKent Overstreet 2369a85e968eSKent Overstreet while ((k = bch_btree_iter_next_filter(&iter, &b->keys, 237048dad8baSKent Overstreet bch_ptr_bad))) { 237148dad8baSKent Overstreet ret = btree(map_nodes_recurse, k, b, 237248dad8baSKent Overstreet op, from, fn, flags); 237348dad8baSKent Overstreet from = NULL; 237448dad8baSKent Overstreet 237548dad8baSKent Overstreet if (ret != MAP_CONTINUE) 237648dad8baSKent Overstreet return ret; 237748dad8baSKent Overstreet } 237848dad8baSKent Overstreet } 237948dad8baSKent Overstreet 238048dad8baSKent Overstreet if (!b->level || flags == MAP_ALL_NODES) 238148dad8baSKent Overstreet ret = fn(op, b); 238248dad8baSKent Overstreet 238348dad8baSKent Overstreet return ret; 238448dad8baSKent Overstreet } 238548dad8baSKent Overstreet 238648dad8baSKent Overstreet int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, 238748dad8baSKent Overstreet struct bkey *from, btree_map_nodes_fn *fn, int flags) 238848dad8baSKent Overstreet { 2389b54d6934SKent Overstreet return btree_root(map_nodes_recurse, c, op, from, fn, flags); 239048dad8baSKent Overstreet } 239148dad8baSKent Overstreet 239248dad8baSKent Overstreet static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, 239348dad8baSKent Overstreet struct bkey *from, btree_map_keys_fn *fn, 239448dad8baSKent Overstreet int flags) 239548dad8baSKent Overstreet { 239648dad8baSKent Overstreet int ret = MAP_CONTINUE; 239748dad8baSKent Overstreet struct bkey *k; 239848dad8baSKent Overstreet struct btree_iter iter; 239948dad8baSKent Overstreet 2400c052dd9aSKent Overstreet bch_btree_iter_init(&b->keys, &iter, from); 240148dad8baSKent Overstreet 2402a85e968eSKent Overstreet while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { 240348dad8baSKent Overstreet ret = !b->level 240448dad8baSKent Overstreet ? fn(op, b, k) 240548dad8baSKent Overstreet : btree(map_keys_recurse, k, b, op, from, fn, flags); 240648dad8baSKent Overstreet from = NULL; 240748dad8baSKent Overstreet 240848dad8baSKent Overstreet if (ret != MAP_CONTINUE) 240948dad8baSKent Overstreet return ret; 241048dad8baSKent Overstreet } 241148dad8baSKent Overstreet 241248dad8baSKent Overstreet if (!b->level && (flags & MAP_END_KEY)) 241348dad8baSKent Overstreet ret = fn(op, b, &KEY(KEY_INODE(&b->key), 241448dad8baSKent Overstreet KEY_OFFSET(&b->key), 0)); 241548dad8baSKent Overstreet 241648dad8baSKent Overstreet return ret; 241748dad8baSKent Overstreet } 241848dad8baSKent Overstreet 241948dad8baSKent Overstreet int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, 242048dad8baSKent Overstreet struct bkey *from, btree_map_keys_fn *fn, int flags) 242148dad8baSKent Overstreet { 2422b54d6934SKent Overstreet return btree_root(map_keys_recurse, c, op, from, fn, flags); 242348dad8baSKent Overstreet } 242448dad8baSKent Overstreet 2425cafe5635SKent Overstreet /* Keybuf code */ 2426cafe5635SKent Overstreet 2427cafe5635SKent Overstreet static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r) 2428cafe5635SKent Overstreet { 2429cafe5635SKent Overstreet /* Overlapping keys compare equal */ 2430cafe5635SKent Overstreet if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0) 2431cafe5635SKent Overstreet return -1; 2432cafe5635SKent Overstreet if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0) 2433cafe5635SKent Overstreet return 1; 2434cafe5635SKent Overstreet return 0; 2435cafe5635SKent Overstreet } 2436cafe5635SKent Overstreet 2437cafe5635SKent Overstreet static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, 2438cafe5635SKent Overstreet struct keybuf_key *r) 2439cafe5635SKent Overstreet { 2440cafe5635SKent Overstreet return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1); 2441cafe5635SKent Overstreet } 2442cafe5635SKent Overstreet 244348dad8baSKent Overstreet struct refill { 244448dad8baSKent Overstreet struct btree_op op; 24456f10f7d1SColy Li unsigned int nr_found; 244648dad8baSKent Overstreet struct keybuf *buf; 244748dad8baSKent Overstreet struct bkey *end; 244848dad8baSKent Overstreet keybuf_pred_fn *pred; 244948dad8baSKent Overstreet }; 245048dad8baSKent Overstreet 245148dad8baSKent Overstreet static int refill_keybuf_fn(struct btree_op *op, struct btree *b, 245248dad8baSKent Overstreet struct bkey *k) 2453cafe5635SKent Overstreet { 245448dad8baSKent Overstreet struct refill *refill = container_of(op, struct refill, op); 245548dad8baSKent Overstreet struct keybuf *buf = refill->buf; 245648dad8baSKent Overstreet int ret = MAP_CONTINUE; 2457cafe5635SKent Overstreet 24582d6cb6edSTang Junhui if (bkey_cmp(k, refill->end) > 0) { 245948dad8baSKent Overstreet ret = MAP_DONE; 246048dad8baSKent Overstreet goto out; 2461cafe5635SKent Overstreet } 2462cafe5635SKent Overstreet 246348dad8baSKent Overstreet if (!KEY_SIZE(k)) /* end key */ 246448dad8baSKent Overstreet goto out; 2465cafe5635SKent Overstreet 246648dad8baSKent Overstreet if (refill->pred(buf, k)) { 2467cafe5635SKent Overstreet struct keybuf_key *w; 2468cafe5635SKent Overstreet 2469cafe5635SKent Overstreet spin_lock(&buf->lock); 2470cafe5635SKent Overstreet 2471cafe5635SKent Overstreet w = array_alloc(&buf->freelist); 247248dad8baSKent Overstreet if (!w) { 247348dad8baSKent Overstreet spin_unlock(&buf->lock); 247448dad8baSKent Overstreet return MAP_DONE; 247548dad8baSKent Overstreet } 2476cafe5635SKent Overstreet 2477cafe5635SKent Overstreet w->private = NULL; 2478cafe5635SKent Overstreet bkey_copy(&w->key, k); 2479cafe5635SKent Overstreet 2480cafe5635SKent Overstreet if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) 2481cafe5635SKent Overstreet array_free(&buf->freelist, w); 248248a915a8SKent Overstreet else 248348a915a8SKent Overstreet refill->nr_found++; 2484cafe5635SKent Overstreet 248548dad8baSKent Overstreet if (array_freelist_empty(&buf->freelist)) 248648dad8baSKent Overstreet ret = MAP_DONE; 248748dad8baSKent Overstreet 2488cafe5635SKent Overstreet spin_unlock(&buf->lock); 2489cafe5635SKent Overstreet } 249048dad8baSKent Overstreet out: 249148dad8baSKent Overstreet buf->last_scanned = *k; 249248dad8baSKent Overstreet return ret; 2493cafe5635SKent Overstreet } 2494cafe5635SKent Overstreet 2495cafe5635SKent Overstreet void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, 249672c27061SKent Overstreet struct bkey *end, keybuf_pred_fn *pred) 2497cafe5635SKent Overstreet { 2498cafe5635SKent Overstreet struct bkey start = buf->last_scanned; 249948dad8baSKent Overstreet struct refill refill; 2500cafe5635SKent Overstreet 2501cafe5635SKent Overstreet cond_resched(); 2502cafe5635SKent Overstreet 2503b54d6934SKent Overstreet bch_btree_op_init(&refill.op, -1); 250448a915a8SKent Overstreet refill.nr_found = 0; 250548dad8baSKent Overstreet refill.buf = buf; 250648dad8baSKent Overstreet refill.end = end; 250748dad8baSKent Overstreet refill.pred = pred; 250848dad8baSKent Overstreet 250948dad8baSKent Overstreet bch_btree_map_keys(&refill.op, c, &buf->last_scanned, 251048dad8baSKent Overstreet refill_keybuf_fn, MAP_END_KEY); 2511cafe5635SKent Overstreet 251248a915a8SKent Overstreet trace_bcache_keyscan(refill.nr_found, 2513cafe5635SKent Overstreet KEY_INODE(&start), KEY_OFFSET(&start), 251448a915a8SKent Overstreet KEY_INODE(&buf->last_scanned), 251548a915a8SKent Overstreet KEY_OFFSET(&buf->last_scanned)); 2516cafe5635SKent Overstreet 2517cafe5635SKent Overstreet spin_lock(&buf->lock); 2518cafe5635SKent Overstreet 2519cafe5635SKent Overstreet if (!RB_EMPTY_ROOT(&buf->keys)) { 2520cafe5635SKent Overstreet struct keybuf_key *w; 25211fae7cf0SColy Li 2522cafe5635SKent Overstreet w = RB_FIRST(&buf->keys, struct keybuf_key, node); 2523cafe5635SKent Overstreet buf->start = START_KEY(&w->key); 2524cafe5635SKent Overstreet 2525cafe5635SKent Overstreet w = RB_LAST(&buf->keys, struct keybuf_key, node); 2526cafe5635SKent Overstreet buf->end = w->key; 2527cafe5635SKent Overstreet } else { 2528cafe5635SKent Overstreet buf->start = MAX_KEY; 2529cafe5635SKent Overstreet buf->end = MAX_KEY; 2530cafe5635SKent Overstreet } 2531cafe5635SKent Overstreet 2532cafe5635SKent Overstreet spin_unlock(&buf->lock); 2533cafe5635SKent Overstreet } 2534cafe5635SKent Overstreet 2535cafe5635SKent Overstreet static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w) 2536cafe5635SKent Overstreet { 2537cafe5635SKent Overstreet rb_erase(&w->node, &buf->keys); 2538cafe5635SKent Overstreet array_free(&buf->freelist, w); 2539cafe5635SKent Overstreet } 2540cafe5635SKent Overstreet 2541cafe5635SKent Overstreet void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w) 2542cafe5635SKent Overstreet { 2543cafe5635SKent Overstreet spin_lock(&buf->lock); 2544cafe5635SKent Overstreet __bch_keybuf_del(buf, w); 2545cafe5635SKent Overstreet spin_unlock(&buf->lock); 2546cafe5635SKent Overstreet } 2547cafe5635SKent Overstreet 2548cafe5635SKent Overstreet bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, 2549cafe5635SKent Overstreet struct bkey *end) 2550cafe5635SKent Overstreet { 2551cafe5635SKent Overstreet bool ret = false; 2552cafe5635SKent Overstreet struct keybuf_key *p, *w, s; 25531fae7cf0SColy Li 2554cafe5635SKent Overstreet s.key = *start; 2555cafe5635SKent Overstreet 2556cafe5635SKent Overstreet if (bkey_cmp(end, &buf->start) <= 0 || 2557cafe5635SKent Overstreet bkey_cmp(start, &buf->end) >= 0) 2558cafe5635SKent Overstreet return false; 2559cafe5635SKent Overstreet 2560cafe5635SKent Overstreet spin_lock(&buf->lock); 2561cafe5635SKent Overstreet w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); 2562cafe5635SKent Overstreet 2563cafe5635SKent Overstreet while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) { 2564cafe5635SKent Overstreet p = w; 2565cafe5635SKent Overstreet w = RB_NEXT(w, node); 2566cafe5635SKent Overstreet 2567cafe5635SKent Overstreet if (p->private) 2568cafe5635SKent Overstreet ret = true; 2569cafe5635SKent Overstreet else 2570cafe5635SKent Overstreet __bch_keybuf_del(buf, p); 2571cafe5635SKent Overstreet } 2572cafe5635SKent Overstreet 2573cafe5635SKent Overstreet spin_unlock(&buf->lock); 2574cafe5635SKent Overstreet return ret; 2575cafe5635SKent Overstreet } 2576cafe5635SKent Overstreet 2577cafe5635SKent Overstreet struct keybuf_key *bch_keybuf_next(struct keybuf *buf) 2578cafe5635SKent Overstreet { 2579cafe5635SKent Overstreet struct keybuf_key *w; 25801fae7cf0SColy Li 2581cafe5635SKent Overstreet spin_lock(&buf->lock); 2582cafe5635SKent Overstreet 2583cafe5635SKent Overstreet w = RB_FIRST(&buf->keys, struct keybuf_key, node); 2584cafe5635SKent Overstreet 2585cafe5635SKent Overstreet while (w && w->private) 2586cafe5635SKent Overstreet w = RB_NEXT(w, node); 2587cafe5635SKent Overstreet 2588cafe5635SKent Overstreet if (w) 2589cafe5635SKent Overstreet w->private = ERR_PTR(-EINTR); 2590cafe5635SKent Overstreet 2591cafe5635SKent Overstreet spin_unlock(&buf->lock); 2592cafe5635SKent Overstreet return w; 2593cafe5635SKent Overstreet } 2594cafe5635SKent Overstreet 2595cafe5635SKent Overstreet struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, 2596cafe5635SKent Overstreet struct keybuf *buf, 259772c27061SKent Overstreet struct bkey *end, 259872c27061SKent Overstreet keybuf_pred_fn *pred) 2599cafe5635SKent Overstreet { 2600cafe5635SKent Overstreet struct keybuf_key *ret; 2601cafe5635SKent Overstreet 2602cafe5635SKent Overstreet while (1) { 2603cafe5635SKent Overstreet ret = bch_keybuf_next(buf); 2604cafe5635SKent Overstreet if (ret) 2605cafe5635SKent Overstreet break; 2606cafe5635SKent Overstreet 2607cafe5635SKent Overstreet if (bkey_cmp(&buf->last_scanned, end) >= 0) { 2608cafe5635SKent Overstreet pr_debug("scan finished"); 2609cafe5635SKent Overstreet break; 2610cafe5635SKent Overstreet } 2611cafe5635SKent Overstreet 261272c27061SKent Overstreet bch_refill_keybuf(c, buf, end, pred); 2613cafe5635SKent Overstreet } 2614cafe5635SKent Overstreet 2615cafe5635SKent Overstreet return ret; 2616cafe5635SKent Overstreet } 2617cafe5635SKent Overstreet 261872c27061SKent Overstreet void bch_keybuf_init(struct keybuf *buf) 2619cafe5635SKent Overstreet { 2620cafe5635SKent Overstreet buf->last_scanned = MAX_KEY; 2621cafe5635SKent Overstreet buf->keys = RB_ROOT; 2622cafe5635SKent Overstreet 2623cafe5635SKent Overstreet spin_lock_init(&buf->lock); 2624cafe5635SKent Overstreet array_allocator_init(&buf->freelist); 2625cafe5635SKent Overstreet } 2626