// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72
#define MAX_GC_TIMES		100
#define MIN_GC_NODES		100
#define GC_SLEEP_MS		100

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

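/*
 * Hash a key's first pointer by bucket number and generation; used by
 * mca_hash()/mca_find() to look nodes up in the in-memory node hash table.
 */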
#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

static struct workqueue_struct *btree_io_wq;

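/*
 * True when this lookup should write lock node b: op->lock holds the
 * highest btree level at which write locks are taken.
 */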
#define insert_lock(s, b)	((b)->level <= (s)->lock)

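/*
 * Address, within the node's buffer, of the first block not yet written
 * back to disk.
 */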
static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));
}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

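/*
 * Checksum a bset: seeded with the node's first pointer, covering everything
 * in the bset after the csum field itself (hence the + 8).
 */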
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = crc64_be(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter iter;

	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
	 */
	iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
	iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
	iter.heap.nr = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter.b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->cache->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(&iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c->cache));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c->cache))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));
out:
	mempool_free(iter.heap.data, &b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

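/*
 * A btree node write has finished: drop what it pinned. w->prio_blocked
 * keeps the allocator waiting until the node is persisted, and w->journal
 * pins the oldest journal entry containing keys in this node.
 */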
static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static CLOSURE_CALLBACK(btree_node_write_unlock)
{
	closure_type(b, struct btree, io);

	up(&b->io_mutex);
}

static CLOSURE_CALLBACK(__btree_node_write_done)
{
	closure_type(b, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static CLOSURE_CALLBACK(btree_node_write_done)
{
	closure_type(b, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(&cl->work);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}

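/*
 * Issue the actual write. If bounce pages can be allocated, the bset is
 * copied into them and the write completes asynchronously; otherwise we
 * write directly from the node's buffer and must wait for the IO before
 * the buffer can be touched again.
 */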
static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c->cache));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		struct bio_vec *bv;
		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, b->bio, iter_all) {
			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
			addr += PAGE_SIZE;
		}

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		/*
		 * No problem for multipage bvec since the bio is
		 * just allocated
		 */
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

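/*
 * Write out the node's unwritten keys. Caller must hold b->write_lock;
 * the write's closure runs against @parent if given, else against the
 * cache set's closure.
 */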
void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
			&b->c->cache->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c->cache));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned int nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

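/*
 * Mark a leaf dirty after inserting keys: schedule a delayed write and pin
 * the journal entry (journal_ref) the keys came from, so the journal can't
 * be reclaimed before this node is flushed.
 */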
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

	/*
	 * w->journal is always the oldest journal pin of all bkeys
	 * in the leaf node, to make sure the oldest jset seq won't
	 * be increased before this btree node is flushed.
	 */
	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

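/*
 * Reserve enough in-memory nodes that btree operations can always make
 * forward progress: eight per level of the tree, plus sixteen.
 */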
#define mca_reserve(c)	(((!IS_ERR_OR_NULL(c->root) && c->root->level)	\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned int btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned int,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

#define cmp_int(l, r)		((l > r) - (l < r))

#ifdef CONFIG_PROVE_LOCKING
static int btree_lock_cmp_fn(const struct lockdep_map *_a,
			     const struct lockdep_map *_b)
{
	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
	const struct btree *b = container_of(_b, struct btree, lock.dep_map);

	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
}

static void btree_lock_print_fn(const struct lockdep_map *map)
{
	const struct btree *b = container_of(map, struct btree, lock.dep_map);

	printk(KERN_CONT " l=%u %llu:%llu", b->level,
	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
}
#endif

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	/*
	 * kzalloc() is necessary here for initialization,
	 * see code comments in bch_btree_keys_init().
	 */
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

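/*
 * Try to evict a node from the btree cache. Fails with -ENOMEM if the
 * write lock can't be taken, the node's buffer is smaller than min_order,
 * or (when !flush) the node is dirty or has a write in flight.
 */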
static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

retry:
	/*
	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
	 * __bch_btree_node_write(). To avoid an extra flush, acquire
	 * b->write_lock before checking the BTREE_NODE_dirty bit.
	 */
	mutex_lock(&b->write_lock);
	/*
	 * If this btree node is selected in btree_flush_write() by journal
	 * code, delay and retry until the node is flushed by journal code
	 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		pr_debug("bnode %p is flushing by journal, retry\n", b);
		mutex_unlock(&b->write_lock);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = shrink->private_data;
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int btree_cache_used;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	if (nr == 0)
		nr = 1;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	btree_cache_used = c->btree_cache_used;
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
		if (nr <= 0)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
		nr--;
		i++;
	}

	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		if (nr <= 0 || i >= btree_cache_used)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}

		nr--;
		i++;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed * c->btree_pages;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = shrink->private_data;

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink)
		shrinker_free(c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		/*
		 * This function is called by cache_set_free(), no I/O
		 * request on cache now, it is unnecessary to acquire
		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
		 */
		if (btree_node_dirty(b)) {
			btree_complete_write(b, btree_current_write(b));
			clear_bit(BTREE_NODE_dirty, &b->flags);
		}
		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL|__GFP_COMP,
				 ilog2(meta_bucket_pages(&c->cache->sb)));
	if (!c->verify_ondisk) {
		/*
		 * Don't worry about the mca_reserve() buckets
		 * allocated in the previous for-loop; they will be
		 * handled properly in bch_cache_set_unregister().
		 */
		return -ENOMEM;
	}

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid);
	if (!c->shrink) {
		pr_warn("bcache: %s: could not allocate shrinker\n", __func__);
		return 0;
	}

	c->shrink->count_objects = bch_mca_count;
	c->shrink->scan_objects = bch_mca_scan;
	c->shrink->seeks = 4;
	c->shrink->batch = c->btree_pages * 2;
	c->shrink->private_data = c;

	shrinker_register(c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

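/*
 * Take the open coded cannibalize lock: only one thread may cannibalize
 * cached nodes at a time. If another thread holds it, put the op to sleep
 * on btree_cache_wait and return -EINTR so the caller can retry.
 */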
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (likely(c->btree_cache_alloc_lock == NULL)) {
		c->btree_cache_alloc_lock = current;
	} else if (c->btree_cache_alloc_lock != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock(&c->btree_cannibalize_lock);
		return -EINTR;
	}
	spin_unlock(&c->btree_cannibalize_lock);

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which a
 * cannibalize_bucket() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
void bch_cannibalize_unlock(struct cache_set *c)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
	spin_unlock(&c->btree_cannibalize_lock);
}

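/*
 * Allocate the in-memory node for key k at the given level: reuse a node
 * from btree_cache_freeable, then an empty struct from btree_cache_freed,
 * then allocate fresh, cannibalizing a cached node as a last resort.
 */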
static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 *
 * Note: Only error code or btree pointer will be returned, it is unnecessary
 * for callers to check NULL pointer.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	b->parent = parent;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	return b;
}

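/*
 * Read a child node into the cache ahead of traversal; the node is
 * unlocked again as soon as the read completes.
 */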
static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

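/*
 * Free an on-disk btree node: throw away its pending write state and
 * return its bucket to the allocator. The root is never freed this way.
 */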
static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

retry:
	mutex_lock(&b->write_lock);
	/*
	 * If the btree node is selected and flushing in btree_flush_write(),
	 * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
	 * then it is safe to free the btree node here. Otherwise freeing it
	 * would race with the journal flush.
	 */
	if (btree_node_journal_flush(b)) {
		mutex_unlock(&b->write_lock);
		pr_debug("bnode %p journal_flush set, retry\n", b);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b)) {
		btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);
	}

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

111731f5b956SColy Li /*
111831f5b956SColy Li * Only an error code or a btree pointer will be returned; it is
111931f5b956SColy Li * unnecessary for callers to check for a NULL pointer.
112031f5b956SColy Li */
1121c5aa4a31SSlava Pestov struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
11222452cc89SSlava Pestov int level, bool wait,
11232452cc89SSlava Pestov struct btree *parent)
1124cafe5635SKent Overstreet {
1125cafe5635SKent Overstreet BKEY_PADDED(key) k;
112680fca8a1SZheng Wang struct btree *b;
1127cafe5635SKent Overstreet
1128cafe5635SKent Overstreet mutex_lock(&c->bucket_lock);
1129cafe5635SKent Overstreet retry:
113080fca8a1SZheng Wang /* return ERR_PTR(-EAGAIN) when it fails */
113180fca8a1SZheng Wang b = ERR_PTR(-EAGAIN);
113217e4aed8SColy Li if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
1133cafe5635SKent Overstreet goto err;
1134cafe5635SKent Overstreet
11353a3b6a4eSKent Overstreet bkey_put(c, &k.key);
1136cafe5635SKent Overstreet SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1137cafe5635SKent Overstreet
11380a63b66dSKent Overstreet b = mca_alloc(c, op, &k.key, level);
1139cafe5635SKent Overstreet if (IS_ERR(b))
1140cafe5635SKent Overstreet goto err_free;
1141cafe5635SKent Overstreet
1142cafe5635SKent Overstreet if (!b) {
1143b1a67b0fSKent Overstreet cache_bug(c,
1144b1a67b0fSKent Overstreet "Tried to allocate bucket that was in btree cache");
1145cafe5635SKent Overstreet goto retry;
1146cafe5635SKent Overstreet }
1147cafe5635SKent Overstreet
11482452cc89SSlava Pestov b->parent = parent;
11494a784266SColy Li bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1150cafe5635SKent Overstreet
1151cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock);
1152c37511b8SKent Overstreet
1153c37511b8SKent Overstreet trace_bcache_btree_node_alloc(b);
1154cafe5635SKent Overstreet return b;
1155cafe5635SKent Overstreet err_free:
1156cafe5635SKent Overstreet bch_bucket_free(c, &k.key);
1157cafe5635SKent Overstreet err:
1158cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock);
1159c37511b8SKent Overstreet
1160913dc33fSSlava Pestov trace_bcache_btree_node_alloc_fail(c);
1161cafe5635SKent Overstreet return b;
1162cafe5635SKent Overstreet }
1163cafe5635SKent Overstreet
1164c5aa4a31SSlava Pestov static struct btree *bch_btree_node_alloc(struct cache_set *c,
11652452cc89SSlava Pestov struct btree_op *op, int level,
11662452cc89SSlava Pestov struct btree *parent)
1167c5aa4a31SSlava Pestov {
11682452cc89SSlava Pestov return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1169c5aa4a31SSlava Pestov }
1170c5aa4a31SSlava Pestov
11710a63b66dSKent Overstreet static struct btree *btree_node_alloc_replacement(struct btree *b,
11720a63b66dSKent Overstreet struct btree_op *op)
1173cafe5635SKent Overstreet {
11742452cc89SSlava Pestov struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
11751fae7cf0SColy Li
1176028ddcacSZheng Wang if (!IS_ERR(n)) {
11772a285686SKent Overstreet mutex_lock(&n->write_lock);
117889ebb4a2SKent Overstreet bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
117967539e85SKent Overstreet bkey_copy_key(&n->key, &b->key);
11802a285686SKent Overstreet mutex_unlock(&n->write_lock);
118167539e85SKent Overstreet }
1182cafe5635SKent Overstreet
1183cafe5635SKent Overstreet return n;
1184cafe5635SKent Overstreet }
1185cafe5635SKent Overstreet
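/*
 * Build the key that retires btree node b: copy b's key, zero out the
 * search key, and bump the generation of each bucket it points to so
 * that any remaining pointers to the node become stale. Also bumps
 * c->prio_blocked while the free is pending.
 */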
11868835c123SKent Overstreet static void make_btree_freeing_key(struct btree *b, struct bkey *k)
11878835c123SKent Overstreet {
11886f10f7d1SColy Li unsigned int i;
11898835c123SKent Overstreet
119005335cffSKent Overstreet mutex_lock(&b->c->bucket_lock);
119105335cffSKent Overstreet
119205335cffSKent Overstreet atomic_inc(&b->c->prio_blocked);
119305335cffSKent Overstreet
11948835c123SKent Overstreet bkey_copy(k, &b->key);
11958835c123SKent Overstreet bkey_copy_key(k, &ZERO_KEY);
11968835c123SKent Overstreet
119705335cffSKent Overstreet for (i = 0; i < KEY_PTRS(k); i++)
119805335cffSKent Overstreet SET_PTR_GEN(k, i,
119911e9560eSChristoph Hellwig bch_inc_gen(b->c->cache,
120005335cffSKent Overstreet PTR_BUCKET(b->c, &b->key, i)));
12018835c123SKent Overstreet
120205335cffSKent Overstreet mutex_unlock(&b->c->bucket_lock);
12038835c123SKent Overstreet }
12048835c123SKent Overstreet
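/*
 * Check that enough buckets remain in the btree reserve to split every
 * level between the root and b (two buckets per level, plus one). If
 * not, queue op on btree_cache_wait and return -EINTR so the caller
 * unwinds and retries; otherwise take (or wait for) the cannibalize
 * lock.
 */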
120578365411SKent Overstreet static int btree_check_reserve(struct btree *b, struct btree_op *op)
120678365411SKent Overstreet {
120778365411SKent Overstreet struct cache_set *c = b->c;
120808fdb2cdSColy Li struct cache *ca = c->cache;
120908fdb2cdSColy Li unsigned int reserve = (c->root->level - b->level) * 2 + 1;
121078365411SKent Overstreet
121178365411SKent Overstreet mutex_lock(&c->bucket_lock);
121278365411SKent Overstreet
121378365411SKent Overstreet if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
121478365411SKent Overstreet if (op)
12150a63b66dSKent Overstreet prepare_to_wait(&c->btree_cache_wait, &op->wait,
121678365411SKent Overstreet TASK_UNINTERRUPTIBLE);
12170a63b66dSKent Overstreet mutex_unlock(&c->bucket_lock);
12180a63b66dSKent Overstreet return -EINTR;
121978365411SKent Overstreet }
122078365411SKent Overstreet
122178365411SKent Overstreet mutex_unlock(&c->bucket_lock);
12220a63b66dSKent Overstreet
12230a63b66dSKent Overstreet return mca_cannibalize_lock(b->c, op);
122478365411SKent Overstreet }
122578365411SKent Overstreet
1226cafe5635SKent Overstreet /* Garbage collection */
1227cafe5635SKent Overstreet
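/*
 * Mark the buckets that k points to for gc: update last_gc, classify
 * each bucket as metadata, dirty, or reclaimable, account the sectors
 * used, and return how many generations stale the stalest pointer is.
 */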
1228487dded8SKent Overstreet static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1229487dded8SKent Overstreet struct bkey *k)
1230cafe5635SKent Overstreet {
1231cafe5635SKent Overstreet uint8_t stale = 0;
12326f10f7d1SColy Li unsigned int i;
1233cafe5635SKent Overstreet struct bucket *g;
1234cafe5635SKent Overstreet
1235cafe5635SKent Overstreet /*
1236cafe5635SKent Overstreet * ptr_invalid() can't return true for the keys that mark btree nodes as
1237cafe5635SKent Overstreet * freed, but since ptr_bad() returns true we'll never actually use them
1238cafe5635SKent Overstreet * for anything and thus we don't want to mark their pointers here
1239cafe5635SKent Overstreet */
1240cafe5635SKent Overstreet if (!bkey_cmp(k, &ZERO_KEY))
1241cafe5635SKent Overstreet return stale;
1242cafe5635SKent Overstreet
1243cafe5635SKent Overstreet for (i = 0; i < KEY_PTRS(k); i++) {
1244cafe5635SKent Overstreet if (!ptr_available(c, k, i))
1245cafe5635SKent Overstreet continue;
1246cafe5635SKent Overstreet
1247cafe5635SKent Overstreet g = PTR_BUCKET(c, k, i);
1248cafe5635SKent Overstreet
12493a2fd9d5SKent Overstreet if (gen_after(g->last_gc, PTR_GEN(k, i)))
12503a2fd9d5SKent Overstreet g->last_gc = PTR_GEN(k, i);
1251cafe5635SKent Overstreet
1252cafe5635SKent Overstreet if (ptr_stale(c, k, i)) {
1253cafe5635SKent Overstreet stale = max(stale, ptr_stale(c, k, i));
1254cafe5635SKent Overstreet continue;
1255cafe5635SKent Overstreet }
1256cafe5635SKent Overstreet
1257cafe5635SKent Overstreet cache_bug_on(GC_MARK(g) &&
1258cafe5635SKent Overstreet (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1259cafe5635SKent Overstreet c, "inconsistent ptrs: mark = %llu, level = %i",
1260cafe5635SKent Overstreet GC_MARK(g), level);
1261cafe5635SKent Overstreet
1262cafe5635SKent Overstreet if (level)
1263cafe5635SKent Overstreet SET_GC_MARK(g, GC_MARK_METADATA);
1264cafe5635SKent Overstreet else if (KEY_DIRTY(k))
1265cafe5635SKent Overstreet SET_GC_MARK(g, GC_MARK_DIRTY);
12664fe6a816SKent Overstreet else if (!GC_MARK(g))
12674fe6a816SKent Overstreet SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1268cafe5635SKent Overstreet
1269cafe5635SKent Overstreet /* guard against overflow */
12706f10f7d1SColy Li SET_GC_SECTORS_USED(g, min_t(unsigned int,
1271cafe5635SKent Overstreet GC_SECTORS_USED(g) + KEY_SIZE(k),
127294717447SDarrick J. Wong MAX_GC_SECTORS_USED));
1273cafe5635SKent Overstreet
1274cafe5635SKent Overstreet BUG_ON(!GC_SECTORS_USED(g));
1275cafe5635SKent Overstreet }
1276cafe5635SKent Overstreet
1277cafe5635SKent Overstreet return stale;
1278cafe5635SKent Overstreet }
1279cafe5635SKent Overstreet
1280cafe5635SKent Overstreet #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1281cafe5635SKent Overstreet
1282487dded8SKent Overstreet void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1283487dded8SKent Overstreet {
12846f10f7d1SColy Li unsigned int i;
1285487dded8SKent Overstreet
1286487dded8SKent Overstreet for (i = 0; i < KEY_PTRS(k); i++)
1287487dded8SKent Overstreet if (ptr_available(c, k, i) &&
1288487dded8SKent Overstreet !ptr_stale(c, k, i)) {
1289487dded8SKent Overstreet struct bucket *b = PTR_BUCKET(c, k, i);
1290487dded8SKent Overstreet
1291487dded8SKent Overstreet b->gen = PTR_GEN(k, i);
1292487dded8SKent Overstreet
1293487dded8SKent Overstreet if (level && bkey_cmp(k, &ZERO_KEY))
1294487dded8SKent Overstreet b->prio = BTREE_PRIO;
1295487dded8SKent Overstreet else if (!level && b->prio == BTREE_PRIO)
1296487dded8SKent Overstreet b->prio = INITIAL_PRIO;
1297487dded8SKent Overstreet }
1298487dded8SKent Overstreet
1299487dded8SKent Overstreet __bch_btree_mark_key(c, level, k);
1300487dded8SKent Overstreet }
1301487dded8SKent Overstreet
1302d44c2f9eSTang Junhui void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1303d44c2f9eSTang Junhui {
1304d44c2f9eSTang Junhui stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1305d44c2f9eSTang Junhui }
1306d44c2f9eSTang Junhui
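/*
 * Mark every key in b for gc and decide whether the node should be
 * rewritten: rewrite if gc_always_rewrite is set, if a pointer is more
 * than 10 generations stale, or if more than half of the keys are bad.
 */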
1307a1f0358bSKent Overstreet static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1308cafe5635SKent Overstreet {
1309cafe5635SKent Overstreet uint8_t stale = 0;
13106f10f7d1SColy Li unsigned int keys = 0, good_keys = 0;
1311cafe5635SKent Overstreet struct bkey *k;
1312*866898efSKuan-Wei Chiu struct btree_iter iter;
1313cafe5635SKent Overstreet struct bset_tree *t;
1314cafe5635SKent Overstreet
1315*866898efSKuan-Wei Chiu min_heap_init(&iter.heap, NULL, MAX_BSETS);
1316*866898efSKuan-Wei Chiu
1317cafe5635SKent Overstreet gc->nodes++;
1318cafe5635SKent Overstreet
1319c052dd9aSKent Overstreet for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1320cafe5635SKent Overstreet stale = max(stale, btree_mark_key(b, k));
1321a1f0358bSKent Overstreet keys++;
1322cafe5635SKent Overstreet
1323a85e968eSKent Overstreet if (bch_ptr_bad(&b->keys, k))
1324cafe5635SKent Overstreet continue;
1325cafe5635SKent Overstreet
1326cafe5635SKent Overstreet gc->key_bytes += bkey_u64s(k);
1327cafe5635SKent Overstreet gc->nkeys++;
1328a1f0358bSKent Overstreet good_keys++;
1329cafe5635SKent Overstreet
1330cafe5635SKent Overstreet gc->data += KEY_SIZE(k);
1331cafe5635SKent Overstreet }
1332cafe5635SKent Overstreet
1333a85e968eSKent Overstreet for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1334cafe5635SKent Overstreet btree_bug_on(t->size &&
1335a85e968eSKent Overstreet bset_written(&b->keys, t) &&
1336cafe5635SKent Overstreet bkey_cmp(&b->key, &t->end) < 0,
1337cafe5635SKent Overstreet b, "found short btree key in gc");
1338cafe5635SKent Overstreet
1339a1f0358bSKent Overstreet if (b->c->gc_always_rewrite)
1340a1f0358bSKent Overstreet return true;
1341a1f0358bSKent Overstreet
1342a1f0358bSKent Overstreet if (stale > 10)
1343a1f0358bSKent Overstreet return true;
1344a1f0358bSKent Overstreet
1345a1f0358bSKent Overstreet if ((keys - good_keys) * 2 > keys)
1346a1f0358bSKent Overstreet return true;
1347a1f0358bSKent Overstreet
1348a1f0358bSKent Overstreet return false;
1349cafe5635SKent Overstreet }
1350cafe5635SKent Overstreet
1351a1f0358bSKent Overstreet #define GC_MERGE_NODES 4U
1352cafe5635SKent Overstreet
1353cafe5635SKent Overstreet struct gc_merge_info {
1354cafe5635SKent Overstreet struct btree *b;
13556f10f7d1SColy Li unsigned int keys;
1356cafe5635SKent Overstreet };
1357cafe5635SKent Overstreet
1358fc2d5988SColy Li static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1359fc2d5988SColy Li struct keylist *insert_keys,
1360fc2d5988SColy Li atomic_t *journal_ref,
1361fc2d5988SColy Li struct bkey *replace_key);
1362a1f0358bSKent Overstreet
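/*
 * Try to merge the two to four nodes in r into one fewer node. Only
 * attempted when the combined keys would fit in 2/3 of the default
 * node size. On success the originals are freed, r is shifted down,
 * and -EINTR is returned because the caller's iterator is now invalid;
 * 0 means nothing was coalesced.
 */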
1363a1f0358bSKent Overstreet static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
13640a63b66dSKent Overstreet struct gc_stat *gc, struct gc_merge_info *r)
1365cafe5635SKent Overstreet {
13666f10f7d1SColy Li unsigned int i, nodes = 0, keys = 0, blocks;
1367a1f0358bSKent Overstreet struct btree *new_nodes[GC_MERGE_NODES];
13680a63b66dSKent Overstreet struct keylist keylist;
1369b54d6934SKent Overstreet struct closure cl;
1370a1f0358bSKent Overstreet struct bkey *k;
1371b54d6934SKent Overstreet
13720a63b66dSKent Overstreet bch_keylist_init(&keylist);
13730a63b66dSKent Overstreet
13740a63b66dSKent Overstreet if (btree_check_reserve(b, NULL))
13750a63b66dSKent Overstreet return 0;
13760a63b66dSKent Overstreet
1377a1f0358bSKent Overstreet memset(new_nodes, 0, sizeof(new_nodes));
1378b54d6934SKent Overstreet closure_init_stack(&cl);
1379cafe5635SKent Overstreet
1380f72f4312SColy Li while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1381cafe5635SKent Overstreet keys += r[nodes++].keys;
1382cafe5635SKent Overstreet
1383cafe5635SKent Overstreet blocks = btree_default_blocks(b->c) * 2 / 3;
1384cafe5635SKent Overstreet
1385cafe5635SKent Overstreet if (nodes < 2 ||
1386a85e968eSKent Overstreet __set_blocks(b->keys.set[0].data, keys,
13874e1ebae3SColy Li block_bytes(b->c->cache)) > blocks * (nodes - 1))
1388a1f0358bSKent Overstreet return 0;
1389cafe5635SKent Overstreet
1390a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) {
13910a63b66dSKent Overstreet new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1392028ddcacSZheng Wang if (IS_ERR(new_nodes[i]))
1393a1f0358bSKent Overstreet goto out_nocoalesce;
1394cafe5635SKent Overstreet }
1395cafe5635SKent Overstreet
13960a63b66dSKent Overstreet /*
13970a63b66dSKent Overstreet * We have to check the reserve here, after we've allocated our new
13980a63b66dSKent Overstreet * nodes, to make sure the insert below will succeed - we also check
13990a63b66dSKent Overstreet * before as an optimization to potentially avoid a bunch of expensive
14000a63b66dSKent Overstreet * allocs/sorts
14010a63b66dSKent Overstreet */
14020a63b66dSKent Overstreet if (btree_check_reserve(b, NULL))
14030a63b66dSKent Overstreet goto out_nocoalesce;
14040a63b66dSKent Overstreet
14052a285686SKent Overstreet for (i = 0; i < nodes; i++)
14062a285686SKent Overstreet mutex_lock(&new_nodes[i]->write_lock);
14072a285686SKent Overstreet
1408cafe5635SKent Overstreet for (i = nodes - 1; i > 0; --i) {
1409ee811287SKent Overstreet struct bset *n1 = btree_bset_first(new_nodes[i]);
1410ee811287SKent Overstreet struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1411cafe5635SKent Overstreet struct bkey *k, *last = NULL;
1412cafe5635SKent Overstreet
1413cafe5635SKent Overstreet keys = 0;
1414cafe5635SKent Overstreet
1415a1f0358bSKent Overstreet if (i > 1) {
1416cafe5635SKent Overstreet for (k = n2->start;
1417fafff81cSKent Overstreet k < bset_bkey_last(n2);
1418cafe5635SKent Overstreet k = bkey_next(k)) {
1419cafe5635SKent Overstreet if (__set_blocks(n1, n1->keys + keys +
1420ee811287SKent Overstreet bkey_u64s(k),
14214e1ebae3SColy Li block_bytes(b->c->cache)) > blocks)
1422cafe5635SKent Overstreet break;
1423cafe5635SKent Overstreet
1424cafe5635SKent Overstreet last = k;
1425cafe5635SKent Overstreet keys += bkey_u64s(k);
1426cafe5635SKent Overstreet }
1427a1f0358bSKent Overstreet } else {
1428a1f0358bSKent Overstreet /*
1429a1f0358bSKent Overstreet * This is the last node we're keeping - we're getting
1430a1f0358bSKent Overstreet * rid of the node at r[0]. We have to try to fit all of
1431a1f0358bSKent Overstreet * the remaining keys into this node; we can't ensure
1432a1f0358bSKent Overstreet * they will always fit, due to rounding and variable
1433a1f0358bSKent Overstreet * length keys (though that shouldn't be possible in
1434a1f0358bSKent Overstreet * practice)
1435a1f0358bSKent Overstreet */
1436a1f0358bSKent Overstreet if (__set_blocks(n1, n1->keys + n2->keys,
14374e1ebae3SColy Li block_bytes(b->c->cache)) >
1438ee811287SKent Overstreet btree_blocks(new_nodes[i]))
1439be23e837SZhiqiang Liu goto out_unlock_nocoalesce;
1440a1f0358bSKent Overstreet
1441a1f0358bSKent Overstreet keys = n2->keys;
1442a1f0358bSKent Overstreet /* Take the key of the node we're getting rid of */
1443a1f0358bSKent Overstreet last = &r->b->key;
1444a1f0358bSKent Overstreet }
1445cafe5635SKent Overstreet
14464e1ebae3SColy Li BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1447ee811287SKent Overstreet btree_blocks(new_nodes[i]));
1448cafe5635SKent Overstreet
1449a1f0358bSKent Overstreet if (last)
1450a1f0358bSKent Overstreet bkey_copy_key(&new_nodes[i]->key, last);
1451cafe5635SKent Overstreet
1452fafff81cSKent Overstreet memcpy(bset_bkey_last(n1),
1453cafe5635SKent Overstreet n2->start,
1454fafff81cSKent Overstreet (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1455cafe5635SKent Overstreet
1456cafe5635SKent Overstreet n1->keys += keys;
1457a1f0358bSKent Overstreet r[i].keys = n1->keys;
1458cafe5635SKent Overstreet
1459cafe5635SKent Overstreet memmove(n2->start,
1460fafff81cSKent Overstreet bset_bkey_idx(n2, keys),
1461fafff81cSKent Overstreet (void *) bset_bkey_last(n2) -
1462fafff81cSKent Overstreet (void *) bset_bkey_idx(n2, keys));
1463cafe5635SKent Overstreet
1464cafe5635SKent Overstreet n2->keys -= keys;
1465cafe5635SKent Overstreet
14660a63b66dSKent Overstreet if (__bch_keylist_realloc(&keylist,
1467085d2a3dSKent Overstreet bkey_u64s(&new_nodes[i]->key)))
1468be23e837SZhiqiang Liu goto out_unlock_nocoalesce;
1469a1f0358bSKent Overstreet
1470a1f0358bSKent Overstreet bch_btree_node_write(new_nodes[i], &cl);
14710a63b66dSKent Overstreet bch_keylist_add(&keylist, &new_nodes[i]->key);
1472cafe5635SKent Overstreet }
1473cafe5635SKent Overstreet
14742a285686SKent Overstreet for (i = 0; i < nodes; i++)
14752a285686SKent Overstreet mutex_unlock(&new_nodes[i]->write_lock);
14762a285686SKent Overstreet
147705335cffSKent Overstreet closure_sync(&cl);
147805335cffSKent Overstreet
147905335cffSKent Overstreet /* We emptied out this node */
148005335cffSKent Overstreet BUG_ON(btree_bset_first(new_nodes[0])->keys);
148105335cffSKent Overstreet btree_node_free(new_nodes[0]);
148205335cffSKent Overstreet rw_unlock(true, new_nodes[0]);
1483400ffaa2SSlava Pestov new_nodes[0] = NULL;
148405335cffSKent Overstreet
1485a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) {
14860a63b66dSKent Overstreet if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1487a1f0358bSKent Overstreet goto out_nocoalesce;
1488a1f0358bSKent Overstreet
14890a63b66dSKent Overstreet make_btree_freeing_key(r[i].b, keylist.top);
14900a63b66dSKent Overstreet bch_keylist_push(&keylist);
1491a1f0358bSKent Overstreet }
1492a1f0358bSKent Overstreet
14930a63b66dSKent Overstreet bch_btree_insert_node(b, op, &keylist, NULL, NULL);
14940a63b66dSKent Overstreet BUG_ON(!bch_keylist_empty(&keylist));
1495a1f0358bSKent Overstreet
1496a1f0358bSKent Overstreet for (i = 0; i < nodes; i++) {
1497a1f0358bSKent Overstreet btree_node_free(r[i].b);
1498a1f0358bSKent Overstreet rw_unlock(true, r[i].b);
1499a1f0358bSKent Overstreet
1500a1f0358bSKent Overstreet r[i].b = new_nodes[i];
1501a1f0358bSKent Overstreet }
1502a1f0358bSKent Overstreet
1503a1f0358bSKent Overstreet memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1504a1f0358bSKent Overstreet r[nodes - 1].b = ERR_PTR(-EINTR);
1505cafe5635SKent Overstreet
1506c37511b8SKent Overstreet trace_bcache_btree_gc_coalesce(nodes);
1507cafe5635SKent Overstreet gc->nodes--;
1508cafe5635SKent Overstreet
15090a63b66dSKent Overstreet bch_keylist_free(&keylist);
15100a63b66dSKent Overstreet
1511a1f0358bSKent Overstreet /* Invalidated our iterator */
1512a1f0358bSKent Overstreet return -EINTR;
1513a1f0358bSKent Overstreet
1514be23e837SZhiqiang Liu out_unlock_nocoalesce:
1515be23e837SZhiqiang Liu for (i = 0; i < nodes; i++)
1516be23e837SZhiqiang Liu mutex_unlock(&new_nodes[i]->write_lock);
1517be23e837SZhiqiang Liu
1518a1f0358bSKent Overstreet out_nocoalesce:
1519a1f0358bSKent Overstreet closure_sync(&cl);
1520a1f0358bSKent Overstreet
15210a63b66dSKent Overstreet while ((k = bch_keylist_pop(&keylist)))
1522a1f0358bSKent Overstreet if (!bkey_cmp(k, &ZERO_KEY))
1523a1f0358bSKent Overstreet atomic_dec(&b->c->prio_blocked);
1524f16277caSShenghui Wang bch_keylist_free(&keylist);
1525a1f0358bSKent Overstreet
1526a1f0358bSKent Overstreet for (i = 0; i < nodes; i++)
1527bb6cc253SMarkus Weippert if (!IS_ERR_OR_NULL(new_nodes[i])) {
1528a1f0358bSKent Overstreet btree_node_free(new_nodes[i]);
1529a1f0358bSKent Overstreet rw_unlock(true, new_nodes[i]);
1530a1f0358bSKent Overstreet }
1531a1f0358bSKent Overstreet return 0;
1532a1f0358bSKent Overstreet }
1533a1f0358bSKent Overstreet
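/*
 * Rewrite a single btree node: allocate a replacement, write it out,
 * and insert a key freeing the old node into the parent b. Returns
 * -EINTR on success (the caller's iterator is invalidated), or 0 if
 * the reserve check or the allocation fails.
 */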
15340a63b66dSKent Overstreet static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
15350a63b66dSKent Overstreet struct btree *replace)
15360a63b66dSKent Overstreet {
15370a63b66dSKent Overstreet struct keylist keys;
15380a63b66dSKent Overstreet struct btree *n;
15390a63b66dSKent Overstreet
15400a63b66dSKent Overstreet if (btree_check_reserve(b, NULL))
15410a63b66dSKent Overstreet return 0;
15420a63b66dSKent Overstreet
15430a63b66dSKent Overstreet n = btree_node_alloc_replacement(replace, NULL);
1544777967e7SColy Li if (IS_ERR(n))
1545777967e7SColy Li return 0;
15460a63b66dSKent Overstreet
15470a63b66dSKent Overstreet /* recheck reserve after allocating replacement node */
15480a63b66dSKent Overstreet if (btree_check_reserve(b, NULL)) {
15490a63b66dSKent Overstreet btree_node_free(n);
15500a63b66dSKent Overstreet rw_unlock(true, n);
15510a63b66dSKent Overstreet return 0;
15520a63b66dSKent Overstreet }
15530a63b66dSKent Overstreet
15540a63b66dSKent Overstreet bch_btree_node_write_sync(n);
15550a63b66dSKent Overstreet
15560a63b66dSKent Overstreet bch_keylist_init(&keys);
15570a63b66dSKent Overstreet bch_keylist_add(&keys, &n->key);
15580a63b66dSKent Overstreet
15590a63b66dSKent Overstreet make_btree_freeing_key(replace, keys.top);
15600a63b66dSKent Overstreet bch_keylist_push(&keys);
15610a63b66dSKent Overstreet
15620a63b66dSKent Overstreet bch_btree_insert_node(b, op, &keys, NULL, NULL);
15630a63b66dSKent Overstreet BUG_ON(!bch_keylist_empty(&keys));
15640a63b66dSKent Overstreet
15650a63b66dSKent Overstreet btree_node_free(replace);
15660a63b66dSKent Overstreet rw_unlock(true, n);
15670a63b66dSKent Overstreet
15680a63b66dSKent Overstreet /* Invalidated our iterator */
15690a63b66dSKent Overstreet return -EINTR;
15700a63b66dSKent Overstreet }
15710a63b66dSKent Overstreet
15726f10f7d1SColy Li static unsigned int btree_gc_count_keys(struct btree *b)
1573a1f0358bSKent Overstreet {
1574a1f0358bSKent Overstreet struct bkey *k;
1575*866898efSKuan-Wei Chiu struct btree_iter iter;
15766f10f7d1SColy Li unsigned int ret = 0;
1577a1f0358bSKent Overstreet
1578*866898efSKuan-Wei Chiu min_heap_init(&iter.heap, NULL, MAX_BSETS);
1579*866898efSKuan-Wei Chiu
1580c052dd9aSKent Overstreet for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1581a1f0358bSKent Overstreet ret += bkey_u64s(k);
1582a1f0358bSKent Overstreet
1583a1f0358bSKent Overstreet return ret;
1584cafe5635SKent Overstreet }
1585cafe5635SKent Overstreet
15867f4a59deSTang Junhui static size_t btree_gc_min_nodes(struct cache_set *c)
15877f4a59deSTang Junhui {
15887f4a59deSTang Junhui size_t min_nodes;
15897f4a59deSTang Junhui
15907f4a59deSTang Junhui /*
15917f4a59deSTang Junhui * Incremental GC pauses for 100ms whenever front-side I/O
15927f4a59deSTang Junhui * arrives, so with many btree nodes, if GC processed only a
15937f4a59deSTang Junhui * constant number (100) of nodes per pass it would take a
15947f4a59deSTang Junhui * long time, and the front-side I/Os would run out of
15957f4a59deSTang Junhui * buckets (since no new bucket can be allocated during GC)
15967f4a59deSTang Junhui * and be blocked again. So rather than a constant count, GC
15977f4a59deSTang Junhui * scales the number of nodes per pass with the total number
15987f4a59deSTang Junhui * of btree nodes, by dividing the work into a constant
15997f4a59deSTang Junhui * number (100) of passes: with many btree nodes GC processes
16007f4a59deSTang Junhui * more nodes each time, otherwise fewer (but never less
16017f4a59deSTang Junhui * than MIN_GC_NODES)
16037f4a59deSTang Junhui */
16047f4a59deSTang Junhui min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
16057f4a59deSTang Junhui if (min_nodes < MIN_GC_NODES)
16067f4a59deSTang Junhui min_nodes = MIN_GC_NODES;
16077f4a59deSTang Junhui
16087f4a59deSTang Junhui return min_nodes;
16097f4a59deSTang Junhui }
16107f4a59deSTang Junhui
16117f4a59deSTang Junhui
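/*
 * Walk one level of the btree for gc: read each child, coalescing or
 * rewriting it where worthwhile, recurse into internal nodes, and
 * record progress in c->gc_done so an interrupted gc can resume.
 * Returns -EAGAIN to yield when front-side I/O is waiting or the
 * scheduler wants the CPU back.
 */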
1612cafe5635SKent Overstreet static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1613cafe5635SKent Overstreet struct closure *writes, struct gc_stat *gc)
1614cafe5635SKent Overstreet {
1615a1f0358bSKent Overstreet int ret = 0;
1616a1f0358bSKent Overstreet bool should_rewrite;
1617a1f0358bSKent Overstreet struct bkey *k;
1618*866898efSKuan-Wei Chiu struct btree_iter iter;
1619cafe5635SKent Overstreet struct gc_merge_info r[GC_MERGE_NODES];
16202a285686SKent Overstreet struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1621cafe5635SKent Overstreet
1622*866898efSKuan-Wei Chiu min_heap_init(&iter.heap, NULL, MAX_BSETS);
1623*866898efSKuan-Wei Chiu bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1624cafe5635SKent Overstreet
16252a285686SKent Overstreet for (i = r; i < r + ARRAY_SIZE(r); i++)
16262a285686SKent Overstreet i->b = ERR_PTR(-EINTR);
1627cafe5635SKent Overstreet
1628a1f0358bSKent Overstreet while (1) {
1629*866898efSKuan-Wei Chiu k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1630a1f0358bSKent Overstreet if (k) {
16310a63b66dSKent Overstreet r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
16322452cc89SSlava Pestov true, b);
1633cafe5635SKent Overstreet if (IS_ERR(r->b)) {
1634cafe5635SKent Overstreet ret = PTR_ERR(r->b);
1635cafe5635SKent Overstreet break;
1636cafe5635SKent Overstreet }
1637cafe5635SKent Overstreet
1638a1f0358bSKent Overstreet r->keys = btree_gc_count_keys(r->b);
1639cafe5635SKent Overstreet
16400a63b66dSKent Overstreet ret = btree_gc_coalesce(b, op, gc, r);
1641a1f0358bSKent Overstreet if (ret)
1642cafe5635SKent Overstreet break;
1643cafe5635SKent Overstreet }
1644cafe5635SKent Overstreet
1645a1f0358bSKent Overstreet if (!last->b)
1646a1f0358bSKent Overstreet break;
1647cafe5635SKent Overstreet
1648a1f0358bSKent Overstreet if (!IS_ERR(last->b)) {
1649a1f0358bSKent Overstreet should_rewrite = btree_gc_mark_node(last->b, gc);
16500a63b66dSKent Overstreet if (should_rewrite) {
16510a63b66dSKent Overstreet ret = btree_gc_rewrite_node(b, op, last->b);
16520a63b66dSKent Overstreet if (ret)
1653a1f0358bSKent Overstreet break;
1654a1f0358bSKent Overstreet }
1655a1f0358bSKent Overstreet
1656a1f0358bSKent Overstreet if (last->b->level) {
1657a1f0358bSKent Overstreet ret = btree_gc_recurse(last->b, op, writes, gc);
1658a1f0358bSKent Overstreet if (ret)
1659a1f0358bSKent Overstreet break;
1660a1f0358bSKent Overstreet }
1661a1f0358bSKent Overstreet
1662a1f0358bSKent Overstreet bkey_copy_key(&b->c->gc_done, &last->b->key);
1663a1f0358bSKent Overstreet
1664a1f0358bSKent Overstreet /*
1665a1f0358bSKent Overstreet * Must flush leaf nodes before gc ends, since replace
1666a1f0358bSKent Overstreet * operations aren't journalled
1667cafe5635SKent Overstreet */
16682a285686SKent Overstreet mutex_lock(&last->b->write_lock);
1669a1f0358bSKent Overstreet if (btree_node_dirty(last->b))
1670a1f0358bSKent Overstreet bch_btree_node_write(last->b, writes);
16712a285686SKent Overstreet mutex_unlock(&last->b->write_lock);
1672a1f0358bSKent Overstreet rw_unlock(true, last->b);
1673a1f0358bSKent Overstreet }
1674a1f0358bSKent Overstreet
1675a1f0358bSKent Overstreet memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1676a1f0358bSKent Overstreet r->b = NULL;
1677a1f0358bSKent Overstreet
16785c25c4fcSTang Junhui if (atomic_read(&b->c->search_inflight) &&
16797f4a59deSTang Junhui gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
16805c25c4fcSTang Junhui gc->nodes_pre = gc->nodes;
16815c25c4fcSTang Junhui ret = -EAGAIN;
16825c25c4fcSTang Junhui break;
16835c25c4fcSTang Junhui }
16845c25c4fcSTang Junhui
1685cafe5635SKent Overstreet if (need_resched()) {
1686cafe5635SKent Overstreet ret = -EAGAIN;
1687cafe5635SKent Overstreet break;
1688cafe5635SKent Overstreet }
1689cafe5635SKent Overstreet }
1690cafe5635SKent Overstreet
16912a285686SKent Overstreet for (i = r; i < r + ARRAY_SIZE(r); i++)
16922a285686SKent Overstreet if (!IS_ERR_OR_NULL(i->b)) {
16932a285686SKent Overstreet mutex_lock(&i->b->write_lock);
16942a285686SKent Overstreet if (btree_node_dirty(i->b))
16952a285686SKent Overstreet bch_btree_node_write(i->b, writes);
16962a285686SKent Overstreet mutex_unlock(&i->b->write_lock);
16972a285686SKent Overstreet rw_unlock(true, i->b);
1698a1f0358bSKent Overstreet }
1699cafe5635SKent Overstreet
1700cafe5635SKent Overstreet return ret;
1701cafe5635SKent Overstreet }
1702cafe5635SKent Overstreet
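/*
 * gc entry point for the root: rewrite the root itself if
 * btree_gc_mark_node() asks for it (returning -EINTR so gc restarts
 * with the new root), then mark the root's own key and recurse into
 * the rest of the tree.
 */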
1703cafe5635SKent Overstreet static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1704cafe5635SKent Overstreet struct closure *writes, struct gc_stat *gc)
1705cafe5635SKent Overstreet {
1706cafe5635SKent Overstreet struct btree *n = NULL;
1707a1f0358bSKent Overstreet int ret = 0;
1708a1f0358bSKent Overstreet bool should_rewrite;
1709cafe5635SKent Overstreet
1710a1f0358bSKent Overstreet should_rewrite = btree_gc_mark_node(b, gc);
1711a1f0358bSKent Overstreet if (should_rewrite) {
17120a63b66dSKent Overstreet n = btree_node_alloc_replacement(b, NULL);
1713cafe5635SKent Overstreet
1714028ddcacSZheng Wang if (!IS_ERR(n)) {
1715a1f0358bSKent Overstreet bch_btree_node_write_sync(n);
17162a285686SKent Overstreet
1717a1f0358bSKent Overstreet bch_btree_set_root(n);
1718a1f0358bSKent Overstreet btree_node_free(b);
1719a1f0358bSKent Overstreet rw_unlock(true, n);
1720a1f0358bSKent Overstreet
1721a1f0358bSKent Overstreet return -EINTR;
1722cafe5635SKent Overstreet }
1723a1f0358bSKent Overstreet }
1724a1f0358bSKent Overstreet
1725487dded8SKent Overstreet __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1726487dded8SKent Overstreet
1727a1f0358bSKent Overstreet if (b->level) {
1728a1f0358bSKent Overstreet ret = btree_gc_recurse(b, op, writes, gc);
1729a1f0358bSKent Overstreet if (ret)
1730a1f0358bSKent Overstreet return ret;
1731a1f0358bSKent Overstreet }
1732a1f0358bSKent Overstreet
1733a1f0358bSKent Overstreet bkey_copy_key(&b->c->gc_done, &b->key);
1734cafe5635SKent Overstreet
1735cafe5635SKent Overstreet return ret;
1736cafe5635SKent Overstreet }
1737cafe5635SKent Overstreet
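/*
 * Reset per-bucket gc state before a pass: snapshot each bucket's
 * generation into last_gc, flag buckets the allocator could
 * invalidate, and clear the marks of unpinned buckets. gc_mark_valid
 * stays clear until bch_btree_gc_finish() recomputes the marks.
 */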
1738cafe5635SKent Overstreet static void btree_gc_start(struct cache_set *c)
1739cafe5635SKent Overstreet {
1740cafe5635SKent Overstreet struct cache *ca;
1741cafe5635SKent Overstreet struct bucket *b;
1742cafe5635SKent Overstreet
1743cafe5635SKent Overstreet if (!c->gc_mark_valid)
1744cafe5635SKent Overstreet return;
1745cafe5635SKent Overstreet
1746cafe5635SKent Overstreet mutex_lock(&c->bucket_lock);
1747cafe5635SKent Overstreet
1748cafe5635SKent Overstreet c->gc_done = ZERO_KEY;
1749cafe5635SKent Overstreet
175008fdb2cdSColy Li ca = c->cache;
1751cafe5635SKent Overstreet for_each_bucket(b, ca) {
17523a2fd9d5SKent Overstreet b->last_gc = b->gen;
1753a14a68b7SDongsheng Yang if (bch_can_invalidate_bucket(ca, b))
1754a14a68b7SDongsheng Yang b->reclaimable_in_gc = 1;
175529ebf465SKent Overstreet if (!atomic_read(&b->pin)) {
17564fe6a816SKent Overstreet SET_GC_MARK(b, 0);
175729ebf465SKent Overstreet SET_GC_SECTORS_USED(b, 0);
175829ebf465SKent Overstreet }
1759cafe5635SKent Overstreet }
1760cafe5635SKent Overstreet
1761a14a68b7SDongsheng Yang c->gc_mark_valid = 0;
1762cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock);
1763cafe5635SKent Overstreet }
1764cafe5635SKent Overstreet
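/*
 * Finish a gc pass: re-mark the metadata buckets (uuids, the journal
 * buckets listed in the superblock, and the prio buckets), protect
 * buckets referenced by in-flight writeback keys, and recount how many
 * buckets are available.
 */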
1765d44c2f9eSTang Junhui static void bch_btree_gc_finish(struct cache_set *c)
1766cafe5635SKent Overstreet {
1767cafe5635SKent Overstreet struct bucket *b;
1768cafe5635SKent Overstreet struct cache *ca;
176908fdb2cdSColy Li unsigned int i, j;
177008fdb2cdSColy Li uint64_t *k;
1771cafe5635SKent Overstreet
1772cafe5635SKent Overstreet mutex_lock(&c->bucket_lock);
1773cafe5635SKent Overstreet
1774cafe5635SKent Overstreet set_gc_sectors(c);
1775cafe5635SKent Overstreet c->gc_mark_valid = 1;
1776cafe5635SKent Overstreet c->need_gc = 0;
1777cafe5635SKent Overstreet
1778cafe5635SKent Overstreet for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1779cafe5635SKent Overstreet SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1780cafe5635SKent Overstreet GC_MARK_METADATA);
1781cafe5635SKent Overstreet
1782bf0a628aSNicholas Swenson /* don't reclaim buckets to which writeback keys point */
1783bf0a628aSNicholas Swenson rcu_read_lock();
17842831231dSColy Li for (i = 0; i < c->devices_max_used; i++) {
1785bf0a628aSNicholas Swenson struct bcache_device *d = c->devices[i];
1786bf0a628aSNicholas Swenson struct cached_dev *dc;
1787bf0a628aSNicholas Swenson struct keybuf_key *w, *n;
1788bf0a628aSNicholas Swenson
1789bf0a628aSNicholas Swenson if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1790bf0a628aSNicholas Swenson continue;
1791bf0a628aSNicholas Swenson dc = container_of(d, struct cached_dev, disk);
1792bf0a628aSNicholas Swenson
1793bf0a628aSNicholas Swenson spin_lock(&dc->writeback_keys.lock);
1794bf0a628aSNicholas Swenson rbtree_postorder_for_each_entry_safe(w, n,
1795bf0a628aSNicholas Swenson &dc->writeback_keys.keys, node)
1796bf0a628aSNicholas Swenson for (j = 0; j < KEY_PTRS(&w->key); j++)
1797bf0a628aSNicholas Swenson SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1798bf0a628aSNicholas Swenson GC_MARK_DIRTY);
1799bf0a628aSNicholas Swenson spin_unlock(&dc->writeback_keys.lock);
1800bf0a628aSNicholas Swenson }
1801bf0a628aSNicholas Swenson rcu_read_unlock();
1802bf0a628aSNicholas Swenson
1803d44c2f9eSTang Junhui c->avail_nbuckets = 0;
1804cafe5635SKent Overstreet
180508fdb2cdSColy Li ca = c->cache;
1806cafe5635SKent Overstreet ca->invalidate_needs_gc = 0;
1807cafe5635SKent Overstreet
180808fdb2cdSColy Li for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
180908fdb2cdSColy Li SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1810cafe5635SKent Overstreet
181108fdb2cdSColy Li for (k = ca->prio_buckets;
181208fdb2cdSColy Li k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
181308fdb2cdSColy Li SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1814cafe5635SKent Overstreet
1815cafe5635SKent Overstreet for_each_bucket(b, ca) {
1816cafe5635SKent Overstreet c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1817cafe5635SKent Overstreet
1818a14a68b7SDongsheng Yang if (b->reclaimable_in_gc)
1819a14a68b7SDongsheng Yang b->reclaimable_in_gc = 0;
1820a14a68b7SDongsheng Yang
18214fe6a816SKent Overstreet if (atomic_read(&b->pin))
18224fe6a816SKent Overstreet continue;
18234fe6a816SKent Overstreet
18244fe6a816SKent Overstreet BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
18254fe6a816SKent Overstreet
18264fe6a816SKent Overstreet if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1827d44c2f9eSTang Junhui c->avail_nbuckets++;
1828cafe5635SKent Overstreet }
1829cafe5635SKent Overstreet
1830cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock);
1831cafe5635SKent Overstreet }
1832cafe5635SKent Overstreet
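/*
 * One full garbage collection pass: mark the whole tree, retrying on
 * -EAGAIN until it completes or the cache set is shutting down, then
 * finish the marks, wake the allocator, update the stats, and kick off
 * moving gc.
 */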
183372a44517SKent Overstreet static void bch_btree_gc(struct cache_set *c)
1834cafe5635SKent Overstreet {
1835cafe5635SKent Overstreet int ret;
1836cafe5635SKent Overstreet struct gc_stat stats;
1837cafe5635SKent Overstreet struct closure writes;
1838cafe5635SKent Overstreet struct btree_op op;
1839cafe5635SKent Overstreet uint64_t start_time = local_clock();
184057943511SKent Overstreet
1841c37511b8SKent Overstreet trace_bcache_gc_start(c);
1842cafe5635SKent Overstreet
1843cafe5635SKent Overstreet memset(&stats, 0, sizeof(struct gc_stat));
1844cafe5635SKent Overstreet closure_init_stack(&writes);
1845b54d6934SKent Overstreet bch_btree_op_init(&op, SHRT_MAX);
1846cafe5635SKent Overstreet
1847cafe5635SKent Overstreet btree_gc_start(c);
1848cafe5635SKent Overstreet
1849771f393eSColy Li /* if CACHE_SET_IO_DISABLE is set, the gc thread should stop too */
1850a1f0358bSKent Overstreet do {
1851feac1a70SColy Li ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1852cafe5635SKent Overstreet closure_sync(&writes);
1853c5f1e5adSKent Overstreet cond_resched();
1854cafe5635SKent Overstreet
18555c25c4fcSTang Junhui if (ret == -EAGAIN)
18565c25c4fcSTang Junhui schedule_timeout_interruptible(msecs_to_jiffies
18575c25c4fcSTang Junhui (GC_SLEEP_MS));
18585c25c4fcSTang Junhui else if (ret)
185946f5aa88SJoe Perches pr_warn("gc failed!\n");
1860771f393eSColy Li } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1861cafe5635SKent Overstreet
1862d44c2f9eSTang Junhui bch_btree_gc_finish(c);
186357943511SKent Overstreet wake_up_allocators(c);
186457943511SKent Overstreet
1865169ef1cfSKent Overstreet bch_time_stats_update(&c->btree_gc_time, start_time);
1866cafe5635SKent Overstreet
1867cafe5635SKent Overstreet stats.key_bytes *= sizeof(uint64_t);
1868cafe5635SKent Overstreet stats.data <<= 9;
1869d44c2f9eSTang Junhui bch_update_bucket_in_use(c, &stats);
1870cafe5635SKent Overstreet memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1871cafe5635SKent Overstreet
1872c37511b8SKent Overstreet trace_bcache_gc_end(c);
1873cafe5635SKent Overstreet
187472a44517SKent Overstreet bch_moving_gc(c);
1875cafe5635SKent Overstreet }
1876cafe5635SKent Overstreet
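/*
 * gc runs either because the allocator has run out of buckets to
 * invalidate or because enough sectors have been written since the
 * last pass (sectors_to_gc has gone negative).
 */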
1877be628be0SKent Overstreet static bool gc_should_run(struct cache_set *c)
1878cafe5635SKent Overstreet {
187908fdb2cdSColy Li struct cache *ca = c->cache;
188072a44517SKent Overstreet
1881be628be0SKent Overstreet if (ca->invalidate_needs_gc)
1882be628be0SKent Overstreet return true;
188372a44517SKent Overstreet
1884be628be0SKent Overstreet if (atomic_read(&c->sectors_to_gc) < 0)
1885be628be0SKent Overstreet return true;
1886be628be0SKent Overstreet
1887be628be0SKent Overstreet return false;
1888be628be0SKent Overstreet }
1889be628be0SKent Overstreet
1890be628be0SKent Overstreet static int bch_gc_thread(void *arg)
1891be628be0SKent Overstreet {
1892be628be0SKent Overstreet struct cache_set *c = arg;
1893be628be0SKent Overstreet
1894be628be0SKent Overstreet while (1) {
1895be628be0SKent Overstreet wait_event_interruptible(c->gc_wait,
1896771f393eSColy Li kthread_should_stop() ||
1897771f393eSColy Li test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1898771f393eSColy Li gc_should_run(c));
1899be628be0SKent Overstreet
1900771f393eSColy Li if (kthread_should_stop() ||
1901771f393eSColy Li test_bit(CACHE_SET_IO_DISABLE, &c->flags))
190272a44517SKent Overstreet break;
190372a44517SKent Overstreet
1904be628be0SKent Overstreet set_gc_sectors(c);
1905be628be0SKent Overstreet bch_btree_gc(c);
190672a44517SKent Overstreet }
190772a44517SKent Overstreet
1908771f393eSColy Li wait_for_kthread_stop();
190972a44517SKent Overstreet return 0;
191072a44517SKent Overstreet }
191172a44517SKent Overstreet
191272a44517SKent Overstreet int bch_gc_thread_start(struct cache_set *c)
191372a44517SKent Overstreet {
1914be628be0SKent Overstreet c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
19159d134117SVasyl Gomonovych return PTR_ERR_OR_ZERO(c->gc_thread);
1916cafe5635SKent Overstreet }
1917cafe5635SKent Overstreet
1918cafe5635SKent Overstreet /* Initial partial gc */
1919cafe5635SKent Overstreet
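/*
 * Recursively mark every key reachable from b so that bucket gens and
 * prios reflect the on-disk contents at registration time; each child
 * is prefetched before the previous one is recursed into.
 */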
1920487dded8SKent Overstreet static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1921cafe5635SKent Overstreet {
192250310164SKent Overstreet int ret = 0;
192350310164SKent Overstreet struct bkey *k, *p = NULL;
1924*866898efSKuan-Wei Chiu struct btree_iter iter;
1925*866898efSKuan-Wei Chiu
1926*866898efSKuan-Wei Chiu min_heap_init(&iter.heap, NULL, MAX_BSETS);
1927cafe5635SKent Overstreet
1928487dded8SKent Overstreet for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1929487dded8SKent Overstreet bch_initial_mark_key(b->c, b->level, k);
1930cafe5635SKent Overstreet
1931487dded8SKent Overstreet bch_initial_mark_key(b->c, b->level + 1, &b->key);
1932cafe5635SKent Overstreet
1933cafe5635SKent Overstreet if (b->level) {
1934*866898efSKuan-Wei Chiu bch_btree_iter_init(&b->keys, &iter, NULL);
1935cafe5635SKent Overstreet
193650310164SKent Overstreet do {
1937*866898efSKuan-Wei Chiu k = bch_btree_iter_next_filter(&iter, &b->keys,
1938a85e968eSKent Overstreet bch_ptr_bad);
19397f4a59deSTang Junhui if (k) {
19402452cc89SSlava Pestov btree_node_prefetch(b, k);
19417f4a59deSTang Junhui /*
19427f4a59deSTang Junhui * initialize c->gc_stats.nodes
19437f4a59deSTang Junhui * for incremental GC
19447f4a59deSTang Junhui */
19457f4a59deSTang Junhui b->c->gc_stats.nodes++;
19467f4a59deSTang Junhui }
194750310164SKent Overstreet
1948cafe5635SKent Overstreet if (p)
1949feac1a70SColy Li ret = bcache_btree(check_recurse, p, b, op);
1950cafe5635SKent Overstreet
195150310164SKent Overstreet p = k;
195250310164SKent Overstreet } while (p && !ret);
1953cafe5635SKent Overstreet }
1954cafe5635SKent Overstreet
1955487dded8SKent Overstreet return ret;
1956cafe5635SKent Overstreet }
1957cafe5635SKent Overstreet
19588e710227SColy Li
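/*
 * Worker for bch_btree_check(): threads race on check_state->key_idx
 * to claim keys from the root node, and each thread checks the subtree
 * under the keys it claims. The first thread to run out of root keys
 * sets check_state->enough so that no further threads are created.
 */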
19598e710227SColy Li static int bch_btree_check_thread(void *arg)
1960cafe5635SKent Overstreet {
19618e710227SColy Li int ret;
19628e710227SColy Li struct btree_check_info *info = arg;
19638e710227SColy Li struct btree_check_state *check_state = info->state;
19648e710227SColy Li struct cache_set *c = check_state->c;
1965*866898efSKuan-Wei Chiu struct btree_iter iter;
19668e710227SColy Li struct bkey *k, *p;
19678e710227SColy Li int cur_idx, prev_idx, skip_nr;
19688e710227SColy Li
19698e710227SColy Li k = p = NULL;
19708e710227SColy Li cur_idx = prev_idx = 0;
19718e710227SColy Li ret = 0;
19728e710227SColy Li
1973*866898efSKuan-Wei Chiu min_heap_init(&iter.heap, NULL, MAX_BSETS);
1974*866898efSKuan-Wei Chiu
19758e710227SColy Li /* root node keys are checked before the check threads are created */
1976*866898efSKuan-Wei Chiu bch_btree_iter_init(&c->root->keys, &iter, NULL);
1977*866898efSKuan-Wei Chiu k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
19788e710227SColy Li BUG_ON(!k);
19798e710227SColy Li
19808e710227SColy Li p = k;
19818e710227SColy Li while (k) {
19828e710227SColy Li /*
19838e710227SColy Li * Fetch a root node key index, skip the keys which
19848e710227SColy Li * should be fetched by other threads, then check the
19858e710227SColy Li * sub-tree indexed by the fetched key.
19868e710227SColy Li */
19878e710227SColy Li spin_lock(&check_state->idx_lock);
19888e710227SColy Li cur_idx = check_state->key_idx;
19898e710227SColy Li check_state->key_idx++;
19908e710227SColy Li spin_unlock(&check_state->idx_lock);
19918e710227SColy Li
19928e710227SColy Li skip_nr = cur_idx - prev_idx;
19938e710227SColy Li
19948e710227SColy Li while (skip_nr) {
1995*866898efSKuan-Wei Chiu k = bch_btree_iter_next_filter(&iter,
19968e710227SColy Li &c->root->keys,
19978e710227SColy Li bch_ptr_bad);
19988e710227SColy Li if (k)
19998e710227SColy Li p = k;
20008e710227SColy Li else {
20018e710227SColy Li /*
20028e710227SColy Li * No more keys to check in the root node;
20038e710227SColy Li * the running check threads are enough,
20048e710227SColy Li * so stop creating more.
20058e710227SColy Li */
20068e710227SColy Li atomic_set(&check_state->enough, 1);
20078e710227SColy Li /* Update check_state->enough earlier */
2008eb9b6666SColy Li smp_mb__after_atomic();
20098e710227SColy Li goto out;
20108e710227SColy Li }
20118e710227SColy Li skip_nr--;
20128e710227SColy Li cond_resched();
20138e710227SColy Li }
20148e710227SColy Li
20158e710227SColy Li if (p) {
2016c18536a7SKent Overstreet struct btree_op op;
2017cafe5635SKent Overstreet
20188e710227SColy Li btree_node_prefetch(c->root, p);
20198e710227SColy Li c->gc_stats.nodes++;
20208e710227SColy Li bch_btree_op_init(&op, 0);
20218e710227SColy Li ret = bcache_btree(check_recurse, p, c->root, &op);
2022f0854489SMingzhe Zou /*
2023f0854489SMingzhe Zou * The op may have been added to the cache_set's
2024f0854489SMingzhe Zou * btree_cache_wait list in mca_cannibalize(); we must
2025f0854489SMingzhe Zou * ensure it is removed from the list and that
2026f0854489SMingzhe Zou * btree_cache_alloc_lock is released before the op's
2027f0854489SMingzhe Zou * memory is freed, otherwise btree_cache_wait is corrupted.
2028f0854489SMingzhe Zou */
2029f0854489SMingzhe Zou bch_cannibalize_unlock(c);
2030f0854489SMingzhe Zou finish_wait(&c->btree_cache_wait, &op.wait);
20318e710227SColy Li if (ret)
20328e710227SColy Li goto out;
20338e710227SColy Li }
20348e710227SColy Li p = NULL;
20358e710227SColy Li prev_idx = cur_idx;
20368e710227SColy Li cond_resched();
20378e710227SColy Li }
2038cafe5635SKent Overstreet
20398e710227SColy Li out:
20408e710227SColy Li info->result = ret;
20418e710227SColy Li /* update check_state->started among all CPUs */
2042eb9b6666SColy Li smp_mb__before_atomic();
20438e710227SColy Li if (atomic_dec_and_test(&check_state->started))
20448e710227SColy Li wake_up(&check_state->wait);
20458e710227SColy Li
20468e710227SColy Li return ret;
20478e710227SColy Li }
20488e710227SColy Li
20498e710227SColy Li
20508e710227SColy Li
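/* Use half the online CPUs, clamped to [1, BCH_BTR_CHKTHREAD_MAX]. */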
20518e710227SColy Li static int bch_btree_chkthread_nr(void)
20528e710227SColy Li {
20538e710227SColy Li int n = num_online_cpus()/2;
20548e710227SColy Li
20558e710227SColy Li if (n == 0)
20568e710227SColy Li n = 1;
20578e710227SColy Li else if (n > BCH_BTR_CHKTHREAD_MAX)
20588e710227SColy Li n = BCH_BTR_CHKTHREAD_MAX;
20598e710227SColy Li
20608e710227SColy Li return n;
20618e710227SColy Li }
20628e710227SColy Li
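/*
 * Initial consistency check at registration time: mark the root's own
 * keys single-threaded, then check the subtrees under the root in
 * parallel with up to bch_btree_chkthread_nr() threads, holding the
 * root read lock throughout. A tree whose root is a leaf needs no
 * further checking.
 */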
20638e710227SColy Li int bch_btree_check(struct cache_set *c)
20648e710227SColy Li {
20658e710227SColy Li int ret = 0;
20668e710227SColy Li int i;
20678e710227SColy Li struct bkey *k = NULL;
2068*866898efSKuan-Wei Chiu struct btree_iter iter;
206962253644SColy Li struct btree_check_state check_state;
20708e710227SColy Li
2071*866898efSKuan-Wei Chiu min_heap_init(&iter.heap, NULL, MAX_BSETS);
2072*866898efSKuan-Wei Chiu
20738e710227SColy Li /* check and mark root node keys */
20748e710227SColy Li for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
20758e710227SColy Li bch_initial_mark_key(c, c->root->level, k);
20768e710227SColy Li
20778e710227SColy Li bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
20788e710227SColy Li
20798e710227SColy Li if (c->root->level == 0)
20808e710227SColy Li return 0;
20818e710227SColy Li
20827d6b902eSColy Li memset(&check_state, 0, sizeof(struct btree_check_state));
208362253644SColy Li check_state.c = c;
208462253644SColy Li check_state.total_threads = bch_btree_chkthread_nr();
208562253644SColy Li check_state.key_idx = 0;
208662253644SColy Li spin_lock_init(&check_state.idx_lock);
208762253644SColy Li atomic_set(&check_state.started, 0);
208862253644SColy Li atomic_set(&check_state.enough, 0);
208962253644SColy Li init_waitqueue_head(&check_state.wait);
20908e710227SColy Li
209162253644SColy Li rw_lock(0, c->root, c->root->level);
20928e710227SColy Li /*
20938e710227SColy Li * Run multiple threads to check btree nodes in parallel.
209462253644SColy Li * If check_state.enough is non-zero, enough check threads
20958e710227SColy Li * are already running and it is unnecessary to create
20968e710227SColy Li * more.
20978e710227SColy Li */
209862253644SColy Li for (i = 0; i < check_state.total_threads; i++) {
209962253644SColy Li /* fetch latest check_state.enough earlier */
2100eb9b6666SColy Li smp_mb__before_atomic();
210162253644SColy Li if (atomic_read(&check_state.enough))
21028e710227SColy Li break;
21038e710227SColy Li
210462253644SColy Li check_state.infos[i].result = 0;
210562253644SColy Li check_state.infos[i].state = &check_state;
21068e710227SColy Li
210762253644SColy Li check_state.infos[i].thread =
21088e710227SColy Li kthread_run(bch_btree_check_thread,
210962253644SColy Li &check_state.infos[i],
211062253644SColy Li "bch_btrchk[%d]", i);
211162253644SColy Li if (IS_ERR(check_state.infos[i].thread)) {
211246f5aa88SJoe Perches pr_err("failed to run thread bch_btrchk[%d]\n", i);
21138e710227SColy Li for (--i; i >= 0; i--)
211462253644SColy Li kthread_stop(check_state.infos[i].thread);
21158e710227SColy Li ret = -ENOMEM;
21168e710227SColy Li goto out;
21178e710227SColy Li }
211862253644SColy Li atomic_inc(&check_state.started);
21198e710227SColy Li }
21208e710227SColy Li
2121887554abSMingzhe Zou /*
2122887554abSMingzhe Zou * Must wait for all threads to stop.
2123887554abSMingzhe Zou */
212462253644SColy Li wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
21258e710227SColy Li
212662253644SColy Li for (i = 0; i < check_state.total_threads; i++) {
212762253644SColy Li if (check_state.infos[i].result) {
212862253644SColy Li ret = check_state.infos[i].result;
21298e710227SColy Li goto out;
21308e710227SColy Li }
21318e710227SColy Li }
21328e710227SColy Li
21338e710227SColy Li out:
213462253644SColy Li rw_unlock(0, c->root);
21358e710227SColy Li return ret;
2136cafe5635SKent Overstreet }
2137cafe5635SKent Overstreet
21382531d9eeSKent Overstreet void bch_initial_gc_finish(struct cache_set *c)
21392531d9eeSKent Overstreet {
214008fdb2cdSColy Li struct cache *ca = c->cache;
21412531d9eeSKent Overstreet struct bucket *b;
21422531d9eeSKent Overstreet
21432531d9eeSKent Overstreet bch_btree_gc_finish(c);
21442531d9eeSKent Overstreet
21452531d9eeSKent Overstreet mutex_lock(&c->bucket_lock);
21462531d9eeSKent Overstreet
21472531d9eeSKent Overstreet /*
21482531d9eeSKent Overstreet * We need to put some unused buckets directly on the prio freelist in
21492531d9eeSKent Overstreet * order to get the allocator thread started - it needs freed buckets in
21502531d9eeSKent Overstreet * order to rewrite the prios and gens, and it needs to rewrite prios
21512531d9eeSKent Overstreet * and gens in order to free buckets.
21522531d9eeSKent Overstreet *
21532531d9eeSKent Overstreet * This is only safe for buckets that have no live data in them, which
21542531d9eeSKent Overstreet * there should always be some of.
21552531d9eeSKent Overstreet */
21562531d9eeSKent Overstreet for_each_bucket(b, ca) {
2157682811b3STang Junhui if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2158682811b3STang Junhui fifo_full(&ca->free[RESERVE_BTREE]))
21592531d9eeSKent Overstreet break;
21602531d9eeSKent Overstreet
21612531d9eeSKent Overstreet if (bch_can_invalidate_bucket(ca, b) &&
21622531d9eeSKent Overstreet !GC_MARK(b)) {
21632531d9eeSKent Overstreet __bch_invalidate_one_bucket(ca, b);
2164682811b3STang Junhui if (!fifo_push(&ca->free[RESERVE_PRIO],
2165682811b3STang Junhui b - ca->buckets))
2166682811b3STang Junhui fifo_push(&ca->free[RESERVE_BTREE],
21672531d9eeSKent Overstreet b - ca->buckets);
21682531d9eeSKent Overstreet }
21692531d9eeSKent Overstreet }
21702531d9eeSKent Overstreet
21712531d9eeSKent Overstreet mutex_unlock(&c->bucket_lock);
21722531d9eeSKent Overstreet }
21732531d9eeSKent Overstreet
2174cafe5635SKent Overstreet /* Btree insertion */
2175cafe5635SKent Overstreet
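/*
 * Insert a single key into b's in-memory bsets; returns false when
 * nothing was inserted (e.g. a replace whose expected key no longer
 * matches).
 */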
2176829a60b9SKent Overstreet static bool btree_insert_key(struct btree *b, struct bkey *k,
21771b207d80SKent Overstreet struct bkey *replace_key)
2178cafe5635SKent Overstreet {
21796f10f7d1SColy Li unsigned int status;
2180cafe5635SKent Overstreet
2181cafe5635SKent Overstreet BUG_ON(bkey_cmp(k, &b->key) > 0);
2182cafe5635SKent Overstreet
2183829a60b9SKent Overstreet status = bch_btree_insert_key(&b->keys, k, replace_key);
2184829a60b9SKent Overstreet if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2185dc9d98d6SKent Overstreet bch_check_keys(&b->keys, "%u for %s", status,
21861b207d80SKent Overstreet replace_key ? "replace" : "insert");
2187cafe5635SKent Overstreet
2188829a60b9SKent Overstreet trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2189829a60b9SKent Overstreet status);
2190cafe5635SKent Overstreet return true;
2191829a60b9SKent Overstreet } else
2192829a60b9SKent Overstreet return false;
2193cafe5635SKent Overstreet }
2194cafe5635SKent Overstreet
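/*
 * How many u64s of key space are left in b, reserving room for one
 * maximum-size key when b holds extents, since an insert may have to
 * split an existing extent in two.
 */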
219559158fdeSKent Overstreet static size_t insert_u64s_remaining(struct btree *b)
219659158fdeSKent Overstreet {
21973572324aSKent Overstreet long ret = bch_btree_keys_u64s_remaining(&b->keys);
219859158fdeSKent Overstreet
219959158fdeSKent Overstreet /*
220059158fdeSKent Overstreet * Might land in the middle of an existing extent and have to split it
220159158fdeSKent Overstreet */
220259158fdeSKent Overstreet if (b->keys.ops->is_extents)
220359158fdeSKent Overstreet ret -= KEY_MAX_U64S;
220459158fdeSKent Overstreet
220559158fdeSKent Overstreet return max(ret, 0L);
220659158fdeSKent Overstreet }
220759158fdeSKent Overstreet
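/*
 * Insert as many keys from insert_keys as fit in b. Keys entirely
 * within b's range go in whole; a key straddling the end of b's range
 * is split, the front half inserted here and the rest left on the
 * keylist for the next node. Sets op->insert_collision if nothing was
 * inserted.
 */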
220826c949f8SKent Overstreet static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
22091b207d80SKent Overstreet struct keylist *insert_keys,
22101b207d80SKent Overstreet struct bkey *replace_key)
2211cafe5635SKent Overstreet {
2212cafe5635SKent Overstreet bool ret = false;
2213dc9d98d6SKent Overstreet int oldsize = bch_count_data(&b->keys);
2214cafe5635SKent Overstreet
221526c949f8SKent Overstreet while (!bch_keylist_empty(insert_keys)) {
2216c2f95ae2SKent Overstreet struct bkey *k = insert_keys->keys;
221726c949f8SKent Overstreet
221859158fdeSKent Overstreet if (bkey_u64s(k) > insert_u64s_remaining(b))
2219403b6cdeSKent Overstreet break;
2220403b6cdeSKent Overstreet
2221403b6cdeSKent Overstreet if (bkey_cmp(k, &b->key) <= 0) {
22223a3b6a4eSKent Overstreet if (!b->level)
22233a3b6a4eSKent Overstreet bkey_put(b->c, k);
222426c949f8SKent Overstreet
2225829a60b9SKent Overstreet ret |= btree_insert_key(b, k, replace_key);
222626c949f8SKent Overstreet bch_keylist_pop_front(insert_keys);
222726c949f8SKent Overstreet } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
222826c949f8SKent Overstreet BKEY_PADDED(key) temp;
2229c2f95ae2SKent Overstreet bkey_copy(&temp.key, insert_keys->keys);
223026c949f8SKent Overstreet
223126c949f8SKent Overstreet bch_cut_back(&b->key, &temp.key);
2232c2f95ae2SKent Overstreet bch_cut_front(&b->key, insert_keys->keys);
223326c949f8SKent Overstreet
2234829a60b9SKent Overstreet ret |= btree_insert_key(b, &temp.key, replace_key);
223526c949f8SKent Overstreet break;
223626c949f8SKent Overstreet } else {
223726c949f8SKent Overstreet break;
223826c949f8SKent Overstreet }
2239cafe5635SKent Overstreet }
2240cafe5635SKent Overstreet
2241829a60b9SKent Overstreet if (!ret)
2242829a60b9SKent Overstreet op->insert_collision = true;
2243829a60b9SKent Overstreet
2244403b6cdeSKent Overstreet BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2245403b6cdeSKent Overstreet
2246dc9d98d6SKent Overstreet BUG_ON(bch_count_data(&b->keys) < oldsize);
2247cafe5635SKent Overstreet return ret;
2248cafe5635SKent Overstreet }
2249cafe5635SKent Overstreet
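/*
 * Rewrite @b as a replacement node n1 and insert @insert_keys into it.
 * If @b's keys already fill more than 4/5 of a node, the result is
 * split: after the insert, roughly the last 2/5 of n1's keys are moved
 * to a second node n2, and splitting the root also allocates n3 as the
 * new root. The new nodes' keys are then inserted into the parent
 * (recursing via bch_btree_insert_node()) and @b is freed.
 */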
225026c949f8SKent Overstreet static int btree_split(struct btree *b, struct btree_op *op,
225126c949f8SKent Overstreet struct keylist *insert_keys,
22521b207d80SKent Overstreet struct bkey *replace_key)
2253cafe5635SKent Overstreet {
2254d6fd3b11SKent Overstreet bool split;
2255cafe5635SKent Overstreet struct btree *n1, *n2 = NULL, *n3 = NULL;
2256cafe5635SKent Overstreet uint64_t start_time = local_clock();
2257b54d6934SKent Overstreet struct closure cl;
225817e21a9fSKent Overstreet struct keylist parent_keys;
2259b54d6934SKent Overstreet
2260b54d6934SKent Overstreet closure_init_stack(&cl);
226117e21a9fSKent Overstreet bch_keylist_init(&parent_keys);
2262cafe5635SKent Overstreet
22630a63b66dSKent Overstreet if (btree_check_reserve(b, op)) {
22640a63b66dSKent Overstreet if (!b->level)
226578365411SKent Overstreet return -EINTR;
22660a63b66dSKent Overstreet else
22670a63b66dSKent Overstreet WARN(1, "insufficient reserve for split\n");
22680a63b66dSKent Overstreet }
226978365411SKent Overstreet
22700a63b66dSKent Overstreet n1 = btree_node_alloc_replacement(b, op);
2271cafe5635SKent Overstreet if (IS_ERR(n1))
2272cafe5635SKent Overstreet goto err;
2273cafe5635SKent Overstreet
2274ee811287SKent Overstreet split = set_blocks(btree_bset_first(n1),
22754e1ebae3SColy Li block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
2276cafe5635SKent Overstreet
2277cafe5635SKent Overstreet if (split) {
22786f10f7d1SColy Li unsigned int keys = 0;
2279cafe5635SKent Overstreet
2280ee811287SKent Overstreet trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2281c37511b8SKent Overstreet
22822452cc89SSlava Pestov n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2283cafe5635SKent Overstreet if (IS_ERR(n2))
2284cafe5635SKent Overstreet goto err_free1;
2285cafe5635SKent Overstreet
2286d6fd3b11SKent Overstreet if (!b->parent) {
22872452cc89SSlava Pestov n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2288cafe5635SKent Overstreet if (IS_ERR(n3))
2289cafe5635SKent Overstreet goto err_free2;
2290cafe5635SKent Overstreet }
2291cafe5635SKent Overstreet
22922a285686SKent Overstreet mutex_lock(&n1->write_lock);
22932a285686SKent Overstreet mutex_lock(&n2->write_lock);
22942a285686SKent Overstreet
22951b207d80SKent Overstreet bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2296cafe5635SKent Overstreet
2297d6fd3b11SKent Overstreet /*
2298d6fd3b11SKent Overstreet * Has to be a linear search because we don't have an auxiliary
2299cafe5635SKent Overstreet * search tree yet
2300cafe5635SKent Overstreet */
2301cafe5635SKent Overstreet
2302ee811287SKent Overstreet while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2303ee811287SKent Overstreet keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2304fafff81cSKent Overstreet keys));
2305cafe5635SKent Overstreet
2306fafff81cSKent Overstreet bkey_copy_key(&n1->key,
2307ee811287SKent Overstreet bset_bkey_idx(btree_bset_first(n1), keys));
2308ee811287SKent Overstreet keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2309cafe5635SKent Overstreet
2310ee811287SKent Overstreet btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2311ee811287SKent Overstreet btree_bset_first(n1)->keys = keys;
2312cafe5635SKent Overstreet
2313ee811287SKent Overstreet memcpy(btree_bset_first(n2)->start,
2314ee811287SKent Overstreet bset_bkey_last(btree_bset_first(n1)),
2315ee811287SKent Overstreet btree_bset_first(n2)->keys * sizeof(uint64_t));
2316cafe5635SKent Overstreet
2317cafe5635SKent Overstreet bkey_copy_key(&n2->key, &b->key);
2318cafe5635SKent Overstreet
231917e21a9fSKent Overstreet bch_keylist_add(&parent_keys, &n2->key);
2320b54d6934SKent Overstreet bch_btree_node_write(n2, &cl);
23212a285686SKent Overstreet mutex_unlock(&n2->write_lock);
2322cafe5635SKent Overstreet rw_unlock(true, n2);
2323c37511b8SKent Overstreet } else {
2324ee811287SKent Overstreet trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2325c37511b8SKent Overstreet
23262a285686SKent Overstreet mutex_lock(&n1->write_lock);
23271b207d80SKent Overstreet bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2328c37511b8SKent Overstreet }
2329cafe5635SKent Overstreet
233017e21a9fSKent Overstreet bch_keylist_add(&parent_keys, &n1->key);
2331b54d6934SKent Overstreet bch_btree_node_write(n1, &cl);
23322a285686SKent Overstreet mutex_unlock(&n1->write_lock);
2333cafe5635SKent Overstreet
2334cafe5635SKent Overstreet if (n3) {
2335d6fd3b11SKent Overstreet /* Depth increases, make a new root */
23362a285686SKent Overstreet mutex_lock(&n3->write_lock);
2337cafe5635SKent Overstreet bkey_copy_key(&n3->key, &MAX_KEY);
233817e21a9fSKent Overstreet bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2339b54d6934SKent Overstreet bch_btree_node_write(n3, &cl);
23402a285686SKent Overstreet mutex_unlock(&n3->write_lock);
2341cafe5635SKent Overstreet
2342b54d6934SKent Overstreet closure_sync(&cl);
2343cafe5635SKent Overstreet bch_btree_set_root(n3);
2344cafe5635SKent Overstreet rw_unlock(true, n3);
2345d6fd3b11SKent Overstreet } else if (!b->parent) {
2346d6fd3b11SKent Overstreet /* Root filled up but didn't need to be split */
2347b54d6934SKent Overstreet closure_sync(&cl);
2348cafe5635SKent Overstreet bch_btree_set_root(n1);
2349cafe5635SKent Overstreet } else {
235017e21a9fSKent Overstreet /* Split a non root node */
2351b54d6934SKent Overstreet closure_sync(&cl);
235217e21a9fSKent Overstreet make_btree_freeing_key(b, parent_keys.top);
235317e21a9fSKent Overstreet bch_keylist_push(&parent_keys);
235417e21a9fSKent Overstreet
235517e21a9fSKent Overstreet bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
235617e21a9fSKent Overstreet BUG_ON(!bch_keylist_empty(&parent_keys));
2357cafe5635SKent Overstreet }
2358cafe5635SKent Overstreet
235905335cffSKent Overstreet btree_node_free(b);
2360cafe5635SKent Overstreet rw_unlock(true, n1);
2361cafe5635SKent Overstreet
2362169ef1cfSKent Overstreet bch_time_stats_update(&b->c->btree_split_time, start_time);
2363cafe5635SKent Overstreet
2364cafe5635SKent Overstreet return 0;
2365cafe5635SKent Overstreet err_free2:
23665f5837d2SKent Overstreet bkey_put(b->c, &n2->key);
2367e8e1d468SKent Overstreet btree_node_free(n2);
2368cafe5635SKent Overstreet rw_unlock(true, n2);
2369cafe5635SKent Overstreet err_free1:
23705f5837d2SKent Overstreet bkey_put(b->c, &n1->key);
2371e8e1d468SKent Overstreet btree_node_free(n1);
2372cafe5635SKent Overstreet rw_unlock(true, n1);
2373cafe5635SKent Overstreet err:
23740a63b66dSKent Overstreet WARN(1, "bcache: btree split failed (level %u)", b->level);
23755f5837d2SKent Overstreet
2376cafe5635SKent Overstreet if (n3 == ERR_PTR(-EAGAIN) ||
2377cafe5635SKent Overstreet n2 == ERR_PTR(-EAGAIN) ||
2378cafe5635SKent Overstreet n1 == ERR_PTR(-EAGAIN))
2379cafe5635SKent Overstreet return -EAGAIN;
2380cafe5635SKent Overstreet
2381cafe5635SKent Overstreet return -ENOMEM;
2382cafe5635SKent Overstreet }
2383cafe5635SKent Overstreet
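/*
 * Insert keys into a single node, splitting it if there isn't room.
 * Returns 0 on success; -EAGAIN if a split is needed while we might be
 * holding queued bios (current->bio_list), so the caller must retry
 * from a context that can block; -EINTR if the caller's locks were
 * insufficient (or keys remain after a split) and the traversal must
 * be retried with op->lock raised.
 */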
238426c949f8SKent Overstreet static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2385c18536a7SKent Overstreet struct keylist *insert_keys,
23861b207d80SKent Overstreet atomic_t *journal_ref,
23871b207d80SKent Overstreet struct bkey *replace_key)
238826c949f8SKent Overstreet {
23892a285686SKent Overstreet struct closure cl;
23902a285686SKent Overstreet
23911b207d80SKent Overstreet BUG_ON(b->level && replace_key);
23921b207d80SKent Overstreet
23932a285686SKent Overstreet closure_init_stack(&cl);
23942a285686SKent Overstreet
23952a285686SKent Overstreet mutex_lock(&b->write_lock);
23962a285686SKent Overstreet
23972a285686SKent Overstreet if (write_block(b) != btree_bset_last(b) &&
23982a285686SKent Overstreet b->keys.last_set_unwritten)
23992a285686SKent Overstreet bch_btree_init_next(b); /* just wrote a set */
24002a285686SKent Overstreet
240159158fdeSKent Overstreet if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
24022a285686SKent Overstreet mutex_unlock(&b->write_lock);
24032a285686SKent Overstreet goto split;
24042a285686SKent Overstreet }
24052a285686SKent Overstreet
24062a285686SKent Overstreet BUG_ON(write_block(b) != btree_bset_last(b));
24072a285686SKent Overstreet
24082a285686SKent Overstreet if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
24092a285686SKent Overstreet if (!b->level)
24102a285686SKent Overstreet bch_btree_leaf_dirty(b, journal_ref);
24112a285686SKent Overstreet else
24122a285686SKent Overstreet bch_btree_node_write(b, &cl);
24132a285686SKent Overstreet }
24142a285686SKent Overstreet
24152a285686SKent Overstreet mutex_unlock(&b->write_lock);
24162a285686SKent Overstreet
24172a285686SKent Overstreet /* wait for btree node write if necessary, after unlock */
24182a285686SKent Overstreet closure_sync(&cl);
24192a285686SKent Overstreet
24202a285686SKent Overstreet return 0;
24212a285686SKent Overstreet split:
242226c949f8SKent Overstreet if (current->bio_list) {
242326c949f8SKent Overstreet op->lock = b->c->root->level + 1;
242417e21a9fSKent Overstreet return -EAGAIN;
242526c949f8SKent Overstreet } else if (op->lock <= b->c->root->level) {
242626c949f8SKent Overstreet op->lock = b->c->root->level + 1;
242717e21a9fSKent Overstreet return -EINTR;
242826c949f8SKent Overstreet } else {
242917e21a9fSKent Overstreet /* Invalidated all iterators */
24303b3e9e50SKent Overstreet int ret = btree_split(b, op, insert_keys, replace_key);
24313b3e9e50SKent Overstreet
24322a285686SKent Overstreet if (bch_keylist_empty(insert_keys))
243317e21a9fSKent Overstreet return 0;
24342a285686SKent Overstreet else if (!ret)
24352a285686SKent Overstreet return -EINTR;
24362a285686SKent Overstreet return ret;
243717e21a9fSKent Overstreet }
243826c949f8SKent Overstreet }
243926c949f8SKent Overstreet
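/*
 * Insert a "check key": a placeholder whose pointer is random bytes on
 * the fake PTR_CHECK_DEV device, so it can never match real cached
 * data. The cache miss path uses this so that a later insert with
 * replace_key can tell whether anything else wrote to the range in the
 * meantime. Upgrades op's read lock on @b to a write lock; if the node
 * changed underneath us (pointer or sequence number), returns -EINTR.
 */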
2440e7c590ebSKent Overstreet int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2441e7c590ebSKent Overstreet struct bkey *check_key)
2442e7c590ebSKent Overstreet {
2443e7c590ebSKent Overstreet int ret = -EINTR;
2444e7c590ebSKent Overstreet uint64_t btree_ptr = b->key.ptr[0];
2445e7c590ebSKent Overstreet unsigned long seq = b->seq;
2446e7c590ebSKent Overstreet struct keylist insert;
2447e7c590ebSKent Overstreet bool upgrade = op->lock == -1;
2448e7c590ebSKent Overstreet
2449e7c590ebSKent Overstreet bch_keylist_init(&insert);
2450e7c590ebSKent Overstreet
2451e7c590ebSKent Overstreet if (upgrade) {
2452e7c590ebSKent Overstreet rw_unlock(false, b);
2453e7c590ebSKent Overstreet rw_lock(true, b, b->level);
2454e7c590ebSKent Overstreet
2455e7c590ebSKent Overstreet if (b->key.ptr[0] != btree_ptr ||
24562ef9ccbfSZheng Liu b->seq != seq + 1) {
24572ef9ccbfSZheng Liu op->lock = b->level;
2458e7c590ebSKent Overstreet goto out;
2459e7c590ebSKent Overstreet }
24602ef9ccbfSZheng Liu }
2461e7c590ebSKent Overstreet
2462e7c590ebSKent Overstreet SET_KEY_PTRS(check_key, 1);
2463e7c590ebSKent Overstreet get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2464e7c590ebSKent Overstreet
2465e7c590ebSKent Overstreet SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2466e7c590ebSKent Overstreet
2467e7c590ebSKent Overstreet bch_keylist_add(&insert, check_key);
2468e7c590ebSKent Overstreet
24691b207d80SKent Overstreet ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2470e7c590ebSKent Overstreet
2471e7c590ebSKent Overstreet BUG_ON(!ret && !bch_keylist_empty(&insert));
2472e7c590ebSKent Overstreet out:
2473e7c590ebSKent Overstreet if (upgrade)
2474e7c590ebSKent Overstreet downgrade_write(&b->lock);
2475e7c590ebSKent Overstreet return ret;
2476e7c590ebSKent Overstreet }
2477e7c590ebSKent Overstreet
2478cc7b8819SKent Overstreet struct btree_insert_op {
2479cc7b8819SKent Overstreet struct btree_op op;
2480cc7b8819SKent Overstreet struct keylist *keys;
2481cc7b8819SKent Overstreet atomic_t *journal_ref;
2482cc7b8819SKent Overstreet struct bkey *replace_key;
2483cc7b8819SKent Overstreet };
2484cc7b8819SKent Overstreet
248508239ca2SWei Yongjun static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2486cafe5635SKent Overstreet {
2487cc7b8819SKent Overstreet struct btree_insert_op *op = container_of(b_op,
2488cc7b8819SKent Overstreet struct btree_insert_op, op);
2489403b6cdeSKent Overstreet
2490cc7b8819SKent Overstreet int ret = bch_btree_insert_node(b, &op->op, op->keys,
2491cc7b8819SKent Overstreet op->journal_ref, op->replace_key);
2492cc7b8819SKent Overstreet if (ret && !bch_keylist_empty(op->keys))
2493cc7b8819SKent Overstreet return ret;
2494cc7b8819SKent Overstreet else
2495cc7b8819SKent Overstreet return MAP_DONE;
2496cafe5635SKent Overstreet }
2497cafe5635SKent Overstreet
2498cc7b8819SKent Overstreet int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2499cc7b8819SKent Overstreet atomic_t *journal_ref, struct bkey *replace_key)
2500cafe5635SKent Overstreet {
2501cc7b8819SKent Overstreet struct btree_insert_op op;
2502cafe5635SKent Overstreet int ret = 0;
2503cafe5635SKent Overstreet
2504cc7b8819SKent Overstreet BUG_ON(current->bio_list);
25054f3d4014SKent Overstreet BUG_ON(bch_keylist_empty(keys));
2506cafe5635SKent Overstreet
2507cc7b8819SKent Overstreet bch_btree_op_init(&op.op, 0);
2508cc7b8819SKent Overstreet op.keys = keys;
2509cc7b8819SKent Overstreet op.journal_ref = journal_ref;
2510cc7b8819SKent Overstreet op.replace_key = replace_key;
2511cafe5635SKent Overstreet
2512cc7b8819SKent Overstreet while (!ret && !bch_keylist_empty(keys)) {
2513cc7b8819SKent Overstreet op.op.lock = 0;
2514cc7b8819SKent Overstreet ret = bch_btree_map_leaf_nodes(&op.op, c,
2515cc7b8819SKent Overstreet &START_KEY(keys->keys),
2516cc7b8819SKent Overstreet btree_insert_fn);
2517cc7b8819SKent Overstreet }
2518cc7b8819SKent Overstreet
2519cc7b8819SKent Overstreet if (ret) {
2520cafe5635SKent Overstreet struct bkey *k;
2521cafe5635SKent Overstreet
252246f5aa88SJoe Perches pr_err("error %i\n", ret);
2523cafe5635SKent Overstreet
25244f3d4014SKent Overstreet while ((k = bch_keylist_pop(keys)))
25253a3b6a4eSKent Overstreet bkey_put(c, k);
2526cc7b8819SKent Overstreet } else if (op.op.insert_collision)
2527cc7b8819SKent Overstreet ret = -ESRCH;
25286054c6d4SKent Overstreet
2529cafe5635SKent Overstreet return ret;
2530cafe5635SKent Overstreet }
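
/*
 * A minimal (hypothetical) caller, inserting one key from process
 * context with no journal reference and no replace semantics:
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	bch_keylist_add(&keys, &k);
 *	ret = bch_btree_insert(c, &keys, NULL, NULL);
 *
 * A nonzero return means not all keys were inserted; -ESRCH in
 * particular signals a replace_key collision.
 */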
2531cafe5635SKent Overstreet
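/*
 * Point the cache set at a new root node, then write a journal entry
 * so the new root's location is persisted.
 */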
2532cafe5635SKent Overstreet void bch_btree_set_root(struct btree *b)
2533cafe5635SKent Overstreet {
25346f10f7d1SColy Li unsigned int i;
2535e49c7c37SKent Overstreet struct closure cl;
2536e49c7c37SKent Overstreet
2537e49c7c37SKent Overstreet closure_init_stack(&cl);
2538cafe5635SKent Overstreet
2539c37511b8SKent Overstreet trace_bcache_btree_set_root(b);
2540c37511b8SKent Overstreet
2541cafe5635SKent Overstreet BUG_ON(!b->written);
2542cafe5635SKent Overstreet
2543cafe5635SKent Overstreet for (i = 0; i < KEY_PTRS(&b->key); i++)
2544cafe5635SKent Overstreet BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2545cafe5635SKent Overstreet
2546cafe5635SKent Overstreet mutex_lock(&b->c->bucket_lock);
2547cafe5635SKent Overstreet list_del_init(&b->list);
2548cafe5635SKent Overstreet mutex_unlock(&b->c->bucket_lock);
2549cafe5635SKent Overstreet
2550cafe5635SKent Overstreet b->c->root = b;
2551cafe5635SKent Overstreet
2552e49c7c37SKent Overstreet bch_journal_meta(b->c, &cl);
2553e49c7c37SKent Overstreet closure_sync(&cl);
2554cafe5635SKent Overstreet }
2555cafe5635SKent Overstreet
255648dad8baSKent Overstreet /* Map across nodes or keys */
255748dad8baSKent Overstreet
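/*
 * The map functions walk the btree from @from, calling @fn on each node
 * (or key). @fn returns MAP_DONE to stop early or MAP_CONTINUE to keep
 * going; with MAP_ALL_NODES interior nodes are visited as well as
 * leaves, and MAP_END_KEY additionally passes @fn a synthetic zero-size
 * key at the end of each leaf. A minimal (hypothetical) node callback:
 *
 *	static int count_nodes_fn(struct btree_op *op, struct btree *b)
 *	{
 *		struct node_counter *ctr =
 *			container_of(op, struct node_counter, op);
 *
 *		ctr->nodes++;
 *		return MAP_CONTINUE;
 *	}
 */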
255848dad8baSKent Overstreet static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
255948dad8baSKent Overstreet struct bkey *from,
256048dad8baSKent Overstreet btree_map_nodes_fn *fn, int flags)
256148dad8baSKent Overstreet {
256248dad8baSKent Overstreet int ret = MAP_CONTINUE;
256348dad8baSKent Overstreet
256448dad8baSKent Overstreet if (b->level) {
256548dad8baSKent Overstreet struct bkey *k;
2566*866898efSKuan-Wei Chiu struct btree_iter iter;
256748dad8baSKent Overstreet
2568*866898efSKuan-Wei Chiu min_heap_init(&iter.heap, NULL, MAX_BSETS);
2569*866898efSKuan-Wei Chiu bch_btree_iter_init(&b->keys, &iter, from);
257048dad8baSKent Overstreet
2571*866898efSKuan-Wei Chiu while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
257248dad8baSKent Overstreet bch_ptr_bad))) {
2573feac1a70SColy Li ret = bcache_btree(map_nodes_recurse, k, b,
257448dad8baSKent Overstreet op, from, fn, flags);
257548dad8baSKent Overstreet from = NULL;
257648dad8baSKent Overstreet
257748dad8baSKent Overstreet if (ret != MAP_CONTINUE)
257848dad8baSKent Overstreet return ret;
257948dad8baSKent Overstreet }
258048dad8baSKent Overstreet }
258148dad8baSKent Overstreet
258248dad8baSKent Overstreet if (!b->level || flags == MAP_ALL_NODES)
258348dad8baSKent Overstreet ret = fn(op, b);
258448dad8baSKent Overstreet
258548dad8baSKent Overstreet return ret;
258648dad8baSKent Overstreet }
258748dad8baSKent Overstreet
258848dad8baSKent Overstreet int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
258948dad8baSKent Overstreet struct bkey *from, btree_map_nodes_fn *fn, int flags)
259048dad8baSKent Overstreet {
2591feac1a70SColy Li return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
259248dad8baSKent Overstreet }
259348dad8baSKent Overstreet
2594253a99d9SColy Li int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
259548dad8baSKent Overstreet struct bkey *from, btree_map_keys_fn *fn,
259648dad8baSKent Overstreet int flags)
259748dad8baSKent Overstreet {
259848dad8baSKent Overstreet int ret = MAP_CONTINUE;
259948dad8baSKent Overstreet struct bkey *k;
2600*866898efSKuan-Wei Chiu struct btree_iter iter;
260148dad8baSKent Overstreet
2602*866898efSKuan-Wei Chiu min_heap_init(&iter.heap, NULL, MAX_BSETS);
2603*866898efSKuan-Wei Chiu bch_btree_iter_init(&b->keys, &iter, from);
260448dad8baSKent Overstreet
2605*866898efSKuan-Wei Chiu while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
260648dad8baSKent Overstreet ret = !b->level
260748dad8baSKent Overstreet ? fn(op, b, k)
2608feac1a70SColy Li : bcache_btree(map_keys_recurse, k,
2609feac1a70SColy Li b, op, from, fn, flags);
261048dad8baSKent Overstreet from = NULL;
261148dad8baSKent Overstreet
261248dad8baSKent Overstreet if (ret != MAP_CONTINUE)
261348dad8baSKent Overstreet return ret;
261448dad8baSKent Overstreet }
261548dad8baSKent Overstreet
261648dad8baSKent Overstreet if (!b->level && (flags & MAP_END_KEY))
261748dad8baSKent Overstreet ret = fn(op, b, &KEY(KEY_INODE(&b->key),
261848dad8baSKent Overstreet KEY_OFFSET(&b->key), 0));
261948dad8baSKent Overstreet
262048dad8baSKent Overstreet return ret;
262148dad8baSKent Overstreet }
262248dad8baSKent Overstreet
262348dad8baSKent Overstreet int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
262448dad8baSKent Overstreet struct bkey *from, btree_map_keys_fn *fn, int flags)
262548dad8baSKent Overstreet {
2626feac1a70SColy Li return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
262748dad8baSKent Overstreet }
262848dad8baSKent Overstreet
2629cafe5635SKent Overstreet /* Keybuf code */
2630cafe5635SKent Overstreet
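/*
 * A keybuf buffers keys of interest (writeback and moving GC use this)
 * in an rbtree so they can be processed asynchronously, with w->private
 * marking entries currently in flight. It is refilled by scanning the
 * btree from buf->last_scanned with a caller-supplied predicate.
 */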
2631cafe5635SKent Overstreet static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2632cafe5635SKent Overstreet {
2633cafe5635SKent Overstreet /* Overlapping keys compare equal */
2634cafe5635SKent Overstreet if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2635cafe5635SKent Overstreet return -1;
2636cafe5635SKent Overstreet if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2637cafe5635SKent Overstreet return 1;
2638cafe5635SKent Overstreet return 0;
2639cafe5635SKent Overstreet }
2640cafe5635SKent Overstreet
2641cafe5635SKent Overstreet static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2642cafe5635SKent Overstreet struct keybuf_key *r)
2643cafe5635SKent Overstreet {
2644cafe5635SKent Overstreet return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2645cafe5635SKent Overstreet }
2646cafe5635SKent Overstreet
264748dad8baSKent Overstreet struct refill {
264848dad8baSKent Overstreet struct btree_op op;
26496f10f7d1SColy Li unsigned int nr_found;
265048dad8baSKent Overstreet struct keybuf *buf;
265148dad8baSKent Overstreet struct bkey *end;
265248dad8baSKent Overstreet keybuf_pred_fn *pred;
265348dad8baSKent Overstreet };
265448dad8baSKent Overstreet
265548dad8baSKent Overstreet static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
265648dad8baSKent Overstreet struct bkey *k)
2657cafe5635SKent Overstreet {
265848dad8baSKent Overstreet struct refill *refill = container_of(op, struct refill, op);
265948dad8baSKent Overstreet struct keybuf *buf = refill->buf;
266048dad8baSKent Overstreet int ret = MAP_CONTINUE;
2661cafe5635SKent Overstreet
26622d6cb6edSTang Junhui if (bkey_cmp(k, refill->end) > 0) {
266348dad8baSKent Overstreet ret = MAP_DONE;
266448dad8baSKent Overstreet goto out;
2665cafe5635SKent Overstreet }
2666cafe5635SKent Overstreet
266748dad8baSKent Overstreet if (!KEY_SIZE(k)) /* end key */
266848dad8baSKent Overstreet goto out;
2669cafe5635SKent Overstreet
267048dad8baSKent Overstreet if (refill->pred(buf, k)) {
2671cafe5635SKent Overstreet struct keybuf_key *w;
2672cafe5635SKent Overstreet
2673cafe5635SKent Overstreet spin_lock(&buf->lock);
2674cafe5635SKent Overstreet
2675cafe5635SKent Overstreet w = array_alloc(&buf->freelist);
267648dad8baSKent Overstreet if (!w) {
267748dad8baSKent Overstreet spin_unlock(&buf->lock);
267848dad8baSKent Overstreet return MAP_DONE;
267948dad8baSKent Overstreet }
2680cafe5635SKent Overstreet
2681cafe5635SKent Overstreet w->private = NULL;
2682cafe5635SKent Overstreet bkey_copy(&w->key, k);
2683cafe5635SKent Overstreet
2684cafe5635SKent Overstreet if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2685cafe5635SKent Overstreet array_free(&buf->freelist, w);
268648a915a8SKent Overstreet else
268748a915a8SKent Overstreet refill->nr_found++;
2688cafe5635SKent Overstreet
268948dad8baSKent Overstreet if (array_freelist_empty(&buf->freelist))
269048dad8baSKent Overstreet ret = MAP_DONE;
269148dad8baSKent Overstreet
2692cafe5635SKent Overstreet spin_unlock(&buf->lock);
2693cafe5635SKent Overstreet }
269448dad8baSKent Overstreet out:
269548dad8baSKent Overstreet buf->last_scanned = *k;
269648dad8baSKent Overstreet return ret;
2697cafe5635SKent Overstreet }
2698cafe5635SKent Overstreet
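/*
 * Scan the btree from buf->last_scanned towards @end, buffering keys
 * that match @pred until the freelist runs dry or @end is reached, then
 * update buf->start and buf->end to the range actually buffered.
 */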
2699cafe5635SKent Overstreet void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
270072c27061SKent Overstreet struct bkey *end, keybuf_pred_fn *pred)
2701cafe5635SKent Overstreet {
2702cafe5635SKent Overstreet struct bkey start = buf->last_scanned;
270348dad8baSKent Overstreet struct refill refill;
2704cafe5635SKent Overstreet
2705cafe5635SKent Overstreet cond_resched();
2706cafe5635SKent Overstreet
2707b54d6934SKent Overstreet bch_btree_op_init(&refill.op, -1);
270848a915a8SKent Overstreet refill.nr_found = 0;
270948dad8baSKent Overstreet refill.buf = buf;
271048dad8baSKent Overstreet refill.end = end;
271148dad8baSKent Overstreet refill.pred = pred;
271248dad8baSKent Overstreet
271348dad8baSKent Overstreet bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
271448dad8baSKent Overstreet refill_keybuf_fn, MAP_END_KEY);
2715cafe5635SKent Overstreet
271648a915a8SKent Overstreet trace_bcache_keyscan(refill.nr_found,
2717cafe5635SKent Overstreet KEY_INODE(&start), KEY_OFFSET(&start),
271848a915a8SKent Overstreet KEY_INODE(&buf->last_scanned),
271948a915a8SKent Overstreet KEY_OFFSET(&buf->last_scanned));
2720cafe5635SKent Overstreet
2721cafe5635SKent Overstreet spin_lock(&buf->lock);
2722cafe5635SKent Overstreet
2723cafe5635SKent Overstreet if (!RB_EMPTY_ROOT(&buf->keys)) {
2724cafe5635SKent Overstreet struct keybuf_key *w;
27251fae7cf0SColy Li
2726cafe5635SKent Overstreet w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2727cafe5635SKent Overstreet buf->start = START_KEY(&w->key);
2728cafe5635SKent Overstreet
2729cafe5635SKent Overstreet w = RB_LAST(&buf->keys, struct keybuf_key, node);
2730cafe5635SKent Overstreet buf->end = w->key;
2731cafe5635SKent Overstreet } else {
2732cafe5635SKent Overstreet buf->start = MAX_KEY;
2733cafe5635SKent Overstreet buf->end = MAX_KEY;
2734cafe5635SKent Overstreet }
2735cafe5635SKent Overstreet
2736cafe5635SKent Overstreet spin_unlock(&buf->lock);
2737cafe5635SKent Overstreet }
2738cafe5635SKent Overstreet
2739cafe5635SKent Overstreet static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2740cafe5635SKent Overstreet {
2741cafe5635SKent Overstreet rb_erase(&w->node, &buf->keys);
2742cafe5635SKent Overstreet array_free(&buf->freelist, w);
2743cafe5635SKent Overstreet }
2744cafe5635SKent Overstreet
2745cafe5635SKent Overstreet void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2746cafe5635SKent Overstreet {
2747cafe5635SKent Overstreet spin_lock(&buf->lock);
2748cafe5635SKent Overstreet __bch_keybuf_del(buf, w);
2749cafe5635SKent Overstreet spin_unlock(&buf->lock);
2750cafe5635SKent Overstreet }
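/*
 * Returns true if any buffered key overlapping [start, end) is still in
 * use (w->private set); overlapping keys that aren't in use are dropped
 * from the buffer.
 */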
2751cafe5635SKent Overstreet
2752cafe5635SKent Overstreet bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2753cafe5635SKent Overstreet struct bkey *end)
2754cafe5635SKent Overstreet {
2755cafe5635SKent Overstreet bool ret = false;
2756cafe5635SKent Overstreet struct keybuf_key *p, *w, s;
27571fae7cf0SColy Li
2758cafe5635SKent Overstreet s.key = *start;
2759cafe5635SKent Overstreet
2760cafe5635SKent Overstreet if (bkey_cmp(end, &buf->start) <= 0 ||
2761cafe5635SKent Overstreet bkey_cmp(start, &buf->end) >= 0)
2762cafe5635SKent Overstreet return false;
2763cafe5635SKent Overstreet
2764cafe5635SKent Overstreet spin_lock(&buf->lock);
2765cafe5635SKent Overstreet w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2766cafe5635SKent Overstreet
2767cafe5635SKent Overstreet while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2768cafe5635SKent Overstreet p = w;
2769cafe5635SKent Overstreet w = RB_NEXT(w, node);
2770cafe5635SKent Overstreet
2771cafe5635SKent Overstreet if (p->private)
2772cafe5635SKent Overstreet ret = true;
2773cafe5635SKent Overstreet else
2774cafe5635SKent Overstreet __bch_keybuf_del(buf, p);
2775cafe5635SKent Overstreet }
2776cafe5635SKent Overstreet
2777cafe5635SKent Overstreet spin_unlock(&buf->lock);
2778cafe5635SKent Overstreet return ret;
2779cafe5635SKent Overstreet }
2780cafe5635SKent Overstreet
2781cafe5635SKent Overstreet struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2782cafe5635SKent Overstreet {
2783cafe5635SKent Overstreet struct keybuf_key *w;
27841fae7cf0SColy Li
2785cafe5635SKent Overstreet spin_lock(&buf->lock);
2786cafe5635SKent Overstreet
2787cafe5635SKent Overstreet w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2788cafe5635SKent Overstreet
2789cafe5635SKent Overstreet while (w && w->private)
2790cafe5635SKent Overstreet w = RB_NEXT(w, node);
2791cafe5635SKent Overstreet
2792cafe5635SKent Overstreet if (w)
2793cafe5635SKent Overstreet w->private = ERR_PTR(-EINTR);
2794cafe5635SKent Overstreet
2795cafe5635SKent Overstreet spin_unlock(&buf->lock);
2796cafe5635SKent Overstreet return w;
2797cafe5635SKent Overstreet }
2798cafe5635SKent Overstreet
2799cafe5635SKent Overstreet struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2800cafe5635SKent Overstreet struct keybuf *buf,
280172c27061SKent Overstreet struct bkey *end,
280272c27061SKent Overstreet keybuf_pred_fn *pred)
2803cafe5635SKent Overstreet {
2804cafe5635SKent Overstreet struct keybuf_key *ret;
2805cafe5635SKent Overstreet
2806cafe5635SKent Overstreet while (1) {
2807cafe5635SKent Overstreet ret = bch_keybuf_next(buf);
2808cafe5635SKent Overstreet if (ret)
2809cafe5635SKent Overstreet break;
2810cafe5635SKent Overstreet
2811cafe5635SKent Overstreet if (bkey_cmp(&buf->last_scanned, end) >= 0) {
281246f5aa88SJoe Perches pr_debug("scan finished\n");
2813cafe5635SKent Overstreet break;
2814cafe5635SKent Overstreet }
2815cafe5635SKent Overstreet
281672c27061SKent Overstreet bch_refill_keybuf(c, buf, end, pred);
2817cafe5635SKent Overstreet }
2818cafe5635SKent Overstreet
2819cafe5635SKent Overstreet return ret;
2820cafe5635SKent Overstreet }
2821cafe5635SKent Overstreet
282272c27061SKent Overstreet void bch_keybuf_init(struct keybuf *buf)
2823cafe5635SKent Overstreet {
2824cafe5635SKent Overstreet buf->last_scanned = MAX_KEY;
2825cafe5635SKent Overstreet buf->keys = RB_ROOT;
2826cafe5635SKent Overstreet
2827cafe5635SKent Overstreet spin_lock_init(&buf->lock);
2828cafe5635SKent Overstreet array_allocator_init(&buf->freelist);
2829cafe5635SKent Overstreet }
28309f233ffeSKai Krakow
28319f233ffeSKai Krakow void bch_btree_exit(void)
28329f233ffeSKai Krakow {
28339f233ffeSKai Krakow if (btree_io_wq)
28349f233ffeSKai Krakow destroy_workqueue(btree_io_wq);
28359f233ffeSKai Krakow }
28369f233ffeSKai Krakow
28379f233ffeSKai Krakow int __init bch_btree_init(void)
28389f233ffeSKai Krakow {
2839d797bd98SKai Krakow btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
28409f233ffeSKai Krakow if (!btree_io_wq)
28419f233ffeSKai Krakow return -ENOMEM;
28429f233ffeSKai Krakow
28439f233ffeSKai Krakow return 0;
28449f233ffeSKai Krakow }