xref: /linux/drivers/md/bcache/btree.c (revision a6a1eb6214cf7adfd1e2da33d819df17fe8a64d7)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72
#define MAX_GC_TIMES		100
#define MIN_GC_NODES		100
#define GC_SLEEP_MS		100

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

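/*
 * Identity of a btree node for the in-memory hash table: the bucket
 * that the node's first pointer points into, combined with that
 * pointer's generation, so a reused bucket with a bumped gen never
 * matches a stale cached node.
 */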
#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

static struct workqueue_struct *btree_io_wq;

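/*
 * Whether the traversal in s should take a write lock on node b: nodes
 * at or below level s->lock are write locked, nodes above it are only
 * read locked.
 */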
#define insert_lock(s, b)	((b)->level <= (s)->lock)


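/*
 * Returns the first block of the node that hasn't been written to disk
 * yet, i.e. where the bset currently being appended to lives.
 */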
static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));

}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

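/*
 * Checksum the bset, skipping its own 8-byte csum field; the crc is
 * seeded with the node's first pointer so the checksum is tied to the
 * node's location on disk.
 */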
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = crc64_be(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
	 */
	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->cache->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c->cache));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c->cache))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));
out:
	mempool_free(iter, &b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

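/*
 * Two completion paths share a common tail: btree_node_write_done() is
 * used when do_btree_node_write() copied the bset into freshly
 * allocated pages and must free them, __btree_node_write_done() when
 * the node's own pages were written directly.
 */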
static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c->cache));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

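	/*
	 * Write from a private snapshot of the bset when pages can be
	 * allocated, so the IO sees a stable copy; otherwise write the
	 * node's own pages directly and wait for the IO synchronously
	 * below.
	 */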
	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		struct bio_vec *bv;
		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, b->bio, iter_all) {
			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
			addr += PAGE_SIZE;
		}

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		/*
		 * No problem for multipage bvec since the bio is
		 * just allocated
		 */
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
			&b->c->cache->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c->cache));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned int nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

	/*
	 * w->journal is always the oldest journal pin of all bkeys
	 * in the leaf node, to make sure the oldest jset seq won't
	 * be increased before this btree node is flushed.
	 */
	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

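/*
 * Reserve enough cached nodes to guarantee forward progress: roughly
 * eight per level of the tree plus sixteen for slack. Only nodes
 * beyond the reserve are eligible for the shrinker.
 */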
#define mca_reserve(c)	(((!IS_ERR_OR_NULL(c->root) && c->root->level) \
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

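/* Page allocation order implied by the key: ilog2 of the node's size in pages. */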
static unsigned int btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

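/*
 * Allocate in-memory storage for a node: at least c->btree_pages worth,
 * more if the key describes a bigger node. On failure the struct btree
 * is parked on the freed list for later reuse.
 */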
static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned int,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

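/* Branchless three-way compare returning -1, 0 or 1, with no subtraction overflow. */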
#define cmp_int(l, r)		((l > r) - (l < r))

#ifdef CONFIG_PROVE_LOCKING
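/*
 * Lock ordering reported to lockdep: nodes closer to the root order
 * first (note the negated level compare), then by key, matching the
 * top-down order in which btree node locks are taken.
 */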
static int btree_lock_cmp_fn(const struct lockdep_map *_a,
			     const struct lockdep_map *_b)
{
	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
	const struct btree *b = container_of(_b, struct btree, lock.dep_map);

	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
}

static void btree_lock_print_fn(const struct lockdep_map *map)
{
	const struct btree *b = container_of(map, struct btree, lock.dep_map);

	printk(KERN_CONT " l=%u %llu:%llu", b->level,
	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
}
#endif

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	/*
	 * kzalloc() is necessary here for initialization,
	 * see code comments in bch_btree_keys_init().
	 */
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

retry:
	/*
	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
	 * __bch_btree_node_write(). To avoid an extra flush, acquire
	 * b->write_lock before checking BTREE_NODE_dirty bit.
	 */
	mutex_lock(&b->write_lock);
	/*
	 * If this btree node is selected in btree_flush_write() by journal
	 * code, delay and retry until the node is flushed by journal code
	 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		pr_debug("bnode %p is flushing by journal, retry\n", b);
		mutex_unlock(&b->write_lock);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = shrink->private_data;
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int btree_cache_used;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	if (nr == 0)
		nr = 1;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	btree_cache_used = c->btree_cache_used;
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
		if (nr <= 0)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
		nr--;
		i++;
	}

	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		if (nr <= 0 || i >= btree_cache_used)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}

		nr--;
		i++;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed * c->btree_pages;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = shrink->private_data;

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink)
		shrinker_free(c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		/*
		 * This function is called by cache_set_free(), no I/O
		 * request on cache now, it is unnecessary to acquire
		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
		 */
		if (btree_node_dirty(b)) {
			btree_complete_write(b, btree_current_write(b));
			clear_bit(BTREE_NODE_dirty, &b->flags);
		}
		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL|__GFP_COMP,
				 ilog2(meta_bucket_pages(&c->cache->sb)));
	if (!c->verify_ondisk) {
		/*
		 * Don't worry about the mca_reserve() buckets
		 * allocated in the previous for-loop; they will be
		 * handled properly in bch_cache_set_unregister().
		 */
		return -ENOMEM;
	}

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid);
	if (!c->shrink) {
		pr_warn("bcache: %s: could not allocate shrinker\n", __func__);
		return 0;
	}

	c->shrink->count_objects = bch_mca_count;
	c->shrink->scan_objects = bch_mca_scan;
	c->shrink->seeks = 4;
	c->shrink->batch = c->btree_pages * 2;
	c->shrink->private_data = c;

	shrinker_register(c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

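/*
 * Only one thread may hold the cannibalize lock at a time. A second
 * caller registers itself on btree_cache_wait (when it has an op to
 * wait with) and gets -EINTR, telling it to bail out and retry.
 */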
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (likely(c->btree_cache_alloc_lock == NULL)) {
		c->btree_cache_alloc_lock = current;
	} else if (c->btree_cache_alloc_lock != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock(&c->btree_cannibalize_lock);
		return -EINTR;
	}
	spin_unlock(&c->btree_cannibalize_lock);

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a
 * time, or we'll deadlock. We use an open coded mutex to ensure that, which
 * cannibalize_bucket() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
void bch_cannibalize_unlock(struct cache_set *c)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
	spin_unlock(&c->btree_cannibalize_lock);
}

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	b->parent = parent;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	return b;
}

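/* Read a child node into the in-memory cache ahead of time, then drop the lock. */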
static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

retry:
	mutex_lock(&b->write_lock);
	/*
	 * If the btree node is selected and being flushed in
	 * btree_flush_write(), delay and retry until the
	 * BTREE_NODE_journal_flush bit is cleared; only then is it safe
	 * to free the btree node here, otherwise we race with the flush.
	 */
109250a260e8SColy Li 	if (btree_node_journal_flush(b)) {
109350a260e8SColy Li 		mutex_unlock(&b->write_lock);
109446f5aa88SJoe Perches 		pr_debug("bnode %p journal_flush set, retry\n", b);
109550a260e8SColy Li 		udelay(1);
109650a260e8SColy Li 		goto retry;
109750a260e8SColy Li 	}
10982a285686SKent Overstreet 
1099e5ec5f47SColy Li 	if (btree_node_dirty(b)) {
1100cafe5635SKent Overstreet 		btree_complete_write(b, btree_current_write(b));
1101cafe5635SKent Overstreet 		clear_bit(BTREE_NODE_dirty, &b->flags);
1102e5ec5f47SColy Li 	}
1103cafe5635SKent Overstreet 
11042a285686SKent Overstreet 	mutex_unlock(&b->write_lock);
11052a285686SKent Overstreet 
1106cafe5635SKent Overstreet 	cancel_delayed_work(&b->work);
1107cafe5635SKent Overstreet 
1108cafe5635SKent Overstreet 	mutex_lock(&b->c->bucket_lock);
1109cafe5635SKent Overstreet 	bch_bucket_free(b->c, &b->key);
1110cafe5635SKent Overstreet 	mca_bucket_free(b);
1111cafe5635SKent Overstreet 	mutex_unlock(&b->c->bucket_lock);
1112cafe5635SKent Overstreet }
1113cafe5635SKent Overstreet 
1114c5aa4a31SSlava Pestov struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
11152452cc89SSlava Pestov 				     int level, bool wait,
11162452cc89SSlava Pestov 				     struct btree *parent)
1117cafe5635SKent Overstreet {
1118cafe5635SKent Overstreet 	BKEY_PADDED(key) k;
111980fca8a1SZheng Wang 	struct btree *b;
1120cafe5635SKent Overstreet 
1121cafe5635SKent Overstreet 	mutex_lock(&c->bucket_lock);
1122cafe5635SKent Overstreet retry:
112380fca8a1SZheng Wang 	/* preset b so the error paths below return ERR_PTR(-EAGAIN) */
112480fca8a1SZheng Wang 	b = ERR_PTR(-EAGAIN);
112517e4aed8SColy Li 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
1126cafe5635SKent Overstreet 		goto err;
1127cafe5635SKent Overstreet 
11283a3b6a4eSKent Overstreet 	bkey_put(c, &k.key);
1129cafe5635SKent Overstreet 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1130cafe5635SKent Overstreet 
11310a63b66dSKent Overstreet 	b = mca_alloc(c, op, &k.key, level);
1132cafe5635SKent Overstreet 	if (IS_ERR(b))
1133cafe5635SKent Overstreet 		goto err_free;
1134cafe5635SKent Overstreet 
1135cafe5635SKent Overstreet 	if (!b) {
1136b1a67b0fSKent Overstreet 		cache_bug(c,
1137b1a67b0fSKent Overstreet 			"Tried to allocate bucket that was in btree cache");
1138cafe5635SKent Overstreet 		goto retry;
1139cafe5635SKent Overstreet 	}
1140cafe5635SKent Overstreet 
11412452cc89SSlava Pestov 	b->parent = parent;
11424a784266SColy Li 	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1143cafe5635SKent Overstreet 
1144cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
1145c37511b8SKent Overstreet 
1146c37511b8SKent Overstreet 	trace_bcache_btree_node_alloc(b);
1147cafe5635SKent Overstreet 	return b;
1148cafe5635SKent Overstreet err_free:
1149cafe5635SKent Overstreet 	bch_bucket_free(c, &k.key);
1150cafe5635SKent Overstreet err:
1151cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
1152c37511b8SKent Overstreet 
1153913dc33fSSlava Pestov 	trace_bcache_btree_node_alloc_fail(c);
1154cafe5635SKent Overstreet 	return b;
1155cafe5635SKent Overstreet }
1156cafe5635SKent Overstreet 
1157c5aa4a31SSlava Pestov static struct btree *bch_btree_node_alloc(struct cache_set *c,
11582452cc89SSlava Pestov 					  struct btree_op *op, int level,
11592452cc89SSlava Pestov 					  struct btree *parent)
1160c5aa4a31SSlava Pestov {
11612452cc89SSlava Pestov 	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1162c5aa4a31SSlava Pestov }
1163c5aa4a31SSlava Pestov 
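/*
 * Allocate a new node and sort @b's live keys into it; the new node's
 * key is copied from @b's so it covers the same range and can replace
 * @b in the parent.
 */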
11640a63b66dSKent Overstreet static struct btree *btree_node_alloc_replacement(struct btree *b,
11650a63b66dSKent Overstreet 						  struct btree_op *op)
1166cafe5635SKent Overstreet {
11672452cc89SSlava Pestov 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
11681fae7cf0SColy Li 
1169028ddcacSZheng Wang 	if (!IS_ERR(n)) {
11702a285686SKent Overstreet 		mutex_lock(&n->write_lock);
117189ebb4a2SKent Overstreet 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
117267539e85SKent Overstreet 		bkey_copy_key(&n->key, &b->key);
11732a285686SKent Overstreet 		mutex_unlock(&n->write_lock);
117467539e85SKent Overstreet 	}
1175cafe5635SKent Overstreet 
1176cafe5635SKent Overstreet 	return n;
1177cafe5635SKent Overstreet }
1178cafe5635SKent Overstreet 
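/*
 * Set up @k as the key that frees @b: a copy of b's key pointing at the
 * same buckets but with the pointer generations incremented, so all
 * existing pointers to the old node become stale once this key is
 * inserted. The key field itself is zeroed, which is how freeing keys
 * are recognized elsewhere.
 */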
11798835c123SKent Overstreet static void make_btree_freeing_key(struct btree *b, struct bkey *k)
11808835c123SKent Overstreet {
11816f10f7d1SColy Li 	unsigned int i;
11828835c123SKent Overstreet 
118305335cffSKent Overstreet 	mutex_lock(&b->c->bucket_lock);
118405335cffSKent Overstreet 
118505335cffSKent Overstreet 	atomic_inc(&b->c->prio_blocked);
118605335cffSKent Overstreet 
11878835c123SKent Overstreet 	bkey_copy(k, &b->key);
11888835c123SKent Overstreet 	bkey_copy_key(k, &ZERO_KEY);
11898835c123SKent Overstreet 
119005335cffSKent Overstreet 	for (i = 0; i < KEY_PTRS(k); i++)
119105335cffSKent Overstreet 		SET_PTR_GEN(k, i,
119211e9560eSChristoph Hellwig 			    bch_inc_gen(b->c->cache,
119305335cffSKent Overstreet 					PTR_BUCKET(b->c, &b->key, i)));
11948835c123SKent Overstreet 
119505335cffSKent Overstreet 	mutex_unlock(&b->c->bucket_lock);
11968835c123SKent Overstreet }
11978835c123SKent Overstreet 
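/*
 * Check that the btree reserve holds enough free buckets for splits at
 * every level from @b up to the root: two new nodes per level plus one
 * for a new root, i.e. (root->level - b->level) * 2 + 1. For example,
 * splitting a leaf under a level 2 root needs 2 * 2 + 1 = 5 reserved
 * buckets. Returns -EINTR (parking @op, if any, on btree_cache_wait)
 * when the reserve is too low; otherwise takes the cannibalize lock.
 */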
119878365411SKent Overstreet static int btree_check_reserve(struct btree *b, struct btree_op *op)
119978365411SKent Overstreet {
120078365411SKent Overstreet 	struct cache_set *c = b->c;
120108fdb2cdSColy Li 	struct cache *ca = c->cache;
120208fdb2cdSColy Li 	unsigned int reserve = (c->root->level - b->level) * 2 + 1;
120378365411SKent Overstreet 
120478365411SKent Overstreet 	mutex_lock(&c->bucket_lock);
120578365411SKent Overstreet 
120678365411SKent Overstreet 	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
120778365411SKent Overstreet 		if (op)
12080a63b66dSKent Overstreet 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
120978365411SKent Overstreet 					TASK_UNINTERRUPTIBLE);
12100a63b66dSKent Overstreet 		mutex_unlock(&c->bucket_lock);
12110a63b66dSKent Overstreet 		return -EINTR;
121278365411SKent Overstreet 	}
121378365411SKent Overstreet 
121478365411SKent Overstreet 	mutex_unlock(&c->bucket_lock);
12150a63b66dSKent Overstreet 
12160a63b66dSKent Overstreet 	return mca_cannibalize_lock(b->c, op);
121778365411SKent Overstreet }
121878365411SKent Overstreet 
1219cafe5635SKent Overstreet /* Garbage collection */
1220cafe5635SKent Overstreet 
1221487dded8SKent Overstreet static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1222487dded8SKent Overstreet 				    struct bkey *k)
1223cafe5635SKent Overstreet {
1224cafe5635SKent Overstreet 	uint8_t stale = 0;
12256f10f7d1SColy Li 	unsigned int i;
1226cafe5635SKent Overstreet 	struct bucket *g;
1227cafe5635SKent Overstreet 
1228cafe5635SKent Overstreet 	/*
1229cafe5635SKent Overstreet 	 * ptr_invalid() can't return true for the keys that mark btree nodes as
1230cafe5635SKent Overstreet 	 * freed, but since ptr_bad() returns true we'll never actually use them
1231cafe5635SKent Overstreet 	 * for anything and thus we don't want to mark their pointers here
1232cafe5635SKent Overstreet 	 */
1233cafe5635SKent Overstreet 	if (!bkey_cmp(k, &ZERO_KEY))
1234cafe5635SKent Overstreet 		return stale;
1235cafe5635SKent Overstreet 
1236cafe5635SKent Overstreet 	for (i = 0; i < KEY_PTRS(k); i++) {
1237cafe5635SKent Overstreet 		if (!ptr_available(c, k, i))
1238cafe5635SKent Overstreet 			continue;
1239cafe5635SKent Overstreet 
1240cafe5635SKent Overstreet 		g = PTR_BUCKET(c, k, i);
1241cafe5635SKent Overstreet 
12423a2fd9d5SKent Overstreet 		if (gen_after(g->last_gc, PTR_GEN(k, i)))
12433a2fd9d5SKent Overstreet 			g->last_gc = PTR_GEN(k, i);
1244cafe5635SKent Overstreet 
1245cafe5635SKent Overstreet 		if (ptr_stale(c, k, i)) {
1246cafe5635SKent Overstreet 			stale = max(stale, ptr_stale(c, k, i));
1247cafe5635SKent Overstreet 			continue;
1248cafe5635SKent Overstreet 		}
1249cafe5635SKent Overstreet 
1250cafe5635SKent Overstreet 		cache_bug_on(GC_MARK(g) &&
1251cafe5635SKent Overstreet 			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1252cafe5635SKent Overstreet 			     c, "inconsistent ptrs: mark = %llu, level = %i",
1253cafe5635SKent Overstreet 			     GC_MARK(g), level);
1254cafe5635SKent Overstreet 
1255cafe5635SKent Overstreet 		if (level)
1256cafe5635SKent Overstreet 			SET_GC_MARK(g, GC_MARK_METADATA);
1257cafe5635SKent Overstreet 		else if (KEY_DIRTY(k))
1258cafe5635SKent Overstreet 			SET_GC_MARK(g, GC_MARK_DIRTY);
12594fe6a816SKent Overstreet 		else if (!GC_MARK(g))
12604fe6a816SKent Overstreet 			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1261cafe5635SKent Overstreet 
1262cafe5635SKent Overstreet 		/* guard against overflow */
12636f10f7d1SColy Li 		SET_GC_SECTORS_USED(g, min_t(unsigned int,
1264cafe5635SKent Overstreet 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
126594717447SDarrick J. Wong 					     MAX_GC_SECTORS_USED));
1266cafe5635SKent Overstreet 
1267cafe5635SKent Overstreet 		BUG_ON(!GC_SECTORS_USED(g));
1268cafe5635SKent Overstreet 	}
1269cafe5635SKent Overstreet 
1270cafe5635SKent Overstreet 	return stale;
1271cafe5635SKent Overstreet }
1272cafe5635SKent Overstreet 
1273cafe5635SKent Overstreet #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
1274cafe5635SKent Overstreet 
1275487dded8SKent Overstreet void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1276487dded8SKent Overstreet {
12776f10f7d1SColy Li 	unsigned int i;
1278487dded8SKent Overstreet 
1279487dded8SKent Overstreet 	for (i = 0; i < KEY_PTRS(k); i++)
1280487dded8SKent Overstreet 		if (ptr_available(c, k, i) &&
1281487dded8SKent Overstreet 		    !ptr_stale(c, k, i)) {
1282487dded8SKent Overstreet 			struct bucket *b = PTR_BUCKET(c, k, i);
1283487dded8SKent Overstreet 
1284487dded8SKent Overstreet 			b->gen = PTR_GEN(k, i);
1285487dded8SKent Overstreet 
1286487dded8SKent Overstreet 			if (level && bkey_cmp(k, &ZERO_KEY))
1287487dded8SKent Overstreet 				b->prio = BTREE_PRIO;
1288487dded8SKent Overstreet 			else if (!level && b->prio == BTREE_PRIO)
1289487dded8SKent Overstreet 				b->prio = INITIAL_PRIO;
1290487dded8SKent Overstreet 		}
1291487dded8SKent Overstreet 
1292487dded8SKent Overstreet 	__bch_btree_mark_key(c, level, k);
1293487dded8SKent Overstreet }
1294487dded8SKent Overstreet 
1295d44c2f9eSTang Junhui void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1296d44c2f9eSTang Junhui {
1297d44c2f9eSTang Junhui 	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1298d44c2f9eSTang Junhui }
1299d44c2f9eSTang Junhui 
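/*
 * Mark @b's keys for gc and gather stats; returns true if the node is
 * worth rewriting, i.e. gc_always_rewrite is set, pointers are very
 * stale, or more than half of the keys are bad.
 */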
1300a1f0358bSKent Overstreet static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1301cafe5635SKent Overstreet {
1302cafe5635SKent Overstreet 	uint8_t stale = 0;
13036f10f7d1SColy Li 	unsigned int keys = 0, good_keys = 0;
1304cafe5635SKent Overstreet 	struct bkey *k;
1305cafe5635SKent Overstreet 	struct btree_iter iter;
1306cafe5635SKent Overstreet 	struct bset_tree *t;
1307cafe5635SKent Overstreet 
1308cafe5635SKent Overstreet 	gc->nodes++;
1309cafe5635SKent Overstreet 
1310c052dd9aSKent Overstreet 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1311cafe5635SKent Overstreet 		stale = max(stale, btree_mark_key(b, k));
1312a1f0358bSKent Overstreet 		keys++;
1313cafe5635SKent Overstreet 
1314a85e968eSKent Overstreet 		if (bch_ptr_bad(&b->keys, k))
1315cafe5635SKent Overstreet 			continue;
1316cafe5635SKent Overstreet 
1317cafe5635SKent Overstreet 		gc->key_bytes += bkey_u64s(k);
1318cafe5635SKent Overstreet 		gc->nkeys++;
1319a1f0358bSKent Overstreet 		good_keys++;
1320cafe5635SKent Overstreet 
1321cafe5635SKent Overstreet 		gc->data += KEY_SIZE(k);
1322cafe5635SKent Overstreet 	}
1323cafe5635SKent Overstreet 
1324a85e968eSKent Overstreet 	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1325cafe5635SKent Overstreet 		btree_bug_on(t->size &&
1326a85e968eSKent Overstreet 			     bset_written(&b->keys, t) &&
1327cafe5635SKent Overstreet 			     bkey_cmp(&b->key, &t->end) < 0,
1328cafe5635SKent Overstreet 			     b, "found short btree key in gc");
1329cafe5635SKent Overstreet 
1330a1f0358bSKent Overstreet 	if (b->c->gc_always_rewrite)
1331a1f0358bSKent Overstreet 		return true;
1332a1f0358bSKent Overstreet 
1333a1f0358bSKent Overstreet 	if (stale > 10)
1334a1f0358bSKent Overstreet 		return true;
1335a1f0358bSKent Overstreet 
1336a1f0358bSKent Overstreet 	if ((keys - good_keys) * 2 > keys)
1337a1f0358bSKent Overstreet 		return true;
1338a1f0358bSKent Overstreet 
1339a1f0358bSKent Overstreet 	return false;
1340cafe5635SKent Overstreet }
1341cafe5635SKent Overstreet 
1342a1f0358bSKent Overstreet #define GC_MERGE_NODES	4U
1343cafe5635SKent Overstreet 
1344cafe5635SKent Overstreet struct gc_merge_info {
1345cafe5635SKent Overstreet 	struct btree	*b;
13466f10f7d1SColy Li 	unsigned int	keys;
1347cafe5635SKent Overstreet };
1348cafe5635SKent Overstreet 
1349fc2d5988SColy Li static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1350fc2d5988SColy Li 				 struct keylist *insert_keys,
1351fc2d5988SColy Li 				 atomic_t *journal_ref,
1352fc2d5988SColy Li 				 struct bkey *replace_key);
1353a1f0358bSKent Overstreet 
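/*
 * Try to coalesce the contents of the up-to-GC_MERGE_NODES nodes in
 * r[] into one fewer node. On success the emptied node is freed, the
 * window in r[] is shifted, and -EINTR is returned because the
 * caller's iterator has been invalidated; returns 0 when the keys
 * don't fit or the reserve is too low.
 */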
1354a1f0358bSKent Overstreet static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
13550a63b66dSKent Overstreet 			     struct gc_stat *gc, struct gc_merge_info *r)
1356cafe5635SKent Overstreet {
13576f10f7d1SColy Li 	unsigned int i, nodes = 0, keys = 0, blocks;
1358a1f0358bSKent Overstreet 	struct btree *new_nodes[GC_MERGE_NODES];
13590a63b66dSKent Overstreet 	struct keylist keylist;
1360b54d6934SKent Overstreet 	struct closure cl;
1361a1f0358bSKent Overstreet 	struct bkey *k;
1362b54d6934SKent Overstreet 
13630a63b66dSKent Overstreet 	bch_keylist_init(&keylist);
13640a63b66dSKent Overstreet 
13650a63b66dSKent Overstreet 	if (btree_check_reserve(b, NULL))
13660a63b66dSKent Overstreet 		return 0;
13670a63b66dSKent Overstreet 
1368a1f0358bSKent Overstreet 	memset(new_nodes, 0, sizeof(new_nodes));
1369b54d6934SKent Overstreet 	closure_init_stack(&cl);
1370cafe5635SKent Overstreet 
1371028ddcacSZheng Wang 	while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
1372cafe5635SKent Overstreet 		keys += r[nodes++].keys;
1373cafe5635SKent Overstreet 
1374cafe5635SKent Overstreet 	blocks = btree_default_blocks(b->c) * 2 / 3;
1375cafe5635SKent Overstreet 
1376cafe5635SKent Overstreet 	if (nodes < 2 ||
1377a85e968eSKent Overstreet 	    __set_blocks(b->keys.set[0].data, keys,
13784e1ebae3SColy Li 			 block_bytes(b->c->cache)) > blocks * (nodes - 1))
1379a1f0358bSKent Overstreet 		return 0;
1380cafe5635SKent Overstreet 
1381a1f0358bSKent Overstreet 	for (i = 0; i < nodes; i++) {
13820a63b66dSKent Overstreet 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1383028ddcacSZheng Wang 		if (IS_ERR(new_nodes[i]))
1384a1f0358bSKent Overstreet 			goto out_nocoalesce;
1385cafe5635SKent Overstreet 	}
1386cafe5635SKent Overstreet 
13870a63b66dSKent Overstreet 	/*
13880a63b66dSKent Overstreet 	 * We have to check the reserve here, after we've allocated our new
13890a63b66dSKent Overstreet 	 * nodes, to make sure the insert below will succeed - we also check
13900a63b66dSKent Overstreet 	 * before as an optimization to potentially avoid a bunch of expensive
13910a63b66dSKent Overstreet 	 * allocs/sorts
13920a63b66dSKent Overstreet 	 */
13930a63b66dSKent Overstreet 	if (btree_check_reserve(b, NULL))
13940a63b66dSKent Overstreet 		goto out_nocoalesce;
13950a63b66dSKent Overstreet 
13962a285686SKent Overstreet 	for (i = 0; i < nodes; i++)
13972a285686SKent Overstreet 		mutex_lock(&new_nodes[i]->write_lock);
13982a285686SKent Overstreet 
1399cafe5635SKent Overstreet 	for (i = nodes - 1; i > 0; --i) {
1400ee811287SKent Overstreet 		struct bset *n1 = btree_bset_first(new_nodes[i]);
1401ee811287SKent Overstreet 		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1402cafe5635SKent Overstreet 		struct bkey *k, *last = NULL;
1403cafe5635SKent Overstreet 
1404cafe5635SKent Overstreet 		keys = 0;
1405cafe5635SKent Overstreet 
1406a1f0358bSKent Overstreet 		if (i > 1) {
1407cafe5635SKent Overstreet 			for (k = n2->start;
1408fafff81cSKent Overstreet 			     k < bset_bkey_last(n2);
1409cafe5635SKent Overstreet 			     k = bkey_next(k)) {
1410cafe5635SKent Overstreet 				if (__set_blocks(n1, n1->keys + keys +
1411ee811287SKent Overstreet 						 bkey_u64s(k),
14124e1ebae3SColy Li 						 block_bytes(b->c->cache)) > blocks)
1413cafe5635SKent Overstreet 					break;
1414cafe5635SKent Overstreet 
1415cafe5635SKent Overstreet 				last = k;
1416cafe5635SKent Overstreet 				keys += bkey_u64s(k);
1417cafe5635SKent Overstreet 			}
1418a1f0358bSKent Overstreet 		} else {
1419a1f0358bSKent Overstreet 			/*
1420a1f0358bSKent Overstreet 			 * This is the last new node, the one we keep - we're
1421a1f0358bSKent Overstreet 			 * getting rid of the node at r[0]. We have to try to
1422a1f0358bSKent Overstreet 			 * fit all of the remaining keys into this node; we
1423a1f0358bSKent Overstreet 			 * can't guarantee they will always fit, due to
1424a1f0358bSKent Overstreet 			 * rounding and variable length keys (though that
1425a1f0358bSKent Overstreet 			 * shouldn't happen in practice).
1426a1f0358bSKent Overstreet 			 */
1427a1f0358bSKent Overstreet 			if (__set_blocks(n1, n1->keys + n2->keys,
14284e1ebae3SColy Li 					 block_bytes(b->c->cache)) >
1429ee811287SKent Overstreet 			    btree_blocks(new_nodes[i]))
1430be23e837SZhiqiang Liu 				goto out_unlock_nocoalesce;
1431a1f0358bSKent Overstreet 
1432a1f0358bSKent Overstreet 			keys = n2->keys;
1433a1f0358bSKent Overstreet 			/* Take the key of the node we're getting rid of */
1434a1f0358bSKent Overstreet 			last = &r->b->key;
1435a1f0358bSKent Overstreet 		}
1436cafe5635SKent Overstreet 
14374e1ebae3SColy Li 		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1438ee811287SKent Overstreet 		       btree_blocks(new_nodes[i]));
1439cafe5635SKent Overstreet 
1440a1f0358bSKent Overstreet 		if (last)
1441a1f0358bSKent Overstreet 			bkey_copy_key(&new_nodes[i]->key, last);
1442cafe5635SKent Overstreet 
1443fafff81cSKent Overstreet 		memcpy(bset_bkey_last(n1),
1444cafe5635SKent Overstreet 		       n2->start,
1445fafff81cSKent Overstreet 		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1446cafe5635SKent Overstreet 
1447cafe5635SKent Overstreet 		n1->keys += keys;
1448a1f0358bSKent Overstreet 		r[i].keys = n1->keys;
1449cafe5635SKent Overstreet 
1450cafe5635SKent Overstreet 		memmove(n2->start,
1451fafff81cSKent Overstreet 			bset_bkey_idx(n2, keys),
1452fafff81cSKent Overstreet 			(void *) bset_bkey_last(n2) -
1453fafff81cSKent Overstreet 			(void *) bset_bkey_idx(n2, keys));
1454cafe5635SKent Overstreet 
1455cafe5635SKent Overstreet 		n2->keys -= keys;
1456cafe5635SKent Overstreet 
14570a63b66dSKent Overstreet 		if (__bch_keylist_realloc(&keylist,
1458085d2a3dSKent Overstreet 					  bkey_u64s(&new_nodes[i]->key)))
1459be23e837SZhiqiang Liu 			goto out_unlock_nocoalesce;
1460a1f0358bSKent Overstreet 
1461a1f0358bSKent Overstreet 		bch_btree_node_write(new_nodes[i], &cl);
14620a63b66dSKent Overstreet 		bch_keylist_add(&keylist, &new_nodes[i]->key);
1463cafe5635SKent Overstreet 	}
1464cafe5635SKent Overstreet 
14652a285686SKent Overstreet 	for (i = 0; i < nodes; i++)
14662a285686SKent Overstreet 		mutex_unlock(&new_nodes[i]->write_lock);
14672a285686SKent Overstreet 
146805335cffSKent Overstreet 	closure_sync(&cl);
146905335cffSKent Overstreet 
147005335cffSKent Overstreet 	/* We emptied out this node */
147105335cffSKent Overstreet 	BUG_ON(btree_bset_first(new_nodes[0])->keys);
147205335cffSKent Overstreet 	btree_node_free(new_nodes[0]);
147305335cffSKent Overstreet 	rw_unlock(true, new_nodes[0]);
1474400ffaa2SSlava Pestov 	new_nodes[0] = NULL;
147505335cffSKent Overstreet 
1476a1f0358bSKent Overstreet 	for (i = 0; i < nodes; i++) {
14770a63b66dSKent Overstreet 		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1478a1f0358bSKent Overstreet 			goto out_nocoalesce;
1479a1f0358bSKent Overstreet 
14800a63b66dSKent Overstreet 		make_btree_freeing_key(r[i].b, keylist.top);
14810a63b66dSKent Overstreet 		bch_keylist_push(&keylist);
1482a1f0358bSKent Overstreet 	}
1483a1f0358bSKent Overstreet 
14840a63b66dSKent Overstreet 	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
14850a63b66dSKent Overstreet 	BUG_ON(!bch_keylist_empty(&keylist));
1486a1f0358bSKent Overstreet 
1487a1f0358bSKent Overstreet 	for (i = 0; i < nodes; i++) {
1488a1f0358bSKent Overstreet 		btree_node_free(r[i].b);
1489a1f0358bSKent Overstreet 		rw_unlock(true, r[i].b);
1490a1f0358bSKent Overstreet 
1491a1f0358bSKent Overstreet 		r[i].b = new_nodes[i];
1492a1f0358bSKent Overstreet 	}
1493a1f0358bSKent Overstreet 
1494a1f0358bSKent Overstreet 	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1495a1f0358bSKent Overstreet 	r[nodes - 1].b = ERR_PTR(-EINTR);
1496cafe5635SKent Overstreet 
1497c37511b8SKent Overstreet 	trace_bcache_btree_gc_coalesce(nodes);
1498cafe5635SKent Overstreet 	gc->nodes--;
1499cafe5635SKent Overstreet 
15000a63b66dSKent Overstreet 	bch_keylist_free(&keylist);
15010a63b66dSKent Overstreet 
1502a1f0358bSKent Overstreet 	/* Invalidated our iterator */
1503a1f0358bSKent Overstreet 	return -EINTR;
1504a1f0358bSKent Overstreet 
1505be23e837SZhiqiang Liu out_unlock_nocoalesce:
1506be23e837SZhiqiang Liu 	for (i = 0; i < nodes; i++)
1507be23e837SZhiqiang Liu 		mutex_unlock(&new_nodes[i]->write_lock);
1508be23e837SZhiqiang Liu 
1509a1f0358bSKent Overstreet out_nocoalesce:
1510a1f0358bSKent Overstreet 	closure_sync(&cl);
1511a1f0358bSKent Overstreet 
15120a63b66dSKent Overstreet 	while ((k = bch_keylist_pop(&keylist)))
1513a1f0358bSKent Overstreet 		if (!bkey_cmp(k, &ZERO_KEY))
1514a1f0358bSKent Overstreet 			atomic_dec(&b->c->prio_blocked);
1515f16277caSShenghui Wang 	bch_keylist_free(&keylist);
1516a1f0358bSKent Overstreet 
1517a1f0358bSKent Overstreet 	for (i = 0; i < nodes; i++)
1518028ddcacSZheng Wang 		if (!IS_ERR(new_nodes[i])) {
1519a1f0358bSKent Overstreet 			btree_node_free(new_nodes[i]);
1520a1f0358bSKent Overstreet 			rw_unlock(true, new_nodes[i]);
1521a1f0358bSKent Overstreet 		}
1522a1f0358bSKent Overstreet 	return 0;
1523a1f0358bSKent Overstreet }
1524a1f0358bSKent Overstreet 
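/*
 * Rewrite @replace into a freshly sorted replacement node and insert
 * the new node's key (plus a freeing key for the old node) into the
 * parent @b. Returns -EINTR on success since the caller's iterator is
 * invalidated, 0 if an allocation fails or the reserve is too low.
 */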
15250a63b66dSKent Overstreet static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
15260a63b66dSKent Overstreet 				 struct btree *replace)
15270a63b66dSKent Overstreet {
15280a63b66dSKent Overstreet 	struct keylist keys;
15290a63b66dSKent Overstreet 	struct btree *n;
15300a63b66dSKent Overstreet 
15310a63b66dSKent Overstreet 	if (btree_check_reserve(b, NULL))
15320a63b66dSKent Overstreet 		return 0;
15330a63b66dSKent Overstreet 
15340a63b66dSKent Overstreet 	n = btree_node_alloc_replacement(replace, NULL);
	if (IS_ERR(n))
		return 0;
15350a63b66dSKent Overstreet 
15360a63b66dSKent Overstreet 	/* recheck reserve after allocating replacement node */
15370a63b66dSKent Overstreet 	if (btree_check_reserve(b, NULL)) {
15380a63b66dSKent Overstreet 		btree_node_free(n);
15390a63b66dSKent Overstreet 		rw_unlock(true, n);
15400a63b66dSKent Overstreet 		return 0;
15410a63b66dSKent Overstreet 	}
15420a63b66dSKent Overstreet 
15430a63b66dSKent Overstreet 	bch_btree_node_write_sync(n);
15440a63b66dSKent Overstreet 
15450a63b66dSKent Overstreet 	bch_keylist_init(&keys);
15460a63b66dSKent Overstreet 	bch_keylist_add(&keys, &n->key);
15470a63b66dSKent Overstreet 
15480a63b66dSKent Overstreet 	make_btree_freeing_key(replace, keys.top);
15490a63b66dSKent Overstreet 	bch_keylist_push(&keys);
15500a63b66dSKent Overstreet 
15510a63b66dSKent Overstreet 	bch_btree_insert_node(b, op, &keys, NULL, NULL);
15520a63b66dSKent Overstreet 	BUG_ON(!bch_keylist_empty(&keys));
15530a63b66dSKent Overstreet 
15540a63b66dSKent Overstreet 	btree_node_free(replace);
15550a63b66dSKent Overstreet 	rw_unlock(true, n);
15560a63b66dSKent Overstreet 
15570a63b66dSKent Overstreet 	/* Invalidated our iterator */
15580a63b66dSKent Overstreet 	return -EINTR;
15590a63b66dSKent Overstreet }
15600a63b66dSKent Overstreet 
15616f10f7d1SColy Li static unsigned int btree_gc_count_keys(struct btree *b)
1562a1f0358bSKent Overstreet {
1563a1f0358bSKent Overstreet 	struct bkey *k;
1564a1f0358bSKent Overstreet 	struct btree_iter iter;
15656f10f7d1SColy Li 	unsigned int ret = 0;
1566a1f0358bSKent Overstreet 
1567c052dd9aSKent Overstreet 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1568a1f0358bSKent Overstreet 		ret += bkey_u64s(k);
1569a1f0358bSKent Overstreet 
1570a1f0358bSKent Overstreet 	return ret;
1571cafe5635SKent Overstreet }
1572cafe5635SKent Overstreet 
15737f4a59deSTang Junhui static size_t btree_gc_min_nodes(struct cache_set *c)
15747f4a59deSTang Junhui {
15757f4a59deSTang Junhui 	size_t min_nodes;
15767f4a59deSTang Junhui 
15777f4a59deSTang Junhui 	/*
15787f4a59deSTang Junhui 	 * Incremental GC pauses for 100ms whenever front side I/O
15797f4a59deSTang Junhui 	 * arrives, so if GC processed only a constant number (100) of
15807f4a59deSTang Junhui 	 * nodes each time, a large btree would keep GC running for a
15817f4a59deSTang Junhui 	 * long time, and the front side I/Os would run out of buckets
15827f4a59deSTang Junhui 	 * (since no new bucket can be allocated during GC) and be
15837f4a59deSTang Junhui 	 * blocked again. So instead of a constant batch size, scale
15847f4a59deSTang Junhui 	 * the number of nodes processed per pass with the total number
15857f4a59deSTang Junhui 	 * of btree nodes by dividing GC into a constant number
15867f4a59deSTang Junhui 	 * (MAX_GC_TIMES) of passes: a big btree is processed in bigger
15877f4a59deSTang Junhui 	 * batches and a small one in smaller batches, but never fewer
15887f4a59deSTang Junhui 	 * than MIN_GC_NODES per pass. E.g. with 40000 nodes and
15897f4a59deSTang Junhui 	 * MAX_GC_TIMES of 100, each pass handles 400 nodes.
15917f4a59deSTang Junhui 	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
15927f4a59deSTang Junhui 	if (min_nodes < MIN_GC_NODES)
15937f4a59deSTang Junhui 		min_nodes = MIN_GC_NODES;
15947f4a59deSTang Junhui 
15957f4a59deSTang Junhui 	return min_nodes;
15967f4a59deSTang Junhui }
15977f4a59deSTang Junhui 
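/*
 * Garbage collect @b's children, keeping a sliding window of the last
 * GC_MERGE_NODES nodes visited so adjacent nodes can be coalesced.
 * Each child may be rewritten or recursed into; returns -EAGAIN to
 * yield to front side I/O during incremental gc.
 */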
1599cafe5635SKent Overstreet static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1600cafe5635SKent Overstreet 			    struct closure *writes, struct gc_stat *gc)
1601cafe5635SKent Overstreet {
1602a1f0358bSKent Overstreet 	int ret = 0;
1603a1f0358bSKent Overstreet 	bool should_rewrite;
1604a1f0358bSKent Overstreet 	struct bkey *k;
1605a1f0358bSKent Overstreet 	struct btree_iter iter;
1606cafe5635SKent Overstreet 	struct gc_merge_info r[GC_MERGE_NODES];
16072a285686SKent Overstreet 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1608cafe5635SKent Overstreet 
1609c052dd9aSKent Overstreet 	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1610cafe5635SKent Overstreet 
16112a285686SKent Overstreet 	for (i = r; i < r + ARRAY_SIZE(r); i++)
16122a285686SKent Overstreet 		i->b = ERR_PTR(-EINTR);
1613cafe5635SKent Overstreet 
1614a1f0358bSKent Overstreet 	while (1) {
1615a85e968eSKent Overstreet 		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1616a1f0358bSKent Overstreet 		if (k) {
16170a63b66dSKent Overstreet 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
16182452cc89SSlava Pestov 						  true, b);
1619cafe5635SKent Overstreet 			if (IS_ERR(r->b)) {
1620cafe5635SKent Overstreet 				ret = PTR_ERR(r->b);
1621cafe5635SKent Overstreet 				break;
1622cafe5635SKent Overstreet 			}
1623cafe5635SKent Overstreet 
1624a1f0358bSKent Overstreet 			r->keys = btree_gc_count_keys(r->b);
1625cafe5635SKent Overstreet 
16260a63b66dSKent Overstreet 			ret = btree_gc_coalesce(b, op, gc, r);
1627a1f0358bSKent Overstreet 			if (ret)
1628cafe5635SKent Overstreet 				break;
1629cafe5635SKent Overstreet 		}
1630cafe5635SKent Overstreet 
1631a1f0358bSKent Overstreet 		if (!last->b)
1632a1f0358bSKent Overstreet 			break;
1633cafe5635SKent Overstreet 
1634a1f0358bSKent Overstreet 		if (!IS_ERR(last->b)) {
1635a1f0358bSKent Overstreet 			should_rewrite = btree_gc_mark_node(last->b, gc);
16360a63b66dSKent Overstreet 			if (should_rewrite) {
16370a63b66dSKent Overstreet 				ret = btree_gc_rewrite_node(b, op, last->b);
16380a63b66dSKent Overstreet 				if (ret)
1639a1f0358bSKent Overstreet 					break;
1640a1f0358bSKent Overstreet 			}
1641a1f0358bSKent Overstreet 
1642a1f0358bSKent Overstreet 			if (last->b->level) {
1643a1f0358bSKent Overstreet 				ret = btree_gc_recurse(last->b, op, writes, gc);
1644a1f0358bSKent Overstreet 				if (ret)
1645a1f0358bSKent Overstreet 					break;
1646a1f0358bSKent Overstreet 			}
1647a1f0358bSKent Overstreet 
1648a1f0358bSKent Overstreet 			bkey_copy_key(&b->c->gc_done, &last->b->key);
1649a1f0358bSKent Overstreet 
1650a1f0358bSKent Overstreet 			/*
1651a1f0358bSKent Overstreet 			 * Must flush leaf nodes before gc ends, since replace
1652a1f0358bSKent Overstreet 			 * operations aren't journalled
1653cafe5635SKent Overstreet 			 */
16542a285686SKent Overstreet 			mutex_lock(&last->b->write_lock);
1655a1f0358bSKent Overstreet 			if (btree_node_dirty(last->b))
1656a1f0358bSKent Overstreet 				bch_btree_node_write(last->b, writes);
16572a285686SKent Overstreet 			mutex_unlock(&last->b->write_lock);
1658a1f0358bSKent Overstreet 			rw_unlock(true, last->b);
1659a1f0358bSKent Overstreet 		}
1660a1f0358bSKent Overstreet 
1661a1f0358bSKent Overstreet 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1662a1f0358bSKent Overstreet 		r->b = NULL;
1663a1f0358bSKent Overstreet 
16645c25c4fcSTang Junhui 		if (atomic_read(&b->c->search_inflight) &&
16657f4a59deSTang Junhui 		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
16665c25c4fcSTang Junhui 			gc->nodes_pre = gc->nodes;
16675c25c4fcSTang Junhui 			ret = -EAGAIN;
16685c25c4fcSTang Junhui 			break;
16695c25c4fcSTang Junhui 		}
16705c25c4fcSTang Junhui 
1671cafe5635SKent Overstreet 		if (need_resched()) {
1672cafe5635SKent Overstreet 			ret = -EAGAIN;
1673cafe5635SKent Overstreet 			break;
1674cafe5635SKent Overstreet 		}
1675cafe5635SKent Overstreet 	}
1676cafe5635SKent Overstreet 
16772a285686SKent Overstreet 	for (i = r; i < r + ARRAY_SIZE(r); i++)
16782a285686SKent Overstreet 		if (!IS_ERR_OR_NULL(i->b)) {
16792a285686SKent Overstreet 			mutex_lock(&i->b->write_lock);
16802a285686SKent Overstreet 			if (btree_node_dirty(i->b))
16812a285686SKent Overstreet 				bch_btree_node_write(i->b, writes);
16822a285686SKent Overstreet 			mutex_unlock(&i->b->write_lock);
16832a285686SKent Overstreet 			rw_unlock(true, i->b);
1684a1f0358bSKent Overstreet 		}
1685cafe5635SKent Overstreet 
1686cafe5635SKent Overstreet 	return ret;
1687cafe5635SKent Overstreet }
1688cafe5635SKent Overstreet 
1689cafe5635SKent Overstreet static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1690cafe5635SKent Overstreet 			     struct closure *writes, struct gc_stat *gc)
1691cafe5635SKent Overstreet {
1692cafe5635SKent Overstreet 	struct btree *n = NULL;
1693a1f0358bSKent Overstreet 	int ret = 0;
1694a1f0358bSKent Overstreet 	bool should_rewrite;
1695cafe5635SKent Overstreet 
1696a1f0358bSKent Overstreet 	should_rewrite = btree_gc_mark_node(b, gc);
1697a1f0358bSKent Overstreet 	if (should_rewrite) {
16980a63b66dSKent Overstreet 		n = btree_node_alloc_replacement(b, NULL);
1699cafe5635SKent Overstreet 
1700028ddcacSZheng Wang 		if (!IS_ERR(n)) {
1701a1f0358bSKent Overstreet 			bch_btree_node_write_sync(n);
17022a285686SKent Overstreet 
1703a1f0358bSKent Overstreet 			bch_btree_set_root(n);
1704a1f0358bSKent Overstreet 			btree_node_free(b);
1705a1f0358bSKent Overstreet 			rw_unlock(true, n);
1706a1f0358bSKent Overstreet 
1707a1f0358bSKent Overstreet 			return -EINTR;
1708cafe5635SKent Overstreet 		}
1709a1f0358bSKent Overstreet 	}
1710a1f0358bSKent Overstreet 
1711487dded8SKent Overstreet 	__bch_btree_mark_key(b->c, b->level + 1, &b->key);
1712487dded8SKent Overstreet 
1713a1f0358bSKent Overstreet 	if (b->level) {
1714a1f0358bSKent Overstreet 		ret = btree_gc_recurse(b, op, writes, gc);
1715a1f0358bSKent Overstreet 		if (ret)
1716a1f0358bSKent Overstreet 			return ret;
1717a1f0358bSKent Overstreet 	}
1718a1f0358bSKent Overstreet 
1719a1f0358bSKent Overstreet 	bkey_copy_key(&b->c->gc_done, &b->key);
1720cafe5635SKent Overstreet 
1721cafe5635SKent Overstreet 	return ret;
1722cafe5635SKent Overstreet }
1723cafe5635SKent Overstreet 
1724cafe5635SKent Overstreet static void btree_gc_start(struct cache_set *c)
1725cafe5635SKent Overstreet {
1726cafe5635SKent Overstreet 	struct cache *ca;
1727cafe5635SKent Overstreet 	struct bucket *b;
1728cafe5635SKent Overstreet 
1729cafe5635SKent Overstreet 	if (!c->gc_mark_valid)
1730cafe5635SKent Overstreet 		return;
1731cafe5635SKent Overstreet 
1732cafe5635SKent Overstreet 	mutex_lock(&c->bucket_lock);
1733cafe5635SKent Overstreet 
1734cafe5635SKent Overstreet 	c->gc_mark_valid = 0;
1735cafe5635SKent Overstreet 	c->gc_done = ZERO_KEY;
1736cafe5635SKent Overstreet 
173708fdb2cdSColy Li 	ca = c->cache;
1738cafe5635SKent Overstreet 	for_each_bucket(b, ca) {
17393a2fd9d5SKent Overstreet 		b->last_gc = b->gen;
174029ebf465SKent Overstreet 		if (!atomic_read(&b->pin)) {
17414fe6a816SKent Overstreet 			SET_GC_MARK(b, 0);
174229ebf465SKent Overstreet 			SET_GC_SECTORS_USED(b, 0);
174329ebf465SKent Overstreet 		}
1744cafe5635SKent Overstreet 	}
1745cafe5635SKent Overstreet 
1746cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
1747cafe5635SKent Overstreet }
1748cafe5635SKent Overstreet 
1749d44c2f9eSTang Junhui static void bch_btree_gc_finish(struct cache_set *c)
1750cafe5635SKent Overstreet {
1751cafe5635SKent Overstreet 	struct bucket *b;
1752cafe5635SKent Overstreet 	struct cache *ca;
175308fdb2cdSColy Li 	unsigned int i, j;
175408fdb2cdSColy Li 	uint64_t *k;
1755cafe5635SKent Overstreet 
1756cafe5635SKent Overstreet 	mutex_lock(&c->bucket_lock);
1757cafe5635SKent Overstreet 
1758cafe5635SKent Overstreet 	set_gc_sectors(c);
1759cafe5635SKent Overstreet 	c->gc_mark_valid = 1;
1760cafe5635SKent Overstreet 	c->need_gc	= 0;
1761cafe5635SKent Overstreet 
1762cafe5635SKent Overstreet 	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1763cafe5635SKent Overstreet 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1764cafe5635SKent Overstreet 			    GC_MARK_METADATA);
1765cafe5635SKent Overstreet 
1766bf0a628aSNicholas Swenson 	/* don't reclaim buckets to which writeback keys point */
1767bf0a628aSNicholas Swenson 	rcu_read_lock();
17682831231dSColy Li 	for (i = 0; i < c->devices_max_used; i++) {
1769bf0a628aSNicholas Swenson 		struct bcache_device *d = c->devices[i];
1770bf0a628aSNicholas Swenson 		struct cached_dev *dc;
1771bf0a628aSNicholas Swenson 		struct keybuf_key *w, *n;
1772bf0a628aSNicholas Swenson 
1773bf0a628aSNicholas Swenson 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1774bf0a628aSNicholas Swenson 			continue;
1775bf0a628aSNicholas Swenson 		dc = container_of(d, struct cached_dev, disk);
1776bf0a628aSNicholas Swenson 
1777bf0a628aSNicholas Swenson 		spin_lock(&dc->writeback_keys.lock);
1778bf0a628aSNicholas Swenson 		rbtree_postorder_for_each_entry_safe(w, n,
1779bf0a628aSNicholas Swenson 					&dc->writeback_keys.keys, node)
1780bf0a628aSNicholas Swenson 			for (j = 0; j < KEY_PTRS(&w->key); j++)
1781bf0a628aSNicholas Swenson 				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1782bf0a628aSNicholas Swenson 					    GC_MARK_DIRTY);
1783bf0a628aSNicholas Swenson 		spin_unlock(&dc->writeback_keys.lock);
1784bf0a628aSNicholas Swenson 	}
1785bf0a628aSNicholas Swenson 	rcu_read_unlock();
1786bf0a628aSNicholas Swenson 
1787d44c2f9eSTang Junhui 	c->avail_nbuckets = 0;
1788cafe5635SKent Overstreet 
178908fdb2cdSColy Li 	ca = c->cache;
1790cafe5635SKent Overstreet 	ca->invalidate_needs_gc = 0;
1791cafe5635SKent Overstreet 
179208fdb2cdSColy Li 	for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
179308fdb2cdSColy Li 		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1794cafe5635SKent Overstreet 
179508fdb2cdSColy Li 	for (k = ca->prio_buckets;
179608fdb2cdSColy Li 	     k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
179708fdb2cdSColy Li 		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1798cafe5635SKent Overstreet 
1799cafe5635SKent Overstreet 	for_each_bucket(b, ca) {
1800cafe5635SKent Overstreet 		c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
1801cafe5635SKent Overstreet 
18024fe6a816SKent Overstreet 		if (atomic_read(&b->pin))
18034fe6a816SKent Overstreet 			continue;
18044fe6a816SKent Overstreet 
18054fe6a816SKent Overstreet 		BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
18064fe6a816SKent Overstreet 
18074fe6a816SKent Overstreet 		if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1808d44c2f9eSTang Junhui 			c->avail_nbuckets++;
1809cafe5635SKent Overstreet 	}
1810cafe5635SKent Overstreet 
1811cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
1812cafe5635SKent Overstreet }
1813cafe5635SKent Overstreet 
181472a44517SKent Overstreet static void bch_btree_gc(struct cache_set *c)
1815cafe5635SKent Overstreet {
1816cafe5635SKent Overstreet 	int ret;
1817cafe5635SKent Overstreet 	struct gc_stat stats;
1818cafe5635SKent Overstreet 	struct closure writes;
1819cafe5635SKent Overstreet 	struct btree_op op;
1820cafe5635SKent Overstreet 	uint64_t start_time = local_clock();
182157943511SKent Overstreet 
1822c37511b8SKent Overstreet 	trace_bcache_gc_start(c);
1823cafe5635SKent Overstreet 
1824cafe5635SKent Overstreet 	memset(&stats, 0, sizeof(struct gc_stat));
1825cafe5635SKent Overstreet 	closure_init_stack(&writes);
1826b54d6934SKent Overstreet 	bch_btree_op_init(&op, SHRT_MAX);
1827cafe5635SKent Overstreet 
1828cafe5635SKent Overstreet 	btree_gc_start(c);
1829cafe5635SKent Overstreet 
1830771f393eSColy Li 	/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1831a1f0358bSKent Overstreet 	do {
1832feac1a70SColy Li 		ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1833cafe5635SKent Overstreet 		closure_sync(&writes);
1834c5f1e5adSKent Overstreet 		cond_resched();
1835cafe5635SKent Overstreet 
18365c25c4fcSTang Junhui 		if (ret == -EAGAIN)
18375c25c4fcSTang Junhui 			schedule_timeout_interruptible(msecs_to_jiffies
18385c25c4fcSTang Junhui 						       (GC_SLEEP_MS));
18395c25c4fcSTang Junhui 		else if (ret)
184046f5aa88SJoe Perches 			pr_warn("gc failed!\n");
1841771f393eSColy Li 	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1842cafe5635SKent Overstreet 
1843d44c2f9eSTang Junhui 	bch_btree_gc_finish(c);
184457943511SKent Overstreet 	wake_up_allocators(c);
184557943511SKent Overstreet 
1846169ef1cfSKent Overstreet 	bch_time_stats_update(&c->btree_gc_time, start_time);
1847cafe5635SKent Overstreet 
1848cafe5635SKent Overstreet 	stats.key_bytes *= sizeof(uint64_t);
1849cafe5635SKent Overstreet 	stats.data	<<= 9;
1850d44c2f9eSTang Junhui 	bch_update_bucket_in_use(c, &stats);
1851cafe5635SKent Overstreet 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1852cafe5635SKent Overstreet 
1853c37511b8SKent Overstreet 	trace_bcache_gc_end(c);
1854cafe5635SKent Overstreet 
185572a44517SKent Overstreet 	bch_moving_gc(c);
1856cafe5635SKent Overstreet }
1857cafe5635SKent Overstreet 
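/*
 * gc should run when the allocator needs buckets invalidated, or once
 * enough sectors have been written since the last run (sectors_to_gc
 * has gone negative).
 */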
1858be628be0SKent Overstreet static bool gc_should_run(struct cache_set *c)
1859cafe5635SKent Overstreet {
186008fdb2cdSColy Li 	struct cache *ca = c->cache;
186172a44517SKent Overstreet 
1862be628be0SKent Overstreet 	if (ca->invalidate_needs_gc)
1863be628be0SKent Overstreet 		return true;
186472a44517SKent Overstreet 
1865be628be0SKent Overstreet 	if (atomic_read(&c->sectors_to_gc) < 0)
1866be628be0SKent Overstreet 		return true;
1867be628be0SKent Overstreet 
1868be628be0SKent Overstreet 	return false;
1869be628be0SKent Overstreet }
1870be628be0SKent Overstreet 
1871be628be0SKent Overstreet static int bch_gc_thread(void *arg)
1872be628be0SKent Overstreet {
1873be628be0SKent Overstreet 	struct cache_set *c = arg;
1874be628be0SKent Overstreet 
1875be628be0SKent Overstreet 	while (1) {
1876be628be0SKent Overstreet 		wait_event_interruptible(c->gc_wait,
1877771f393eSColy Li 			   kthread_should_stop() ||
1878771f393eSColy Li 			   test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1879771f393eSColy Li 			   gc_should_run(c));
1880be628be0SKent Overstreet 
1881771f393eSColy Li 		if (kthread_should_stop() ||
1882771f393eSColy Li 		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
188372a44517SKent Overstreet 			break;
188472a44517SKent Overstreet 
1885be628be0SKent Overstreet 		set_gc_sectors(c);
1886be628be0SKent Overstreet 		bch_btree_gc(c);
188772a44517SKent Overstreet 	}
188872a44517SKent Overstreet 
1889771f393eSColy Li 	wait_for_kthread_stop();
189072a44517SKent Overstreet 	return 0;
189172a44517SKent Overstreet }
189272a44517SKent Overstreet 
189372a44517SKent Overstreet int bch_gc_thread_start(struct cache_set *c)
189472a44517SKent Overstreet {
1895be628be0SKent Overstreet 	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
18969d134117SVasyl Gomonovych 	return PTR_ERR_OR_ZERO(c->gc_thread);
1897cafe5635SKent Overstreet }
1898cafe5635SKent Overstreet 
1899cafe5635SKent Overstreet /* Initial partial gc */
1900cafe5635SKent Overstreet 
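/*
 * Mark every key in @b, then recurse into each child of an internal
 * node, prefetching the next child while the current one is checked.
 */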
1901487dded8SKent Overstreet static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1902cafe5635SKent Overstreet {
190350310164SKent Overstreet 	int ret = 0;
190450310164SKent Overstreet 	struct bkey *k, *p = NULL;
1905cafe5635SKent Overstreet 	struct btree_iter iter;
1906cafe5635SKent Overstreet 
1907487dded8SKent Overstreet 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1908487dded8SKent Overstreet 		bch_initial_mark_key(b->c, b->level, k);
1909cafe5635SKent Overstreet 
1910487dded8SKent Overstreet 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
1911cafe5635SKent Overstreet 
1912cafe5635SKent Overstreet 	if (b->level) {
1913c052dd9aSKent Overstreet 		bch_btree_iter_init(&b->keys, &iter, NULL);
1914cafe5635SKent Overstreet 
191550310164SKent Overstreet 		do {
1916a85e968eSKent Overstreet 			k = bch_btree_iter_next_filter(&iter, &b->keys,
1917a85e968eSKent Overstreet 						       bch_ptr_bad);
19187f4a59deSTang Junhui 			if (k) {
19192452cc89SSlava Pestov 				btree_node_prefetch(b, k);
19207f4a59deSTang Junhui 				/*
19217f4a59deSTang Junhui 				 * initialize c->gc_stats.nodes
19227f4a59deSTang Junhui 				 * for incremental GC
19237f4a59deSTang Junhui 				 */
19247f4a59deSTang Junhui 				b->c->gc_stats.nodes++;
19257f4a59deSTang Junhui 			}
192650310164SKent Overstreet 
1927cafe5635SKent Overstreet 			if (p)
1928feac1a70SColy Li 				ret = bcache_btree(check_recurse, p, b, op);
1929cafe5635SKent Overstreet 
193050310164SKent Overstreet 			p = k;
193150310164SKent Overstreet 		} while (p && !ret);
1932cafe5635SKent Overstreet 	}
1933cafe5635SKent Overstreet 
1934487dded8SKent Overstreet 	return ret;
1935cafe5635SKent Overstreet }
1936cafe5635SKent Overstreet 
19388e710227SColy Li static int bch_btree_check_thread(void *arg)
1939cafe5635SKent Overstreet {
19408e710227SColy Li 	int ret;
19418e710227SColy Li 	struct btree_check_info *info = arg;
19428e710227SColy Li 	struct btree_check_state *check_state = info->state;
19438e710227SColy Li 	struct cache_set *c = check_state->c;
19448e710227SColy Li 	struct btree_iter iter;
19458e710227SColy Li 	struct bkey *k, *p;
19468e710227SColy Li 	int cur_idx, prev_idx, skip_nr;
19478e710227SColy Li 
19488e710227SColy Li 	k = p = NULL;
19498e710227SColy Li 	cur_idx = prev_idx = 0;
19508e710227SColy Li 	ret = 0;
19518e710227SColy Li 
19528e710227SColy Li 	/* root node keys are checked before check threads are created */
19538e710227SColy Li 	bch_btree_iter_init(&c->root->keys, &iter, NULL);
19548e710227SColy Li 	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
19558e710227SColy Li 	BUG_ON(!k);
19568e710227SColy Li 
19578e710227SColy Li 	p = k;
19588e710227SColy Li 	while (k) {
19598e710227SColy Li 		/*
19608e710227SColy Li 		 * Fetch a root node key index, skip the keys which
19618e710227SColy Li 		 * should be fetched by other threads, then check the
19628e710227SColy Li 		 * sub-tree indexed by the fetched key.
19638e710227SColy Li 		 */
19648e710227SColy Li 		spin_lock(&check_state->idx_lock);
19658e710227SColy Li 		cur_idx = check_state->key_idx;
19668e710227SColy Li 		check_state->key_idx++;
19678e710227SColy Li 		spin_unlock(&check_state->idx_lock);
19688e710227SColy Li 
19698e710227SColy Li 		skip_nr = cur_idx - prev_idx;
19708e710227SColy Li 
19718e710227SColy Li 		while (skip_nr) {
19728e710227SColy Li 			k = bch_btree_iter_next_filter(&iter,
19738e710227SColy Li 						       &c->root->keys,
19748e710227SColy Li 						       bch_ptr_bad);
19758e710227SColy Li 			if (k)
19768e710227SColy Li 				p = k;
19778e710227SColy Li 			else {
19788e710227SColy Li 				/*
19798e710227SColy Li 				 * No more keys to check in root node,
19808e710227SColy Li 				 * current checking threads are enough,
19818e710227SColy Li 				 * stop creating more.
19828e710227SColy Li 				 */
19838e710227SColy Li 				atomic_set(&check_state->enough, 1);
19848e710227SColy Li 				/* make the update to check_state->enough visible to other threads */
1985eb9b6666SColy Li 				smp_mb__after_atomic();
19868e710227SColy Li 				goto out;
19878e710227SColy Li 			}
19888e710227SColy Li 			skip_nr--;
19898e710227SColy Li 			cond_resched();
19908e710227SColy Li 		}
19918e710227SColy Li 
19928e710227SColy Li 		if (p) {
1993c18536a7SKent Overstreet 			struct btree_op op;
1994cafe5635SKent Overstreet 
19958e710227SColy Li 			btree_node_prefetch(c->root, p);
19968e710227SColy Li 			c->gc_stats.nodes++;
19978e710227SColy Li 			bch_btree_op_init(&op, 0);
19988e710227SColy Li 			ret = bcache_btree(check_recurse, p, c->root, &op);
1999f0854489SMingzhe Zou 			/*
2000f0854489SMingzhe Zou 			 * The op may be added to cache_set's btree_cache_wait
2001f0854489SMingzhe Zou 			 * in mca_cannibalize(), must ensure it is removed from
2002f0854489SMingzhe Zou 			 * the list and release btree_cache_alloc_lock before
2003f0854489SMingzhe Zou 			 * free op memory.
2004f0854489SMingzhe Zou 			 * Otherwise, the btree_cache_wait will be damaged.
2005f0854489SMingzhe Zou 			 */
2006f0854489SMingzhe Zou 			bch_cannibalize_unlock(c);
2007f0854489SMingzhe Zou 			finish_wait(&c->btree_cache_wait, &op.wait);
20088e710227SColy Li 			if (ret)
20098e710227SColy Li 				goto out;
20108e710227SColy Li 		}
20118e710227SColy Li 		p = NULL;
20128e710227SColy Li 		prev_idx = cur_idx;
20138e710227SColy Li 		cond_resched();
20148e710227SColy Li 	}
2015cafe5635SKent Overstreet 
20168e710227SColy Li out:
20178e710227SColy Li 	info->result = ret;
20188e710227SColy Li 	/* make info->result visible before decrementing check_state->started */
2019eb9b6666SColy Li 	smp_mb__before_atomic();
20208e710227SColy Li 	if (atomic_dec_and_test(&check_state->started))
20218e710227SColy Li 		wake_up(&check_state->wait);
20228e710227SColy Li 
20238e710227SColy Li 	return ret;
20248e710227SColy Li }
20258e710227SColy Li 
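/* Use half of the online CPUs, clamped to [1, BCH_BTR_CHKTHREAD_MAX]. */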
20288e710227SColy Li static int bch_btree_chkthread_nr(void)
20298e710227SColy Li {
20308e710227SColy Li 	int n = num_online_cpus()/2;
20318e710227SColy Li 
20328e710227SColy Li 	if (n == 0)
20338e710227SColy Li 		n = 1;
20348e710227SColy Li 	else if (n > BCH_BTR_CHKTHREAD_MAX)
20358e710227SColy Li 		n = BCH_BTR_CHKTHREAD_MAX;
20368e710227SColy Li 
20378e710227SColy Li 	return n;
20388e710227SColy Li }
20398e710227SColy Li 
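/*
 * Initial btree check: mark the root's own keys, then spawn up to
 * bch_btree_chkthread_nr() worker threads which divide the root's
 * children between themselves (via check_state.key_idx) and check one
 * subtree at a time with bch_btree_check_recurse().
 */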
20408e710227SColy Li int bch_btree_check(struct cache_set *c)
20418e710227SColy Li {
20428e710227SColy Li 	int ret = 0;
20438e710227SColy Li 	int i;
20448e710227SColy Li 	struct bkey *k = NULL;
20458e710227SColy Li 	struct btree_iter iter;
204662253644SColy Li 	struct btree_check_state check_state;
20478e710227SColy Li 
20488e710227SColy Li 	/* check and mark root node keys */
20498e710227SColy Li 	for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
20508e710227SColy Li 		bch_initial_mark_key(c, c->root->level, k);
20518e710227SColy Li 
20528e710227SColy Li 	bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
20538e710227SColy Li 
20548e710227SColy Li 	if (c->root->level == 0)
20558e710227SColy Li 		return 0;
20568e710227SColy Li 
20577d6b902eSColy Li 	memset(&check_state, 0, sizeof(struct btree_check_state));
205862253644SColy Li 	check_state.c = c;
205962253644SColy Li 	check_state.total_threads = bch_btree_chkthread_nr();
206062253644SColy Li 	check_state.key_idx = 0;
206162253644SColy Li 	spin_lock_init(&check_state.idx_lock);
206262253644SColy Li 	atomic_set(&check_state.started, 0);
206362253644SColy Li 	atomic_set(&check_state.enough, 0);
206462253644SColy Li 	init_waitqueue_head(&check_state.wait);
20658e710227SColy Li 
206662253644SColy Li 	rw_lock(0, c->root, c->root->level);
20678e710227SColy Li 	/*
20688e710227SColy Li 	 * Run multiple threads to check btree nodes in parallel. If
206962253644SColy Li 	 * check_state.enough is non-zero, the check threads already
20708e710227SColy Li 	 * running are enough and it's unnecessary to create more.
20728e710227SColy Li 	 */
207362253644SColy Li 	for (i = 0; i < check_state.total_threads; i++) {
207462253644SColy Li 		/* fetch the latest value of check_state.enough */
2075eb9b6666SColy Li 		smp_mb__before_atomic();
207662253644SColy Li 		if (atomic_read(&check_state.enough))
20778e710227SColy Li 			break;
20788e710227SColy Li 
207962253644SColy Li 		check_state.infos[i].result = 0;
208062253644SColy Li 		check_state.infos[i].state = &check_state;
20818e710227SColy Li 
208262253644SColy Li 		check_state.infos[i].thread =
20838e710227SColy Li 			kthread_run(bch_btree_check_thread,
208462253644SColy Li 				    &check_state.infos[i],
208562253644SColy Li 				    "bch_btrchk[%d]", i);
208662253644SColy Li 		if (IS_ERR(check_state.infos[i].thread)) {
208746f5aa88SJoe Perches 			pr_err("fails to run thread bch_btrchk[%d]\n", i);
20888e710227SColy Li 			for (--i; i >= 0; i--)
208962253644SColy Li 				kthread_stop(check_state.infos[i].thread);
20908e710227SColy Li 			ret = -ENOMEM;
20918e710227SColy Li 			goto out;
20928e710227SColy Li 		}
209362253644SColy Li 		atomic_inc(&check_state.started);
20948e710227SColy Li 	}
20958e710227SColy Li 
2096887554abSMingzhe Zou 	/*
2097887554abSMingzhe Zou 	 * Must wait for all threads to stop.
2098887554abSMingzhe Zou 	 */
209962253644SColy Li 	wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
21008e710227SColy Li 
210162253644SColy Li 	for (i = 0; i < check_state.total_threads; i++) {
210262253644SColy Li 		if (check_state.infos[i].result) {
210362253644SColy Li 			ret = check_state.infos[i].result;
21048e710227SColy Li 			goto out;
21058e710227SColy Li 		}
21068e710227SColy Li 	}
21078e710227SColy Li 
21088e710227SColy Li out:
210962253644SColy Li 	rw_unlock(0, c->root);
21108e710227SColy Li 	return ret;
2111cafe5635SKent Overstreet }
2112cafe5635SKent Overstreet 
21132531d9eeSKent Overstreet void bch_initial_gc_finish(struct cache_set *c)
21142531d9eeSKent Overstreet {
211508fdb2cdSColy Li 	struct cache *ca = c->cache;
21162531d9eeSKent Overstreet 	struct bucket *b;
21172531d9eeSKent Overstreet 
21182531d9eeSKent Overstreet 	bch_btree_gc_finish(c);
21192531d9eeSKent Overstreet 
21202531d9eeSKent Overstreet 	mutex_lock(&c->bucket_lock);
21212531d9eeSKent Overstreet 
21222531d9eeSKent Overstreet 	/*
21232531d9eeSKent Overstreet 	 * We need to put some unused buckets directly on the prio freelist in
21242531d9eeSKent Overstreet 	 * order to get the allocator thread started - it needs freed buckets in
21252531d9eeSKent Overstreet 	 * order to rewrite the prios and gens, and it needs to rewrite prios
21262531d9eeSKent Overstreet 	 * and gens in order to free buckets.
21272531d9eeSKent Overstreet 	 *
21282531d9eeSKent Overstreet 	 * This is only safe for buckets that have no live data in them, which
21292531d9eeSKent Overstreet 	 * there should always be some of.
21302531d9eeSKent Overstreet 	 */
21312531d9eeSKent Overstreet 	for_each_bucket(b, ca) {
2132682811b3STang Junhui 		if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2133682811b3STang Junhui 		    fifo_full(&ca->free[RESERVE_BTREE]))
21342531d9eeSKent Overstreet 			break;
21352531d9eeSKent Overstreet 
21362531d9eeSKent Overstreet 		if (bch_can_invalidate_bucket(ca, b) &&
21372531d9eeSKent Overstreet 		    !GC_MARK(b)) {
21382531d9eeSKent Overstreet 			__bch_invalidate_one_bucket(ca, b);
2139682811b3STang Junhui 			if (!fifo_push(&ca->free[RESERVE_PRIO],
2140682811b3STang Junhui 			   b - ca->buckets))
2141682811b3STang Junhui 				fifo_push(&ca->free[RESERVE_BTREE],
21422531d9eeSKent Overstreet 					  b - ca->buckets);
21432531d9eeSKent Overstreet 		}
21442531d9eeSKent Overstreet 	}
21452531d9eeSKent Overstreet 
21462531d9eeSKent Overstreet 	mutex_unlock(&c->bucket_lock);
21472531d9eeSKent Overstreet }
21482531d9eeSKent Overstreet 
2149cafe5635SKent Overstreet /* Btree insertion */
2150cafe5635SKent Overstreet 
2151829a60b9SKent Overstreet static bool btree_insert_key(struct btree *b, struct bkey *k,
21521b207d80SKent Overstreet 			     struct bkey *replace_key)
2153cafe5635SKent Overstreet {
21546f10f7d1SColy Li 	unsigned int status;
2155cafe5635SKent Overstreet 
2156cafe5635SKent Overstreet 	BUG_ON(bkey_cmp(k, &b->key) > 0);
2157cafe5635SKent Overstreet 
2158829a60b9SKent Overstreet 	status = bch_btree_insert_key(&b->keys, k, replace_key);
2159829a60b9SKent Overstreet 	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2160dc9d98d6SKent Overstreet 		bch_check_keys(&b->keys, "%u for %s", status,
21611b207d80SKent Overstreet 			       replace_key ? "replace" : "insert");
2162cafe5635SKent Overstreet 
2163829a60b9SKent Overstreet 		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2164829a60b9SKent Overstreet 					      status);
2165cafe5635SKent Overstreet 		return true;
2166829a60b9SKent Overstreet 	} else
2167829a60b9SKent Overstreet 		return false;
2168cafe5635SKent Overstreet }
2169cafe5635SKent Overstreet 
217059158fdeSKent Overstreet static size_t insert_u64s_remaining(struct btree *b)
217159158fdeSKent Overstreet {
21723572324aSKent Overstreet 	long ret = bch_btree_keys_u64s_remaining(&b->keys);
217359158fdeSKent Overstreet 
217459158fdeSKent Overstreet 	/*
217559158fdeSKent Overstreet 	 * Might land in the middle of an existing extent and have to split it
217659158fdeSKent Overstreet 	 */
217759158fdeSKent Overstreet 	if (b->keys.ops->is_extents)
217859158fdeSKent Overstreet 		ret -= KEY_MAX_U64S;
217959158fdeSKent Overstreet 
218059158fdeSKent Overstreet 	return max(ret, 0L);
218159158fdeSKent Overstreet }
218259158fdeSKent Overstreet 
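/*
 * Insert as many keys from @insert_keys as fit in @b. A key straddling
 * the end of b's range is split: the part up to b->key is inserted
 * here and the rest stays on the keylist for the next node. Returns
 * true if anything was actually inserted.
 */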
218326c949f8SKent Overstreet static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
21841b207d80SKent Overstreet 				  struct keylist *insert_keys,
21851b207d80SKent Overstreet 				  struct bkey *replace_key)
2186cafe5635SKent Overstreet {
2187cafe5635SKent Overstreet 	bool ret = false;
2188dc9d98d6SKent Overstreet 	int oldsize = bch_count_data(&b->keys);
2189cafe5635SKent Overstreet 
219026c949f8SKent Overstreet 	while (!bch_keylist_empty(insert_keys)) {
2191c2f95ae2SKent Overstreet 		struct bkey *k = insert_keys->keys;
219226c949f8SKent Overstreet 
219359158fdeSKent Overstreet 		if (bkey_u64s(k) > insert_u64s_remaining(b))
2194403b6cdeSKent Overstreet 			break;
2195403b6cdeSKent Overstreet 
2196403b6cdeSKent Overstreet 		if (bkey_cmp(k, &b->key) <= 0) {
21973a3b6a4eSKent Overstreet 			if (!b->level)
21983a3b6a4eSKent Overstreet 				bkey_put(b->c, k);
219926c949f8SKent Overstreet 
2200829a60b9SKent Overstreet 			ret |= btree_insert_key(b, k, replace_key);
220126c949f8SKent Overstreet 			bch_keylist_pop_front(insert_keys);
220226c949f8SKent Overstreet 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
220326c949f8SKent Overstreet 			BKEY_PADDED(key) temp;
2204c2f95ae2SKent Overstreet 			bkey_copy(&temp.key, insert_keys->keys);
220526c949f8SKent Overstreet 
220626c949f8SKent Overstreet 			bch_cut_back(&b->key, &temp.key);
2207c2f95ae2SKent Overstreet 			bch_cut_front(&b->key, insert_keys->keys);
220826c949f8SKent Overstreet 
2209829a60b9SKent Overstreet 			ret |= btree_insert_key(b, &temp.key, replace_key);
221026c949f8SKent Overstreet 			break;
221126c949f8SKent Overstreet 		} else {
221226c949f8SKent Overstreet 			break;
221326c949f8SKent Overstreet 		}
2214cafe5635SKent Overstreet 	}
2215cafe5635SKent Overstreet 
2216829a60b9SKent Overstreet 	if (!ret)
2217829a60b9SKent Overstreet 		op->insert_collision = true;
2218829a60b9SKent Overstreet 
2219403b6cdeSKent Overstreet 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2220403b6cdeSKent Overstreet 
2221dc9d98d6SKent Overstreet 	BUG_ON(bch_count_data(&b->keys) < oldsize);
2222cafe5635SKent Overstreet 	return ret;
2223cafe5635SKent Overstreet }
2224cafe5635SKent Overstreet 
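/*
 * Rewrite @b with @insert_keys applied, splitting into two nodes when
 * the result would fill more than ~4/5 of a node, and allocating a new
 * root when the root itself splits.
 */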
222526c949f8SKent Overstreet static int btree_split(struct btree *b, struct btree_op *op,
222626c949f8SKent Overstreet 		       struct keylist *insert_keys,
22271b207d80SKent Overstreet 		       struct bkey *replace_key)
2228cafe5635SKent Overstreet {
2229d6fd3b11SKent Overstreet 	bool split;
2230cafe5635SKent Overstreet 	struct btree *n1, *n2 = NULL, *n3 = NULL;
2231cafe5635SKent Overstreet 	uint64_t start_time = local_clock();
2232b54d6934SKent Overstreet 	struct closure cl;
223317e21a9fSKent Overstreet 	struct keylist parent_keys;
2234b54d6934SKent Overstreet 
2235b54d6934SKent Overstreet 	closure_init_stack(&cl);
223617e21a9fSKent Overstreet 	bch_keylist_init(&parent_keys);
2237cafe5635SKent Overstreet 
22380a63b66dSKent Overstreet 	if (btree_check_reserve(b, op)) {
22390a63b66dSKent Overstreet 		if (!b->level)
224078365411SKent Overstreet 			return -EINTR;
22410a63b66dSKent Overstreet 		else
22420a63b66dSKent Overstreet 			WARN(1, "insufficient reserve for split\n");
22430a63b66dSKent Overstreet 	}
224478365411SKent Overstreet 
22450a63b66dSKent Overstreet 	n1 = btree_node_alloc_replacement(b, op);
2246cafe5635SKent Overstreet 	if (IS_ERR(n1))
2247cafe5635SKent Overstreet 		goto err;
2248cafe5635SKent Overstreet 
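	/*
	 * Only split if the compacted node would still be more than
	 * ~4/5 full; below that, rewriting everything into the single
	 * replacement node n1 is enough.
	 */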
2249ee811287SKent Overstreet 	split = set_blocks(btree_bset_first(n1),
22504e1ebae3SColy Li 			   block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
2251cafe5635SKent Overstreet 
2252cafe5635SKent Overstreet 	if (split) {
22536f10f7d1SColy Li 		unsigned int keys = 0;
2254cafe5635SKent Overstreet 
2255ee811287SKent Overstreet 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2256c37511b8SKent Overstreet 
22572452cc89SSlava Pestov 		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2258cafe5635SKent Overstreet 		if (IS_ERR(n2))
2259cafe5635SKent Overstreet 			goto err_free1;
2260cafe5635SKent Overstreet 
2261d6fd3b11SKent Overstreet 		if (!b->parent) {
22622452cc89SSlava Pestov 			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2263cafe5635SKent Overstreet 			if (IS_ERR(n3))
2264cafe5635SKent Overstreet 				goto err_free2;
2265cafe5635SKent Overstreet 		}
2266cafe5635SKent Overstreet 
22672a285686SKent Overstreet 		mutex_lock(&n1->write_lock);
22682a285686SKent Overstreet 		mutex_lock(&n2->write_lock);
22692a285686SKent Overstreet 
22701b207d80SKent Overstreet 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2271cafe5635SKent Overstreet 
2272d6fd3b11SKent Overstreet 		/*
2273d6fd3b11SKent Overstreet 		 * Has to be a linear search because we don't have an auxiliary
2274cafe5635SKent Overstreet 		 * search tree yet
2275cafe5635SKent Overstreet 		 */
2276cafe5635SKent Overstreet 
2277ee811287SKent Overstreet 		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2278ee811287SKent Overstreet 			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2279fafff81cSKent Overstreet 							keys));
2280cafe5635SKent Overstreet 
2281fafff81cSKent Overstreet 		bkey_copy_key(&n1->key,
2282ee811287SKent Overstreet 			      bset_bkey_idx(btree_bset_first(n1), keys));
2283ee811287SKent Overstreet 		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2284cafe5635SKent Overstreet 
2285ee811287SKent Overstreet 		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2286ee811287SKent Overstreet 		btree_bset_first(n1)->keys = keys;
2287cafe5635SKent Overstreet 
2288ee811287SKent Overstreet 		memcpy(btree_bset_first(n2)->start,
2289ee811287SKent Overstreet 		       bset_bkey_last(btree_bset_first(n1)),
2290ee811287SKent Overstreet 		       btree_bset_first(n2)->keys * sizeof(uint64_t));
2291cafe5635SKent Overstreet 
2292cafe5635SKent Overstreet 		bkey_copy_key(&n2->key, &b->key);
2293cafe5635SKent Overstreet 
229417e21a9fSKent Overstreet 		bch_keylist_add(&parent_keys, &n2->key);
2295b54d6934SKent Overstreet 		bch_btree_node_write(n2, &cl);
22962a285686SKent Overstreet 		mutex_unlock(&n2->write_lock);
2297cafe5635SKent Overstreet 		rw_unlock(true, n2);
2298c37511b8SKent Overstreet 	} else {
2299ee811287SKent Overstreet 		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2300c37511b8SKent Overstreet 
23012a285686SKent Overstreet 		mutex_lock(&n1->write_lock);
23021b207d80SKent Overstreet 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2303c37511b8SKent Overstreet 	}
2304cafe5635SKent Overstreet 
230517e21a9fSKent Overstreet 	bch_keylist_add(&parent_keys, &n1->key);
2306b54d6934SKent Overstreet 	bch_btree_node_write(n1, &cl);
23072a285686SKent Overstreet 	mutex_unlock(&n1->write_lock);
2308cafe5635SKent Overstreet 
2309cafe5635SKent Overstreet 	if (n3) {
2310d6fd3b11SKent Overstreet 		/* Depth increases, make a new root */
23112a285686SKent Overstreet 		mutex_lock(&n3->write_lock);
2312cafe5635SKent Overstreet 		bkey_copy_key(&n3->key, &MAX_KEY);
231317e21a9fSKent Overstreet 		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2314b54d6934SKent Overstreet 		bch_btree_node_write(n3, &cl);
23152a285686SKent Overstreet 		mutex_unlock(&n3->write_lock);
2316cafe5635SKent Overstreet 
2317b54d6934SKent Overstreet 		closure_sync(&cl);
2318cafe5635SKent Overstreet 		bch_btree_set_root(n3);
2319cafe5635SKent Overstreet 		rw_unlock(true, n3);
2320d6fd3b11SKent Overstreet 	} else if (!b->parent) {
2321d6fd3b11SKent Overstreet 		/* Root filled up but didn't need to be split */
2322b54d6934SKent Overstreet 		closure_sync(&cl);
2323cafe5635SKent Overstreet 		bch_btree_set_root(n1);
2324cafe5635SKent Overstreet 	} else {
232517e21a9fSKent Overstreet 		/* Split a non-root node */
2326b54d6934SKent Overstreet 		closure_sync(&cl);
232717e21a9fSKent Overstreet 		make_btree_freeing_key(b, parent_keys.top);
232817e21a9fSKent Overstreet 		bch_keylist_push(&parent_keys);
232917e21a9fSKent Overstreet 
233017e21a9fSKent Overstreet 		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
233117e21a9fSKent Overstreet 		BUG_ON(!bch_keylist_empty(&parent_keys));
2332cafe5635SKent Overstreet 	}
2333cafe5635SKent Overstreet 
233405335cffSKent Overstreet 	btree_node_free(b);
2335cafe5635SKent Overstreet 	rw_unlock(true, n1);
2336cafe5635SKent Overstreet 
2337169ef1cfSKent Overstreet 	bch_time_stats_update(&b->c->btree_split_time, start_time);
2338cafe5635SKent Overstreet 
2339cafe5635SKent Overstreet 	return 0;
2340cafe5635SKent Overstreet err_free2:
23415f5837d2SKent Overstreet 	bkey_put(b->c, &n2->key);
2342e8e1d468SKent Overstreet 	btree_node_free(n2);
2343cafe5635SKent Overstreet 	rw_unlock(true, n2);
2344cafe5635SKent Overstreet err_free1:
23455f5837d2SKent Overstreet 	bkey_put(b->c, &n1->key);
2346e8e1d468SKent Overstreet 	btree_node_free(n1);
2347cafe5635SKent Overstreet 	rw_unlock(true, n1);
2348cafe5635SKent Overstreet err:
23490a63b66dSKent Overstreet 	WARN(1, "bcache: btree split failed (level %u)\n", b->level);
23505f5837d2SKent Overstreet 
2351cafe5635SKent Overstreet 	if (n3 == ERR_PTR(-EAGAIN) ||
2352cafe5635SKent Overstreet 	    n2 == ERR_PTR(-EAGAIN) ||
2353cafe5635SKent Overstreet 	    n1 == ERR_PTR(-EAGAIN))
2354cafe5635SKent Overstreet 		return -EAGAIN;
2355cafe5635SKent Overstreet 
2356cafe5635SKent Overstreet 	return -ENOMEM;
2357cafe5635SKent Overstreet }
2358cafe5635SKent Overstreet 
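/*
 * Insert @insert_keys into @b under its write lock. Keys that don't
 * fit fall through to the split path below, whose return codes tell
 * the caller how to retry: -EAGAIN to try again from a context that
 * may block, -EINTR to retraverse from the root (op->lock has been
 * raised so the retry takes locks suitable for splitting).
 */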
235926c949f8SKent Overstreet static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2360c18536a7SKent Overstreet 				 struct keylist *insert_keys,
23611b207d80SKent Overstreet 				 atomic_t *journal_ref,
23621b207d80SKent Overstreet 				 struct bkey *replace_key)
236326c949f8SKent Overstreet {
23642a285686SKent Overstreet 	struct closure cl;
23652a285686SKent Overstreet 
23661b207d80SKent Overstreet 	BUG_ON(b->level && replace_key);
23671b207d80SKent Overstreet 
23682a285686SKent Overstreet 	closure_init_stack(&cl);
23692a285686SKent Overstreet 
23702a285686SKent Overstreet 	mutex_lock(&b->write_lock);
23712a285686SKent Overstreet 
23722a285686SKent Overstreet 	if (write_block(b) != btree_bset_last(b) &&
23732a285686SKent Overstreet 	    b->keys.last_set_unwritten)
23742a285686SKent Overstreet 		bch_btree_init_next(b); /* just wrote a set */
23752a285686SKent Overstreet 
237659158fdeSKent Overstreet 	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
23772a285686SKent Overstreet 		mutex_unlock(&b->write_lock);
23782a285686SKent Overstreet 		goto split;
23792a285686SKent Overstreet 	}
23802a285686SKent Overstreet 
23812a285686SKent Overstreet 	BUG_ON(write_block(b) != btree_bset_last(b));
23822a285686SKent Overstreet 
23832a285686SKent Overstreet 	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
23842a285686SKent Overstreet 		if (!b->level)
23852a285686SKent Overstreet 			bch_btree_leaf_dirty(b, journal_ref);
23862a285686SKent Overstreet 		else
23872a285686SKent Overstreet 			bch_btree_node_write(b, &cl);
23882a285686SKent Overstreet 	}
23892a285686SKent Overstreet 
23902a285686SKent Overstreet 	mutex_unlock(&b->write_lock);
23912a285686SKent Overstreet 
23922a285686SKent Overstreet 	/* wait for btree node write if necessary, after unlock */
23932a285686SKent Overstreet 	closure_sync(&cl);
23942a285686SKent Overstreet 
23952a285686SKent Overstreet 	return 0;
23962a285686SKent Overstreet split:
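	/*
	 * Splitting may block allocating btree nodes. If we're inside
	 * generic bio submission (current->bio_list is set) we can't
	 * block here, presumably because doing so could deadlock
	 * against the very I/O we'd be waiting on; raise op->lock and
	 * let the caller retry instead.
	 */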
239726c949f8SKent Overstreet 	if (current->bio_list) {
239826c949f8SKent Overstreet 		op->lock = b->c->root->level + 1;
239917e21a9fSKent Overstreet 		return -EAGAIN;
240026c949f8SKent Overstreet 	} else if (op->lock <= b->c->root->level) {
240126c949f8SKent Overstreet 		op->lock = b->c->root->level + 1;
240217e21a9fSKent Overstreet 		return -EINTR;
240326c949f8SKent Overstreet 	} else {
240417e21a9fSKent Overstreet 		/* Invalidated all iterators */
24053b3e9e50SKent Overstreet 		int ret = btree_split(b, op, insert_keys, replace_key);
24063b3e9e50SKent Overstreet 
24072a285686SKent Overstreet 		if (bch_keylist_empty(insert_keys))
240817e21a9fSKent Overstreet 			return 0;
24092a285686SKent Overstreet 		else if (!ret)
24102a285686SKent Overstreet 			return -EINTR;
24112a285686SKent Overstreet 		return ret;
241217e21a9fSKent Overstreet 	}
241326c949f8SKent Overstreet }
241426c949f8SKent Overstreet 
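/*
 * Insert a "check key" (used on the cache miss path): a key whose
 * single pointer aims at the reserved PTR_CHECK_DEV, so that a racing
 * insert to the same range later shows up as an insert collision.
 * Upgrades the read lock on @b to a write lock first, bailing out if
 * the node got split or rewritten underneath us (ptr/seq changed).
 */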
2415e7c590ebSKent Overstreet int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2416e7c590ebSKent Overstreet 			       struct bkey *check_key)
2417e7c590ebSKent Overstreet {
2418e7c590ebSKent Overstreet 	int ret = -EINTR;
2419e7c590ebSKent Overstreet 	uint64_t btree_ptr = b->key.ptr[0];
2420e7c590ebSKent Overstreet 	unsigned long seq = b->seq;
2421e7c590ebSKent Overstreet 	struct keylist insert;
2422e7c590ebSKent Overstreet 	bool upgrade = op->lock == -1;
2423e7c590ebSKent Overstreet 
2424e7c590ebSKent Overstreet 	bch_keylist_init(&insert);
2425e7c590ebSKent Overstreet 
2426e7c590ebSKent Overstreet 	if (upgrade) {
2427e7c590ebSKent Overstreet 		rw_unlock(false, b);
2428e7c590ebSKent Overstreet 		rw_lock(true, b, b->level);
2429e7c590ebSKent Overstreet 
2430e7c590ebSKent Overstreet 		if (b->key.ptr[0] != btree_ptr ||
24312ef9ccbfSZheng Liu 		    b->seq != seq + 1) {
24322ef9ccbfSZheng Liu 			op->lock = b->level;
2433e7c590ebSKent Overstreet 			goto out;
2434e7c590ebSKent Overstreet 		}
24352ef9ccbfSZheng Liu 	}
2436e7c590ebSKent Overstreet 
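	/*
	 * Random bits for the pointer, on the reserved PTR_CHECK_DEV,
	 * which never corresponds to a real cache device, so the check
	 * key can't be confused with a pointer to cached data.
	 */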
2437e7c590ebSKent Overstreet 	SET_KEY_PTRS(check_key, 1);
2438e7c590ebSKent Overstreet 	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2439e7c590ebSKent Overstreet 
2440e7c590ebSKent Overstreet 	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2441e7c590ebSKent Overstreet 
2442e7c590ebSKent Overstreet 	bch_keylist_add(&insert, check_key);
2443e7c590ebSKent Overstreet 
24441b207d80SKent Overstreet 	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2445e7c590ebSKent Overstreet 
2446e7c590ebSKent Overstreet 	BUG_ON(!ret && !bch_keylist_empty(&insert));
2447e7c590ebSKent Overstreet out:
2448e7c590ebSKent Overstreet 	if (upgrade)
2449e7c590ebSKent Overstreet 		downgrade_write(&b->lock);
2450e7c590ebSKent Overstreet 	return ret;
2451e7c590ebSKent Overstreet }
2452e7c590ebSKent Overstreet 
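/*
 * bch_btree_insert() works as a btree_map_leaf_nodes() callback;
 * btree_insert_op bundles the arguments that have to travel alongside
 * the embedded btree_op.
 */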
2453cc7b8819SKent Overstreet struct btree_insert_op {
2454cc7b8819SKent Overstreet 	struct btree_op	op;
2455cc7b8819SKent Overstreet 	struct keylist	*keys;
2456cc7b8819SKent Overstreet 	atomic_t	*journal_ref;
2457cc7b8819SKent Overstreet 	struct bkey	*replace_key;
2458cc7b8819SKent Overstreet };
2459cc7b8819SKent Overstreet 
246008239ca2SWei Yongjun static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2461cafe5635SKent Overstreet {
2462cc7b8819SKent Overstreet 	struct btree_insert_op *op = container_of(b_op,
2463cc7b8819SKent Overstreet 					struct btree_insert_op, op);
2464403b6cdeSKent Overstreet 
2465cc7b8819SKent Overstreet 	int ret = bch_btree_insert_node(b, &op->op, op->keys,
2466cc7b8819SKent Overstreet 					op->journal_ref, op->replace_key);
2467cc7b8819SKent Overstreet 	if (ret && !bch_keylist_empty(op->keys))
2468cc7b8819SKent Overstreet 		return ret;
2469cc7b8819SKent Overstreet 	else
2470cc7b8819SKent Overstreet 		return MAP_DONE;
2471cafe5635SKent Overstreet }
2472cafe5635SKent Overstreet 
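/*
 * Top level entry point: map over the leaf nodes covering @keys,
 * inserting into each until the keylist is drained. On error the
 * remaining keys are dropped; a replace that never found its old key
 * is reported as -ESRCH.
 */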
2473cc7b8819SKent Overstreet int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2474cc7b8819SKent Overstreet 		     atomic_t *journal_ref, struct bkey *replace_key)
2475cafe5635SKent Overstreet {
2476cc7b8819SKent Overstreet 	struct btree_insert_op op;
2477cafe5635SKent Overstreet 	int ret = 0;
2478cafe5635SKent Overstreet 
2479cc7b8819SKent Overstreet 	BUG_ON(current->bio_list);
24804f3d4014SKent Overstreet 	BUG_ON(bch_keylist_empty(keys));
2481cafe5635SKent Overstreet 
2482cc7b8819SKent Overstreet 	bch_btree_op_init(&op.op, 0);
2483cc7b8819SKent Overstreet 	op.keys		= keys;
2484cc7b8819SKent Overstreet 	op.journal_ref	= journal_ref;
2485cc7b8819SKent Overstreet 	op.replace_key	= replace_key;
2486cafe5635SKent Overstreet 
2487cc7b8819SKent Overstreet 	while (!ret && !bch_keylist_empty(keys)) {
2488cc7b8819SKent Overstreet 		op.op.lock = 0;
2489cc7b8819SKent Overstreet 		ret = bch_btree_map_leaf_nodes(&op.op, c,
2490cc7b8819SKent Overstreet 					       &START_KEY(keys->keys),
2491cc7b8819SKent Overstreet 					       btree_insert_fn);
2492cc7b8819SKent Overstreet 	}
2493cc7b8819SKent Overstreet 
2494cc7b8819SKent Overstreet 	if (ret) {
2495cafe5635SKent Overstreet 		struct bkey *k;
2496cafe5635SKent Overstreet 
249746f5aa88SJoe Perches 		pr_err("error %i\n", ret);
2498cafe5635SKent Overstreet 
24994f3d4014SKent Overstreet 		while ((k = bch_keylist_pop(keys)))
25003a3b6a4eSKent Overstreet 			bkey_put(c, k);
2501cc7b8819SKent Overstreet 	} else if (op.op.insert_collision)
2502cc7b8819SKent Overstreet 		ret = -ESRCH;
25036054c6d4SKent Overstreet 
2504cafe5635SKent Overstreet 	return ret;
2505cafe5635SKent Overstreet }
2506cafe5635SKent Overstreet 
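/*
 * Make @b the cache set's root and journal a meta entry, so the new
 * root's location is persisted before we return.
 */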
2507cafe5635SKent Overstreet void bch_btree_set_root(struct btree *b)
2508cafe5635SKent Overstreet {
25096f10f7d1SColy Li 	unsigned int i;
2510e49c7c37SKent Overstreet 	struct closure cl;
2511e49c7c37SKent Overstreet 
2512e49c7c37SKent Overstreet 	closure_init_stack(&cl);
2513cafe5635SKent Overstreet 
2514c37511b8SKent Overstreet 	trace_bcache_btree_set_root(b);
2515c37511b8SKent Overstreet 
2516cafe5635SKent Overstreet 	BUG_ON(!b->written);
2517cafe5635SKent Overstreet 
2518cafe5635SKent Overstreet 	for (i = 0; i < KEY_PTRS(&b->key); i++)
2519cafe5635SKent Overstreet 		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2520cafe5635SKent Overstreet 
2521cafe5635SKent Overstreet 	mutex_lock(&b->c->bucket_lock);
2522cafe5635SKent Overstreet 	list_del_init(&b->list);
2523cafe5635SKent Overstreet 	mutex_unlock(&b->c->bucket_lock);
2524cafe5635SKent Overstreet 
2525cafe5635SKent Overstreet 	b->c->root = b;
2526cafe5635SKent Overstreet 
2527e49c7c37SKent Overstreet 	bch_journal_meta(b->c, &cl);
2528e49c7c37SKent Overstreet 	closure_sync(&cl);
2529cafe5635SKent Overstreet }
2530cafe5635SKent Overstreet 
253148dad8baSKent Overstreet /* Map across nodes or keys */
253248dad8baSKent Overstreet 
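/*
 * Depth-first walk of the btree: recurse through interior nodes and
 * call @fn on every leaf, or on interior nodes too when flags ==
 * MAP_ALL_NODES. @from only bounds the leftmost path; it's cleared
 * after the first child so deeper siblings are walked in full.
 */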
253348dad8baSKent Overstreet static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
253448dad8baSKent Overstreet 				       struct bkey *from,
253548dad8baSKent Overstreet 				       btree_map_nodes_fn *fn, int flags)
253648dad8baSKent Overstreet {
253748dad8baSKent Overstreet 	int ret = MAP_CONTINUE;
253848dad8baSKent Overstreet 
253948dad8baSKent Overstreet 	if (b->level) {
254048dad8baSKent Overstreet 		struct bkey *k;
254148dad8baSKent Overstreet 		struct btree_iter iter;
254248dad8baSKent Overstreet 
2543c052dd9aSKent Overstreet 		bch_btree_iter_init(&b->keys, &iter, from);
254448dad8baSKent Overstreet 
2545a85e968eSKent Overstreet 		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
254648dad8baSKent Overstreet 						       bch_ptr_bad))) {
2547feac1a70SColy Li 			ret = bcache_btree(map_nodes_recurse, k, b,
254848dad8baSKent Overstreet 				    op, from, fn, flags);
254948dad8baSKent Overstreet 			from = NULL;
255048dad8baSKent Overstreet 
255148dad8baSKent Overstreet 			if (ret != MAP_CONTINUE)
255248dad8baSKent Overstreet 				return ret;
255348dad8baSKent Overstreet 		}
255448dad8baSKent Overstreet 	}
255548dad8baSKent Overstreet 
255648dad8baSKent Overstreet 	if (!b->level || flags == MAP_ALL_NODES)
255748dad8baSKent Overstreet 		ret = fn(op, b);
255848dad8baSKent Overstreet 
255948dad8baSKent Overstreet 	return ret;
256048dad8baSKent Overstreet }
256148dad8baSKent Overstreet 
256248dad8baSKent Overstreet int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
256348dad8baSKent Overstreet 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
256448dad8baSKent Overstreet {
2565feac1a70SColy Li 	return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
256648dad8baSKent Overstreet }
256748dad8baSKent Overstreet 
2568253a99d9SColy Li int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
256948dad8baSKent Overstreet 			       struct bkey *from, btree_map_keys_fn *fn,
257048dad8baSKent Overstreet 			       int flags)
257148dad8baSKent Overstreet {
257248dad8baSKent Overstreet 	int ret = MAP_CONTINUE;
257348dad8baSKent Overstreet 	struct bkey *k;
257448dad8baSKent Overstreet 	struct btree_iter iter;
257548dad8baSKent Overstreet 
2576c052dd9aSKent Overstreet 	bch_btree_iter_init(&b->keys, &iter, from);
257748dad8baSKent Overstreet 
2578a85e968eSKent Overstreet 	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
257948dad8baSKent Overstreet 		ret = !b->level
258048dad8baSKent Overstreet 			? fn(op, b, k)
2581feac1a70SColy Li 			: bcache_btree(map_keys_recurse, k,
2582feac1a70SColy Li 				       b, op, from, fn, flags);
258348dad8baSKent Overstreet 		from = NULL;
258448dad8baSKent Overstreet 
258548dad8baSKent Overstreet 		if (ret != MAP_CONTINUE)
258648dad8baSKent Overstreet 			return ret;
258748dad8baSKent Overstreet 	}
258848dad8baSKent Overstreet 
258948dad8baSKent Overstreet 	if (!b->level && (flags & MAP_END_KEY))
259048dad8baSKent Overstreet 		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
259148dad8baSKent Overstreet 				     KEY_OFFSET(&b->key), 0));
259248dad8baSKent Overstreet 
259348dad8baSKent Overstreet 	return ret;
259448dad8baSKent Overstreet }
259548dad8baSKent Overstreet 
259648dad8baSKent Overstreet int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
259748dad8baSKent Overstreet 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
259848dad8baSKent Overstreet {
2599feac1a70SColy Li 	return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
260048dad8baSKent Overstreet }
260148dad8baSKent Overstreet 
2602cafe5635SKent Overstreet /* Keybuf code */
2603cafe5635SKent Overstreet 
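/*
 * A keybuf holds keys harvested from the btree (writeback and moving
 * garbage collection use these) in an rbtree, with a fixed-size
 * freelist bounding how many can be buffered at once. Overlapping
 * keys compare equal so the buffer never holds two keys covering the
 * same sectors.
 */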
2604cafe5635SKent Overstreet static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2605cafe5635SKent Overstreet {
2606cafe5635SKent Overstreet 	/* Overlapping keys compare equal */
2607cafe5635SKent Overstreet 	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2608cafe5635SKent Overstreet 		return -1;
2609cafe5635SKent Overstreet 	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2610cafe5635SKent Overstreet 		return 1;
2611cafe5635SKent Overstreet 	return 0;
2612cafe5635SKent Overstreet }
2613cafe5635SKent Overstreet 
2614cafe5635SKent Overstreet static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2615cafe5635SKent Overstreet 					    struct keybuf_key *r)
2616cafe5635SKent Overstreet {
2617cafe5635SKent Overstreet 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2618cafe5635SKent Overstreet }
2619cafe5635SKent Overstreet 
262048dad8baSKent Overstreet struct refill {
262148dad8baSKent Overstreet 	struct btree_op	op;
26226f10f7d1SColy Li 	unsigned int	nr_found;
262348dad8baSKent Overstreet 	struct keybuf	*buf;
262448dad8baSKent Overstreet 	struct bkey	*end;
262548dad8baSKent Overstreet 	keybuf_pred_fn	*pred;
262648dad8baSKent Overstreet };
262748dad8baSKent Overstreet 
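/*
 * Map function for bch_refill_keybuf(): buffer each key that @pred
 * accepts, stopping once we pass refill->end or the freelist runs
 * dry.
 */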
262848dad8baSKent Overstreet static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
262948dad8baSKent Overstreet 			    struct bkey *k)
2630cafe5635SKent Overstreet {
263148dad8baSKent Overstreet 	struct refill *refill = container_of(op, struct refill, op);
263248dad8baSKent Overstreet 	struct keybuf *buf = refill->buf;
263348dad8baSKent Overstreet 	int ret = MAP_CONTINUE;
2634cafe5635SKent Overstreet 
26352d6cb6edSTang Junhui 	if (bkey_cmp(k, refill->end) > 0) {
263648dad8baSKent Overstreet 		ret = MAP_DONE;
263748dad8baSKent Overstreet 		goto out;
2638cafe5635SKent Overstreet 	}
2639cafe5635SKent Overstreet 
264048dad8baSKent Overstreet 	if (!KEY_SIZE(k)) /* end key */
264148dad8baSKent Overstreet 		goto out;
2642cafe5635SKent Overstreet 
264348dad8baSKent Overstreet 	if (refill->pred(buf, k)) {
2644cafe5635SKent Overstreet 		struct keybuf_key *w;
2645cafe5635SKent Overstreet 
2646cafe5635SKent Overstreet 		spin_lock(&buf->lock);
2647cafe5635SKent Overstreet 
2648cafe5635SKent Overstreet 		w = array_alloc(&buf->freelist);
264948dad8baSKent Overstreet 		if (!w) {
265048dad8baSKent Overstreet 			spin_unlock(&buf->lock);
265148dad8baSKent Overstreet 			return MAP_DONE;
265248dad8baSKent Overstreet 		}
2653cafe5635SKent Overstreet 
2654cafe5635SKent Overstreet 		w->private = NULL;
2655cafe5635SKent Overstreet 		bkey_copy(&w->key, k);
2656cafe5635SKent Overstreet 
2657cafe5635SKent Overstreet 		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2658cafe5635SKent Overstreet 			array_free(&buf->freelist, w);
265948a915a8SKent Overstreet 		else
266048a915a8SKent Overstreet 			refill->nr_found++;
2661cafe5635SKent Overstreet 
266248dad8baSKent Overstreet 		if (array_freelist_empty(&buf->freelist))
266348dad8baSKent Overstreet 			ret = MAP_DONE;
266448dad8baSKent Overstreet 
2665cafe5635SKent Overstreet 		spin_unlock(&buf->lock);
2666cafe5635SKent Overstreet 	}
266748dad8baSKent Overstreet out:
266848dad8baSKent Overstreet 	buf->last_scanned = *k;
266948dad8baSKent Overstreet 	return ret;
2670cafe5635SKent Overstreet }
2671cafe5635SKent Overstreet 
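/*
 * Scan the btree from buf->last_scanned towards @end, buffering the
 * keys @pred accepts, then recompute buf->start/buf->end to cover
 * exactly what's buffered. The scan stops early if the freelist runs
 * dry; buf->last_scanned records where to resume.
 */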
2672cafe5635SKent Overstreet void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
267372c27061SKent Overstreet 		       struct bkey *end, keybuf_pred_fn *pred)
2674cafe5635SKent Overstreet {
2675cafe5635SKent Overstreet 	struct bkey start = buf->last_scanned;
267648dad8baSKent Overstreet 	struct refill refill;
2677cafe5635SKent Overstreet 
2678cafe5635SKent Overstreet 	cond_resched();
2679cafe5635SKent Overstreet 
2680b54d6934SKent Overstreet 	bch_btree_op_init(&refill.op, -1);
268148a915a8SKent Overstreet 	refill.nr_found	= 0;
268248dad8baSKent Overstreet 	refill.buf	= buf;
268348dad8baSKent Overstreet 	refill.end	= end;
268448dad8baSKent Overstreet 	refill.pred	= pred;
268548dad8baSKent Overstreet 
268648dad8baSKent Overstreet 	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
268748dad8baSKent Overstreet 			   refill_keybuf_fn, MAP_END_KEY);
2688cafe5635SKent Overstreet 
268948a915a8SKent Overstreet 	trace_bcache_keyscan(refill.nr_found,
2690cafe5635SKent Overstreet 			     KEY_INODE(&start), KEY_OFFSET(&start),
269148a915a8SKent Overstreet 			     KEY_INODE(&buf->last_scanned),
269248a915a8SKent Overstreet 			     KEY_OFFSET(&buf->last_scanned));
2693cafe5635SKent Overstreet 
2694cafe5635SKent Overstreet 	spin_lock(&buf->lock);
2695cafe5635SKent Overstreet 
2696cafe5635SKent Overstreet 	if (!RB_EMPTY_ROOT(&buf->keys)) {
2697cafe5635SKent Overstreet 		struct keybuf_key *w;
26981fae7cf0SColy Li 
2699cafe5635SKent Overstreet 		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2700cafe5635SKent Overstreet 		buf->start	= START_KEY(&w->key);
2701cafe5635SKent Overstreet 
2702cafe5635SKent Overstreet 		w = RB_LAST(&buf->keys, struct keybuf_key, node);
2703cafe5635SKent Overstreet 		buf->end	= w->key;
2704cafe5635SKent Overstreet 	} else {
2705cafe5635SKent Overstreet 		buf->start	= MAX_KEY;
2706cafe5635SKent Overstreet 		buf->end	= MAX_KEY;
2707cafe5635SKent Overstreet 	}
2708cafe5635SKent Overstreet 
2709cafe5635SKent Overstreet 	spin_unlock(&buf->lock);
2710cafe5635SKent Overstreet }
2711cafe5635SKent Overstreet 
2712cafe5635SKent Overstreet static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2713cafe5635SKent Overstreet {
2714cafe5635SKent Overstreet 	rb_erase(&w->node, &buf->keys);
2715cafe5635SKent Overstreet 	array_free(&buf->freelist, w);
2716cafe5635SKent Overstreet }
2717cafe5635SKent Overstreet 
2718cafe5635SKent Overstreet void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2719cafe5635SKent Overstreet {
2720cafe5635SKent Overstreet 	spin_lock(&buf->lock);
2721cafe5635SKent Overstreet 	__bch_keybuf_del(buf, w);
2722cafe5635SKent Overstreet 	spin_unlock(&buf->lock);
2723cafe5635SKent Overstreet }
2724cafe5635SKent Overstreet 
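/*
 * Called when keys in [start, end) are going away: drop any buffered
 * keys in that range that haven't been claimed yet, and report
 * whether an overlapping key was already in flight (w->private set).
 */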
2725cafe5635SKent Overstreet bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2726cafe5635SKent Overstreet 				  struct bkey *end)
2727cafe5635SKent Overstreet {
2728cafe5635SKent Overstreet 	bool ret = false;
2729cafe5635SKent Overstreet 	struct keybuf_key *p, *w, s;
27301fae7cf0SColy Li 
2731cafe5635SKent Overstreet 	s.key = *start;
2732cafe5635SKent Overstreet 
2733cafe5635SKent Overstreet 	if (bkey_cmp(end, &buf->start) <= 0 ||
2734cafe5635SKent Overstreet 	    bkey_cmp(start, &buf->end) >= 0)
2735cafe5635SKent Overstreet 		return false;
2736cafe5635SKent Overstreet 
2737cafe5635SKent Overstreet 	spin_lock(&buf->lock);
2738cafe5635SKent Overstreet 	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2739cafe5635SKent Overstreet 
2740cafe5635SKent Overstreet 	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2741cafe5635SKent Overstreet 		p = w;
2742cafe5635SKent Overstreet 		w = RB_NEXT(w, node);
2743cafe5635SKent Overstreet 
2744cafe5635SKent Overstreet 		if (p->private)
2745cafe5635SKent Overstreet 			ret = true;
2746cafe5635SKent Overstreet 		else
2747cafe5635SKent Overstreet 			__bch_keybuf_del(buf, p);
2748cafe5635SKent Overstreet 	}
2749cafe5635SKent Overstreet 
2750cafe5635SKent Overstreet 	spin_unlock(&buf->lock);
2751cafe5635SKent Overstreet 	return ret;
2752cafe5635SKent Overstreet }
2753cafe5635SKent Overstreet 
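/*
 * Claim the first buffered key that isn't already in flight, marking
 * it with an ERR_PTR sentinel in w->private so concurrent callers
 * skip it.
 */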
2754cafe5635SKent Overstreet struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2755cafe5635SKent Overstreet {
2756cafe5635SKent Overstreet 	struct keybuf_key *w;
27571fae7cf0SColy Li 
2758cafe5635SKent Overstreet 	spin_lock(&buf->lock);
2759cafe5635SKent Overstreet 
2760cafe5635SKent Overstreet 	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2761cafe5635SKent Overstreet 
2762cafe5635SKent Overstreet 	while (w && w->private)
2763cafe5635SKent Overstreet 		w = RB_NEXT(w, node);
2764cafe5635SKent Overstreet 
2765cafe5635SKent Overstreet 	if (w)
2766cafe5635SKent Overstreet 		w->private = ERR_PTR(-EINTR);
2767cafe5635SKent Overstreet 
2768cafe5635SKent Overstreet 	spin_unlock(&buf->lock);
2769cafe5635SKent Overstreet 	return w;
2770cafe5635SKent Overstreet }
2771cafe5635SKent Overstreet 
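/*
 * Like bch_keybuf_next(), but refills the buffer from the btree when
 * it runs empty, until the scan has gone all the way to @end.
 */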
2772cafe5635SKent Overstreet struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2773cafe5635SKent Overstreet 					  struct keybuf *buf,
277472c27061SKent Overstreet 					  struct bkey *end,
277572c27061SKent Overstreet 					  keybuf_pred_fn *pred)
2776cafe5635SKent Overstreet {
2777cafe5635SKent Overstreet 	struct keybuf_key *ret;
2778cafe5635SKent Overstreet 
2779cafe5635SKent Overstreet 	while (1) {
2780cafe5635SKent Overstreet 		ret = bch_keybuf_next(buf);
2781cafe5635SKent Overstreet 		if (ret)
2782cafe5635SKent Overstreet 			break;
2783cafe5635SKent Overstreet 
2784cafe5635SKent Overstreet 		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
278546f5aa88SJoe Perches 			pr_debug("scan finished\n");
2786cafe5635SKent Overstreet 			break;
2787cafe5635SKent Overstreet 		}
2788cafe5635SKent Overstreet 
278972c27061SKent Overstreet 		bch_refill_keybuf(c, buf, end, pred);
2790cafe5635SKent Overstreet 	}
2791cafe5635SKent Overstreet 
2792cafe5635SKent Overstreet 	return ret;
2793cafe5635SKent Overstreet }
2794cafe5635SKent Overstreet 
279572c27061SKent Overstreet void bch_keybuf_init(struct keybuf *buf)
2796cafe5635SKent Overstreet {
2797cafe5635SKent Overstreet 	buf->last_scanned	= MAX_KEY;
2798cafe5635SKent Overstreet 	buf->keys		= RB_ROOT;
2799cafe5635SKent Overstreet 
2800cafe5635SKent Overstreet 	spin_lock_init(&buf->lock);
2801cafe5635SKent Overstreet 	array_allocator_init(&buf->freelist);
2802cafe5635SKent Overstreet }
28039f233ffeSKai Krakow 
28049f233ffeSKai Krakow void bch_btree_exit(void)
28059f233ffeSKai Krakow {
28069f233ffeSKai Krakow 	if (btree_io_wq)
28079f233ffeSKai Krakow 		destroy_workqueue(btree_io_wq);
28089f233ffeSKai Krakow }
28099f233ffeSKai Krakow 
28109f233ffeSKai Krakow int __init bch_btree_init(void)
28119f233ffeSKai Krakow {
2812d797bd98SKai Krakow 	btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
28139f233ffeSKai Krakow 	if (!btree_io_wq)
28149f233ffeSKai Krakow 		return -ENOMEM;
28159f233ffeSKai Krakow 
28169f233ffeSKai Krakow 	return 0;
28179f233ffeSKai Krakow }
2818