xref: /linux/drivers/md/bcache/btree.c (revision 7f4a59de28137aae4316a58f501b599ac3b87395)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2cafe5635SKent Overstreet /*
3cafe5635SKent Overstreet  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4cafe5635SKent Overstreet  *
5cafe5635SKent Overstreet  * Uses a block device as cache for other block devices; optimized for SSDs.
6cafe5635SKent Overstreet  * All allocation is done in buckets, which should match the erase block size
7cafe5635SKent Overstreet  * of the device.
8cafe5635SKent Overstreet  *
9cafe5635SKent Overstreet  * Buckets containing cached data are kept on a heap sorted by priority;
10cafe5635SKent Overstreet  * bucket priority is increased on cache hit, and periodically all the buckets
11cafe5635SKent Overstreet  * on the heap have their priority scaled down. This currently is just used as
12cafe5635SKent Overstreet  * an LRU but in the future should allow for more intelligent heuristics.
13cafe5635SKent Overstreet  *
14cafe5635SKent Overstreet  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15cafe5635SKent Overstreet  * counter. Garbage collection is used to remove stale pointers.
16cafe5635SKent Overstreet  *
17cafe5635SKent Overstreet  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18cafe5635SKent Overstreet  * as keys are inserted we only sort the pages that have not yet been written.
19cafe5635SKent Overstreet  * When garbage collection is run, we resort the entire node.
20cafe5635SKent Overstreet  *
215fb94e9cSMauro Carvalho Chehab  * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22cafe5635SKent Overstreet  */
23cafe5635SKent Overstreet 
24cafe5635SKent Overstreet #include "bcache.h"
25cafe5635SKent Overstreet #include "btree.h"
26cafe5635SKent Overstreet #include "debug.h"
2765d45231SKent Overstreet #include "extents.h"
28cafe5635SKent Overstreet 
29cafe5635SKent Overstreet #include <linux/slab.h>
30cafe5635SKent Overstreet #include <linux/bitops.h>
31cafe5635SKent Overstreet #include <linux/hash.h>
3272a44517SKent Overstreet #include <linux/kthread.h>
33cd953ed0SGeert Uytterhoeven #include <linux/prefetch.h>
34cafe5635SKent Overstreet #include <linux/random.h>
35cafe5635SKent Overstreet #include <linux/rcupdate.h>
36e6017571SIngo Molnar #include <linux/sched/clock.h>
37b2d09103SIngo Molnar #include <linux/rculist.h>
38b2d09103SIngo Molnar 
39cafe5635SKent Overstreet #include <trace/events/bcache.h>
40cafe5635SKent Overstreet 
41cafe5635SKent Overstreet /*
42cafe5635SKent Overstreet  * Todo:
43cafe5635SKent Overstreet  * register_bcache: Return errors out to userspace correctly
44cafe5635SKent Overstreet  *
45cafe5635SKent Overstreet  * Writeback: don't undirty key until after a cache flush
46cafe5635SKent Overstreet  *
47cafe5635SKent Overstreet  * Create an iterator for key pointers
48cafe5635SKent Overstreet  *
49cafe5635SKent Overstreet  * On btree write error, mark bucket such that it won't be freed from the cache
50cafe5635SKent Overstreet  *
51cafe5635SKent Overstreet  * Journalling:
52cafe5635SKent Overstreet  *   Check for bad keys in replay
53cafe5635SKent Overstreet  *   Propagate barriers
54cafe5635SKent Overstreet  *   Refcount journal entries in journal_replay
55cafe5635SKent Overstreet  *
56cafe5635SKent Overstreet  * Garbage collection:
57cafe5635SKent Overstreet  *   Finish incremental gc
58cafe5635SKent Overstreet  *   Gc should free old UUIDs, data for invalid UUIDs
59cafe5635SKent Overstreet  *
60cafe5635SKent Overstreet  * Provide a way to list backing device UUIDs we have data cached for, and
61cafe5635SKent Overstreet  * probably how long it's been since we've seen them, and a way to invalidate
62cafe5635SKent Overstreet  * dirty data for devices that will never be attached again
63cafe5635SKent Overstreet  *
64cafe5635SKent Overstreet  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65cafe5635SKent Overstreet  * that based on that and how much dirty data we have we can keep writeback
66cafe5635SKent Overstreet  * from being starved
67cafe5635SKent Overstreet  *
68cafe5635SKent Overstreet  * Add a tracepoint or somesuch to watch for writeback starvation
69cafe5635SKent Overstreet  *
70cafe5635SKent Overstreet  * When btree depth > 1 and splitting an interior node, we have to make sure
71cafe5635SKent Overstreet  * alloc_bucket() cannot fail. This should be true but is not completely
72cafe5635SKent Overstreet  * obvious.
73cafe5635SKent Overstreet  *
74cafe5635SKent Overstreet  * Plugging?
75cafe5635SKent Overstreet  *
76cafe5635SKent Overstreet  * If data write is less than hard sector size of ssd, round up offset in open
77cafe5635SKent Overstreet  * bucket to the next whole sector
78cafe5635SKent Overstreet  *
79cafe5635SKent Overstreet  * Superblock needs to be fleshed out for multiple cache devices
80cafe5635SKent Overstreet  *
81cafe5635SKent Overstreet  * Add a sysfs tunable for the number of writeback IOs in flight
82cafe5635SKent Overstreet  *
83cafe5635SKent Overstreet  * Add a sysfs tunable for the number of open data buckets
84cafe5635SKent Overstreet  *
85cafe5635SKent Overstreet  * IO tracking: Can we track when one process is doing io on behalf of another?
86cafe5635SKent Overstreet  * IO tracking: Don't use just an average, weigh more recent stuff higher
87cafe5635SKent Overstreet  *
88cafe5635SKent Overstreet  * Test module load/unload
89cafe5635SKent Overstreet  */
90cafe5635SKent Overstreet 
91cafe5635SKent Overstreet #define MAX_NEED_GC		64
92cafe5635SKent Overstreet #define MAX_SAVE_PRIO		72
93*7f4a59deSTang Junhui #define MAX_GC_TIMES		100
945c25c4fcSTang Junhui #define MIN_GC_NODES		100
955c25c4fcSTang Junhui #define GC_SLEEP_MS		100
96cafe5635SKent Overstreet 
97cafe5635SKent Overstreet #define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
98cafe5635SKent Overstreet 
99cafe5635SKent Overstreet #define PTR_HASH(c, k)							\
100cafe5635SKent Overstreet 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
101cafe5635SKent Overstreet 
102df8e8970SKent Overstreet #define insert_lock(s, b)	((b)->level <= (s)->lock)
103df8e8970SKent Overstreet 
104df8e8970SKent Overstreet /*
105df8e8970SKent Overstreet  * These macros are for recursing down the btree - they handle the details of
106df8e8970SKent Overstreet  * locking and looking up nodes in the cache for you. They're best treated as
107df8e8970SKent Overstreet  * mere syntax when reading code that uses them.
108df8e8970SKent Overstreet  *
109df8e8970SKent Overstreet  * op->lock determines whether we take a read or a write lock at a given depth.
110df8e8970SKent Overstreet  * If you've got a read lock and find that you need a write lock (i.e. you're
111df8e8970SKent Overstreet  * going to have to split), set op->lock and return -EINTR; btree_root() will
112df8e8970SKent Overstreet  * call you again and you'll have the correct lock.
113df8e8970SKent Overstreet  */
114df8e8970SKent Overstreet 
115df8e8970SKent Overstreet /**
116df8e8970SKent Overstreet  * btree - recurse down the btree on a specified key
117df8e8970SKent Overstreet  * @fn:		function to call, which will be passed the child node
118df8e8970SKent Overstreet  * @key:	key to recurse on
119df8e8970SKent Overstreet  * @b:		parent btree node
120df8e8970SKent Overstreet  * @op:		pointer to struct btree_op
121df8e8970SKent Overstreet  */
122df8e8970SKent Overstreet #define btree(fn, key, b, op, ...)					\
123df8e8970SKent Overstreet ({									\
124df8e8970SKent Overstreet 	int _r, l = (b)->level - 1;					\
125df8e8970SKent Overstreet 	bool _w = l <= (op)->lock;					\
1262452cc89SSlava Pestov 	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
1272452cc89SSlava Pestov 						  _w, b);		\
128df8e8970SKent Overstreet 	if (!IS_ERR(_child)) {						\
129df8e8970SKent Overstreet 		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
130df8e8970SKent Overstreet 		rw_unlock(_w, _child);					\
131df8e8970SKent Overstreet 	} else								\
132df8e8970SKent Overstreet 		_r = PTR_ERR(_child);					\
133df8e8970SKent Overstreet 	_r;								\
134df8e8970SKent Overstreet })
135df8e8970SKent Overstreet 
136df8e8970SKent Overstreet /**
137df8e8970SKent Overstreet  * btree_root - call a function on the root of the btree
138df8e8970SKent Overstreet  * @fn:		function to call, which will be passed the child node
139df8e8970SKent Overstreet  * @c:		cache set
140df8e8970SKent Overstreet  * @op:		pointer to struct btree_op
141df8e8970SKent Overstreet  */
142df8e8970SKent Overstreet #define btree_root(fn, c, op, ...)					\
143df8e8970SKent Overstreet ({									\
144df8e8970SKent Overstreet 	int _r = -EINTR;						\
145df8e8970SKent Overstreet 	do {								\
146df8e8970SKent Overstreet 		struct btree *_b = (c)->root;				\
147df8e8970SKent Overstreet 		bool _w = insert_lock(op, _b);				\
148df8e8970SKent Overstreet 		rw_lock(_w, _b, _b->level);				\
149df8e8970SKent Overstreet 		if (_b == (c)->root &&					\
150df8e8970SKent Overstreet 		    _w == insert_lock(op, _b)) {			\
151df8e8970SKent Overstreet 			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
152df8e8970SKent Overstreet 		}							\
153df8e8970SKent Overstreet 		rw_unlock(_w, _b);					\
1540a63b66dSKent Overstreet 		bch_cannibalize_unlock(c);				\
15578365411SKent Overstreet 		if (_r == -EINTR)					\
15678365411SKent Overstreet 			schedule();					\
157df8e8970SKent Overstreet 	} while (_r == -EINTR);						\
158df8e8970SKent Overstreet 									\
1590a63b66dSKent Overstreet 	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
160df8e8970SKent Overstreet 	_r;								\
161df8e8970SKent Overstreet })
162df8e8970SKent Overstreet 
163a85e968eSKent Overstreet static inline struct bset *write_block(struct btree *b)
164a85e968eSKent Overstreet {
165a85e968eSKent Overstreet 	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
166a85e968eSKent Overstreet }
167a85e968eSKent Overstreet 
1682a285686SKent Overstreet static void bch_btree_init_next(struct btree *b)
1692a285686SKent Overstreet {
1702a285686SKent Overstreet 	/* If not a leaf node, always sort */
1712a285686SKent Overstreet 	if (b->level && b->keys.nsets)
1722a285686SKent Overstreet 		bch_btree_sort(&b->keys, &b->c->sort);
1732a285686SKent Overstreet 	else
1742a285686SKent Overstreet 		bch_btree_sort_lazy(&b->keys, &b->c->sort);
1752a285686SKent Overstreet 
1762a285686SKent Overstreet 	if (b->written < btree_blocks(b))
1772a285686SKent Overstreet 		bch_bset_init_next(&b->keys, write_block(b),
1782a285686SKent Overstreet 				   bset_magic(&b->c->sb));
1792a285686SKent Overstreet 
1802a285686SKent Overstreet }
1812a285686SKent Overstreet 
182cafe5635SKent Overstreet /* Btree key manipulation */
183cafe5635SKent Overstreet 
1843a3b6a4eSKent Overstreet void bkey_put(struct cache_set *c, struct bkey *k)
185e7c590ebSKent Overstreet {
186e7c590ebSKent Overstreet 	unsigned i;
187e7c590ebSKent Overstreet 
188e7c590ebSKent Overstreet 	for (i = 0; i < KEY_PTRS(k); i++)
189e7c590ebSKent Overstreet 		if (ptr_available(c, k, i))
190e7c590ebSKent Overstreet 			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
191e7c590ebSKent Overstreet }
192e7c590ebSKent Overstreet 
193cafe5635SKent Overstreet /* Btree IO */
194cafe5635SKent Overstreet 
195cafe5635SKent Overstreet static uint64_t btree_csum_set(struct btree *b, struct bset *i)
196cafe5635SKent Overstreet {
197cafe5635SKent Overstreet 	uint64_t crc = b->key.ptr[0];
198fafff81cSKent Overstreet 	void *data = (void *) i + 8, *end = bset_bkey_last(i);
199cafe5635SKent Overstreet 
200169ef1cfSKent Overstreet 	crc = bch_crc64_update(crc, data, end - data);
201c19ed23aSKent Overstreet 	return crc ^ 0xffffffffffffffffULL;
202cafe5635SKent Overstreet }
203cafe5635SKent Overstreet 
20478b77bf8SKent Overstreet void bch_btree_node_read_done(struct btree *b)
205cafe5635SKent Overstreet {
206cafe5635SKent Overstreet 	const char *err = "bad btree header";
207ee811287SKent Overstreet 	struct bset *i = btree_bset_first(b);
20857943511SKent Overstreet 	struct btree_iter *iter;
209cafe5635SKent Overstreet 
210d19936a2SKent Overstreet 	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
21157943511SKent Overstreet 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
212cafe5635SKent Overstreet 	iter->used = 0;
213cafe5635SKent Overstreet 
214280481d0SKent Overstreet #ifdef CONFIG_BCACHE_DEBUG
215c052dd9aSKent Overstreet 	iter->b = &b->keys;
216280481d0SKent Overstreet #endif
217280481d0SKent Overstreet 
21857943511SKent Overstreet 	if (!i->seq)
219cafe5635SKent Overstreet 		goto err;
220cafe5635SKent Overstreet 
221cafe5635SKent Overstreet 	for (;
222a85e968eSKent Overstreet 	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
223cafe5635SKent Overstreet 	     i = write_block(b)) {
224cafe5635SKent Overstreet 		err = "unsupported bset version";
225cafe5635SKent Overstreet 		if (i->version > BCACHE_BSET_VERSION)
226cafe5635SKent Overstreet 			goto err;
227cafe5635SKent Overstreet 
228cafe5635SKent Overstreet 		err = "bad btree header";
229ee811287SKent Overstreet 		if (b->written + set_blocks(i, block_bytes(b->c)) >
230ee811287SKent Overstreet 		    btree_blocks(b))
231cafe5635SKent Overstreet 			goto err;
232cafe5635SKent Overstreet 
233cafe5635SKent Overstreet 		err = "bad magic";
23481ab4190SKent Overstreet 		if (i->magic != bset_magic(&b->c->sb))
235cafe5635SKent Overstreet 			goto err;
236cafe5635SKent Overstreet 
237cafe5635SKent Overstreet 		err = "bad checksum";
238cafe5635SKent Overstreet 		switch (i->version) {
239cafe5635SKent Overstreet 		case 0:
240cafe5635SKent Overstreet 			if (i->csum != csum_set(i))
241cafe5635SKent Overstreet 				goto err;
242cafe5635SKent Overstreet 			break;
243cafe5635SKent Overstreet 		case BCACHE_BSET_VERSION:
244cafe5635SKent Overstreet 			if (i->csum != btree_csum_set(b, i))
245cafe5635SKent Overstreet 				goto err;
246cafe5635SKent Overstreet 			break;
247cafe5635SKent Overstreet 		}
248cafe5635SKent Overstreet 
249cafe5635SKent Overstreet 		err = "empty set";
250a85e968eSKent Overstreet 		if (i != b->keys.set[0].data && !i->keys)
251cafe5635SKent Overstreet 			goto err;
252cafe5635SKent Overstreet 
253fafff81cSKent Overstreet 		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
254cafe5635SKent Overstreet 
255ee811287SKent Overstreet 		b->written += set_blocks(i, block_bytes(b->c));
256cafe5635SKent Overstreet 	}
257cafe5635SKent Overstreet 
258cafe5635SKent Overstreet 	err = "corrupted btree";
259cafe5635SKent Overstreet 	for (i = write_block(b);
260a85e968eSKent Overstreet 	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
261cafe5635SKent Overstreet 	     i = ((void *) i) + block_bytes(b->c))
262a85e968eSKent Overstreet 		if (i->seq == b->keys.set[0].data->seq)
263cafe5635SKent Overstreet 			goto err;
264cafe5635SKent Overstreet 
265a85e968eSKent Overstreet 	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
266cafe5635SKent Overstreet 
267a85e968eSKent Overstreet 	i = b->keys.set[0].data;
268cafe5635SKent Overstreet 	err = "short btree key";
269a85e968eSKent Overstreet 	if (b->keys.set[0].size &&
270a85e968eSKent Overstreet 	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
271cafe5635SKent Overstreet 		goto err;
272cafe5635SKent Overstreet 
273cafe5635SKent Overstreet 	if (b->written < btree_blocks(b))
274a85e968eSKent Overstreet 		bch_bset_init_next(&b->keys, write_block(b),
275a85e968eSKent Overstreet 				   bset_magic(&b->c->sb));
276cafe5635SKent Overstreet out:
277d19936a2SKent Overstreet 	mempool_free(iter, &b->c->fill_iter);
27857943511SKent Overstreet 	return;
279cafe5635SKent Overstreet err:
280cafe5635SKent Overstreet 	set_btree_node_io_error(b);
28188b9f8c4SKent Overstreet 	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
282cafe5635SKent Overstreet 			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
28388b9f8c4SKent Overstreet 			    bset_block_offset(b, i), i->keys);
284cafe5635SKent Overstreet 	goto out;
285cafe5635SKent Overstreet }
286cafe5635SKent Overstreet 
2874246a0b6SChristoph Hellwig static void btree_node_read_endio(struct bio *bio)
288cafe5635SKent Overstreet {
28957943511SKent Overstreet 	struct closure *cl = bio->bi_private;
29057943511SKent Overstreet 	closure_put(cl);
29157943511SKent Overstreet }
292cafe5635SKent Overstreet 
29378b77bf8SKent Overstreet static void bch_btree_node_read(struct btree *b)
29457943511SKent Overstreet {
29557943511SKent Overstreet 	uint64_t start_time = local_clock();
29657943511SKent Overstreet 	struct closure cl;
29757943511SKent Overstreet 	struct bio *bio;
298cafe5635SKent Overstreet 
299c37511b8SKent Overstreet 	trace_bcache_btree_read(b);
300c37511b8SKent Overstreet 
30157943511SKent Overstreet 	closure_init_stack(&cl);
302cafe5635SKent Overstreet 
30357943511SKent Overstreet 	bio = bch_bbio_alloc(b->c);
3044f024f37SKent Overstreet 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
30557943511SKent Overstreet 	bio->bi_end_io	= btree_node_read_endio;
30657943511SKent Overstreet 	bio->bi_private	= &cl;
30770fd7614SChristoph Hellwig 	bio->bi_opf = REQ_OP_READ | REQ_META;
30857943511SKent Overstreet 
309a85e968eSKent Overstreet 	bch_bio_map(bio, b->keys.set[0].data);
31057943511SKent Overstreet 
31157943511SKent Overstreet 	bch_submit_bbio(bio, b->c, &b->key, 0);
31257943511SKent Overstreet 	closure_sync(&cl);
31357943511SKent Overstreet 
3144e4cbee9SChristoph Hellwig 	if (bio->bi_status)
31557943511SKent Overstreet 		set_btree_node_io_error(b);
31657943511SKent Overstreet 
31757943511SKent Overstreet 	bch_bbio_free(bio, b->c);
31857943511SKent Overstreet 
31957943511SKent Overstreet 	if (btree_node_io_error(b))
32057943511SKent Overstreet 		goto err;
32157943511SKent Overstreet 
32257943511SKent Overstreet 	bch_btree_node_read_done(b);
32357943511SKent Overstreet 	bch_time_stats_update(&b->c->btree_read_time, start_time);
32457943511SKent Overstreet 
32557943511SKent Overstreet 	return;
32657943511SKent Overstreet err:
32761cbd250SGeert Uytterhoeven 	bch_cache_set_error(b->c, "io error reading bucket %zu",
32857943511SKent Overstreet 			    PTR_BUCKET_NR(b->c, &b->key, 0));
329cafe5635SKent Overstreet }
330cafe5635SKent Overstreet 
331cafe5635SKent Overstreet static void btree_complete_write(struct btree *b, struct btree_write *w)
332cafe5635SKent Overstreet {
333cafe5635SKent Overstreet 	if (w->prio_blocked &&
334cafe5635SKent Overstreet 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
335119ba0f8SKent Overstreet 		wake_up_allocators(b->c);
336cafe5635SKent Overstreet 
337cafe5635SKent Overstreet 	if (w->journal) {
338cafe5635SKent Overstreet 		atomic_dec_bug(w->journal);
339cafe5635SKent Overstreet 		__closure_wake_up(&b->c->journal.wait);
340cafe5635SKent Overstreet 	}
341cafe5635SKent Overstreet 
342cafe5635SKent Overstreet 	w->prio_blocked	= 0;
343cafe5635SKent Overstreet 	w->journal	= NULL;
344cafe5635SKent Overstreet }
345cafe5635SKent Overstreet 
346cb7a583eSKent Overstreet static void btree_node_write_unlock(struct closure *cl)
347cb7a583eSKent Overstreet {
348cb7a583eSKent Overstreet 	struct btree *b = container_of(cl, struct btree, io);
349cb7a583eSKent Overstreet 
350cb7a583eSKent Overstreet 	up(&b->io_mutex);
351cb7a583eSKent Overstreet }
352cb7a583eSKent Overstreet 
35357943511SKent Overstreet static void __btree_node_write_done(struct closure *cl)
354cafe5635SKent Overstreet {
355cb7a583eSKent Overstreet 	struct btree *b = container_of(cl, struct btree, io);
356cafe5635SKent Overstreet 	struct btree_write *w = btree_prev_write(b);
357cafe5635SKent Overstreet 
358cafe5635SKent Overstreet 	bch_bbio_free(b->bio, b->c);
359cafe5635SKent Overstreet 	b->bio = NULL;
360cafe5635SKent Overstreet 	btree_complete_write(b, w);
361cafe5635SKent Overstreet 
362cafe5635SKent Overstreet 	if (btree_node_dirty(b))
36356b30770SKent Overstreet 		schedule_delayed_work(&b->work, 30 * HZ);
364cafe5635SKent Overstreet 
365cb7a583eSKent Overstreet 	closure_return_with_destructor(cl, btree_node_write_unlock);
366cafe5635SKent Overstreet }
367cafe5635SKent Overstreet 
36857943511SKent Overstreet static void btree_node_write_done(struct closure *cl)
369cafe5635SKent Overstreet {
370cb7a583eSKent Overstreet 	struct btree *b = container_of(cl, struct btree, io);
371cafe5635SKent Overstreet 
372491221f8SGuoqing Jiang 	bio_free_pages(b->bio);
37357943511SKent Overstreet 	__btree_node_write_done(cl);
374cafe5635SKent Overstreet }
375cafe5635SKent Overstreet 
3764246a0b6SChristoph Hellwig static void btree_node_write_endio(struct bio *bio)
37757943511SKent Overstreet {
37857943511SKent Overstreet 	struct closure *cl = bio->bi_private;
379cb7a583eSKent Overstreet 	struct btree *b = container_of(cl, struct btree, io);
38057943511SKent Overstreet 
3814e4cbee9SChristoph Hellwig 	if (bio->bi_status)
38257943511SKent Overstreet 		set_btree_node_io_error(b);
38357943511SKent Overstreet 
3844e4cbee9SChristoph Hellwig 	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
38557943511SKent Overstreet 	closure_put(cl);
38657943511SKent Overstreet }
38757943511SKent Overstreet 
38857943511SKent Overstreet static void do_btree_node_write(struct btree *b)
389cafe5635SKent Overstreet {
390cb7a583eSKent Overstreet 	struct closure *cl = &b->io;
391ee811287SKent Overstreet 	struct bset *i = btree_bset_last(b);
392cafe5635SKent Overstreet 	BKEY_PADDED(key) k;
393cafe5635SKent Overstreet 
394cafe5635SKent Overstreet 	i->version	= BCACHE_BSET_VERSION;
395cafe5635SKent Overstreet 	i->csum		= btree_csum_set(b, i);
396cafe5635SKent Overstreet 
39757943511SKent Overstreet 	BUG_ON(b->bio);
39857943511SKent Overstreet 	b->bio = bch_bbio_alloc(b->c);
39957943511SKent Overstreet 
40057943511SKent Overstreet 	b->bio->bi_end_io	= btree_node_write_endio;
401faadf0c9SKent Overstreet 	b->bio->bi_private	= cl;
402ee811287SKent Overstreet 	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
40370fd7614SChristoph Hellwig 	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
404169ef1cfSKent Overstreet 	bch_bio_map(b->bio, i);
405cafe5635SKent Overstreet 
406e49c7c37SKent Overstreet 	/*
407e49c7c37SKent Overstreet 	 * If we're appending to a leaf node, we don't technically need FUA -
408e49c7c37SKent Overstreet 	 * this write just needs to be persisted before the next journal write,
409e49c7c37SKent Overstreet 	 * which will be marked FLUSH|FUA.
410e49c7c37SKent Overstreet 	 *
411e49c7c37SKent Overstreet 	 * Similarly if we're writing a new btree root - the pointer is going to
412e49c7c37SKent Overstreet 	 * be in the next journal entry.
413e49c7c37SKent Overstreet 	 *
414e49c7c37SKent Overstreet 	 * But if we're writing a new btree node (that isn't a root) or
415e49c7c37SKent Overstreet 	 * appending to a non leaf btree node, we need either FUA or a flush
416e49c7c37SKent Overstreet 	 * when we write the parent with the new pointer. FUA is cheaper than a
417e49c7c37SKent Overstreet 	 * flush, and writes appending to leaf nodes aren't blocking anything so
418e49c7c37SKent Overstreet 	 * just make all btree node writes FUA to keep things sane.
419e49c7c37SKent Overstreet 	 */
420e49c7c37SKent Overstreet 
421cafe5635SKent Overstreet 	bkey_copy(&k.key, &b->key);
422ee811287SKent Overstreet 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
423a85e968eSKent Overstreet 		       bset_sector_offset(&b->keys, i));
424cafe5635SKent Overstreet 
42525d8be77SMing Lei 	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
426cafe5635SKent Overstreet 		int j;
427cafe5635SKent Overstreet 		struct bio_vec *bv;
428cafe5635SKent Overstreet 		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
429cafe5635SKent Overstreet 
4307988613bSKent Overstreet 		bio_for_each_segment_all(bv, b->bio, j)
431cafe5635SKent Overstreet 			memcpy(page_address(bv->bv_page),
432cafe5635SKent Overstreet 			       base + j * PAGE_SIZE, PAGE_SIZE);
433cafe5635SKent Overstreet 
434cafe5635SKent Overstreet 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
435cafe5635SKent Overstreet 
43657943511SKent Overstreet 		continue_at(cl, btree_node_write_done, NULL);
437cafe5635SKent Overstreet 	} else {
438c2421edfSMing Lei 		/* No problem for multipage bvec since the bio is just allocated */
439cafe5635SKent Overstreet 		b->bio->bi_vcnt = 0;
440169ef1cfSKent Overstreet 		bch_bio_map(b->bio, i);
441cafe5635SKent Overstreet 
442cafe5635SKent Overstreet 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
443cafe5635SKent Overstreet 
444cafe5635SKent Overstreet 		closure_sync(cl);
445cb7a583eSKent Overstreet 		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
446cafe5635SKent Overstreet 	}
447cafe5635SKent Overstreet }
448cafe5635SKent Overstreet 
4492a285686SKent Overstreet void __bch_btree_node_write(struct btree *b, struct closure *parent)
450cafe5635SKent Overstreet {
451ee811287SKent Overstreet 	struct bset *i = btree_bset_last(b);
452cafe5635SKent Overstreet 
4532a285686SKent Overstreet 	lockdep_assert_held(&b->write_lock);
4542a285686SKent Overstreet 
455c37511b8SKent Overstreet 	trace_bcache_btree_write(b);
456c37511b8SKent Overstreet 
457cafe5635SKent Overstreet 	BUG_ON(current->bio_list);
45857943511SKent Overstreet 	BUG_ON(b->written >= btree_blocks(b));
45957943511SKent Overstreet 	BUG_ON(b->written && !i->keys);
460ee811287SKent Overstreet 	BUG_ON(btree_bset_first(b)->seq != i->seq);
461dc9d98d6SKent Overstreet 	bch_check_keys(&b->keys, "writing");
462cafe5635SKent Overstreet 
463cafe5635SKent Overstreet 	cancel_delayed_work(&b->work);
464cafe5635SKent Overstreet 
46557943511SKent Overstreet 	/* If caller isn't waiting for write, parent refcount is cache set */
466cb7a583eSKent Overstreet 	down(&b->io_mutex);
467cb7a583eSKent Overstreet 	closure_init(&b->io, parent ?: &b->c->cl);
46857943511SKent Overstreet 
469cafe5635SKent Overstreet 	clear_bit(BTREE_NODE_dirty,	 &b->flags);
470cafe5635SKent Overstreet 	change_bit(BTREE_NODE_write_idx, &b->flags);
471cafe5635SKent Overstreet 
47257943511SKent Overstreet 	do_btree_node_write(b);
473cafe5635SKent Overstreet 
474ee811287SKent Overstreet 	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
475cafe5635SKent Overstreet 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
476cafe5635SKent Overstreet 
477a85e968eSKent Overstreet 	b->written += set_blocks(i, block_bytes(b->c));
4782a285686SKent Overstreet }
479a85e968eSKent Overstreet 
4802a285686SKent Overstreet void bch_btree_node_write(struct btree *b, struct closure *parent)
4812a285686SKent Overstreet {
4822a285686SKent Overstreet 	unsigned nsets = b->keys.nsets;
4832a285686SKent Overstreet 
4842a285686SKent Overstreet 	lockdep_assert_held(&b->lock);
4852a285686SKent Overstreet 
4862a285686SKent Overstreet 	__bch_btree_node_write(b, parent);
487cafe5635SKent Overstreet 
48878b77bf8SKent Overstreet 	/*
48978b77bf8SKent Overstreet 	 * do verify if there was more than one set initially (i.e. we did a
49078b77bf8SKent Overstreet 	 * sort) and we sorted down to a single set:
49178b77bf8SKent Overstreet 	 */
4922a285686SKent Overstreet 	if (nsets && !b->keys.nsets)
49378b77bf8SKent Overstreet 		bch_btree_verify(b);
49478b77bf8SKent Overstreet 
4952a285686SKent Overstreet 	bch_btree_init_next(b);
496cafe5635SKent Overstreet }
497cafe5635SKent Overstreet 
498f269af5aSKent Overstreet static void bch_btree_node_write_sync(struct btree *b)
499f269af5aSKent Overstreet {
500f269af5aSKent Overstreet 	struct closure cl;
501f269af5aSKent Overstreet 
502f269af5aSKent Overstreet 	closure_init_stack(&cl);
5032a285686SKent Overstreet 
5042a285686SKent Overstreet 	mutex_lock(&b->write_lock);
505f269af5aSKent Overstreet 	bch_btree_node_write(b, &cl);
5062a285686SKent Overstreet 	mutex_unlock(&b->write_lock);
5072a285686SKent Overstreet 
508f269af5aSKent Overstreet 	closure_sync(&cl);
509f269af5aSKent Overstreet }
510f269af5aSKent Overstreet 
51157943511SKent Overstreet static void btree_node_write_work(struct work_struct *w)
512cafe5635SKent Overstreet {
513cafe5635SKent Overstreet 	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
514cafe5635SKent Overstreet 
5152a285686SKent Overstreet 	mutex_lock(&b->write_lock);
516cafe5635SKent Overstreet 	if (btree_node_dirty(b))
5172a285686SKent Overstreet 		__bch_btree_node_write(b, NULL);
5182a285686SKent Overstreet 	mutex_unlock(&b->write_lock);
519cafe5635SKent Overstreet }
520cafe5635SKent Overstreet 
521c18536a7SKent Overstreet static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
522cafe5635SKent Overstreet {
523ee811287SKent Overstreet 	struct bset *i = btree_bset_last(b);
524cafe5635SKent Overstreet 	struct btree_write *w = btree_current_write(b);
525cafe5635SKent Overstreet 
5262a285686SKent Overstreet 	lockdep_assert_held(&b->write_lock);
5272a285686SKent Overstreet 
52857943511SKent Overstreet 	BUG_ON(!b->written);
52957943511SKent Overstreet 	BUG_ON(!i->keys);
530cafe5635SKent Overstreet 
53157943511SKent Overstreet 	if (!btree_node_dirty(b))
53256b30770SKent Overstreet 		schedule_delayed_work(&b->work, 30 * HZ);
53357943511SKent Overstreet 
534cafe5635SKent Overstreet 	set_btree_node_dirty(b);
535cafe5635SKent Overstreet 
536c18536a7SKent Overstreet 	if (journal_ref) {
537cafe5635SKent Overstreet 		if (w->journal &&
538c18536a7SKent Overstreet 		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
539cafe5635SKent Overstreet 			atomic_dec_bug(w->journal);
540cafe5635SKent Overstreet 			w->journal = NULL;
541cafe5635SKent Overstreet 		}
542cafe5635SKent Overstreet 
543cafe5635SKent Overstreet 		if (!w->journal) {
544c18536a7SKent Overstreet 			w->journal = journal_ref;
545cafe5635SKent Overstreet 			atomic_inc(w->journal);
546cafe5635SKent Overstreet 		}
547cafe5635SKent Overstreet 	}
548cafe5635SKent Overstreet 
549cafe5635SKent Overstreet 	/* Force write if set is too big */
55057943511SKent Overstreet 	if (set_bytes(i) > PAGE_SIZE - 48 &&
55157943511SKent Overstreet 	    !current->bio_list)
55257943511SKent Overstreet 		bch_btree_node_write(b, NULL);
553cafe5635SKent Overstreet }
554cafe5635SKent Overstreet 
555cafe5635SKent Overstreet /*
556cafe5635SKent Overstreet  * Btree in memory cache - allocation/freeing
557cafe5635SKent Overstreet  * mca -> memory cache
558cafe5635SKent Overstreet  */
559cafe5635SKent Overstreet 
560cafe5635SKent Overstreet #define mca_reserve(c)	(((c->root && c->root->level)		\
561cafe5635SKent Overstreet 			  ? c->root->level : 1) * 8 + 16)
562cafe5635SKent Overstreet #define mca_can_free(c)						\
5630a63b66dSKent Overstreet 	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
564cafe5635SKent Overstreet 
565cafe5635SKent Overstreet static void mca_data_free(struct btree *b)
566cafe5635SKent Overstreet {
567cb7a583eSKent Overstreet 	BUG_ON(b->io_mutex.count != 1);
568cafe5635SKent Overstreet 
569a85e968eSKent Overstreet 	bch_btree_keys_free(&b->keys);
570cafe5635SKent Overstreet 
5710a63b66dSKent Overstreet 	b->c->btree_cache_used--;
572ee811287SKent Overstreet 	list_move(&b->list, &b->c->btree_cache_freed);
573cafe5635SKent Overstreet }
574cafe5635SKent Overstreet 
575cafe5635SKent Overstreet static void mca_bucket_free(struct btree *b)
576cafe5635SKent Overstreet {
577cafe5635SKent Overstreet 	BUG_ON(btree_node_dirty(b));
578cafe5635SKent Overstreet 
579cafe5635SKent Overstreet 	b->key.ptr[0] = 0;
580cafe5635SKent Overstreet 	hlist_del_init_rcu(&b->hash);
581cafe5635SKent Overstreet 	list_move(&b->list, &b->c->btree_cache_freeable);
582cafe5635SKent Overstreet }
583cafe5635SKent Overstreet 
584cafe5635SKent Overstreet static unsigned btree_order(struct bkey *k)
585cafe5635SKent Overstreet {
586cafe5635SKent Overstreet 	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
587cafe5635SKent Overstreet }
588cafe5635SKent Overstreet 
589cafe5635SKent Overstreet static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
590cafe5635SKent Overstreet {
591a85e968eSKent Overstreet 	if (!bch_btree_keys_alloc(&b->keys,
592ee811287SKent Overstreet 				  max_t(unsigned,
593cafe5635SKent Overstreet 					ilog2(b->c->btree_pages),
594ee811287SKent Overstreet 					btree_order(k)),
595ee811287SKent Overstreet 				  gfp)) {
5960a63b66dSKent Overstreet 		b->c->btree_cache_used++;
597ee811287SKent Overstreet 		list_move(&b->list, &b->c->btree_cache);
598ee811287SKent Overstreet 	} else {
599ee811287SKent Overstreet 		list_move(&b->list, &b->c->btree_cache_freed);
600ee811287SKent Overstreet 	}
601cafe5635SKent Overstreet }
602cafe5635SKent Overstreet 
603cafe5635SKent Overstreet static struct btree *mca_bucket_alloc(struct cache_set *c,
604cafe5635SKent Overstreet 				      struct bkey *k, gfp_t gfp)
605cafe5635SKent Overstreet {
606cafe5635SKent Overstreet 	struct btree *b = kzalloc(sizeof(struct btree), gfp);
607cafe5635SKent Overstreet 	if (!b)
608cafe5635SKent Overstreet 		return NULL;
609cafe5635SKent Overstreet 
610cafe5635SKent Overstreet 	init_rwsem(&b->lock);
611cafe5635SKent Overstreet 	lockdep_set_novalidate_class(&b->lock);
6122a285686SKent Overstreet 	mutex_init(&b->write_lock);
6132a285686SKent Overstreet 	lockdep_set_novalidate_class(&b->write_lock);
614cafe5635SKent Overstreet 	INIT_LIST_HEAD(&b->list);
61557943511SKent Overstreet 	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
616cafe5635SKent Overstreet 	b->c = c;
617cb7a583eSKent Overstreet 	sema_init(&b->io_mutex, 1);
618cafe5635SKent Overstreet 
619cafe5635SKent Overstreet 	mca_data_alloc(b, k, gfp);
620cafe5635SKent Overstreet 	return b;
621cafe5635SKent Overstreet }
622cafe5635SKent Overstreet 
623e8e1d468SKent Overstreet static int mca_reap(struct btree *b, unsigned min_order, bool flush)
624cafe5635SKent Overstreet {
625e8e1d468SKent Overstreet 	struct closure cl;
626e8e1d468SKent Overstreet 
627e8e1d468SKent Overstreet 	closure_init_stack(&cl);
628cafe5635SKent Overstreet 	lockdep_assert_held(&b->c->bucket_lock);
629cafe5635SKent Overstreet 
630cafe5635SKent Overstreet 	if (!down_write_trylock(&b->lock))
631cafe5635SKent Overstreet 		return -ENOMEM;
632cafe5635SKent Overstreet 
633a85e968eSKent Overstreet 	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
634e8e1d468SKent Overstreet 
635a85e968eSKent Overstreet 	if (b->keys.page_order < min_order)
636cb7a583eSKent Overstreet 		goto out_unlock;
637cb7a583eSKent Overstreet 
638cb7a583eSKent Overstreet 	if (!flush) {
639cb7a583eSKent Overstreet 		if (btree_node_dirty(b))
640cb7a583eSKent Overstreet 			goto out_unlock;
641cb7a583eSKent Overstreet 
642cb7a583eSKent Overstreet 		if (down_trylock(&b->io_mutex))
643cb7a583eSKent Overstreet 			goto out_unlock;
644cb7a583eSKent Overstreet 		up(&b->io_mutex);
645cafe5635SKent Overstreet 	}
646cafe5635SKent Overstreet 
6472a285686SKent Overstreet 	mutex_lock(&b->write_lock);
648f269af5aSKent Overstreet 	if (btree_node_dirty(b))
6492a285686SKent Overstreet 		__bch_btree_node_write(b, &cl);
6502a285686SKent Overstreet 	mutex_unlock(&b->write_lock);
6512a285686SKent Overstreet 
6522a285686SKent Overstreet 	closure_sync(&cl);
653cafe5635SKent Overstreet 
654e8e1d468SKent Overstreet 	/* wait for any in flight btree write */
655cb7a583eSKent Overstreet 	down(&b->io_mutex);
656cb7a583eSKent Overstreet 	up(&b->io_mutex);
657e8e1d468SKent Overstreet 
658cafe5635SKent Overstreet 	return 0;
659cb7a583eSKent Overstreet out_unlock:
660cb7a583eSKent Overstreet 	rw_unlock(true, b);
661cb7a583eSKent Overstreet 	return -ENOMEM;
662cafe5635SKent Overstreet }
663cafe5635SKent Overstreet 
6647dc19d5aSDave Chinner static unsigned long bch_mca_scan(struct shrinker *shrink,
6657dc19d5aSDave Chinner 				  struct shrink_control *sc)
666cafe5635SKent Overstreet {
667cafe5635SKent Overstreet 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
668cafe5635SKent Overstreet 	struct btree *b, *t;
669cafe5635SKent Overstreet 	unsigned long i, nr = sc->nr_to_scan;
6707dc19d5aSDave Chinner 	unsigned long freed = 0;
671ca71df31STang Junhui 	unsigned int btree_cache_used;
672cafe5635SKent Overstreet 
673cafe5635SKent Overstreet 	if (c->shrinker_disabled)
6747dc19d5aSDave Chinner 		return SHRINK_STOP;
675cafe5635SKent Overstreet 
6760a63b66dSKent Overstreet 	if (c->btree_cache_alloc_lock)
6777dc19d5aSDave Chinner 		return SHRINK_STOP;
678cafe5635SKent Overstreet 
679cafe5635SKent Overstreet 	/* Return -1 if we can't do anything right now */
680a698e08cSKent Overstreet 	if (sc->gfp_mask & __GFP_IO)
681cafe5635SKent Overstreet 		mutex_lock(&c->bucket_lock);
682cafe5635SKent Overstreet 	else if (!mutex_trylock(&c->bucket_lock))
683cafe5635SKent Overstreet 		return -1;
684cafe5635SKent Overstreet 
68536c9ea98SKent Overstreet 	/*
68636c9ea98SKent Overstreet 	 * It's _really_ critical that we don't free too many btree nodes - we
68736c9ea98SKent Overstreet 	 * have to always leave ourselves a reserve. The reserve is how we
68836c9ea98SKent Overstreet 	 * guarantee that allocating memory for a new btree node can always
68936c9ea98SKent Overstreet 	 * succeed, so that inserting keys into the btree can always succeed and
69036c9ea98SKent Overstreet 	 * IO can always make forward progress:
69136c9ea98SKent Overstreet 	 */
692cafe5635SKent Overstreet 	nr /= c->btree_pages;
693cafe5635SKent Overstreet 	nr = min_t(unsigned long, nr, mca_can_free(c));
694cafe5635SKent Overstreet 
695cafe5635SKent Overstreet 	i = 0;
696ca71df31STang Junhui 	btree_cache_used = c->btree_cache_used;
697cafe5635SKent Overstreet 	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
698ca71df31STang Junhui 		if (nr <= 0)
699ca71df31STang Junhui 			goto out;
700cafe5635SKent Overstreet 
701cafe5635SKent Overstreet 		if (++i > 3 &&
702e8e1d468SKent Overstreet 		    !mca_reap(b, 0, false)) {
703cafe5635SKent Overstreet 			mca_data_free(b);
704cafe5635SKent Overstreet 			rw_unlock(true, b);
7057dc19d5aSDave Chinner 			freed++;
706cafe5635SKent Overstreet 		}
707ca71df31STang Junhui 		nr--;
708cafe5635SKent Overstreet 	}
709cafe5635SKent Overstreet 
710ca71df31STang Junhui 	for (;  (nr--) && i < btree_cache_used; i++) {
711cafe5635SKent Overstreet 		if (list_empty(&c->btree_cache))
712cafe5635SKent Overstreet 			goto out;
713cafe5635SKent Overstreet 
714cafe5635SKent Overstreet 		b = list_first_entry(&c->btree_cache, struct btree, list);
715cafe5635SKent Overstreet 		list_rotate_left(&c->btree_cache);
716cafe5635SKent Overstreet 
717cafe5635SKent Overstreet 		if (!b->accessed &&
718e8e1d468SKent Overstreet 		    !mca_reap(b, 0, false)) {
719cafe5635SKent Overstreet 			mca_bucket_free(b);
720cafe5635SKent Overstreet 			mca_data_free(b);
721cafe5635SKent Overstreet 			rw_unlock(true, b);
7227dc19d5aSDave Chinner 			freed++;
723cafe5635SKent Overstreet 		} else
724cafe5635SKent Overstreet 			b->accessed = 0;
725cafe5635SKent Overstreet 	}
726cafe5635SKent Overstreet out:
727cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
728f3641c3aSTang Junhui 	return freed * c->btree_pages;
7297dc19d5aSDave Chinner }
7307dc19d5aSDave Chinner 
7317dc19d5aSDave Chinner static unsigned long bch_mca_count(struct shrinker *shrink,
7327dc19d5aSDave Chinner 				   struct shrink_control *sc)
7337dc19d5aSDave Chinner {
7347dc19d5aSDave Chinner 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
7357dc19d5aSDave Chinner 
7367dc19d5aSDave Chinner 	if (c->shrinker_disabled)
7377dc19d5aSDave Chinner 		return 0;
7387dc19d5aSDave Chinner 
7390a63b66dSKent Overstreet 	if (c->btree_cache_alloc_lock)
7407dc19d5aSDave Chinner 		return 0;
7417dc19d5aSDave Chinner 
7427dc19d5aSDave Chinner 	return mca_can_free(c) * c->btree_pages;
743cafe5635SKent Overstreet }
744cafe5635SKent Overstreet 
745cafe5635SKent Overstreet void bch_btree_cache_free(struct cache_set *c)
746cafe5635SKent Overstreet {
747cafe5635SKent Overstreet 	struct btree *b;
748cafe5635SKent Overstreet 	struct closure cl;
749cafe5635SKent Overstreet 	closure_init_stack(&cl);
750cafe5635SKent Overstreet 
751cafe5635SKent Overstreet 	if (c->shrink.list.next)
752cafe5635SKent Overstreet 		unregister_shrinker(&c->shrink);
753cafe5635SKent Overstreet 
754cafe5635SKent Overstreet 	mutex_lock(&c->bucket_lock);
755cafe5635SKent Overstreet 
756cafe5635SKent Overstreet #ifdef CONFIG_BCACHE_DEBUG
757cafe5635SKent Overstreet 	if (c->verify_data)
758cafe5635SKent Overstreet 		list_move(&c->verify_data->list, &c->btree_cache);
75978b77bf8SKent Overstreet 
76078b77bf8SKent Overstreet 	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
761cafe5635SKent Overstreet #endif
762cafe5635SKent Overstreet 
763cafe5635SKent Overstreet 	list_splice(&c->btree_cache_freeable,
764cafe5635SKent Overstreet 		    &c->btree_cache);
765cafe5635SKent Overstreet 
766cafe5635SKent Overstreet 	while (!list_empty(&c->btree_cache)) {
767cafe5635SKent Overstreet 		b = list_first_entry(&c->btree_cache, struct btree, list);
768cafe5635SKent Overstreet 
769cafe5635SKent Overstreet 		if (btree_node_dirty(b))
770cafe5635SKent Overstreet 			btree_complete_write(b, btree_current_write(b));
771cafe5635SKent Overstreet 		clear_bit(BTREE_NODE_dirty, &b->flags);
772cafe5635SKent Overstreet 
773cafe5635SKent Overstreet 		mca_data_free(b);
774cafe5635SKent Overstreet 	}
775cafe5635SKent Overstreet 
776cafe5635SKent Overstreet 	while (!list_empty(&c->btree_cache_freed)) {
777cafe5635SKent Overstreet 		b = list_first_entry(&c->btree_cache_freed,
778cafe5635SKent Overstreet 				     struct btree, list);
779cafe5635SKent Overstreet 		list_del(&b->list);
780cafe5635SKent Overstreet 		cancel_delayed_work_sync(&b->work);
781cafe5635SKent Overstreet 		kfree(b);
782cafe5635SKent Overstreet 	}
783cafe5635SKent Overstreet 
784cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
785cafe5635SKent Overstreet }
786cafe5635SKent Overstreet 
787cafe5635SKent Overstreet int bch_btree_cache_alloc(struct cache_set *c)
788cafe5635SKent Overstreet {
789cafe5635SKent Overstreet 	unsigned i;
790cafe5635SKent Overstreet 
791cafe5635SKent Overstreet 	for (i = 0; i < mca_reserve(c); i++)
79272a44517SKent Overstreet 		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
79372a44517SKent Overstreet 			return -ENOMEM;
794cafe5635SKent Overstreet 
795cafe5635SKent Overstreet 	list_splice_init(&c->btree_cache,
796cafe5635SKent Overstreet 			 &c->btree_cache_freeable);
797cafe5635SKent Overstreet 
798cafe5635SKent Overstreet #ifdef CONFIG_BCACHE_DEBUG
799cafe5635SKent Overstreet 	mutex_init(&c->verify_lock);
800cafe5635SKent Overstreet 
80178b77bf8SKent Overstreet 	c->verify_ondisk = (void *)
80278b77bf8SKent Overstreet 		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
80378b77bf8SKent Overstreet 
804cafe5635SKent Overstreet 	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
805cafe5635SKent Overstreet 
806cafe5635SKent Overstreet 	if (c->verify_data &&
807a85e968eSKent Overstreet 	    c->verify_data->keys.set->data)
808cafe5635SKent Overstreet 		list_del_init(&c->verify_data->list);
809cafe5635SKent Overstreet 	else
810cafe5635SKent Overstreet 		c->verify_data = NULL;
811cafe5635SKent Overstreet #endif
812cafe5635SKent Overstreet 
8137dc19d5aSDave Chinner 	c->shrink.count_objects = bch_mca_count;
8147dc19d5aSDave Chinner 	c->shrink.scan_objects = bch_mca_scan;
815cafe5635SKent Overstreet 	c->shrink.seeks = 4;
816cafe5635SKent Overstreet 	c->shrink.batch = c->btree_pages * 2;
8176c4ca1e3SMichael Lyle 
8186c4ca1e3SMichael Lyle 	if (register_shrinker(&c->shrink))
8196c4ca1e3SMichael Lyle 		pr_warn("bcache: %s: could not register shrinker",
8206c4ca1e3SMichael Lyle 				__func__);
821cafe5635SKent Overstreet 
822cafe5635SKent Overstreet 	return 0;
823cafe5635SKent Overstreet }
824cafe5635SKent Overstreet 
825cafe5635SKent Overstreet /* Btree in memory cache - hash table */
826cafe5635SKent Overstreet 
827cafe5635SKent Overstreet static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
828cafe5635SKent Overstreet {
829cafe5635SKent Overstreet 	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
830cafe5635SKent Overstreet }
831cafe5635SKent Overstreet 
832cafe5635SKent Overstreet static struct btree *mca_find(struct cache_set *c, struct bkey *k)
833cafe5635SKent Overstreet {
834cafe5635SKent Overstreet 	struct btree *b;
835cafe5635SKent Overstreet 
836cafe5635SKent Overstreet 	rcu_read_lock();
837cafe5635SKent Overstreet 	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
838cafe5635SKent Overstreet 		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
839cafe5635SKent Overstreet 			goto out;
840cafe5635SKent Overstreet 	b = NULL;
841cafe5635SKent Overstreet out:
842cafe5635SKent Overstreet 	rcu_read_unlock();
843cafe5635SKent Overstreet 	return b;
844cafe5635SKent Overstreet }
845cafe5635SKent Overstreet 
8460a63b66dSKent Overstreet static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
8470a63b66dSKent Overstreet {
8480a63b66dSKent Overstreet 	struct task_struct *old;
8490a63b66dSKent Overstreet 
8500a63b66dSKent Overstreet 	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
8510a63b66dSKent Overstreet 	if (old && old != current) {
8520a63b66dSKent Overstreet 		if (op)
8530a63b66dSKent Overstreet 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
8540a63b66dSKent Overstreet 					TASK_UNINTERRUPTIBLE);
8550a63b66dSKent Overstreet 		return -EINTR;
8560a63b66dSKent Overstreet 	}
8570a63b66dSKent Overstreet 
8580a63b66dSKent Overstreet 	return 0;
8590a63b66dSKent Overstreet }
8600a63b66dSKent Overstreet 
8610a63b66dSKent Overstreet static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
8620a63b66dSKent Overstreet 				     struct bkey *k)
863cafe5635SKent Overstreet {
864e8e1d468SKent Overstreet 	struct btree *b;
865cafe5635SKent Overstreet 
866c37511b8SKent Overstreet 	trace_bcache_btree_cache_cannibalize(c);
867c37511b8SKent Overstreet 
8680a63b66dSKent Overstreet 	if (mca_cannibalize_lock(c, op))
8690a63b66dSKent Overstreet 		return ERR_PTR(-EINTR);
870cafe5635SKent Overstreet 
871e8e1d468SKent Overstreet 	list_for_each_entry_reverse(b, &c->btree_cache, list)
872e8e1d468SKent Overstreet 		if (!mca_reap(b, btree_order(k), false))
873e8e1d468SKent Overstreet 			return b;
874cafe5635SKent Overstreet 
875e8e1d468SKent Overstreet 	list_for_each_entry_reverse(b, &c->btree_cache, list)
876e8e1d468SKent Overstreet 		if (!mca_reap(b, btree_order(k), true))
877e8e1d468SKent Overstreet 			return b;
878e8e1d468SKent Overstreet 
8790a63b66dSKent Overstreet 	WARN(1, "btree cache cannibalize failed\n");
880e8e1d468SKent Overstreet 	return ERR_PTR(-ENOMEM);
881cafe5635SKent Overstreet }
882cafe5635SKent Overstreet 
883cafe5635SKent Overstreet /*
884cafe5635SKent Overstreet  * We can only have one thread cannibalizing other cached btree nodes at a time,
885cafe5635SKent Overstreet  * or we'll deadlock. We use an open coded mutex to ensure that, which a
886cafe5635SKent Overstreet  * cannibalize_bucket() will take. This means every time we unlock the root of
887cafe5635SKent Overstreet  * the btree, we need to release this lock if we have it held.
888cafe5635SKent Overstreet  */
889df8e8970SKent Overstreet static void bch_cannibalize_unlock(struct cache_set *c)
890cafe5635SKent Overstreet {
8910a63b66dSKent Overstreet 	if (c->btree_cache_alloc_lock == current) {
8920a63b66dSKent Overstreet 		c->btree_cache_alloc_lock = NULL;
8930a63b66dSKent Overstreet 		wake_up(&c->btree_cache_wait);
894cafe5635SKent Overstreet 	}
895cafe5635SKent Overstreet }
896cafe5635SKent Overstreet 
8970a63b66dSKent Overstreet static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
8980a63b66dSKent Overstreet 			       struct bkey *k, int level)
899cafe5635SKent Overstreet {
900cafe5635SKent Overstreet 	struct btree *b;
901cafe5635SKent Overstreet 
902e8e1d468SKent Overstreet 	BUG_ON(current->bio_list);
903e8e1d468SKent Overstreet 
904cafe5635SKent Overstreet 	lockdep_assert_held(&c->bucket_lock);
905cafe5635SKent Overstreet 
906cafe5635SKent Overstreet 	if (mca_find(c, k))
907cafe5635SKent Overstreet 		return NULL;
908cafe5635SKent Overstreet 
909cafe5635SKent Overstreet 	/* btree_free() doesn't free memory; it sticks the node on the end of
910cafe5635SKent Overstreet 	 * the list. Check if there's any freed nodes there:
911cafe5635SKent Overstreet 	 */
912cafe5635SKent Overstreet 	list_for_each_entry(b, &c->btree_cache_freeable, list)
913e8e1d468SKent Overstreet 		if (!mca_reap(b, btree_order(k), false))
914cafe5635SKent Overstreet 			goto out;
915cafe5635SKent Overstreet 
916cafe5635SKent Overstreet 	/* We never free struct btree itself, just the memory that holds the on
917cafe5635SKent Overstreet 	 * disk node. Check the freed list before allocating a new one:
918cafe5635SKent Overstreet 	 */
919cafe5635SKent Overstreet 	list_for_each_entry(b, &c->btree_cache_freed, list)
920e8e1d468SKent Overstreet 		if (!mca_reap(b, 0, false)) {
921cafe5635SKent Overstreet 			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
922a85e968eSKent Overstreet 			if (!b->keys.set[0].data)
923cafe5635SKent Overstreet 				goto err;
924cafe5635SKent Overstreet 			else
925cafe5635SKent Overstreet 				goto out;
926cafe5635SKent Overstreet 		}
927cafe5635SKent Overstreet 
928cafe5635SKent Overstreet 	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
929cafe5635SKent Overstreet 	if (!b)
930cafe5635SKent Overstreet 		goto err;
931cafe5635SKent Overstreet 
932cafe5635SKent Overstreet 	BUG_ON(!down_write_trylock(&b->lock));
933a85e968eSKent Overstreet 	if (!b->keys.set->data)
934cafe5635SKent Overstreet 		goto err;
935cafe5635SKent Overstreet out:
936cb7a583eSKent Overstreet 	BUG_ON(b->io_mutex.count != 1);
937cafe5635SKent Overstreet 
938cafe5635SKent Overstreet 	bkey_copy(&b->key, k);
939cafe5635SKent Overstreet 	list_move(&b->list, &c->btree_cache);
940cafe5635SKent Overstreet 	hlist_del_init_rcu(&b->hash);
941cafe5635SKent Overstreet 	hlist_add_head_rcu(&b->hash, mca_hash(c, k));
942cafe5635SKent Overstreet 
943cafe5635SKent Overstreet 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
944d6fd3b11SKent Overstreet 	b->parent	= (void *) ~0UL;
945a85e968eSKent Overstreet 	b->flags	= 0;
946a85e968eSKent Overstreet 	b->written	= 0;
947a85e968eSKent Overstreet 	b->level	= level;
948cafe5635SKent Overstreet 
94965d45231SKent Overstreet 	if (!b->level)
950a85e968eSKent Overstreet 		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
951a85e968eSKent Overstreet 				    &b->c->expensive_debug_checks);
95265d45231SKent Overstreet 	else
953a85e968eSKent Overstreet 		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
954a85e968eSKent Overstreet 				    &b->c->expensive_debug_checks);
955cafe5635SKent Overstreet 
956cafe5635SKent Overstreet 	return b;
957cafe5635SKent Overstreet err:
958cafe5635SKent Overstreet 	if (b)
959cafe5635SKent Overstreet 		rw_unlock(true, b);
960cafe5635SKent Overstreet 
9610a63b66dSKent Overstreet 	b = mca_cannibalize(c, op, k);
962cafe5635SKent Overstreet 	if (!IS_ERR(b))
963cafe5635SKent Overstreet 		goto out;
964cafe5635SKent Overstreet 
965cafe5635SKent Overstreet 	return b;
966cafe5635SKent Overstreet }
967cafe5635SKent Overstreet 
96847344e33SBart Van Assche /*
969cafe5635SKent Overstreet  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
970cafe5635SKent Overstreet  * in from disk if necessary.
971cafe5635SKent Overstreet  *
972b54d6934SKent Overstreet  * If IO is necessary and running under generic_make_request, returns -EAGAIN.
973cafe5635SKent Overstreet  *
974cafe5635SKent Overstreet  * The btree node will have either a read or a write lock held, depending on
975cafe5635SKent Overstreet  * level and op->lock.
976cafe5635SKent Overstreet  */
9770a63b66dSKent Overstreet struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
9782452cc89SSlava Pestov 				 struct bkey *k, int level, bool write,
9792452cc89SSlava Pestov 				 struct btree *parent)
980cafe5635SKent Overstreet {
981cafe5635SKent Overstreet 	int i = 0;
982cafe5635SKent Overstreet 	struct btree *b;
983cafe5635SKent Overstreet 
984cafe5635SKent Overstreet 	BUG_ON(level < 0);
985cafe5635SKent Overstreet retry:
986cafe5635SKent Overstreet 	b = mca_find(c, k);
987cafe5635SKent Overstreet 
988cafe5635SKent Overstreet 	if (!b) {
98957943511SKent Overstreet 		if (current->bio_list)
99057943511SKent Overstreet 			return ERR_PTR(-EAGAIN);
99157943511SKent Overstreet 
992cafe5635SKent Overstreet 		mutex_lock(&c->bucket_lock);
9930a63b66dSKent Overstreet 		b = mca_alloc(c, op, k, level);
994cafe5635SKent Overstreet 		mutex_unlock(&c->bucket_lock);
995cafe5635SKent Overstreet 
996cafe5635SKent Overstreet 		if (!b)
997cafe5635SKent Overstreet 			goto retry;
998cafe5635SKent Overstreet 		if (IS_ERR(b))
999cafe5635SKent Overstreet 			return b;
1000cafe5635SKent Overstreet 
100157943511SKent Overstreet 		bch_btree_node_read(b);
1002cafe5635SKent Overstreet 
1003cafe5635SKent Overstreet 		if (!write)
1004cafe5635SKent Overstreet 			downgrade_write(&b->lock);
1005cafe5635SKent Overstreet 	} else {
1006cafe5635SKent Overstreet 		rw_lock(write, b, level);
1007cafe5635SKent Overstreet 		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1008cafe5635SKent Overstreet 			rw_unlock(write, b);
1009cafe5635SKent Overstreet 			goto retry;
1010cafe5635SKent Overstreet 		}
1011cafe5635SKent Overstreet 		BUG_ON(b->level != level);
1012cafe5635SKent Overstreet 	}
1013cafe5635SKent Overstreet 
10142452cc89SSlava Pestov 	b->parent = parent;
1015cafe5635SKent Overstreet 	b->accessed = 1;
1016cafe5635SKent Overstreet 
1017a85e968eSKent Overstreet 	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1018a85e968eSKent Overstreet 		prefetch(b->keys.set[i].tree);
1019a85e968eSKent Overstreet 		prefetch(b->keys.set[i].data);
1020cafe5635SKent Overstreet 	}
1021cafe5635SKent Overstreet 
1022a85e968eSKent Overstreet 	for (; i <= b->keys.nsets; i++)
1023a85e968eSKent Overstreet 		prefetch(b->keys.set[i].data);
1024cafe5635SKent Overstreet 
102557943511SKent Overstreet 	if (btree_node_io_error(b)) {
1026cafe5635SKent Overstreet 		rw_unlock(write, b);
102757943511SKent Overstreet 		return ERR_PTR(-EIO);
102857943511SKent Overstreet 	}
102957943511SKent Overstreet 
1030cafe5635SKent Overstreet 	BUG_ON(!b->written);
1031cafe5635SKent Overstreet 
1032cafe5635SKent Overstreet 	return b;
1033cafe5635SKent Overstreet }
1034cafe5635SKent Overstreet 
10352452cc89SSlava Pestov static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1036cafe5635SKent Overstreet {
1037cafe5635SKent Overstreet 	struct btree *b;
1038cafe5635SKent Overstreet 
10392452cc89SSlava Pestov 	mutex_lock(&parent->c->bucket_lock);
10402452cc89SSlava Pestov 	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
10412452cc89SSlava Pestov 	mutex_unlock(&parent->c->bucket_lock);
1042cafe5635SKent Overstreet 
1043cafe5635SKent Overstreet 	if (!IS_ERR_OR_NULL(b)) {
10442452cc89SSlava Pestov 		b->parent = parent;
104557943511SKent Overstreet 		bch_btree_node_read(b);
1046cafe5635SKent Overstreet 		rw_unlock(true, b);
1047cafe5635SKent Overstreet 	}
1048cafe5635SKent Overstreet }
1049cafe5635SKent Overstreet 
1050cafe5635SKent Overstreet /* Btree alloc */
1051cafe5635SKent Overstreet 
1052e8e1d468SKent Overstreet static void btree_node_free(struct btree *b)
1053cafe5635SKent Overstreet {
1054c37511b8SKent Overstreet 	trace_bcache_btree_node_free(b);
1055c37511b8SKent Overstreet 
1056cafe5635SKent Overstreet 	BUG_ON(b == b->c->root);
1057cafe5635SKent Overstreet 
10582a285686SKent Overstreet 	mutex_lock(&b->write_lock);
10592a285686SKent Overstreet 
1060cafe5635SKent Overstreet 	if (btree_node_dirty(b))
1061cafe5635SKent Overstreet 		btree_complete_write(b, btree_current_write(b));
1062cafe5635SKent Overstreet 	clear_bit(BTREE_NODE_dirty, &b->flags);
1063cafe5635SKent Overstreet 
10642a285686SKent Overstreet 	mutex_unlock(&b->write_lock);
10652a285686SKent Overstreet 
1066cafe5635SKent Overstreet 	cancel_delayed_work(&b->work);
1067cafe5635SKent Overstreet 
1068cafe5635SKent Overstreet 	mutex_lock(&b->c->bucket_lock);
1069cafe5635SKent Overstreet 	bch_bucket_free(b->c, &b->key);
1070cafe5635SKent Overstreet 	mca_bucket_free(b);
1071cafe5635SKent Overstreet 	mutex_unlock(&b->c->bucket_lock);
1072cafe5635SKent Overstreet }
1073cafe5635SKent Overstreet 
1074c5aa4a31SSlava Pestov struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
10752452cc89SSlava Pestov 				     int level, bool wait,
10762452cc89SSlava Pestov 				     struct btree *parent)
1077cafe5635SKent Overstreet {
1078cafe5635SKent Overstreet 	BKEY_PADDED(key) k;
1079cafe5635SKent Overstreet 	struct btree *b = ERR_PTR(-EAGAIN);
1080cafe5635SKent Overstreet 
1081cafe5635SKent Overstreet 	mutex_lock(&c->bucket_lock);
1082cafe5635SKent Overstreet retry:
1083c5aa4a31SSlava Pestov 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1084cafe5635SKent Overstreet 		goto err;
1085cafe5635SKent Overstreet 
10863a3b6a4eSKent Overstreet 	bkey_put(c, &k.key);
1087cafe5635SKent Overstreet 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1088cafe5635SKent Overstreet 
10890a63b66dSKent Overstreet 	b = mca_alloc(c, op, &k.key, level);
1090cafe5635SKent Overstreet 	if (IS_ERR(b))
1091cafe5635SKent Overstreet 		goto err_free;
1092cafe5635SKent Overstreet 
1093cafe5635SKent Overstreet 	if (!b) {
1094b1a67b0fSKent Overstreet 		cache_bug(c,
1095b1a67b0fSKent Overstreet 			"Tried to allocate bucket that was in btree cache");
1096cafe5635SKent Overstreet 		goto retry;
1097cafe5635SKent Overstreet 	}
1098cafe5635SKent Overstreet 
1099cafe5635SKent Overstreet 	b->accessed = 1;
11002452cc89SSlava Pestov 	b->parent = parent;
1101a85e968eSKent Overstreet 	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1102cafe5635SKent Overstreet 
1103cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
1104c37511b8SKent Overstreet 
1105c37511b8SKent Overstreet 	trace_bcache_btree_node_alloc(b);
1106cafe5635SKent Overstreet 	return b;
1107cafe5635SKent Overstreet err_free:
1108cafe5635SKent Overstreet 	bch_bucket_free(c, &k.key);
1109cafe5635SKent Overstreet err:
1110cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
1111c37511b8SKent Overstreet 
1112913dc33fSSlava Pestov 	trace_bcache_btree_node_alloc_fail(c);
1113cafe5635SKent Overstreet 	return b;
1114cafe5635SKent Overstreet }
1115cafe5635SKent Overstreet 
1116c5aa4a31SSlava Pestov static struct btree *bch_btree_node_alloc(struct cache_set *c,
11172452cc89SSlava Pestov 					  struct btree_op *op, int level,
11182452cc89SSlava Pestov 					  struct btree *parent)
1119c5aa4a31SSlava Pestov {
11202452cc89SSlava Pestov 	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1121c5aa4a31SSlava Pestov }
1122c5aa4a31SSlava Pestov 
11230a63b66dSKent Overstreet static struct btree *btree_node_alloc_replacement(struct btree *b,
11240a63b66dSKent Overstreet 						  struct btree_op *op)
1125cafe5635SKent Overstreet {
11262452cc89SSlava Pestov 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
112767539e85SKent Overstreet 	if (!IS_ERR_OR_NULL(n)) {
11282a285686SKent Overstreet 		mutex_lock(&n->write_lock);
112989ebb4a2SKent Overstreet 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
113067539e85SKent Overstreet 		bkey_copy_key(&n->key, &b->key);
11312a285686SKent Overstreet 		mutex_unlock(&n->write_lock);
113267539e85SKent Overstreet 	}
1133cafe5635SKent Overstreet 
1134cafe5635SKent Overstreet 	return n;
1135cafe5635SKent Overstreet }
1136cafe5635SKent Overstreet 
11378835c123SKent Overstreet static void make_btree_freeing_key(struct btree *b, struct bkey *k)
11388835c123SKent Overstreet {
11398835c123SKent Overstreet 	unsigned i;
11408835c123SKent Overstreet 
114105335cffSKent Overstreet 	mutex_lock(&b->c->bucket_lock);
114205335cffSKent Overstreet 
114305335cffSKent Overstreet 	atomic_inc(&b->c->prio_blocked);
114405335cffSKent Overstreet 
11458835c123SKent Overstreet 	bkey_copy(k, &b->key);
11468835c123SKent Overstreet 	bkey_copy_key(k, &ZERO_KEY);
11478835c123SKent Overstreet 
114805335cffSKent Overstreet 	for (i = 0; i < KEY_PTRS(k); i++)
114905335cffSKent Overstreet 		SET_PTR_GEN(k, i,
115005335cffSKent Overstreet 			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
115105335cffSKent Overstreet 					PTR_BUCKET(b->c, &b->key, i)));
11528835c123SKent Overstreet 
115305335cffSKent Overstreet 	mutex_unlock(&b->c->bucket_lock);
11548835c123SKent Overstreet }
11558835c123SKent Overstreet 
115678365411SKent Overstreet static int btree_check_reserve(struct btree *b, struct btree_op *op)
115778365411SKent Overstreet {
115878365411SKent Overstreet 	struct cache_set *c = b->c;
115978365411SKent Overstreet 	struct cache *ca;
11600a63b66dSKent Overstreet 	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
116178365411SKent Overstreet 
116278365411SKent Overstreet 	mutex_lock(&c->bucket_lock);
116378365411SKent Overstreet 
116478365411SKent Overstreet 	for_each_cache(ca, c, i)
116578365411SKent Overstreet 		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
116678365411SKent Overstreet 			if (op)
11670a63b66dSKent Overstreet 				prepare_to_wait(&c->btree_cache_wait, &op->wait,
116878365411SKent Overstreet 						TASK_UNINTERRUPTIBLE);
11690a63b66dSKent Overstreet 			mutex_unlock(&c->bucket_lock);
11700a63b66dSKent Overstreet 			return -EINTR;
117178365411SKent Overstreet 		}
117278365411SKent Overstreet 
117378365411SKent Overstreet 	mutex_unlock(&c->bucket_lock);
11740a63b66dSKent Overstreet 
11750a63b66dSKent Overstreet 	return mca_cannibalize_lock(b->c, op);
117678365411SKent Overstreet }
117778365411SKent Overstreet 
1178cafe5635SKent Overstreet /* Garbage collection */
1179cafe5635SKent Overstreet 
1180487dded8SKent Overstreet static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1181487dded8SKent Overstreet 				    struct bkey *k)
1182cafe5635SKent Overstreet {
1183cafe5635SKent Overstreet 	uint8_t stale = 0;
1184cafe5635SKent Overstreet 	unsigned i;
1185cafe5635SKent Overstreet 	struct bucket *g;
1186cafe5635SKent Overstreet 
1187cafe5635SKent Overstreet 	/*
1188cafe5635SKent Overstreet 	 * ptr_invalid() can't return true for the keys that mark btree nodes as
1189cafe5635SKent Overstreet 	 * freed, but since ptr_bad() returns true we'll never actually use them
1190cafe5635SKent Overstreet 	 * for anything and thus we don't want mark their pointers here
1191cafe5635SKent Overstreet 	 */
1192cafe5635SKent Overstreet 	if (!bkey_cmp(k, &ZERO_KEY))
1193cafe5635SKent Overstreet 		return stale;
1194cafe5635SKent Overstreet 
1195cafe5635SKent Overstreet 	for (i = 0; i < KEY_PTRS(k); i++) {
1196cafe5635SKent Overstreet 		if (!ptr_available(c, k, i))
1197cafe5635SKent Overstreet 			continue;
1198cafe5635SKent Overstreet 
1199cafe5635SKent Overstreet 		g = PTR_BUCKET(c, k, i);
1200cafe5635SKent Overstreet 
12013a2fd9d5SKent Overstreet 		if (gen_after(g->last_gc, PTR_GEN(k, i)))
12023a2fd9d5SKent Overstreet 			g->last_gc = PTR_GEN(k, i);
1203cafe5635SKent Overstreet 
1204cafe5635SKent Overstreet 		if (ptr_stale(c, k, i)) {
1205cafe5635SKent Overstreet 			stale = max(stale, ptr_stale(c, k, i));
1206cafe5635SKent Overstreet 			continue;
1207cafe5635SKent Overstreet 		}
1208cafe5635SKent Overstreet 
1209cafe5635SKent Overstreet 		cache_bug_on(GC_MARK(g) &&
1210cafe5635SKent Overstreet 			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1211cafe5635SKent Overstreet 			     c, "inconsistent ptrs: mark = %llu, level = %i",
1212cafe5635SKent Overstreet 			     GC_MARK(g), level);
1213cafe5635SKent Overstreet 
1214cafe5635SKent Overstreet 		if (level)
1215cafe5635SKent Overstreet 			SET_GC_MARK(g, GC_MARK_METADATA);
1216cafe5635SKent Overstreet 		else if (KEY_DIRTY(k))
1217cafe5635SKent Overstreet 			SET_GC_MARK(g, GC_MARK_DIRTY);
12184fe6a816SKent Overstreet 		else if (!GC_MARK(g))
12194fe6a816SKent Overstreet 			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1220cafe5635SKent Overstreet 
1221cafe5635SKent Overstreet 		/* guard against overflow */
1222cafe5635SKent Overstreet 		SET_GC_SECTORS_USED(g, min_t(unsigned,
1223cafe5635SKent Overstreet 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
122494717447SDarrick J. Wong 					     MAX_GC_SECTORS_USED));
1225cafe5635SKent Overstreet 
1226cafe5635SKent Overstreet 		BUG_ON(!GC_SECTORS_USED(g));
1227cafe5635SKent Overstreet 	}
1228cafe5635SKent Overstreet 
1229cafe5635SKent Overstreet 	return stale;
1230cafe5635SKent Overstreet }
1231cafe5635SKent Overstreet 
1232cafe5635SKent Overstreet #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
1233cafe5635SKent Overstreet 
1234487dded8SKent Overstreet void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1235487dded8SKent Overstreet {
1236487dded8SKent Overstreet 	unsigned i;
1237487dded8SKent Overstreet 
1238487dded8SKent Overstreet 	for (i = 0; i < KEY_PTRS(k); i++)
1239487dded8SKent Overstreet 		if (ptr_available(c, k, i) &&
1240487dded8SKent Overstreet 		    !ptr_stale(c, k, i)) {
1241487dded8SKent Overstreet 			struct bucket *b = PTR_BUCKET(c, k, i);
1242487dded8SKent Overstreet 
1243487dded8SKent Overstreet 			b->gen = PTR_GEN(k, i);
1244487dded8SKent Overstreet 
1245487dded8SKent Overstreet 			if (level && bkey_cmp(k, &ZERO_KEY))
1246487dded8SKent Overstreet 				b->prio = BTREE_PRIO;
1247487dded8SKent Overstreet 			else if (!level && b->prio == BTREE_PRIO)
1248487dded8SKent Overstreet 				b->prio = INITIAL_PRIO;
1249487dded8SKent Overstreet 		}
1250487dded8SKent Overstreet 
1251487dded8SKent Overstreet 	__bch_btree_mark_key(c, level, k);
1252487dded8SKent Overstreet }
1253487dded8SKent Overstreet 
1254d44c2f9eSTang Junhui void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1255d44c2f9eSTang Junhui {
1256d44c2f9eSTang Junhui 	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1257d44c2f9eSTang Junhui }
1258d44c2f9eSTang Junhui 
1259a1f0358bSKent Overstreet static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1260cafe5635SKent Overstreet {
1261cafe5635SKent Overstreet 	uint8_t stale = 0;
1262a1f0358bSKent Overstreet 	unsigned keys = 0, good_keys = 0;
1263cafe5635SKent Overstreet 	struct bkey *k;
1264cafe5635SKent Overstreet 	struct btree_iter iter;
1265cafe5635SKent Overstreet 	struct bset_tree *t;
1266cafe5635SKent Overstreet 
1267cafe5635SKent Overstreet 	gc->nodes++;
1268cafe5635SKent Overstreet 
1269c052dd9aSKent Overstreet 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1270cafe5635SKent Overstreet 		stale = max(stale, btree_mark_key(b, k));
1271a1f0358bSKent Overstreet 		keys++;
1272cafe5635SKent Overstreet 
1273a85e968eSKent Overstreet 		if (bch_ptr_bad(&b->keys, k))
1274cafe5635SKent Overstreet 			continue;
1275cafe5635SKent Overstreet 
1276cafe5635SKent Overstreet 		gc->key_bytes += bkey_u64s(k);
1277cafe5635SKent Overstreet 		gc->nkeys++;
1278a1f0358bSKent Overstreet 		good_keys++;
1279cafe5635SKent Overstreet 
1280cafe5635SKent Overstreet 		gc->data += KEY_SIZE(k);
1281cafe5635SKent Overstreet 	}
1282cafe5635SKent Overstreet 
1283a85e968eSKent Overstreet 	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1284cafe5635SKent Overstreet 		btree_bug_on(t->size &&
1285a85e968eSKent Overstreet 			     bset_written(&b->keys, t) &&
1286cafe5635SKent Overstreet 			     bkey_cmp(&b->key, &t->end) < 0,
1287cafe5635SKent Overstreet 			     b, "found short btree key in gc");
1288cafe5635SKent Overstreet 
1289a1f0358bSKent Overstreet 	if (b->c->gc_always_rewrite)
1290a1f0358bSKent Overstreet 		return true;
1291a1f0358bSKent Overstreet 
1292a1f0358bSKent Overstreet 	if (stale > 10)
1293a1f0358bSKent Overstreet 		return true;
1294a1f0358bSKent Overstreet 
1295a1f0358bSKent Overstreet 	if ((keys - good_keys) * 2 > keys)
1296a1f0358bSKent Overstreet 		return true;
1297a1f0358bSKent Overstreet 
1298a1f0358bSKent Overstreet 	return false;
1299cafe5635SKent Overstreet }
1300cafe5635SKent Overstreet 
1301a1f0358bSKent Overstreet #define GC_MERGE_NODES	4U
1302cafe5635SKent Overstreet 
1303cafe5635SKent Overstreet struct gc_merge_info {
1304cafe5635SKent Overstreet 	struct btree	*b;
1305cafe5635SKent Overstreet 	unsigned	keys;
1306cafe5635SKent Overstreet };
1307cafe5635SKent Overstreet 
1308a1f0358bSKent Overstreet static int bch_btree_insert_node(struct btree *, struct btree_op *,
1309a1f0358bSKent Overstreet 				 struct keylist *, atomic_t *, struct bkey *);
1310a1f0358bSKent Overstreet 
1311a1f0358bSKent Overstreet static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
13120a63b66dSKent Overstreet 			     struct gc_stat *gc, struct gc_merge_info *r)
1313cafe5635SKent Overstreet {
1314a1f0358bSKent Overstreet 	unsigned i, nodes = 0, keys = 0, blocks;
1315a1f0358bSKent Overstreet 	struct btree *new_nodes[GC_MERGE_NODES];
13160a63b66dSKent Overstreet 	struct keylist keylist;
1317b54d6934SKent Overstreet 	struct closure cl;
1318a1f0358bSKent Overstreet 	struct bkey *k;
1319b54d6934SKent Overstreet 
13200a63b66dSKent Overstreet 	bch_keylist_init(&keylist);
13210a63b66dSKent Overstreet 
13220a63b66dSKent Overstreet 	if (btree_check_reserve(b, NULL))
13230a63b66dSKent Overstreet 		return 0;
13240a63b66dSKent Overstreet 
1325a1f0358bSKent Overstreet 	memset(new_nodes, 0, sizeof(new_nodes));
1326b54d6934SKent Overstreet 	closure_init_stack(&cl);
1327cafe5635SKent Overstreet 
1328a1f0358bSKent Overstreet 	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1329cafe5635SKent Overstreet 		keys += r[nodes++].keys;
1330cafe5635SKent Overstreet 
1331cafe5635SKent Overstreet 	blocks = btree_default_blocks(b->c) * 2 / 3;
1332cafe5635SKent Overstreet 
1333cafe5635SKent Overstreet 	if (nodes < 2 ||
1334a85e968eSKent Overstreet 	    __set_blocks(b->keys.set[0].data, keys,
1335ee811287SKent Overstreet 			 block_bytes(b->c)) > blocks * (nodes - 1))
1336a1f0358bSKent Overstreet 		return 0;
1337cafe5635SKent Overstreet 
1338a1f0358bSKent Overstreet 	for (i = 0; i < nodes; i++) {
13390a63b66dSKent Overstreet 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1340a1f0358bSKent Overstreet 		if (IS_ERR_OR_NULL(new_nodes[i]))
1341a1f0358bSKent Overstreet 			goto out_nocoalesce;
1342cafe5635SKent Overstreet 	}
1343cafe5635SKent Overstreet 
13440a63b66dSKent Overstreet 	/*
13450a63b66dSKent Overstreet 	 * We have to check the reserve here, after we've allocated our new
13460a63b66dSKent Overstreet 	 * nodes, to make sure the insert below will succeed - we also check
13470a63b66dSKent Overstreet 	 * before as an optimization to potentially avoid a bunch of expensive
13480a63b66dSKent Overstreet 	 * allocs/sorts
13490a63b66dSKent Overstreet 	 */
13500a63b66dSKent Overstreet 	if (btree_check_reserve(b, NULL))
13510a63b66dSKent Overstreet 		goto out_nocoalesce;
13520a63b66dSKent Overstreet 
13532a285686SKent Overstreet 	for (i = 0; i < nodes; i++)
13542a285686SKent Overstreet 		mutex_lock(&new_nodes[i]->write_lock);
13552a285686SKent Overstreet 
1356cafe5635SKent Overstreet 	for (i = nodes - 1; i > 0; --i) {
1357ee811287SKent Overstreet 		struct bset *n1 = btree_bset_first(new_nodes[i]);
1358ee811287SKent Overstreet 		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1359cafe5635SKent Overstreet 		struct bkey *k, *last = NULL;
1360cafe5635SKent Overstreet 
1361cafe5635SKent Overstreet 		keys = 0;
1362cafe5635SKent Overstreet 
1363a1f0358bSKent Overstreet 		if (i > 1) {
1364cafe5635SKent Overstreet 			for (k = n2->start;
1365fafff81cSKent Overstreet 			     k < bset_bkey_last(n2);
1366cafe5635SKent Overstreet 			     k = bkey_next(k)) {
1367cafe5635SKent Overstreet 				if (__set_blocks(n1, n1->keys + keys +
1368ee811287SKent Overstreet 						 bkey_u64s(k),
1369ee811287SKent Overstreet 						 block_bytes(b->c)) > blocks)
1370cafe5635SKent Overstreet 					break;
1371cafe5635SKent Overstreet 
1372cafe5635SKent Overstreet 				last = k;
1373cafe5635SKent Overstreet 				keys += bkey_u64s(k);
1374cafe5635SKent Overstreet 			}
1375a1f0358bSKent Overstreet 		} else {
1376a1f0358bSKent Overstreet 			/*
1377a1f0358bSKent Overstreet 			 * Last node we're not getting rid of - we're getting
1378a1f0358bSKent Overstreet 			 * rid of the node at r[0]. Have to try and fit all of
1379a1f0358bSKent Overstreet 			 * the remaining keys into this node; we can't ensure
1380a1f0358bSKent Overstreet 			 * they will always fit due to rounding and variable
1381a1f0358bSKent Overstreet 			 * length keys (shouldn't be possible in practice,
1382a1f0358bSKent Overstreet 			 * though)
1383a1f0358bSKent Overstreet 			 */
1384a1f0358bSKent Overstreet 			if (__set_blocks(n1, n1->keys + n2->keys,
1385ee811287SKent Overstreet 					 block_bytes(b->c)) >
1386ee811287SKent Overstreet 			    btree_blocks(new_nodes[i]))
1387a1f0358bSKent Overstreet 				goto out_nocoalesce;
1388a1f0358bSKent Overstreet 
1389a1f0358bSKent Overstreet 			keys = n2->keys;
1390a1f0358bSKent Overstreet 			/* Take the key of the node we're getting rid of */
1391a1f0358bSKent Overstreet 			last = &r->b->key;
1392a1f0358bSKent Overstreet 		}
1393cafe5635SKent Overstreet 
1394ee811287SKent Overstreet 		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1395ee811287SKent Overstreet 		       btree_blocks(new_nodes[i]));
1396cafe5635SKent Overstreet 
1397a1f0358bSKent Overstreet 		if (last)
1398a1f0358bSKent Overstreet 			bkey_copy_key(&new_nodes[i]->key, last);
1399cafe5635SKent Overstreet 
1400fafff81cSKent Overstreet 		memcpy(bset_bkey_last(n1),
1401cafe5635SKent Overstreet 		       n2->start,
1402fafff81cSKent Overstreet 		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1403cafe5635SKent Overstreet 
1404cafe5635SKent Overstreet 		n1->keys += keys;
1405a1f0358bSKent Overstreet 		r[i].keys = n1->keys;
1406cafe5635SKent Overstreet 
1407cafe5635SKent Overstreet 		memmove(n2->start,
1408fafff81cSKent Overstreet 			bset_bkey_idx(n2, keys),
1409fafff81cSKent Overstreet 			(void *) bset_bkey_last(n2) -
1410fafff81cSKent Overstreet 			(void *) bset_bkey_idx(n2, keys));
1411cafe5635SKent Overstreet 
1412cafe5635SKent Overstreet 		n2->keys -= keys;
1413cafe5635SKent Overstreet 
14140a63b66dSKent Overstreet 		if (__bch_keylist_realloc(&keylist,
1415085d2a3dSKent Overstreet 					  bkey_u64s(&new_nodes[i]->key)))
1416a1f0358bSKent Overstreet 			goto out_nocoalesce;
1417a1f0358bSKent Overstreet 
1418a1f0358bSKent Overstreet 		bch_btree_node_write(new_nodes[i], &cl);
14190a63b66dSKent Overstreet 		bch_keylist_add(&keylist, &new_nodes[i]->key);
1420cafe5635SKent Overstreet 	}
1421cafe5635SKent Overstreet 
14222a285686SKent Overstreet 	for (i = 0; i < nodes; i++)
14232a285686SKent Overstreet 		mutex_unlock(&new_nodes[i]->write_lock);
14242a285686SKent Overstreet 
142505335cffSKent Overstreet 	closure_sync(&cl);
142605335cffSKent Overstreet 
142705335cffSKent Overstreet 	/* We emptied out this node */
142805335cffSKent Overstreet 	BUG_ON(btree_bset_first(new_nodes[0])->keys);
142905335cffSKent Overstreet 	btree_node_free(new_nodes[0]);
143005335cffSKent Overstreet 	rw_unlock(true, new_nodes[0]);
1431400ffaa2SSlava Pestov 	new_nodes[0] = NULL;
143205335cffSKent Overstreet 
1433a1f0358bSKent Overstreet 	for (i = 0; i < nodes; i++) {
14340a63b66dSKent Overstreet 		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1435a1f0358bSKent Overstreet 			goto out_nocoalesce;
1436a1f0358bSKent Overstreet 
14370a63b66dSKent Overstreet 		make_btree_freeing_key(r[i].b, keylist.top);
14380a63b66dSKent Overstreet 		bch_keylist_push(&keylist);
1439a1f0358bSKent Overstreet 	}
1440a1f0358bSKent Overstreet 
14410a63b66dSKent Overstreet 	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
14420a63b66dSKent Overstreet 	BUG_ON(!bch_keylist_empty(&keylist));
1443a1f0358bSKent Overstreet 
1444a1f0358bSKent Overstreet 	for (i = 0; i < nodes; i++) {
1445a1f0358bSKent Overstreet 		btree_node_free(r[i].b);
1446a1f0358bSKent Overstreet 		rw_unlock(true, r[i].b);
1447a1f0358bSKent Overstreet 
1448a1f0358bSKent Overstreet 		r[i].b = new_nodes[i];
1449a1f0358bSKent Overstreet 	}
1450a1f0358bSKent Overstreet 
1451a1f0358bSKent Overstreet 	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1452a1f0358bSKent Overstreet 	r[nodes - 1].b = ERR_PTR(-EINTR);
1453cafe5635SKent Overstreet 
1454c37511b8SKent Overstreet 	trace_bcache_btree_gc_coalesce(nodes);
1455cafe5635SKent Overstreet 	gc->nodes--;
1456cafe5635SKent Overstreet 
14570a63b66dSKent Overstreet 	bch_keylist_free(&keylist);
14580a63b66dSKent Overstreet 
1459a1f0358bSKent Overstreet 	/* Invalidated our iterator */
1460a1f0358bSKent Overstreet 	return -EINTR;
1461a1f0358bSKent Overstreet 
1462a1f0358bSKent Overstreet out_nocoalesce:
1463a1f0358bSKent Overstreet 	closure_sync(&cl);
14640a63b66dSKent Overstreet 	bch_keylist_free(&keylist);
1465a1f0358bSKent Overstreet 
14660a63b66dSKent Overstreet 	while ((k = bch_keylist_pop(&keylist)))
1467a1f0358bSKent Overstreet 		if (!bkey_cmp(k, &ZERO_KEY))
1468a1f0358bSKent Overstreet 			atomic_dec(&b->c->prio_blocked);
1469a1f0358bSKent Overstreet 
1470a1f0358bSKent Overstreet 	for (i = 0; i < nodes; i++)
1471a1f0358bSKent Overstreet 		if (!IS_ERR_OR_NULL(new_nodes[i])) {
1472a1f0358bSKent Overstreet 			btree_node_free(new_nodes[i]);
1473a1f0358bSKent Overstreet 			rw_unlock(true, new_nodes[i]);
1474a1f0358bSKent Overstreet 		}
1475a1f0358bSKent Overstreet 	return 0;
1476a1f0358bSKent Overstreet }
1477a1f0358bSKent Overstreet 
14780a63b66dSKent Overstreet static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
14790a63b66dSKent Overstreet 				 struct btree *replace)
14800a63b66dSKent Overstreet {
14810a63b66dSKent Overstreet 	struct keylist keys;
14820a63b66dSKent Overstreet 	struct btree *n;
14830a63b66dSKent Overstreet 
14840a63b66dSKent Overstreet 	if (btree_check_reserve(b, NULL))
14850a63b66dSKent Overstreet 		return 0;
14860a63b66dSKent Overstreet 
14870a63b66dSKent Overstreet 	n = btree_node_alloc_replacement(replace, NULL);
14880a63b66dSKent Overstreet 
14890a63b66dSKent Overstreet 	/* recheck reserve after allocating replacement node */
14900a63b66dSKent Overstreet 	if (btree_check_reserve(b, NULL)) {
14910a63b66dSKent Overstreet 		btree_node_free(n);
14920a63b66dSKent Overstreet 		rw_unlock(true, n);
14930a63b66dSKent Overstreet 		return 0;
14940a63b66dSKent Overstreet 	}
14950a63b66dSKent Overstreet 
14960a63b66dSKent Overstreet 	bch_btree_node_write_sync(n);
14970a63b66dSKent Overstreet 
14980a63b66dSKent Overstreet 	bch_keylist_init(&keys);
14990a63b66dSKent Overstreet 	bch_keylist_add(&keys, &n->key);
15000a63b66dSKent Overstreet 
15010a63b66dSKent Overstreet 	make_btree_freeing_key(replace, keys.top);
15020a63b66dSKent Overstreet 	bch_keylist_push(&keys);
15030a63b66dSKent Overstreet 
15040a63b66dSKent Overstreet 	bch_btree_insert_node(b, op, &keys, NULL, NULL);
15050a63b66dSKent Overstreet 	BUG_ON(!bch_keylist_empty(&keys));
15060a63b66dSKent Overstreet 
15070a63b66dSKent Overstreet 	btree_node_free(replace);
15080a63b66dSKent Overstreet 	rw_unlock(true, n);
15090a63b66dSKent Overstreet 
15100a63b66dSKent Overstreet 	/* Invalidated our iterator */
15110a63b66dSKent Overstreet 	return -EINTR;
15120a63b66dSKent Overstreet }
15130a63b66dSKent Overstreet 
1514a1f0358bSKent Overstreet static unsigned btree_gc_count_keys(struct btree *b)
1515a1f0358bSKent Overstreet {
1516a1f0358bSKent Overstreet 	struct bkey *k;
1517a1f0358bSKent Overstreet 	struct btree_iter iter;
1518a1f0358bSKent Overstreet 	unsigned ret = 0;
1519a1f0358bSKent Overstreet 
1520c052dd9aSKent Overstreet 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1521a1f0358bSKent Overstreet 		ret += bkey_u64s(k);
1522a1f0358bSKent Overstreet 
1523a1f0358bSKent Overstreet 	return ret;
1524cafe5635SKent Overstreet }
1525cafe5635SKent Overstreet 
1526*7f4a59deSTang Junhui static size_t btree_gc_min_nodes(struct cache_set *c)
1527*7f4a59deSTang Junhui {
1528*7f4a59deSTang Junhui 	size_t min_nodes;
1529*7f4a59deSTang Junhui 
1530*7f4a59deSTang Junhui 	/*
1531*7f4a59deSTang Junhui 	 * Since incremental GC would stop 100ms when front
1532*7f4a59deSTang Junhui 	 * side I/O comes, so when there are many btree nodes,
1533*7f4a59deSTang Junhui 	 * if GC only processes constant (100) nodes each time,
1534*7f4a59deSTang Junhui 	 * GC would last a long time, and the front side I/Os
1535*7f4a59deSTang Junhui 	 * would run out of the buckets (since no new bucket
1536*7f4a59deSTang Junhui 	 * can be allocated during GC), and be blocked again.
1537*7f4a59deSTang Junhui 	 * So GC should not process constant nodes, but varied
1538*7f4a59deSTang Junhui 	 * nodes according to the number of btree nodes, which
1539*7f4a59deSTang Junhui 	 * realized by dividing GC into constant(100) times,
1540*7f4a59deSTang Junhui 	 * so when there are many btree nodes, GC can process
1541*7f4a59deSTang Junhui 	 * more nodes each time, otherwise, GC will process less
1542*7f4a59deSTang Junhui 	 * nodes each time (but no less than MIN_GC_NODES)
1543*7f4a59deSTang Junhui 	 */
1544*7f4a59deSTang Junhui 	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1545*7f4a59deSTang Junhui 	if (min_nodes < MIN_GC_NODES)
1546*7f4a59deSTang Junhui 		min_nodes = MIN_GC_NODES;
1547*7f4a59deSTang Junhui 
1548*7f4a59deSTang Junhui 	return min_nodes;
1549*7f4a59deSTang Junhui }
1550*7f4a59deSTang Junhui 
1551*7f4a59deSTang Junhui 
1552cafe5635SKent Overstreet static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1553cafe5635SKent Overstreet 			    struct closure *writes, struct gc_stat *gc)
1554cafe5635SKent Overstreet {
1555a1f0358bSKent Overstreet 	int ret = 0;
1556a1f0358bSKent Overstreet 	bool should_rewrite;
1557a1f0358bSKent Overstreet 	struct bkey *k;
1558a1f0358bSKent Overstreet 	struct btree_iter iter;
1559cafe5635SKent Overstreet 	struct gc_merge_info r[GC_MERGE_NODES];
15602a285686SKent Overstreet 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1561cafe5635SKent Overstreet 
1562c052dd9aSKent Overstreet 	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1563cafe5635SKent Overstreet 
15642a285686SKent Overstreet 	for (i = r; i < r + ARRAY_SIZE(r); i++)
15652a285686SKent Overstreet 		i->b = ERR_PTR(-EINTR);
1566cafe5635SKent Overstreet 
1567a1f0358bSKent Overstreet 	while (1) {
1568a85e968eSKent Overstreet 		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1569a1f0358bSKent Overstreet 		if (k) {
15700a63b66dSKent Overstreet 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
15712452cc89SSlava Pestov 						  true, b);
1572cafe5635SKent Overstreet 			if (IS_ERR(r->b)) {
1573cafe5635SKent Overstreet 				ret = PTR_ERR(r->b);
1574cafe5635SKent Overstreet 				break;
1575cafe5635SKent Overstreet 			}
1576cafe5635SKent Overstreet 
1577a1f0358bSKent Overstreet 			r->keys = btree_gc_count_keys(r->b);
1578cafe5635SKent Overstreet 
15790a63b66dSKent Overstreet 			ret = btree_gc_coalesce(b, op, gc, r);
1580a1f0358bSKent Overstreet 			if (ret)
1581cafe5635SKent Overstreet 				break;
1582cafe5635SKent Overstreet 		}
1583cafe5635SKent Overstreet 
1584a1f0358bSKent Overstreet 		if (!last->b)
1585a1f0358bSKent Overstreet 			break;
1586cafe5635SKent Overstreet 
1587a1f0358bSKent Overstreet 		if (!IS_ERR(last->b)) {
1588a1f0358bSKent Overstreet 			should_rewrite = btree_gc_mark_node(last->b, gc);
15890a63b66dSKent Overstreet 			if (should_rewrite) {
15900a63b66dSKent Overstreet 				ret = btree_gc_rewrite_node(b, op, last->b);
15910a63b66dSKent Overstreet 				if (ret)
1592a1f0358bSKent Overstreet 					break;
1593a1f0358bSKent Overstreet 			}
1594a1f0358bSKent Overstreet 
1595a1f0358bSKent Overstreet 			if (last->b->level) {
1596a1f0358bSKent Overstreet 				ret = btree_gc_recurse(last->b, op, writes, gc);
1597a1f0358bSKent Overstreet 				if (ret)
1598a1f0358bSKent Overstreet 					break;
1599a1f0358bSKent Overstreet 			}
1600a1f0358bSKent Overstreet 
1601a1f0358bSKent Overstreet 			bkey_copy_key(&b->c->gc_done, &last->b->key);
1602a1f0358bSKent Overstreet 
1603a1f0358bSKent Overstreet 			/*
1604a1f0358bSKent Overstreet 			 * Must flush leaf nodes before gc ends, since replace
1605a1f0358bSKent Overstreet 			 * operations aren't journalled
1606cafe5635SKent Overstreet 			 */
16072a285686SKent Overstreet 			mutex_lock(&last->b->write_lock);
1608a1f0358bSKent Overstreet 			if (btree_node_dirty(last->b))
1609a1f0358bSKent Overstreet 				bch_btree_node_write(last->b, writes);
16102a285686SKent Overstreet 			mutex_unlock(&last->b->write_lock);
1611a1f0358bSKent Overstreet 			rw_unlock(true, last->b);
1612a1f0358bSKent Overstreet 		}
1613a1f0358bSKent Overstreet 
1614a1f0358bSKent Overstreet 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1615a1f0358bSKent Overstreet 		r->b = NULL;
1616a1f0358bSKent Overstreet 
16175c25c4fcSTang Junhui 		if (atomic_read(&b->c->search_inflight) &&
1618*7f4a59deSTang Junhui 		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
16195c25c4fcSTang Junhui 			gc->nodes_pre =  gc->nodes;
16205c25c4fcSTang Junhui 			ret = -EAGAIN;
16215c25c4fcSTang Junhui 			break;
16225c25c4fcSTang Junhui 		}
16235c25c4fcSTang Junhui 
1624cafe5635SKent Overstreet 		if (need_resched()) {
1625cafe5635SKent Overstreet 			ret = -EAGAIN;
1626cafe5635SKent Overstreet 			break;
1627cafe5635SKent Overstreet 		}
1628cafe5635SKent Overstreet 	}
1629cafe5635SKent Overstreet 
16302a285686SKent Overstreet 	for (i = r; i < r + ARRAY_SIZE(r); i++)
16312a285686SKent Overstreet 		if (!IS_ERR_OR_NULL(i->b)) {
16322a285686SKent Overstreet 			mutex_lock(&i->b->write_lock);
16332a285686SKent Overstreet 			if (btree_node_dirty(i->b))
16342a285686SKent Overstreet 				bch_btree_node_write(i->b, writes);
16352a285686SKent Overstreet 			mutex_unlock(&i->b->write_lock);
16362a285686SKent Overstreet 			rw_unlock(true, i->b);
1637a1f0358bSKent Overstreet 		}
1638cafe5635SKent Overstreet 
1639cafe5635SKent Overstreet 	return ret;
1640cafe5635SKent Overstreet }
1641cafe5635SKent Overstreet 
1642cafe5635SKent Overstreet static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1643cafe5635SKent Overstreet 			     struct closure *writes, struct gc_stat *gc)
1644cafe5635SKent Overstreet {
1645cafe5635SKent Overstreet 	struct btree *n = NULL;
1646a1f0358bSKent Overstreet 	int ret = 0;
1647a1f0358bSKent Overstreet 	bool should_rewrite;
1648cafe5635SKent Overstreet 
1649a1f0358bSKent Overstreet 	should_rewrite = btree_gc_mark_node(b, gc);
1650a1f0358bSKent Overstreet 	if (should_rewrite) {
16510a63b66dSKent Overstreet 		n = btree_node_alloc_replacement(b, NULL);
1652cafe5635SKent Overstreet 
1653cafe5635SKent Overstreet 		if (!IS_ERR_OR_NULL(n)) {
1654a1f0358bSKent Overstreet 			bch_btree_node_write_sync(n);
16552a285686SKent Overstreet 
1656a1f0358bSKent Overstreet 			bch_btree_set_root(n);
1657a1f0358bSKent Overstreet 			btree_node_free(b);
1658a1f0358bSKent Overstreet 			rw_unlock(true, n);
1659a1f0358bSKent Overstreet 
1660a1f0358bSKent Overstreet 			return -EINTR;
1661cafe5635SKent Overstreet 		}
1662a1f0358bSKent Overstreet 	}
1663a1f0358bSKent Overstreet 
1664487dded8SKent Overstreet 	__bch_btree_mark_key(b->c, b->level + 1, &b->key);
1665487dded8SKent Overstreet 
1666a1f0358bSKent Overstreet 	if (b->level) {
1667a1f0358bSKent Overstreet 		ret = btree_gc_recurse(b, op, writes, gc);
1668a1f0358bSKent Overstreet 		if (ret)
1669a1f0358bSKent Overstreet 			return ret;
1670a1f0358bSKent Overstreet 	}
1671a1f0358bSKent Overstreet 
1672a1f0358bSKent Overstreet 	bkey_copy_key(&b->c->gc_done, &b->key);
1673cafe5635SKent Overstreet 
1674cafe5635SKent Overstreet 	return ret;
1675cafe5635SKent Overstreet }
1676cafe5635SKent Overstreet 
1677cafe5635SKent Overstreet static void btree_gc_start(struct cache_set *c)
1678cafe5635SKent Overstreet {
1679cafe5635SKent Overstreet 	struct cache *ca;
1680cafe5635SKent Overstreet 	struct bucket *b;
1681cafe5635SKent Overstreet 	unsigned i;
1682cafe5635SKent Overstreet 
1683cafe5635SKent Overstreet 	if (!c->gc_mark_valid)
1684cafe5635SKent Overstreet 		return;
1685cafe5635SKent Overstreet 
1686cafe5635SKent Overstreet 	mutex_lock(&c->bucket_lock);
1687cafe5635SKent Overstreet 
1688cafe5635SKent Overstreet 	c->gc_mark_valid = 0;
1689cafe5635SKent Overstreet 	c->gc_done = ZERO_KEY;
1690cafe5635SKent Overstreet 
1691cafe5635SKent Overstreet 	for_each_cache(ca, c, i)
1692cafe5635SKent Overstreet 		for_each_bucket(b, ca) {
16933a2fd9d5SKent Overstreet 			b->last_gc = b->gen;
169429ebf465SKent Overstreet 			if (!atomic_read(&b->pin)) {
16954fe6a816SKent Overstreet 				SET_GC_MARK(b, 0);
169629ebf465SKent Overstreet 				SET_GC_SECTORS_USED(b, 0);
169729ebf465SKent Overstreet 			}
1698cafe5635SKent Overstreet 		}
1699cafe5635SKent Overstreet 
1700cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
1701cafe5635SKent Overstreet }
1702cafe5635SKent Overstreet 
1703d44c2f9eSTang Junhui static void bch_btree_gc_finish(struct cache_set *c)
1704cafe5635SKent Overstreet {
1705cafe5635SKent Overstreet 	struct bucket *b;
1706cafe5635SKent Overstreet 	struct cache *ca;
1707cafe5635SKent Overstreet 	unsigned i;
1708cafe5635SKent Overstreet 
1709cafe5635SKent Overstreet 	mutex_lock(&c->bucket_lock);
1710cafe5635SKent Overstreet 
1711cafe5635SKent Overstreet 	set_gc_sectors(c);
1712cafe5635SKent Overstreet 	c->gc_mark_valid = 1;
1713cafe5635SKent Overstreet 	c->need_gc	= 0;
1714cafe5635SKent Overstreet 
1715cafe5635SKent Overstreet 	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1716cafe5635SKent Overstreet 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1717cafe5635SKent Overstreet 			    GC_MARK_METADATA);
1718cafe5635SKent Overstreet 
1719bf0a628aSNicholas Swenson 	/* don't reclaim buckets to which writeback keys point */
1720bf0a628aSNicholas Swenson 	rcu_read_lock();
17212831231dSColy Li 	for (i = 0; i < c->devices_max_used; i++) {
1722bf0a628aSNicholas Swenson 		struct bcache_device *d = c->devices[i];
1723bf0a628aSNicholas Swenson 		struct cached_dev *dc;
1724bf0a628aSNicholas Swenson 		struct keybuf_key *w, *n;
1725bf0a628aSNicholas Swenson 		unsigned j;
1726bf0a628aSNicholas Swenson 
1727bf0a628aSNicholas Swenson 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1728bf0a628aSNicholas Swenson 			continue;
1729bf0a628aSNicholas Swenson 		dc = container_of(d, struct cached_dev, disk);
1730bf0a628aSNicholas Swenson 
1731bf0a628aSNicholas Swenson 		spin_lock(&dc->writeback_keys.lock);
1732bf0a628aSNicholas Swenson 		rbtree_postorder_for_each_entry_safe(w, n,
1733bf0a628aSNicholas Swenson 					&dc->writeback_keys.keys, node)
1734bf0a628aSNicholas Swenson 			for (j = 0; j < KEY_PTRS(&w->key); j++)
1735bf0a628aSNicholas Swenson 				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1736bf0a628aSNicholas Swenson 					    GC_MARK_DIRTY);
1737bf0a628aSNicholas Swenson 		spin_unlock(&dc->writeback_keys.lock);
1738bf0a628aSNicholas Swenson 	}
1739bf0a628aSNicholas Swenson 	rcu_read_unlock();
1740bf0a628aSNicholas Swenson 
1741d44c2f9eSTang Junhui 	c->avail_nbuckets = 0;
1742cafe5635SKent Overstreet 	for_each_cache(ca, c, i) {
1743cafe5635SKent Overstreet 		uint64_t *i;
1744cafe5635SKent Overstreet 
1745cafe5635SKent Overstreet 		ca->invalidate_needs_gc = 0;
1746cafe5635SKent Overstreet 
1747cafe5635SKent Overstreet 		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1748cafe5635SKent Overstreet 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1749cafe5635SKent Overstreet 
1750cafe5635SKent Overstreet 		for (i = ca->prio_buckets;
1751cafe5635SKent Overstreet 		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1752cafe5635SKent Overstreet 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1753cafe5635SKent Overstreet 
1754cafe5635SKent Overstreet 		for_each_bucket(b, ca) {
1755cafe5635SKent Overstreet 			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
1756cafe5635SKent Overstreet 
17574fe6a816SKent Overstreet 			if (atomic_read(&b->pin))
17584fe6a816SKent Overstreet 				continue;
17594fe6a816SKent Overstreet 
17604fe6a816SKent Overstreet 			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
17614fe6a816SKent Overstreet 
17624fe6a816SKent Overstreet 			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1763d44c2f9eSTang Junhui 				c->avail_nbuckets++;
1764cafe5635SKent Overstreet 		}
1765cafe5635SKent Overstreet 	}
1766cafe5635SKent Overstreet 
1767cafe5635SKent Overstreet 	mutex_unlock(&c->bucket_lock);
1768cafe5635SKent Overstreet }
1769cafe5635SKent Overstreet 
177072a44517SKent Overstreet static void bch_btree_gc(struct cache_set *c)
1771cafe5635SKent Overstreet {
1772cafe5635SKent Overstreet 	int ret;
1773cafe5635SKent Overstreet 	struct gc_stat stats;
1774cafe5635SKent Overstreet 	struct closure writes;
1775cafe5635SKent Overstreet 	struct btree_op op;
1776cafe5635SKent Overstreet 	uint64_t start_time = local_clock();
177757943511SKent Overstreet 
1778c37511b8SKent Overstreet 	trace_bcache_gc_start(c);
1779cafe5635SKent Overstreet 
1780cafe5635SKent Overstreet 	memset(&stats, 0, sizeof(struct gc_stat));
1781cafe5635SKent Overstreet 	closure_init_stack(&writes);
1782b54d6934SKent Overstreet 	bch_btree_op_init(&op, SHRT_MAX);
1783cafe5635SKent Overstreet 
1784cafe5635SKent Overstreet 	btree_gc_start(c);
1785cafe5635SKent Overstreet 
1786771f393eSColy Li 	/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1787a1f0358bSKent Overstreet 	do {
1788cafe5635SKent Overstreet 		ret = btree_root(gc_root, c, &op, &writes, &stats);
1789cafe5635SKent Overstreet 		closure_sync(&writes);
1790c5f1e5adSKent Overstreet 		cond_resched();
1791cafe5635SKent Overstreet 
17925c25c4fcSTang Junhui 		if (ret == -EAGAIN)
17935c25c4fcSTang Junhui 			schedule_timeout_interruptible(msecs_to_jiffies
17945c25c4fcSTang Junhui 						       (GC_SLEEP_MS));
17955c25c4fcSTang Junhui 		else if (ret)
1796cafe5635SKent Overstreet 			pr_warn("gc failed!");
1797771f393eSColy Li 	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1798cafe5635SKent Overstreet 
1799d44c2f9eSTang Junhui 	bch_btree_gc_finish(c);
180057943511SKent Overstreet 	wake_up_allocators(c);
180157943511SKent Overstreet 
1802169ef1cfSKent Overstreet 	bch_time_stats_update(&c->btree_gc_time, start_time);
1803cafe5635SKent Overstreet 
1804cafe5635SKent Overstreet 	stats.key_bytes *= sizeof(uint64_t);
1805cafe5635SKent Overstreet 	stats.data	<<= 9;
1806d44c2f9eSTang Junhui 	bch_update_bucket_in_use(c, &stats);
1807cafe5635SKent Overstreet 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1808cafe5635SKent Overstreet 
1809c37511b8SKent Overstreet 	trace_bcache_gc_end(c);
1810cafe5635SKent Overstreet 
181172a44517SKent Overstreet 	bch_moving_gc(c);
1812cafe5635SKent Overstreet }
1813cafe5635SKent Overstreet 
1814be628be0SKent Overstreet static bool gc_should_run(struct cache_set *c)
1815cafe5635SKent Overstreet {
1816a1f0358bSKent Overstreet 	struct cache *ca;
1817a1f0358bSKent Overstreet 	unsigned i;
181872a44517SKent Overstreet 
1819be628be0SKent Overstreet 	for_each_cache(ca, c, i)
1820be628be0SKent Overstreet 		if (ca->invalidate_needs_gc)
1821be628be0SKent Overstreet 			return true;
182272a44517SKent Overstreet 
1823be628be0SKent Overstreet 	if (atomic_read(&c->sectors_to_gc) < 0)
1824be628be0SKent Overstreet 		return true;
1825be628be0SKent Overstreet 
1826be628be0SKent Overstreet 	return false;
1827be628be0SKent Overstreet }
1828be628be0SKent Overstreet 
1829be628be0SKent Overstreet static int bch_gc_thread(void *arg)
1830be628be0SKent Overstreet {
1831be628be0SKent Overstreet 	struct cache_set *c = arg;
1832be628be0SKent Overstreet 
1833be628be0SKent Overstreet 	while (1) {
1834be628be0SKent Overstreet 		wait_event_interruptible(c->gc_wait,
1835771f393eSColy Li 			   kthread_should_stop() ||
1836771f393eSColy Li 			   test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1837771f393eSColy Li 			   gc_should_run(c));
1838be628be0SKent Overstreet 
1839771f393eSColy Li 		if (kthread_should_stop() ||
1840771f393eSColy Li 		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
184172a44517SKent Overstreet 			break;
184272a44517SKent Overstreet 
1843be628be0SKent Overstreet 		set_gc_sectors(c);
1844be628be0SKent Overstreet 		bch_btree_gc(c);
184572a44517SKent Overstreet 	}
184672a44517SKent Overstreet 
1847771f393eSColy Li 	wait_for_kthread_stop();
184872a44517SKent Overstreet 	return 0;
184972a44517SKent Overstreet }
185072a44517SKent Overstreet 
185172a44517SKent Overstreet int bch_gc_thread_start(struct cache_set *c)
185272a44517SKent Overstreet {
1853be628be0SKent Overstreet 	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
18549d134117SVasyl Gomonovych 	return PTR_ERR_OR_ZERO(c->gc_thread);
1855cafe5635SKent Overstreet }
1856cafe5635SKent Overstreet 
1857cafe5635SKent Overstreet /* Initial partial gc */
1858cafe5635SKent Overstreet 
1859487dded8SKent Overstreet static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1860cafe5635SKent Overstreet {
186150310164SKent Overstreet 	int ret = 0;
186250310164SKent Overstreet 	struct bkey *k, *p = NULL;
1863cafe5635SKent Overstreet 	struct btree_iter iter;
1864cafe5635SKent Overstreet 
1865487dded8SKent Overstreet 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1866487dded8SKent Overstreet 		bch_initial_mark_key(b->c, b->level, k);
1867cafe5635SKent Overstreet 
1868487dded8SKent Overstreet 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
1869cafe5635SKent Overstreet 
1870cafe5635SKent Overstreet 	if (b->level) {
1871c052dd9aSKent Overstreet 		bch_btree_iter_init(&b->keys, &iter, NULL);
1872cafe5635SKent Overstreet 
187350310164SKent Overstreet 		do {
1874a85e968eSKent Overstreet 			k = bch_btree_iter_next_filter(&iter, &b->keys,
1875a85e968eSKent Overstreet 						       bch_ptr_bad);
1876*7f4a59deSTang Junhui 			if (k) {
18772452cc89SSlava Pestov 				btree_node_prefetch(b, k);
1878*7f4a59deSTang Junhui 				/*
1879*7f4a59deSTang Junhui 				 * initiallize c->gc_stats.nodes
1880*7f4a59deSTang Junhui 				 * for incremental GC
1881*7f4a59deSTang Junhui 				 */
1882*7f4a59deSTang Junhui 				b->c->gc_stats.nodes++;
1883*7f4a59deSTang Junhui 			}
188450310164SKent Overstreet 
1885cafe5635SKent Overstreet 			if (p)
1886487dded8SKent Overstreet 				ret = btree(check_recurse, p, b, op);
1887cafe5635SKent Overstreet 
188850310164SKent Overstreet 			p = k;
188950310164SKent Overstreet 		} while (p && !ret);
1890cafe5635SKent Overstreet 	}
1891cafe5635SKent Overstreet 
1892487dded8SKent Overstreet 	return ret;
1893cafe5635SKent Overstreet }
1894cafe5635SKent Overstreet 
1895c18536a7SKent Overstreet int bch_btree_check(struct cache_set *c)
1896cafe5635SKent Overstreet {
1897c18536a7SKent Overstreet 	struct btree_op op;
1898cafe5635SKent Overstreet 
1899b54d6934SKent Overstreet 	bch_btree_op_init(&op, SHRT_MAX);
1900cafe5635SKent Overstreet 
1901487dded8SKent Overstreet 	return btree_root(check_recurse, c, &op);
1902cafe5635SKent Overstreet }
1903cafe5635SKent Overstreet 
19042531d9eeSKent Overstreet void bch_initial_gc_finish(struct cache_set *c)
19052531d9eeSKent Overstreet {
19062531d9eeSKent Overstreet 	struct cache *ca;
19072531d9eeSKent Overstreet 	struct bucket *b;
19082531d9eeSKent Overstreet 	unsigned i;
19092531d9eeSKent Overstreet 
19102531d9eeSKent Overstreet 	bch_btree_gc_finish(c);
19112531d9eeSKent Overstreet 
19122531d9eeSKent Overstreet 	mutex_lock(&c->bucket_lock);
19132531d9eeSKent Overstreet 
19142531d9eeSKent Overstreet 	/*
19152531d9eeSKent Overstreet 	 * We need to put some unused buckets directly on the prio freelist in
19162531d9eeSKent Overstreet 	 * order to get the allocator thread started - it needs freed buckets in
19172531d9eeSKent Overstreet 	 * order to rewrite the prios and gens, and it needs to rewrite prios
19182531d9eeSKent Overstreet 	 * and gens in order to free buckets.
19192531d9eeSKent Overstreet 	 *
19202531d9eeSKent Overstreet 	 * This is only safe for buckets that have no live data in them, which
19212531d9eeSKent Overstreet 	 * there should always be some of.
19222531d9eeSKent Overstreet 	 */
19232531d9eeSKent Overstreet 	for_each_cache(ca, c, i) {
19242531d9eeSKent Overstreet 		for_each_bucket(b, ca) {
1925682811b3STang Junhui 			if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1926682811b3STang Junhui 			    fifo_full(&ca->free[RESERVE_BTREE]))
19272531d9eeSKent Overstreet 				break;
19282531d9eeSKent Overstreet 
19292531d9eeSKent Overstreet 			if (bch_can_invalidate_bucket(ca, b) &&
19302531d9eeSKent Overstreet 			    !GC_MARK(b)) {
19312531d9eeSKent Overstreet 				__bch_invalidate_one_bucket(ca, b);
1932682811b3STang Junhui 				if (!fifo_push(&ca->free[RESERVE_PRIO],
1933682811b3STang Junhui 				   b - ca->buckets))
1934682811b3STang Junhui 					fifo_push(&ca->free[RESERVE_BTREE],
19352531d9eeSKent Overstreet 						  b - ca->buckets);
19362531d9eeSKent Overstreet 			}
19372531d9eeSKent Overstreet 		}
19382531d9eeSKent Overstreet 	}
19392531d9eeSKent Overstreet 
19402531d9eeSKent Overstreet 	mutex_unlock(&c->bucket_lock);
19412531d9eeSKent Overstreet }
19422531d9eeSKent Overstreet 
1943cafe5635SKent Overstreet /* Btree insertion */
1944cafe5635SKent Overstreet 
1945829a60b9SKent Overstreet static bool btree_insert_key(struct btree *b, struct bkey *k,
19461b207d80SKent Overstreet 			     struct bkey *replace_key)
1947cafe5635SKent Overstreet {
1948829a60b9SKent Overstreet 	unsigned status;
1949cafe5635SKent Overstreet 
1950cafe5635SKent Overstreet 	BUG_ON(bkey_cmp(k, &b->key) > 0);
1951cafe5635SKent Overstreet 
1952829a60b9SKent Overstreet 	status = bch_btree_insert_key(&b->keys, k, replace_key);
1953829a60b9SKent Overstreet 	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1954dc9d98d6SKent Overstreet 		bch_check_keys(&b->keys, "%u for %s", status,
19551b207d80SKent Overstreet 			       replace_key ? "replace" : "insert");
1956cafe5635SKent Overstreet 
1957829a60b9SKent Overstreet 		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1958829a60b9SKent Overstreet 					      status);
1959cafe5635SKent Overstreet 		return true;
1960829a60b9SKent Overstreet 	} else
1961829a60b9SKent Overstreet 		return false;
1962cafe5635SKent Overstreet }
1963cafe5635SKent Overstreet 
196459158fdeSKent Overstreet static size_t insert_u64s_remaining(struct btree *b)
196559158fdeSKent Overstreet {
19663572324aSKent Overstreet 	long ret = bch_btree_keys_u64s_remaining(&b->keys);
196759158fdeSKent Overstreet 
196859158fdeSKent Overstreet 	/*
196959158fdeSKent Overstreet 	 * Might land in the middle of an existing extent and have to split it
197059158fdeSKent Overstreet 	 */
197159158fdeSKent Overstreet 	if (b->keys.ops->is_extents)
197259158fdeSKent Overstreet 		ret -= KEY_MAX_U64S;
197359158fdeSKent Overstreet 
197459158fdeSKent Overstreet 	return max(ret, 0L);
197559158fdeSKent Overstreet }
197659158fdeSKent Overstreet 
197726c949f8SKent Overstreet static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
19781b207d80SKent Overstreet 				  struct keylist *insert_keys,
19791b207d80SKent Overstreet 				  struct bkey *replace_key)
1980cafe5635SKent Overstreet {
1981cafe5635SKent Overstreet 	bool ret = false;
1982dc9d98d6SKent Overstreet 	int oldsize = bch_count_data(&b->keys);
1983cafe5635SKent Overstreet 
198426c949f8SKent Overstreet 	while (!bch_keylist_empty(insert_keys)) {
1985c2f95ae2SKent Overstreet 		struct bkey *k = insert_keys->keys;
198626c949f8SKent Overstreet 
198759158fdeSKent Overstreet 		if (bkey_u64s(k) > insert_u64s_remaining(b))
1988403b6cdeSKent Overstreet 			break;
1989403b6cdeSKent Overstreet 
1990403b6cdeSKent Overstreet 		if (bkey_cmp(k, &b->key) <= 0) {
19913a3b6a4eSKent Overstreet 			if (!b->level)
19923a3b6a4eSKent Overstreet 				bkey_put(b->c, k);
199326c949f8SKent Overstreet 
1994829a60b9SKent Overstreet 			ret |= btree_insert_key(b, k, replace_key);
199526c949f8SKent Overstreet 			bch_keylist_pop_front(insert_keys);
199626c949f8SKent Overstreet 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
199726c949f8SKent Overstreet 			BKEY_PADDED(key) temp;
1998c2f95ae2SKent Overstreet 			bkey_copy(&temp.key, insert_keys->keys);
199926c949f8SKent Overstreet 
200026c949f8SKent Overstreet 			bch_cut_back(&b->key, &temp.key);
2001c2f95ae2SKent Overstreet 			bch_cut_front(&b->key, insert_keys->keys);
200226c949f8SKent Overstreet 
2003829a60b9SKent Overstreet 			ret |= btree_insert_key(b, &temp.key, replace_key);
200426c949f8SKent Overstreet 			break;
200526c949f8SKent Overstreet 		} else {
200626c949f8SKent Overstreet 			break;
200726c949f8SKent Overstreet 		}
2008cafe5635SKent Overstreet 	}
2009cafe5635SKent Overstreet 
2010829a60b9SKent Overstreet 	if (!ret)
2011829a60b9SKent Overstreet 		op->insert_collision = true;
2012829a60b9SKent Overstreet 
2013403b6cdeSKent Overstreet 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2014403b6cdeSKent Overstreet 
2015dc9d98d6SKent Overstreet 	BUG_ON(bch_count_data(&b->keys) < oldsize);
2016cafe5635SKent Overstreet 	return ret;
2017cafe5635SKent Overstreet }
2018cafe5635SKent Overstreet 
201926c949f8SKent Overstreet static int btree_split(struct btree *b, struct btree_op *op,
202026c949f8SKent Overstreet 		       struct keylist *insert_keys,
20211b207d80SKent Overstreet 		       struct bkey *replace_key)
2022cafe5635SKent Overstreet {
2023d6fd3b11SKent Overstreet 	bool split;
2024cafe5635SKent Overstreet 	struct btree *n1, *n2 = NULL, *n3 = NULL;
2025cafe5635SKent Overstreet 	uint64_t start_time = local_clock();
2026b54d6934SKent Overstreet 	struct closure cl;
202717e21a9fSKent Overstreet 	struct keylist parent_keys;
2028b54d6934SKent Overstreet 
2029b54d6934SKent Overstreet 	closure_init_stack(&cl);
203017e21a9fSKent Overstreet 	bch_keylist_init(&parent_keys);
2031cafe5635SKent Overstreet 
20320a63b66dSKent Overstreet 	if (btree_check_reserve(b, op)) {
20330a63b66dSKent Overstreet 		if (!b->level)
203478365411SKent Overstreet 			return -EINTR;
20350a63b66dSKent Overstreet 		else
20360a63b66dSKent Overstreet 			WARN(1, "insufficient reserve for split\n");
20370a63b66dSKent Overstreet 	}
203878365411SKent Overstreet 
20390a63b66dSKent Overstreet 	n1 = btree_node_alloc_replacement(b, op);
2040cafe5635SKent Overstreet 	if (IS_ERR(n1))
2041cafe5635SKent Overstreet 		goto err;
2042cafe5635SKent Overstreet 
2043ee811287SKent Overstreet 	split = set_blocks(btree_bset_first(n1),
2044ee811287SKent Overstreet 			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2045cafe5635SKent Overstreet 
2046cafe5635SKent Overstreet 	if (split) {
2047cafe5635SKent Overstreet 		unsigned keys = 0;
2048cafe5635SKent Overstreet 
2049ee811287SKent Overstreet 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2050c37511b8SKent Overstreet 
20512452cc89SSlava Pestov 		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2052cafe5635SKent Overstreet 		if (IS_ERR(n2))
2053cafe5635SKent Overstreet 			goto err_free1;
2054cafe5635SKent Overstreet 
2055d6fd3b11SKent Overstreet 		if (!b->parent) {
20562452cc89SSlava Pestov 			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2057cafe5635SKent Overstreet 			if (IS_ERR(n3))
2058cafe5635SKent Overstreet 				goto err_free2;
2059cafe5635SKent Overstreet 		}
2060cafe5635SKent Overstreet 
20612a285686SKent Overstreet 		mutex_lock(&n1->write_lock);
20622a285686SKent Overstreet 		mutex_lock(&n2->write_lock);
20632a285686SKent Overstreet 
20641b207d80SKent Overstreet 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2065cafe5635SKent Overstreet 
2066d6fd3b11SKent Overstreet 		/*
2067d6fd3b11SKent Overstreet 		 * Has to be a linear search because we don't have an auxiliary
2068cafe5635SKent Overstreet 		 * search tree yet
2069cafe5635SKent Overstreet 		 */
2070cafe5635SKent Overstreet 
2071ee811287SKent Overstreet 		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2072ee811287SKent Overstreet 			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2073fafff81cSKent Overstreet 							keys));
2074cafe5635SKent Overstreet 
2075fafff81cSKent Overstreet 		bkey_copy_key(&n1->key,
2076ee811287SKent Overstreet 			      bset_bkey_idx(btree_bset_first(n1), keys));
2077ee811287SKent Overstreet 		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2078cafe5635SKent Overstreet 
2079ee811287SKent Overstreet 		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2080ee811287SKent Overstreet 		btree_bset_first(n1)->keys = keys;
2081cafe5635SKent Overstreet 
2082ee811287SKent Overstreet 		memcpy(btree_bset_first(n2)->start,
2083ee811287SKent Overstreet 		       bset_bkey_last(btree_bset_first(n1)),
2084ee811287SKent Overstreet 		       btree_bset_first(n2)->keys * sizeof(uint64_t));
2085cafe5635SKent Overstreet 
2086cafe5635SKent Overstreet 		bkey_copy_key(&n2->key, &b->key);
2087cafe5635SKent Overstreet 
208817e21a9fSKent Overstreet 		bch_keylist_add(&parent_keys, &n2->key);
2089b54d6934SKent Overstreet 		bch_btree_node_write(n2, &cl);
20902a285686SKent Overstreet 		mutex_unlock(&n2->write_lock);
2091cafe5635SKent Overstreet 		rw_unlock(true, n2);
2092c37511b8SKent Overstreet 	} else {
2093ee811287SKent Overstreet 		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2094c37511b8SKent Overstreet 
20952a285686SKent Overstreet 		mutex_lock(&n1->write_lock);
20961b207d80SKent Overstreet 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2097c37511b8SKent Overstreet 	}
2098cafe5635SKent Overstreet 
209917e21a9fSKent Overstreet 	bch_keylist_add(&parent_keys, &n1->key);
2100b54d6934SKent Overstreet 	bch_btree_node_write(n1, &cl);
21012a285686SKent Overstreet 	mutex_unlock(&n1->write_lock);
2102cafe5635SKent Overstreet 
2103cafe5635SKent Overstreet 	if (n3) {
2104d6fd3b11SKent Overstreet 		/* Depth increases, make a new root */
21052a285686SKent Overstreet 		mutex_lock(&n3->write_lock);
2106cafe5635SKent Overstreet 		bkey_copy_key(&n3->key, &MAX_KEY);
210717e21a9fSKent Overstreet 		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2108b54d6934SKent Overstreet 		bch_btree_node_write(n3, &cl);
21092a285686SKent Overstreet 		mutex_unlock(&n3->write_lock);
2110cafe5635SKent Overstreet 
2111b54d6934SKent Overstreet 		closure_sync(&cl);
2112cafe5635SKent Overstreet 		bch_btree_set_root(n3);
2113cafe5635SKent Overstreet 		rw_unlock(true, n3);
2114d6fd3b11SKent Overstreet 	} else if (!b->parent) {
2115d6fd3b11SKent Overstreet 		/* Root filled up but didn't need to be split */
2116b54d6934SKent Overstreet 		closure_sync(&cl);
2117cafe5635SKent Overstreet 		bch_btree_set_root(n1);
2118cafe5635SKent Overstreet 	} else {
211917e21a9fSKent Overstreet 		/* Split a non root node */
2120b54d6934SKent Overstreet 		closure_sync(&cl);
212117e21a9fSKent Overstreet 		make_btree_freeing_key(b, parent_keys.top);
212217e21a9fSKent Overstreet 		bch_keylist_push(&parent_keys);
212317e21a9fSKent Overstreet 
212417e21a9fSKent Overstreet 		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
212517e21a9fSKent Overstreet 		BUG_ON(!bch_keylist_empty(&parent_keys));
2126cafe5635SKent Overstreet 	}
2127cafe5635SKent Overstreet 
212805335cffSKent Overstreet 	btree_node_free(b);
2129cafe5635SKent Overstreet 	rw_unlock(true, n1);
2130cafe5635SKent Overstreet 
2131169ef1cfSKent Overstreet 	bch_time_stats_update(&b->c->btree_split_time, start_time);
2132cafe5635SKent Overstreet 
2133cafe5635SKent Overstreet 	return 0;
2134cafe5635SKent Overstreet err_free2:
21355f5837d2SKent Overstreet 	bkey_put(b->c, &n2->key);
2136e8e1d468SKent Overstreet 	btree_node_free(n2);
2137cafe5635SKent Overstreet 	rw_unlock(true, n2);
2138cafe5635SKent Overstreet err_free1:
21395f5837d2SKent Overstreet 	bkey_put(b->c, &n1->key);
2140e8e1d468SKent Overstreet 	btree_node_free(n1);
2141cafe5635SKent Overstreet 	rw_unlock(true, n1);
2142cafe5635SKent Overstreet err:
21430a63b66dSKent Overstreet 	WARN(1, "bcache: btree split failed (level %u)", b->level);
21445f5837d2SKent Overstreet 
2145cafe5635SKent Overstreet 	if (n3 == ERR_PTR(-EAGAIN) ||
2146cafe5635SKent Overstreet 	    n2 == ERR_PTR(-EAGAIN) ||
2147cafe5635SKent Overstreet 	    n1 == ERR_PTR(-EAGAIN))
2148cafe5635SKent Overstreet 		return -EAGAIN;
2149cafe5635SKent Overstreet 
2150cafe5635SKent Overstreet 	return -ENOMEM;
2151cafe5635SKent Overstreet }
2152cafe5635SKent Overstreet 
215326c949f8SKent Overstreet static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2154c18536a7SKent Overstreet 				 struct keylist *insert_keys,
21551b207d80SKent Overstreet 				 atomic_t *journal_ref,
21561b207d80SKent Overstreet 				 struct bkey *replace_key)
215726c949f8SKent Overstreet {
21582a285686SKent Overstreet 	struct closure cl;
21592a285686SKent Overstreet 
21601b207d80SKent Overstreet 	BUG_ON(b->level && replace_key);
21611b207d80SKent Overstreet 
21622a285686SKent Overstreet 	closure_init_stack(&cl);
21632a285686SKent Overstreet 
21642a285686SKent Overstreet 	mutex_lock(&b->write_lock);
21652a285686SKent Overstreet 
21662a285686SKent Overstreet 	if (write_block(b) != btree_bset_last(b) &&
21672a285686SKent Overstreet 	    b->keys.last_set_unwritten)
21682a285686SKent Overstreet 		bch_btree_init_next(b); /* just wrote a set */
21692a285686SKent Overstreet 
217059158fdeSKent Overstreet 	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
21712a285686SKent Overstreet 		mutex_unlock(&b->write_lock);
21722a285686SKent Overstreet 		goto split;
21732a285686SKent Overstreet 	}
21742a285686SKent Overstreet 
21752a285686SKent Overstreet 	BUG_ON(write_block(b) != btree_bset_last(b));
21762a285686SKent Overstreet 
21772a285686SKent Overstreet 	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
21782a285686SKent Overstreet 		if (!b->level)
21792a285686SKent Overstreet 			bch_btree_leaf_dirty(b, journal_ref);
21802a285686SKent Overstreet 		else
21812a285686SKent Overstreet 			bch_btree_node_write(b, &cl);
21822a285686SKent Overstreet 	}
21832a285686SKent Overstreet 
21842a285686SKent Overstreet 	mutex_unlock(&b->write_lock);
21852a285686SKent Overstreet 
21862a285686SKent Overstreet 	/* wait for btree node write if necessary, after unlock */
21872a285686SKent Overstreet 	closure_sync(&cl);
21882a285686SKent Overstreet 
21892a285686SKent Overstreet 	return 0;
21902a285686SKent Overstreet split:
219126c949f8SKent Overstreet 	if (current->bio_list) {
219226c949f8SKent Overstreet 		op->lock = b->c->root->level + 1;
219317e21a9fSKent Overstreet 		return -EAGAIN;
219426c949f8SKent Overstreet 	} else if (op->lock <= b->c->root->level) {
219526c949f8SKent Overstreet 		op->lock = b->c->root->level + 1;
219617e21a9fSKent Overstreet 		return -EINTR;
219726c949f8SKent Overstreet 	} else {
219817e21a9fSKent Overstreet 		/* Invalidated all iterators */
21993b3e9e50SKent Overstreet 		int ret = btree_split(b, op, insert_keys, replace_key);
22003b3e9e50SKent Overstreet 
22012a285686SKent Overstreet 		if (bch_keylist_empty(insert_keys))
220217e21a9fSKent Overstreet 			return 0;
22032a285686SKent Overstreet 		else if (!ret)
22042a285686SKent Overstreet 			return -EINTR;
22052a285686SKent Overstreet 		return ret;
220617e21a9fSKent Overstreet 	}
220726c949f8SKent Overstreet }
220826c949f8SKent Overstreet 
2209e7c590ebSKent Overstreet int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2210e7c590ebSKent Overstreet 			       struct bkey *check_key)
2211e7c590ebSKent Overstreet {
2212e7c590ebSKent Overstreet 	int ret = -EINTR;
2213e7c590ebSKent Overstreet 	uint64_t btree_ptr = b->key.ptr[0];
2214e7c590ebSKent Overstreet 	unsigned long seq = b->seq;
2215e7c590ebSKent Overstreet 	struct keylist insert;
2216e7c590ebSKent Overstreet 	bool upgrade = op->lock == -1;
2217e7c590ebSKent Overstreet 
2218e7c590ebSKent Overstreet 	bch_keylist_init(&insert);
2219e7c590ebSKent Overstreet 
2220e7c590ebSKent Overstreet 	if (upgrade) {
2221e7c590ebSKent Overstreet 		rw_unlock(false, b);
2222e7c590ebSKent Overstreet 		rw_lock(true, b, b->level);
2223e7c590ebSKent Overstreet 
2224e7c590ebSKent Overstreet 		if (b->key.ptr[0] != btree_ptr ||
22252ef9ccbfSZheng Liu                    b->seq != seq + 1) {
22262ef9ccbfSZheng Liu 			op->lock = b->level;
2227e7c590ebSKent Overstreet 			goto out;
2228e7c590ebSKent Overstreet                }
22292ef9ccbfSZheng Liu 	}
2230e7c590ebSKent Overstreet 
2231e7c590ebSKent Overstreet 	SET_KEY_PTRS(check_key, 1);
2232e7c590ebSKent Overstreet 	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2233e7c590ebSKent Overstreet 
2234e7c590ebSKent Overstreet 	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2235e7c590ebSKent Overstreet 
2236e7c590ebSKent Overstreet 	bch_keylist_add(&insert, check_key);
2237e7c590ebSKent Overstreet 
22381b207d80SKent Overstreet 	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2239e7c590ebSKent Overstreet 
2240e7c590ebSKent Overstreet 	BUG_ON(!ret && !bch_keylist_empty(&insert));
2241e7c590ebSKent Overstreet out:
2242e7c590ebSKent Overstreet 	if (upgrade)
2243e7c590ebSKent Overstreet 		downgrade_write(&b->lock);
2244e7c590ebSKent Overstreet 	return ret;
2245e7c590ebSKent Overstreet }
2246e7c590ebSKent Overstreet 
2247cc7b8819SKent Overstreet struct btree_insert_op {
2248cc7b8819SKent Overstreet 	struct btree_op	op;
2249cc7b8819SKent Overstreet 	struct keylist	*keys;
2250cc7b8819SKent Overstreet 	atomic_t	*journal_ref;
2251cc7b8819SKent Overstreet 	struct bkey	*replace_key;
2252cc7b8819SKent Overstreet };
2253cc7b8819SKent Overstreet 
225408239ca2SWei Yongjun static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2255cafe5635SKent Overstreet {
2256cc7b8819SKent Overstreet 	struct btree_insert_op *op = container_of(b_op,
2257cc7b8819SKent Overstreet 					struct btree_insert_op, op);
2258403b6cdeSKent Overstreet 
2259cc7b8819SKent Overstreet 	int ret = bch_btree_insert_node(b, &op->op, op->keys,
2260cc7b8819SKent Overstreet 					op->journal_ref, op->replace_key);
2261cc7b8819SKent Overstreet 	if (ret && !bch_keylist_empty(op->keys))
2262cc7b8819SKent Overstreet 		return ret;
2263cc7b8819SKent Overstreet 	else
2264cc7b8819SKent Overstreet 		return MAP_DONE;
2265cafe5635SKent Overstreet }
2266cafe5635SKent Overstreet 
2267cc7b8819SKent Overstreet int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2268cc7b8819SKent Overstreet 		     atomic_t *journal_ref, struct bkey *replace_key)
2269cafe5635SKent Overstreet {
2270cc7b8819SKent Overstreet 	struct btree_insert_op op;
2271cafe5635SKent Overstreet 	int ret = 0;
2272cafe5635SKent Overstreet 
2273cc7b8819SKent Overstreet 	BUG_ON(current->bio_list);
22744f3d4014SKent Overstreet 	BUG_ON(bch_keylist_empty(keys));
2275cafe5635SKent Overstreet 
2276cc7b8819SKent Overstreet 	bch_btree_op_init(&op.op, 0);
2277cc7b8819SKent Overstreet 	op.keys		= keys;
2278cc7b8819SKent Overstreet 	op.journal_ref	= journal_ref;
2279cc7b8819SKent Overstreet 	op.replace_key	= replace_key;
2280cafe5635SKent Overstreet 
2281cc7b8819SKent Overstreet 	while (!ret && !bch_keylist_empty(keys)) {
2282cc7b8819SKent Overstreet 		op.op.lock = 0;
2283cc7b8819SKent Overstreet 		ret = bch_btree_map_leaf_nodes(&op.op, c,
2284cc7b8819SKent Overstreet 					       &START_KEY(keys->keys),
2285cc7b8819SKent Overstreet 					       btree_insert_fn);
2286cc7b8819SKent Overstreet 	}
2287cc7b8819SKent Overstreet 
2288cc7b8819SKent Overstreet 	if (ret) {
2289cafe5635SKent Overstreet 		struct bkey *k;
2290cafe5635SKent Overstreet 
22911b207d80SKent Overstreet 		pr_err("error %i", ret);
2292cafe5635SKent Overstreet 
22934f3d4014SKent Overstreet 		while ((k = bch_keylist_pop(keys)))
22943a3b6a4eSKent Overstreet 			bkey_put(c, k);
2295cc7b8819SKent Overstreet 	} else if (op.op.insert_collision)
2296cc7b8819SKent Overstreet 		ret = -ESRCH;
22976054c6d4SKent Overstreet 
2298cafe5635SKent Overstreet 	return ret;
2299cafe5635SKent Overstreet }
2300cafe5635SKent Overstreet 
2301cafe5635SKent Overstreet void bch_btree_set_root(struct btree *b)
2302cafe5635SKent Overstreet {
2303cafe5635SKent Overstreet 	unsigned i;
2304e49c7c37SKent Overstreet 	struct closure cl;
2305e49c7c37SKent Overstreet 
2306e49c7c37SKent Overstreet 	closure_init_stack(&cl);
2307cafe5635SKent Overstreet 
2308c37511b8SKent Overstreet 	trace_bcache_btree_set_root(b);
2309c37511b8SKent Overstreet 
2310cafe5635SKent Overstreet 	BUG_ON(!b->written);
2311cafe5635SKent Overstreet 
2312cafe5635SKent Overstreet 	for (i = 0; i < KEY_PTRS(&b->key); i++)
2313cafe5635SKent Overstreet 		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2314cafe5635SKent Overstreet 
2315cafe5635SKent Overstreet 	mutex_lock(&b->c->bucket_lock);
2316cafe5635SKent Overstreet 	list_del_init(&b->list);
2317cafe5635SKent Overstreet 	mutex_unlock(&b->c->bucket_lock);
2318cafe5635SKent Overstreet 
2319cafe5635SKent Overstreet 	b->c->root = b;
2320cafe5635SKent Overstreet 
2321e49c7c37SKent Overstreet 	bch_journal_meta(b->c, &cl);
2322e49c7c37SKent Overstreet 	closure_sync(&cl);
2323cafe5635SKent Overstreet }
2324cafe5635SKent Overstreet 
232548dad8baSKent Overstreet /* Map across nodes or keys */
232648dad8baSKent Overstreet 
232748dad8baSKent Overstreet static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
232848dad8baSKent Overstreet 				       struct bkey *from,
232948dad8baSKent Overstreet 				       btree_map_nodes_fn *fn, int flags)
233048dad8baSKent Overstreet {
233148dad8baSKent Overstreet 	int ret = MAP_CONTINUE;
233248dad8baSKent Overstreet 
233348dad8baSKent Overstreet 	if (b->level) {
233448dad8baSKent Overstreet 		struct bkey *k;
233548dad8baSKent Overstreet 		struct btree_iter iter;
233648dad8baSKent Overstreet 
2337c052dd9aSKent Overstreet 		bch_btree_iter_init(&b->keys, &iter, from);
233848dad8baSKent Overstreet 
2339a85e968eSKent Overstreet 		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
234048dad8baSKent Overstreet 						       bch_ptr_bad))) {
234148dad8baSKent Overstreet 			ret = btree(map_nodes_recurse, k, b,
234248dad8baSKent Overstreet 				    op, from, fn, flags);
234348dad8baSKent Overstreet 			from = NULL;
234448dad8baSKent Overstreet 
234548dad8baSKent Overstreet 			if (ret != MAP_CONTINUE)
234648dad8baSKent Overstreet 				return ret;
234748dad8baSKent Overstreet 		}
234848dad8baSKent Overstreet 	}
234948dad8baSKent Overstreet 
235048dad8baSKent Overstreet 	if (!b->level || flags == MAP_ALL_NODES)
235148dad8baSKent Overstreet 		ret = fn(op, b);
235248dad8baSKent Overstreet 
235348dad8baSKent Overstreet 	return ret;
235448dad8baSKent Overstreet }
235548dad8baSKent Overstreet 
235648dad8baSKent Overstreet int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
235748dad8baSKent Overstreet 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
235848dad8baSKent Overstreet {
2359b54d6934SKent Overstreet 	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
236048dad8baSKent Overstreet }
236148dad8baSKent Overstreet 
236248dad8baSKent Overstreet static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
236348dad8baSKent Overstreet 				      struct bkey *from, btree_map_keys_fn *fn,
236448dad8baSKent Overstreet 				      int flags)
236548dad8baSKent Overstreet {
236648dad8baSKent Overstreet 	int ret = MAP_CONTINUE;
236748dad8baSKent Overstreet 	struct bkey *k;
236848dad8baSKent Overstreet 	struct btree_iter iter;
236948dad8baSKent Overstreet 
2370c052dd9aSKent Overstreet 	bch_btree_iter_init(&b->keys, &iter, from);
237148dad8baSKent Overstreet 
2372a85e968eSKent Overstreet 	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
237348dad8baSKent Overstreet 		ret = !b->level
237448dad8baSKent Overstreet 			? fn(op, b, k)
237548dad8baSKent Overstreet 			: btree(map_keys_recurse, k, b, op, from, fn, flags);
237648dad8baSKent Overstreet 		from = NULL;
237748dad8baSKent Overstreet 
237848dad8baSKent Overstreet 		if (ret != MAP_CONTINUE)
237948dad8baSKent Overstreet 			return ret;
238048dad8baSKent Overstreet 	}
238148dad8baSKent Overstreet 
238248dad8baSKent Overstreet 	if (!b->level && (flags & MAP_END_KEY))
238348dad8baSKent Overstreet 		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
238448dad8baSKent Overstreet 				     KEY_OFFSET(&b->key), 0));
238548dad8baSKent Overstreet 
238648dad8baSKent Overstreet 	return ret;
238748dad8baSKent Overstreet }
238848dad8baSKent Overstreet 
238948dad8baSKent Overstreet int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
239048dad8baSKent Overstreet 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
239148dad8baSKent Overstreet {
2392b54d6934SKent Overstreet 	return btree_root(map_keys_recurse, c, op, from, fn, flags);
239348dad8baSKent Overstreet }
239448dad8baSKent Overstreet 
2395cafe5635SKent Overstreet /* Keybuf code */
2396cafe5635SKent Overstreet 
2397cafe5635SKent Overstreet static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2398cafe5635SKent Overstreet {
2399cafe5635SKent Overstreet 	/* Overlapping keys compare equal */
2400cafe5635SKent Overstreet 	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2401cafe5635SKent Overstreet 		return -1;
2402cafe5635SKent Overstreet 	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2403cafe5635SKent Overstreet 		return 1;
2404cafe5635SKent Overstreet 	return 0;
2405cafe5635SKent Overstreet }
2406cafe5635SKent Overstreet 
2407cafe5635SKent Overstreet static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2408cafe5635SKent Overstreet 					    struct keybuf_key *r)
2409cafe5635SKent Overstreet {
2410cafe5635SKent Overstreet 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2411cafe5635SKent Overstreet }
2412cafe5635SKent Overstreet 
241348dad8baSKent Overstreet struct refill {
241448dad8baSKent Overstreet 	struct btree_op	op;
241548a915a8SKent Overstreet 	unsigned	nr_found;
241648dad8baSKent Overstreet 	struct keybuf	*buf;
241748dad8baSKent Overstreet 	struct bkey	*end;
241848dad8baSKent Overstreet 	keybuf_pred_fn	*pred;
241948dad8baSKent Overstreet };
242048dad8baSKent Overstreet 
242148dad8baSKent Overstreet static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
242248dad8baSKent Overstreet 			    struct bkey *k)
2423cafe5635SKent Overstreet {
242448dad8baSKent Overstreet 	struct refill *refill = container_of(op, struct refill, op);
242548dad8baSKent Overstreet 	struct keybuf *buf = refill->buf;
242648dad8baSKent Overstreet 	int ret = MAP_CONTINUE;
2427cafe5635SKent Overstreet 
242848dad8baSKent Overstreet 	if (bkey_cmp(k, refill->end) >= 0) {
242948dad8baSKent Overstreet 		ret = MAP_DONE;
243048dad8baSKent Overstreet 		goto out;
2431cafe5635SKent Overstreet 	}
2432cafe5635SKent Overstreet 
243348dad8baSKent Overstreet 	if (!KEY_SIZE(k)) /* end key */
243448dad8baSKent Overstreet 		goto out;
2435cafe5635SKent Overstreet 
243648dad8baSKent Overstreet 	if (refill->pred(buf, k)) {
2437cafe5635SKent Overstreet 		struct keybuf_key *w;
2438cafe5635SKent Overstreet 
2439cafe5635SKent Overstreet 		spin_lock(&buf->lock);
2440cafe5635SKent Overstreet 
2441cafe5635SKent Overstreet 		w = array_alloc(&buf->freelist);
244248dad8baSKent Overstreet 		if (!w) {
244348dad8baSKent Overstreet 			spin_unlock(&buf->lock);
244448dad8baSKent Overstreet 			return MAP_DONE;
244548dad8baSKent Overstreet 		}
2446cafe5635SKent Overstreet 
2447cafe5635SKent Overstreet 		w->private = NULL;
2448cafe5635SKent Overstreet 		bkey_copy(&w->key, k);
2449cafe5635SKent Overstreet 
2450cafe5635SKent Overstreet 		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2451cafe5635SKent Overstreet 			array_free(&buf->freelist, w);
245248a915a8SKent Overstreet 		else
245348a915a8SKent Overstreet 			refill->nr_found++;
2454cafe5635SKent Overstreet 
245548dad8baSKent Overstreet 		if (array_freelist_empty(&buf->freelist))
245648dad8baSKent Overstreet 			ret = MAP_DONE;
245748dad8baSKent Overstreet 
2458cafe5635SKent Overstreet 		spin_unlock(&buf->lock);
2459cafe5635SKent Overstreet 	}
246048dad8baSKent Overstreet out:
246148dad8baSKent Overstreet 	buf->last_scanned = *k;
246248dad8baSKent Overstreet 	return ret;
2463cafe5635SKent Overstreet }
2464cafe5635SKent Overstreet 
2465cafe5635SKent Overstreet void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
246672c27061SKent Overstreet 		       struct bkey *end, keybuf_pred_fn *pred)
2467cafe5635SKent Overstreet {
2468cafe5635SKent Overstreet 	struct bkey start = buf->last_scanned;
246948dad8baSKent Overstreet 	struct refill refill;
2470cafe5635SKent Overstreet 
2471cafe5635SKent Overstreet 	cond_resched();
2472cafe5635SKent Overstreet 
2473b54d6934SKent Overstreet 	bch_btree_op_init(&refill.op, -1);
247448a915a8SKent Overstreet 	refill.nr_found	= 0;
247548dad8baSKent Overstreet 	refill.buf	= buf;
247648dad8baSKent Overstreet 	refill.end	= end;
247748dad8baSKent Overstreet 	refill.pred	= pred;
247848dad8baSKent Overstreet 
247948dad8baSKent Overstreet 	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
248048dad8baSKent Overstreet 			   refill_keybuf_fn, MAP_END_KEY);
2481cafe5635SKent Overstreet 
248248a915a8SKent Overstreet 	trace_bcache_keyscan(refill.nr_found,
2483cafe5635SKent Overstreet 			     KEY_INODE(&start), KEY_OFFSET(&start),
248448a915a8SKent Overstreet 			     KEY_INODE(&buf->last_scanned),
248548a915a8SKent Overstreet 			     KEY_OFFSET(&buf->last_scanned));
2486cafe5635SKent Overstreet 
2487cafe5635SKent Overstreet 	spin_lock(&buf->lock);
2488cafe5635SKent Overstreet 
2489cafe5635SKent Overstreet 	if (!RB_EMPTY_ROOT(&buf->keys)) {
2490cafe5635SKent Overstreet 		struct keybuf_key *w;
2491cafe5635SKent Overstreet 		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2492cafe5635SKent Overstreet 		buf->start	= START_KEY(&w->key);
2493cafe5635SKent Overstreet 
2494cafe5635SKent Overstreet 		w = RB_LAST(&buf->keys, struct keybuf_key, node);
2495cafe5635SKent Overstreet 		buf->end	= w->key;
2496cafe5635SKent Overstreet 	} else {
2497cafe5635SKent Overstreet 		buf->start	= MAX_KEY;
2498cafe5635SKent Overstreet 		buf->end	= MAX_KEY;
2499cafe5635SKent Overstreet 	}
2500cafe5635SKent Overstreet 
2501cafe5635SKent Overstreet 	spin_unlock(&buf->lock);
2502cafe5635SKent Overstreet }
2503cafe5635SKent Overstreet 
2504cafe5635SKent Overstreet static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2505cafe5635SKent Overstreet {
2506cafe5635SKent Overstreet 	rb_erase(&w->node, &buf->keys);
2507cafe5635SKent Overstreet 	array_free(&buf->freelist, w);
2508cafe5635SKent Overstreet }
2509cafe5635SKent Overstreet 
2510cafe5635SKent Overstreet void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2511cafe5635SKent Overstreet {
2512cafe5635SKent Overstreet 	spin_lock(&buf->lock);
2513cafe5635SKent Overstreet 	__bch_keybuf_del(buf, w);
2514cafe5635SKent Overstreet 	spin_unlock(&buf->lock);
2515cafe5635SKent Overstreet }
2516cafe5635SKent Overstreet 
2517cafe5635SKent Overstreet bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2518cafe5635SKent Overstreet 				  struct bkey *end)
2519cafe5635SKent Overstreet {
2520cafe5635SKent Overstreet 	bool ret = false;
2521cafe5635SKent Overstreet 	struct keybuf_key *p, *w, s;
2522cafe5635SKent Overstreet 	s.key = *start;
2523cafe5635SKent Overstreet 
2524cafe5635SKent Overstreet 	if (bkey_cmp(end, &buf->start) <= 0 ||
2525cafe5635SKent Overstreet 	    bkey_cmp(start, &buf->end) >= 0)
2526cafe5635SKent Overstreet 		return false;
2527cafe5635SKent Overstreet 
2528cafe5635SKent Overstreet 	spin_lock(&buf->lock);
2529cafe5635SKent Overstreet 	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2530cafe5635SKent Overstreet 
2531cafe5635SKent Overstreet 	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2532cafe5635SKent Overstreet 		p = w;
2533cafe5635SKent Overstreet 		w = RB_NEXT(w, node);
2534cafe5635SKent Overstreet 
2535cafe5635SKent Overstreet 		if (p->private)
2536cafe5635SKent Overstreet 			ret = true;
2537cafe5635SKent Overstreet 		else
2538cafe5635SKent Overstreet 			__bch_keybuf_del(buf, p);
2539cafe5635SKent Overstreet 	}
2540cafe5635SKent Overstreet 
2541cafe5635SKent Overstreet 	spin_unlock(&buf->lock);
2542cafe5635SKent Overstreet 	return ret;
2543cafe5635SKent Overstreet }
2544cafe5635SKent Overstreet 
2545cafe5635SKent Overstreet struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2546cafe5635SKent Overstreet {
2547cafe5635SKent Overstreet 	struct keybuf_key *w;
2548cafe5635SKent Overstreet 	spin_lock(&buf->lock);
2549cafe5635SKent Overstreet 
2550cafe5635SKent Overstreet 	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2551cafe5635SKent Overstreet 
2552cafe5635SKent Overstreet 	while (w && w->private)
2553cafe5635SKent Overstreet 		w = RB_NEXT(w, node);
2554cafe5635SKent Overstreet 
2555cafe5635SKent Overstreet 	if (w)
2556cafe5635SKent Overstreet 		w->private = ERR_PTR(-EINTR);
2557cafe5635SKent Overstreet 
2558cafe5635SKent Overstreet 	spin_unlock(&buf->lock);
2559cafe5635SKent Overstreet 	return w;
2560cafe5635SKent Overstreet }
2561cafe5635SKent Overstreet 
2562cafe5635SKent Overstreet struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2563cafe5635SKent Overstreet 					  struct keybuf *buf,
256472c27061SKent Overstreet 					  struct bkey *end,
256572c27061SKent Overstreet 					  keybuf_pred_fn *pred)
2566cafe5635SKent Overstreet {
2567cafe5635SKent Overstreet 	struct keybuf_key *ret;
2568cafe5635SKent Overstreet 
2569cafe5635SKent Overstreet 	while (1) {
2570cafe5635SKent Overstreet 		ret = bch_keybuf_next(buf);
2571cafe5635SKent Overstreet 		if (ret)
2572cafe5635SKent Overstreet 			break;
2573cafe5635SKent Overstreet 
2574cafe5635SKent Overstreet 		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2575cafe5635SKent Overstreet 			pr_debug("scan finished");
2576cafe5635SKent Overstreet 			break;
2577cafe5635SKent Overstreet 		}
2578cafe5635SKent Overstreet 
257972c27061SKent Overstreet 		bch_refill_keybuf(c, buf, end, pred);
2580cafe5635SKent Overstreet 	}
2581cafe5635SKent Overstreet 
2582cafe5635SKent Overstreet 	return ret;
2583cafe5635SKent Overstreet }
2584cafe5635SKent Overstreet 
258572c27061SKent Overstreet void bch_keybuf_init(struct keybuf *buf)
2586cafe5635SKent Overstreet {
2587cafe5635SKent Overstreet 	buf->last_scanned	= MAX_KEY;
2588cafe5635SKent Overstreet 	buf->keys		= RB_ROOT;
2589cafe5635SKent Overstreet 
2590cafe5635SKent Overstreet 	spin_lock_init(&buf->lock);
2591cafe5635SKent Overstreet 	array_allocator_init(&buf->freelist);
2592cafe5635SKent Overstreet }
2593