xref: /linux/drivers/md/bcache/btree.c (revision c6ed444fd6fffaaf2e3857d926ed18bf3df81e8e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This currently is just used as
12  * an LRU but in the future should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection is run, we resort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22  */
23 
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28 
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 
39 #include <trace/events/bcache.h>
40 
41 /*
42  * Todo:
43  * register_bcache: Return errors out to userspace correctly
44  *
45  * Writeback: don't undirty key until after a cache flush
46  *
47  * Create an iterator for key pointers
48  *
49  * On btree write error, mark bucket such that it won't be freed from the cache
50  *
51  * Journalling:
52  *   Check for bad keys in replay
53  *   Propagate barriers
54  *   Refcount journal entries in journal_replay
55  *
56  * Garbage collection:
57  *   Finish incremental gc
58  *   Gc should free old UUIDs, data for invalid UUIDs
59  *
60  * Provide a way to list backing device UUIDs we have data cached for, and
61  * probably how long it's been since we've seen them, and a way to invalidate
62  * dirty data for devices that will never be attached again
63  *
64  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65  * that based on that and how much dirty data we have we can keep writeback
66  * from being starved
67  *
68  * Add a tracepoint or somesuch to watch for writeback starvation
69  *
70  * When btree depth > 1 and splitting an interior node, we have to make sure
71  * alloc_bucket() cannot fail. This should be true but is not completely
72  * obvious.
73  *
74  * Plugging?
75  *
76  * If data write is less than hard sector size of ssd, round up offset in open
77  * bucket to the next whole sector
78  *
79  * Superblock needs to be fleshed out for multiple cache devices
80  *
81  * Add a sysfs tunable for the number of writeback IOs in flight
82  *
83  * Add a sysfs tunable for the number of open data buckets
84  *
85  * IO tracking: Can we track when one process is doing io on behalf of another?
86  * IO tracking: Don't use just an average, weigh more recent stuff higher
87  *
88  * Test module load/unload
89  */
90 
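/*
 * MAX_GC_TIMES, MIN_GC_NODES and GC_SLEEP_MS pace incremental gc: see
 * btree_gc_min_nodes() and bch_btree_gc() below.
 */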
91 #define MAX_NEED_GC		64
92 #define MAX_SAVE_PRIO		72
93 #define MAX_GC_TIMES		100
94 #define MIN_GC_NODES		100
95 #define GC_SLEEP_MS		100
96 
97 #define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
98 
99 #define PTR_HASH(c, k)							\
100 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
101 
102 #define insert_lock(s, b)	((b)->level <= (s)->lock)
103 
104 /*
105  * These macros are for recursing down the btree - they handle the details of
106  * locking and looking up nodes in the cache for you. They're best treated as
107  * mere syntax when reading code that uses them.
108  *
109  * op->lock determines whether we take a read or a write lock at a given depth.
110  * If you've got a read lock and find that you need a write lock (i.e. you're
111  * going to have to split), set op->lock and return -EINTR; btree_root() will
112  * call you again and you'll have the correct lock.
113  */
114 
115 /**
116  * btree - recurse down the btree on a specified key
117  * @fn:		function to call, which will be passed the child node
118  * @key:	key to recurse on
119  * @b:		parent btree node
120  * @op:		pointer to struct btree_op
121  */
122 #define btree(fn, key, b, op, ...)					\
123 ({									\
124 	int _r, l = (b)->level - 1;					\
125 	bool _w = l <= (op)->lock;					\
126 	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
127 						  _w, b);		\
128 	if (!IS_ERR(_child)) {						\
129 		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
130 		rw_unlock(_w, _child);					\
131 	} else								\
132 		_r = PTR_ERR(_child);					\
133 	_r;								\
134 })
135 
136 /**
137  * btree_root - call a function on the root of the btree
138  * @fn:		function to call, which will be passed the child node
139  * @c:		cache set
140  * @op:		pointer to struct btree_op
141  */
142 #define btree_root(fn, c, op, ...)					\
143 ({									\
144 	int _r = -EINTR;						\
145 	do {								\
146 		struct btree *_b = (c)->root;				\
147 		bool _w = insert_lock(op, _b);				\
148 		rw_lock(_w, _b, _b->level);				\
149 		if (_b == (c)->root &&					\
150 		    _w == insert_lock(op, _b)) {			\
151 			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
152 		}							\
153 		rw_unlock(_w, _b);					\
154 		bch_cannibalize_unlock(c);				\
155 		if (_r == -EINTR)					\
156 			schedule();					\
157 	} while (_r == -EINTR);						\
158 									\
159 	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
160 	_r;								\
161 })
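
/*
 * Example (from this file): bch_btree_check() does
 *	bch_btree_op_init(&op, SHRT_MAX);
 *	return btree_root(check_recurse, c, &op);
 * and bch_btree_check_recurse() recurses into child nodes with
 *	btree(check_recurse, p, b, op);
 * With op->lock == SHRT_MAX, insert_lock() is true at every level, so a
 * write lock is taken on every node visited.
 */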
162 
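/*
 * write_block() returns the address, within the node's in-memory buffer, of
 * the next block to be written: b->written counts the blocks already written
 * to disk, so this is where the next bset starts.
 */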
163 static inline struct bset *write_block(struct btree *b)
164 {
165 	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
166 }
167 
168 static void bch_btree_init_next(struct btree *b)
169 {
170 	/* If not a leaf node, always sort */
171 	if (b->level && b->keys.nsets)
172 		bch_btree_sort(&b->keys, &b->c->sort);
173 	else
174 		bch_btree_sort_lazy(&b->keys, &b->c->sort);
175 
176 	if (b->written < btree_blocks(b))
177 		bch_bset_init_next(&b->keys, write_block(b),
178 				   bset_magic(&b->c->sb));
179 
180 }
181 
182 /* Btree key manipulation */
183 
184 void bkey_put(struct cache_set *c, struct bkey *k)
185 {
186 	unsigned i;
187 
188 	for (i = 0; i < KEY_PTRS(k); i++)
189 		if (ptr_available(c, k, i))
190 			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
191 }
192 
193 /* Btree IO */
194 
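/*
 * The btree node checksum is seeded with the node's bucket pointer and is
 * computed over the bset from just past the csum field (the first 8 bytes)
 * to the last key, then inverted.
 */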
195 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
196 {
197 	uint64_t crc = b->key.ptr[0];
198 	void *data = (void *) i + 8, *end = bset_bkey_last(i);
199 
200 	crc = bch_crc64_update(crc, data, end - data);
201 	return crc ^ 0xffffffffffffffffULL;
202 }
203 
204 void bch_btree_node_read_done(struct btree *b)
205 {
206 	const char *err = "bad btree header";
207 	struct bset *i = btree_bset_first(b);
208 	struct btree_iter *iter;
209 
210 	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
211 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
212 	iter->used = 0;
213 
214 #ifdef CONFIG_BCACHE_DEBUG
215 	iter->b = &b->keys;
216 #endif
217 
218 	if (!i->seq)
219 		goto err;
220 
221 	for (;
222 	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
223 	     i = write_block(b)) {
224 		err = "unsupported bset version";
225 		if (i->version > BCACHE_BSET_VERSION)
226 			goto err;
227 
228 		err = "bad btree header";
229 		if (b->written + set_blocks(i, block_bytes(b->c)) >
230 		    btree_blocks(b))
231 			goto err;
232 
233 		err = "bad magic";
234 		if (i->magic != bset_magic(&b->c->sb))
235 			goto err;
236 
237 		err = "bad checksum";
238 		switch (i->version) {
239 		case 0:
240 			if (i->csum != csum_set(i))
241 				goto err;
242 			break;
243 		case BCACHE_BSET_VERSION:
244 			if (i->csum != btree_csum_set(b, i))
245 				goto err;
246 			break;
247 		}
248 
249 		err = "empty set";
250 		if (i != b->keys.set[0].data && !i->keys)
251 			goto err;
252 
253 		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
254 
255 		b->written += set_blocks(i, block_bytes(b->c));
256 	}
257 
258 	err = "corrupted btree";
259 	for (i = write_block(b);
260 	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
261 	     i = ((void *) i) + block_bytes(b->c))
262 		if (i->seq == b->keys.set[0].data->seq)
263 			goto err;
264 
265 	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
266 
267 	i = b->keys.set[0].data;
268 	err = "short btree key";
269 	if (b->keys.set[0].size &&
270 	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
271 		goto err;
272 
273 	if (b->written < btree_blocks(b))
274 		bch_bset_init_next(&b->keys, write_block(b),
275 				   bset_magic(&b->c->sb));
276 out:
277 	mempool_free(iter, &b->c->fill_iter);
278 	return;
279 err:
280 	set_btree_node_io_error(b);
281 	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
282 			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
283 			    bset_block_offset(b, i), i->keys);
284 	goto out;
285 }
286 
287 static void btree_node_read_endio(struct bio *bio)
288 {
289 	struct closure *cl = bio->bi_private;
290 	closure_put(cl);
291 }
292 
293 static void bch_btree_node_read(struct btree *b)
294 {
295 	uint64_t start_time = local_clock();
296 	struct closure cl;
297 	struct bio *bio;
298 
299 	trace_bcache_btree_read(b);
300 
301 	closure_init_stack(&cl);
302 
303 	bio = bch_bbio_alloc(b->c);
304 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
305 	bio->bi_end_io	= btree_node_read_endio;
306 	bio->bi_private	= &cl;
307 	bio->bi_opf = REQ_OP_READ | REQ_META;
308 
309 	bch_bio_map(bio, b->keys.set[0].data);
310 
311 	bch_submit_bbio(bio, b->c, &b->key, 0);
312 	closure_sync(&cl);
313 
314 	if (bio->bi_status)
315 		set_btree_node_io_error(b);
316 
317 	bch_bbio_free(bio, b->c);
318 
319 	if (btree_node_io_error(b))
320 		goto err;
321 
322 	bch_btree_node_read_done(b);
323 	bch_time_stats_update(&b->c->btree_read_time, start_time);
324 
325 	return;
326 err:
327 	bch_cache_set_error(b->c, "io error reading bucket %zu",
328 			    PTR_BUCKET_NR(b->c, &b->key, 0));
329 }
330 
331 static void btree_complete_write(struct btree *b, struct btree_write *w)
332 {
333 	if (w->prio_blocked &&
334 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
335 		wake_up_allocators(b->c);
336 
337 	if (w->journal) {
338 		atomic_dec_bug(w->journal);
339 		__closure_wake_up(&b->c->journal.wait);
340 	}
341 
342 	w->prio_blocked	= 0;
343 	w->journal	= NULL;
344 }
345 
346 static void btree_node_write_unlock(struct closure *cl)
347 {
348 	struct btree *b = container_of(cl, struct btree, io);
349 
350 	up(&b->io_mutex);
351 }
352 
353 static void __btree_node_write_done(struct closure *cl)
354 {
355 	struct btree *b = container_of(cl, struct btree, io);
356 	struct btree_write *w = btree_prev_write(b);
357 
358 	bch_bbio_free(b->bio, b->c);
359 	b->bio = NULL;
360 	btree_complete_write(b, w);
361 
362 	if (btree_node_dirty(b))
363 		schedule_delayed_work(&b->work, 30 * HZ);
364 
365 	closure_return_with_destructor(cl, btree_node_write_unlock);
366 }
367 
368 static void btree_node_write_done(struct closure *cl)
369 {
370 	struct btree *b = container_of(cl, struct btree, io);
371 
372 	bio_free_pages(b->bio);
373 	__btree_node_write_done(cl);
374 }
375 
376 static void btree_node_write_endio(struct bio *bio)
377 {
378 	struct closure *cl = bio->bi_private;
379 	struct btree *b = container_of(cl, struct btree, io);
380 
381 	if (bio->bi_status)
382 		set_btree_node_io_error(b);
383 
384 	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
385 	closure_put(cl);
386 }
387 
388 static void do_btree_node_write(struct btree *b)
389 {
390 	struct closure *cl = &b->io;
391 	struct bset *i = btree_bset_last(b);
392 	BKEY_PADDED(key) k;
393 
394 	i->version	= BCACHE_BSET_VERSION;
395 	i->csum		= btree_csum_set(b, i);
396 
397 	BUG_ON(b->bio);
398 	b->bio = bch_bbio_alloc(b->c);
399 
400 	b->bio->bi_end_io	= btree_node_write_endio;
401 	b->bio->bi_private	= cl;
402 	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
403 	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
404 	bch_bio_map(b->bio, i);
405 
406 	/*
407 	 * If we're appending to a leaf node, we don't technically need FUA -
408 	 * this write just needs to be persisted before the next journal write,
409 	 * which will be marked FLUSH|FUA.
410 	 *
411 	 * Similarly if we're writing a new btree root - the pointer is going to
412 	 * be in the next journal entry.
413 	 *
414 	 * But if we're writing a new btree node (that isn't a root) or
415 	 * appending to a non leaf btree node, we need either FUA or a flush
416 	 * when we write the parent with the new pointer. FUA is cheaper than a
417 	 * flush, and writes appending to leaf nodes aren't blocking anything so
418 	 * just make all btree node writes FUA to keep things sane.
419 	 */
420 
421 	bkey_copy(&k.key, &b->key);
422 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
423 		       bset_sector_offset(&b->keys, i));
424 
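	/*
	 * Try to bounce the bset into freshly allocated pages so the write
	 * can complete asynchronously; if the allocation fails, map the bset
	 * memory directly and wait for the write synchronously before the
	 * buffer can be reused.
	 */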
425 	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
426 		int j;
427 		struct bio_vec *bv;
428 		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
429 
430 		bio_for_each_segment_all(bv, b->bio, j)
431 			memcpy(page_address(bv->bv_page),
432 			       base + j * PAGE_SIZE, PAGE_SIZE);
433 
434 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
435 
436 		continue_at(cl, btree_node_write_done, NULL);
437 	} else {
438 		/* No problem for multipage bvecs since the bio was just allocated */
439 		b->bio->bi_vcnt = 0;
440 		bch_bio_map(b->bio, i);
441 
442 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
443 
444 		closure_sync(cl);
445 		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
446 	}
447 }
448 
449 void __bch_btree_node_write(struct btree *b, struct closure *parent)
450 {
451 	struct bset *i = btree_bset_last(b);
452 
453 	lockdep_assert_held(&b->write_lock);
454 
455 	trace_bcache_btree_write(b);
456 
457 	BUG_ON(current->bio_list);
458 	BUG_ON(b->written >= btree_blocks(b));
459 	BUG_ON(b->written && !i->keys);
460 	BUG_ON(btree_bset_first(b)->seq != i->seq);
461 	bch_check_keys(&b->keys, "writing");
462 
463 	cancel_delayed_work(&b->work);
464 
465 	/* If caller isn't waiting for the write, the cache set is the parent refcount */
466 	down(&b->io_mutex);
467 	closure_init(&b->io, parent ?: &b->c->cl);
468 
469 	clear_bit(BTREE_NODE_dirty,	 &b->flags);
470 	change_bit(BTREE_NODE_write_idx, &b->flags);
471 
472 	do_btree_node_write(b);
473 
474 	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
475 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
476 
477 	b->written += set_blocks(i, block_bytes(b->c));
478 }
479 
480 void bch_btree_node_write(struct btree *b, struct closure *parent)
481 {
482 	unsigned nsets = b->keys.nsets;
483 
484 	lockdep_assert_held(&b->lock);
485 
486 	__bch_btree_node_write(b, parent);
487 
488 	/*
489 	 * Only run the verify code if there was more than one set initially
490 	 * (i.e. we did a sort) and we sorted down to a single set:
491 	 */
492 	if (nsets && !b->keys.nsets)
493 		bch_btree_verify(b);
494 
495 	bch_btree_init_next(b);
496 }
497 
498 static void bch_btree_node_write_sync(struct btree *b)
499 {
500 	struct closure cl;
501 
502 	closure_init_stack(&cl);
503 
504 	mutex_lock(&b->write_lock);
505 	bch_btree_node_write(b, &cl);
506 	mutex_unlock(&b->write_lock);
507 
508 	closure_sync(&cl);
509 }
510 
511 static void btree_node_write_work(struct work_struct *w)
512 {
513 	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
514 
515 	mutex_lock(&b->write_lock);
516 	if (btree_node_dirty(b))
517 		__bch_btree_node_write(b, NULL);
518 	mutex_unlock(&b->write_lock);
519 }
520 
521 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
522 {
523 	struct bset *i = btree_bset_last(b);
524 	struct btree_write *w = btree_current_write(b);
525 
526 	lockdep_assert_held(&b->write_lock);
527 
528 	BUG_ON(!b->written);
529 	BUG_ON(!i->keys);
530 
531 	if (!btree_node_dirty(b))
532 		schedule_delayed_work(&b->work, 30 * HZ);
533 
534 	set_btree_node_dirty(b);
535 
536 	if (journal_ref) {
537 		if (w->journal &&
538 		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
539 			atomic_dec_bug(w->journal);
540 			w->journal = NULL;
541 		}
542 
543 		if (!w->journal) {
544 			w->journal = journal_ref;
545 			atomic_inc(w->journal);
546 		}
547 	}
548 
549 	/* Force write if set is too big */
550 	if (set_bytes(i) > PAGE_SIZE - 48 &&
551 	    !current->bio_list)
552 		bch_btree_node_write(b, NULL);
553 }
554 
555 /*
556  * Btree in memory cache - allocation/freeing
557  * mca -> memory cache
558  */
559 
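/*
 * The reserve is roughly eight nodes per level of the btree plus sixteen;
 * the shrinker never frees the cache below this, so allocating a new btree
 * node (and therefore inserting keys) can always make forward progress.
 */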
560 #define mca_reserve(c)	(((c->root && c->root->level)		\
561 			  ? c->root->level : 1) * 8 + 16)
562 #define mca_can_free(c)						\
563 	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
564 
565 static void mca_data_free(struct btree *b)
566 {
567 	BUG_ON(b->io_mutex.count != 1);
568 
569 	bch_btree_keys_free(&b->keys);
570 
571 	b->c->btree_cache_used--;
572 	list_move(&b->list, &b->c->btree_cache_freed);
573 }
574 
575 static void mca_bucket_free(struct btree *b)
576 {
577 	BUG_ON(btree_node_dirty(b));
578 
579 	b->key.ptr[0] = 0;
580 	hlist_del_init_rcu(&b->hash);
581 	list_move(&b->list, &b->c->btree_cache_freeable);
582 }
583 
584 static unsigned btree_order(struct bkey *k)
585 {
586 	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
587 }
588 
589 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
590 {
591 	if (!bch_btree_keys_alloc(&b->keys,
592 				  max_t(unsigned,
593 					ilog2(b->c->btree_pages),
594 					btree_order(k)),
595 				  gfp)) {
596 		b->c->btree_cache_used++;
597 		list_move(&b->list, &b->c->btree_cache);
598 	} else {
599 		list_move(&b->list, &b->c->btree_cache_freed);
600 	}
601 }
602 
603 static struct btree *mca_bucket_alloc(struct cache_set *c,
604 				      struct bkey *k, gfp_t gfp)
605 {
606 	struct btree *b = kzalloc(sizeof(struct btree), gfp);
607 	if (!b)
608 		return NULL;
609 
610 	init_rwsem(&b->lock);
611 	lockdep_set_novalidate_class(&b->lock);
612 	mutex_init(&b->write_lock);
613 	lockdep_set_novalidate_class(&b->write_lock);
614 	INIT_LIST_HEAD(&b->list);
615 	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
616 	b->c = c;
617 	sema_init(&b->io_mutex, 1);
618 
619 	mca_data_alloc(b, k, gfp);
620 	return b;
621 }
622 
623 static int mca_reap(struct btree *b, unsigned min_order, bool flush)
624 {
625 	struct closure cl;
626 
627 	closure_init_stack(&cl);
628 	lockdep_assert_held(&b->c->bucket_lock);
629 
630 	if (!down_write_trylock(&b->lock))
631 		return -ENOMEM;
632 
633 	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
634 
635 	if (b->keys.page_order < min_order)
636 		goto out_unlock;
637 
638 	if (!flush) {
639 		if (btree_node_dirty(b))
640 			goto out_unlock;
641 
642 		if (down_trylock(&b->io_mutex))
643 			goto out_unlock;
644 		up(&b->io_mutex);
645 	}
646 
647 	mutex_lock(&b->write_lock);
648 	if (btree_node_dirty(b))
649 		__bch_btree_node_write(b, &cl);
650 	mutex_unlock(&b->write_lock);
651 
652 	closure_sync(&cl);
653 
654 	/* wait for any in flight btree write */
655 	down(&b->io_mutex);
656 	up(&b->io_mutex);
657 
658 	return 0;
659 out_unlock:
660 	rw_unlock(true, b);
661 	return -ENOMEM;
662 }
663 
664 static unsigned long bch_mca_scan(struct shrinker *shrink,
665 				  struct shrink_control *sc)
666 {
667 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
668 	struct btree *b, *t;
669 	unsigned long i, nr = sc->nr_to_scan;
670 	unsigned long freed = 0;
671 	unsigned int btree_cache_used;
672 
673 	if (c->shrinker_disabled)
674 		return SHRINK_STOP;
675 
676 	if (c->btree_cache_alloc_lock)
677 		return SHRINK_STOP;
678 
679 	/* Return -1 if we can't do anything right now */
680 	if (sc->gfp_mask & __GFP_IO)
681 		mutex_lock(&c->bucket_lock);
682 	else if (!mutex_trylock(&c->bucket_lock))
683 		return -1;
684 
685 	/*
686 	 * It's _really_ critical that we don't free too many btree nodes - we
687 	 * have to always leave ourselves a reserve. The reserve is how we
688 	 * guarantee that allocating memory for a new btree node can always
689 	 * succeed, so that inserting keys into the btree can always succeed and
690 	 * IO can always make forward progress:
691 	 */
692 	nr /= c->btree_pages;
693 	nr = min_t(unsigned long, nr, mca_can_free(c));
694 
695 	i = 0;
696 	btree_cache_used = c->btree_cache_used;
697 	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
698 		if (nr <= 0)
699 			goto out;
700 
701 		if (++i > 3 &&
702 		    !mca_reap(b, 0, false)) {
703 			mca_data_free(b);
704 			rw_unlock(true, b);
705 			freed++;
706 		}
707 		nr--;
708 	}
709 
710 	for (;  (nr--) && i < btree_cache_used; i++) {
711 		if (list_empty(&c->btree_cache))
712 			goto out;
713 
714 		b = list_first_entry(&c->btree_cache, struct btree, list);
715 		list_rotate_left(&c->btree_cache);
716 
717 		if (!b->accessed &&
718 		    !mca_reap(b, 0, false)) {
719 			mca_bucket_free(b);
720 			mca_data_free(b);
721 			rw_unlock(true, b);
722 			freed++;
723 		} else
724 			b->accessed = 0;
725 	}
726 out:
727 	mutex_unlock(&c->bucket_lock);
728 	return freed * c->btree_pages;
729 }
730 
731 static unsigned long bch_mca_count(struct shrinker *shrink,
732 				   struct shrink_control *sc)
733 {
734 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
735 
736 	if (c->shrinker_disabled)
737 		return 0;
738 
739 	if (c->btree_cache_alloc_lock)
740 		return 0;
741 
742 	return mca_can_free(c) * c->btree_pages;
743 }
744 
745 void bch_btree_cache_free(struct cache_set *c)
746 {
747 	struct btree *b;
748 	struct closure cl;
749 	closure_init_stack(&cl);
750 
751 	if (c->shrink.list.next)
752 		unregister_shrinker(&c->shrink);
753 
754 	mutex_lock(&c->bucket_lock);
755 
756 #ifdef CONFIG_BCACHE_DEBUG
757 	if (c->verify_data)
758 		list_move(&c->verify_data->list, &c->btree_cache);
759 
760 	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
761 #endif
762 
763 	list_splice(&c->btree_cache_freeable,
764 		    &c->btree_cache);
765 
766 	while (!list_empty(&c->btree_cache)) {
767 		b = list_first_entry(&c->btree_cache, struct btree, list);
768 
769 		if (btree_node_dirty(b))
770 			btree_complete_write(b, btree_current_write(b));
771 		clear_bit(BTREE_NODE_dirty, &b->flags);
772 
773 		mca_data_free(b);
774 	}
775 
776 	while (!list_empty(&c->btree_cache_freed)) {
777 		b = list_first_entry(&c->btree_cache_freed,
778 				     struct btree, list);
779 		list_del(&b->list);
780 		cancel_delayed_work_sync(&b->work);
781 		kfree(b);
782 	}
783 
784 	mutex_unlock(&c->bucket_lock);
785 }
786 
787 int bch_btree_cache_alloc(struct cache_set *c)
788 {
789 	unsigned i;
790 
791 	for (i = 0; i < mca_reserve(c); i++)
792 		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
793 			return -ENOMEM;
794 
795 	list_splice_init(&c->btree_cache,
796 			 &c->btree_cache_freeable);
797 
798 #ifdef CONFIG_BCACHE_DEBUG
799 	mutex_init(&c->verify_lock);
800 
801 	c->verify_ondisk = (void *)
802 		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
803 
804 	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
805 
806 	if (c->verify_data &&
807 	    c->verify_data->keys.set->data)
808 		list_del_init(&c->verify_data->list);
809 	else
810 		c->verify_data = NULL;
811 #endif
812 
813 	c->shrink.count_objects = bch_mca_count;
814 	c->shrink.scan_objects = bch_mca_scan;
815 	c->shrink.seeks = 4;
816 	c->shrink.batch = c->btree_pages * 2;
817 
818 	if (register_shrinker(&c->shrink))
819 		pr_warn("bcache: %s: could not register shrinker\n",
820 				__func__);
821 
822 	return 0;
823 }
824 
825 /* Btree in memory cache - hash table */
826 
827 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
828 {
829 	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
830 }
831 
832 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
833 {
834 	struct btree *b;
835 
836 	rcu_read_lock();
837 	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
838 		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
839 			goto out;
840 	b = NULL;
841 out:
842 	rcu_read_unlock();
843 	return b;
844 }
845 
846 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
847 {
848 	struct task_struct *old;
849 
850 	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
851 	if (old && old != current) {
852 		if (op)
853 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
854 					TASK_UNINTERRUPTIBLE);
855 		return -EINTR;
856 	}
857 
858 	return 0;
859 }
860 
861 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
862 				     struct bkey *k)
863 {
864 	struct btree *b;
865 
866 	trace_bcache_btree_cache_cannibalize(c);
867 
868 	if (mca_cannibalize_lock(c, op))
869 		return ERR_PTR(-EINTR);
870 
871 	list_for_each_entry_reverse(b, &c->btree_cache, list)
872 		if (!mca_reap(b, btree_order(k), false))
873 			return b;
874 
875 	list_for_each_entry_reverse(b, &c->btree_cache, list)
876 		if (!mca_reap(b, btree_order(k), true))
877 			return b;
878 
879 	WARN(1, "btree cache cannibalize failed\n");
880 	return ERR_PTR(-ENOMEM);
881 }
882 
883 /*
884  * We can only have one thread cannibalizing other cached btree nodes at a time,
885  * or we'll deadlock. We use an open coded mutex to ensure that, which
886  * mca_cannibalize_lock() takes. This means every time we unlock the root of
887  * the btree, we need to release this lock if we have it held.
888  */
889 static void bch_cannibalize_unlock(struct cache_set *c)
890 {
891 	if (c->btree_cache_alloc_lock == current) {
892 		c->btree_cache_alloc_lock = NULL;
893 		wake_up(&c->btree_cache_wait);
894 	}
895 }
896 
897 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
898 			       struct bkey *k, int level)
899 {
900 	struct btree *b;
901 
902 	BUG_ON(current->bio_list);
903 
904 	lockdep_assert_held(&c->bucket_lock);
905 
906 	if (mca_find(c, k))
907 		return NULL;
908 
909 	/* btree_node_free() doesn't free memory; it sticks the node on the end
910 	 * of the list. Check if there are any freed nodes there:
911 	 */
912 	list_for_each_entry(b, &c->btree_cache_freeable, list)
913 		if (!mca_reap(b, btree_order(k), false))
914 			goto out;
915 
916 	/* We never free struct btree itself, just the memory that holds the on
917 	 * disk node. Check the freed list before allocating a new one:
918 	 */
919 	list_for_each_entry(b, &c->btree_cache_freed, list)
920 		if (!mca_reap(b, 0, false)) {
921 			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
922 			if (!b->keys.set[0].data)
923 				goto err;
924 			else
925 				goto out;
926 		}
927 
928 	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
929 	if (!b)
930 		goto err;
931 
932 	BUG_ON(!down_write_trylock(&b->lock));
933 	if (!b->keys.set->data)
934 		goto err;
935 out:
936 	BUG_ON(b->io_mutex.count != 1);
937 
938 	bkey_copy(&b->key, k);
939 	list_move(&b->list, &c->btree_cache);
940 	hlist_del_init_rcu(&b->hash);
941 	hlist_add_head_rcu(&b->hash, mca_hash(c, k));
942 
943 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
944 	b->parent	= (void *) ~0UL;
945 	b->flags	= 0;
946 	b->written	= 0;
947 	b->level	= level;
948 
949 	if (!b->level)
950 		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
951 				    &b->c->expensive_debug_checks);
952 	else
953 		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
954 				    &b->c->expensive_debug_checks);
955 
956 	return b;
957 err:
958 	if (b)
959 		rw_unlock(true, b);
960 
961 	b = mca_cannibalize(c, op, k);
962 	if (!IS_ERR(b))
963 		goto out;
964 
965 	return b;
966 }
967 
968 /*
969  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
970  * in from disk if necessary.
971  *
972  * If IO is necessary and running under generic_make_request, returns -EAGAIN.
973  *
974  * The btree node will have either a read or a write lock held, depending on
975  * level and op->lock.
976  */
977 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
978 				 struct bkey *k, int level, bool write,
979 				 struct btree *parent)
980 {
981 	int i = 0;
982 	struct btree *b;
983 
984 	BUG_ON(level < 0);
985 retry:
986 	b = mca_find(c, k);
987 
988 	if (!b) {
989 		if (current->bio_list)
990 			return ERR_PTR(-EAGAIN);
991 
992 		mutex_lock(&c->bucket_lock);
993 		b = mca_alloc(c, op, k, level);
994 		mutex_unlock(&c->bucket_lock);
995 
996 		if (!b)
997 			goto retry;
998 		if (IS_ERR(b))
999 			return b;
1000 
1001 		bch_btree_node_read(b);
1002 
1003 		if (!write)
1004 			downgrade_write(&b->lock);
1005 	} else {
1006 		rw_lock(write, b, level);
1007 		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1008 			rw_unlock(write, b);
1009 			goto retry;
1010 		}
1011 		BUG_ON(b->level != level);
1012 	}
1013 
1014 	if (btree_node_io_error(b)) {
1015 		rw_unlock(write, b);
1016 		return ERR_PTR(-EIO);
1017 	}
1018 
1019 	BUG_ON(!b->written);
1020 
1021 	b->parent = parent;
1022 	b->accessed = 1;
1023 
1024 	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1025 		prefetch(b->keys.set[i].tree);
1026 		prefetch(b->keys.set[i].data);
1027 	}
1028 
1029 	for (; i <= b->keys.nsets; i++)
1030 		prefetch(b->keys.set[i].data);
1031 
1032 	return b;
1033 }
1034 
1035 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1036 {
1037 	struct btree *b;
1038 
1039 	mutex_lock(&parent->c->bucket_lock);
1040 	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1041 	mutex_unlock(&parent->c->bucket_lock);
1042 
1043 	if (!IS_ERR_OR_NULL(b)) {
1044 		b->parent = parent;
1045 		bch_btree_node_read(b);
1046 		rw_unlock(true, b);
1047 	}
1048 }
1049 
1050 /* Btree alloc */
1051 
1052 static void btree_node_free(struct btree *b)
1053 {
1054 	trace_bcache_btree_node_free(b);
1055 
1056 	BUG_ON(b == b->c->root);
1057 
1058 	mutex_lock(&b->write_lock);
1059 
1060 	if (btree_node_dirty(b))
1061 		btree_complete_write(b, btree_current_write(b));
1062 	clear_bit(BTREE_NODE_dirty, &b->flags);
1063 
1064 	mutex_unlock(&b->write_lock);
1065 
1066 	cancel_delayed_work(&b->work);
1067 
1068 	mutex_lock(&b->c->bucket_lock);
1069 	bch_bucket_free(b->c, &b->key);
1070 	mca_bucket_free(b);
1071 	mutex_unlock(&b->c->bucket_lock);
1072 }
1073 
1074 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1075 				     int level, bool wait,
1076 				     struct btree *parent)
1077 {
1078 	BKEY_PADDED(key) k;
1079 	struct btree *b = ERR_PTR(-EAGAIN);
1080 
1081 	mutex_lock(&c->bucket_lock);
1082 retry:
1083 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1084 		goto err;
1085 
1086 	bkey_put(c, &k.key);
1087 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1088 
1089 	b = mca_alloc(c, op, &k.key, level);
1090 	if (IS_ERR(b))
1091 		goto err_free;
1092 
1093 	if (!b) {
1094 		cache_bug(c,
1095 			"Tried to allocate bucket that was in btree cache");
1096 		goto retry;
1097 	}
1098 
1099 	b->accessed = 1;
1100 	b->parent = parent;
1101 	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1102 
1103 	mutex_unlock(&c->bucket_lock);
1104 
1105 	trace_bcache_btree_node_alloc(b);
1106 	return b;
1107 err_free:
1108 	bch_bucket_free(c, &k.key);
1109 err:
1110 	mutex_unlock(&c->bucket_lock);
1111 
1112 	trace_bcache_btree_node_alloc_fail(c);
1113 	return b;
1114 }
1115 
1116 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1117 					  struct btree_op *op, int level,
1118 					  struct btree *parent)
1119 {
1120 	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1121 }
1122 
1123 static struct btree *btree_node_alloc_replacement(struct btree *b,
1124 						  struct btree_op *op)
1125 {
1126 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1127 	if (!IS_ERR_OR_NULL(n)) {
1128 		mutex_lock(&n->write_lock);
1129 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1130 		bkey_copy_key(&n->key, &b->key);
1131 		mutex_unlock(&n->write_lock);
1132 	}
1133 
1134 	return n;
1135 }
1136 
1137 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1138 {
1139 	unsigned i;
1140 
1141 	mutex_lock(&b->c->bucket_lock);
1142 
1143 	atomic_inc(&b->c->prio_blocked);
1144 
1145 	bkey_copy(k, &b->key);
1146 	bkey_copy_key(k, &ZERO_KEY);
1147 
1148 	for (i = 0; i < KEY_PTRS(k); i++)
1149 		SET_PTR_GEN(k, i,
1150 			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1151 					PTR_BUCKET(b->c, &b->key, i)));
1152 
1153 	mutex_unlock(&b->c->bucket_lock);
1154 }
1155 
1156 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1157 {
1158 	struct cache_set *c = b->c;
1159 	struct cache *ca;
1160 	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
1161 
1162 	mutex_lock(&c->bucket_lock);
1163 
1164 	for_each_cache(ca, c, i)
1165 		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1166 			if (op)
1167 				prepare_to_wait(&c->btree_cache_wait, &op->wait,
1168 						TASK_UNINTERRUPTIBLE);
1169 			mutex_unlock(&c->bucket_lock);
1170 			return -EINTR;
1171 		}
1172 
1173 	mutex_unlock(&c->bucket_lock);
1174 
1175 	return mca_cannibalize_lock(b->c, op);
1176 }
1177 
1178 /* Garbage collection */
1179 
1180 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1181 				    struct bkey *k)
1182 {
1183 	uint8_t stale = 0;
1184 	unsigned i;
1185 	struct bucket *g;
1186 
1187 	/*
1188 	 * ptr_invalid() can't return true for the keys that mark btree nodes as
1189 	 * freed, but since ptr_bad() returns true we'll never actually use them
1190 	 * for anything and thus we don't want to mark their pointers here
1191 	 */
1192 	if (!bkey_cmp(k, &ZERO_KEY))
1193 		return stale;
1194 
1195 	for (i = 0; i < KEY_PTRS(k); i++) {
1196 		if (!ptr_available(c, k, i))
1197 			continue;
1198 
1199 		g = PTR_BUCKET(c, k, i);
1200 
1201 		if (gen_after(g->last_gc, PTR_GEN(k, i)))
1202 			g->last_gc = PTR_GEN(k, i);
1203 
1204 		if (ptr_stale(c, k, i)) {
1205 			stale = max(stale, ptr_stale(c, k, i));
1206 			continue;
1207 		}
1208 
1209 		cache_bug_on(GC_MARK(g) &&
1210 			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1211 			     c, "inconsistent ptrs: mark = %llu, level = %i",
1212 			     GC_MARK(g), level);
1213 
1214 		if (level)
1215 			SET_GC_MARK(g, GC_MARK_METADATA);
1216 		else if (KEY_DIRTY(k))
1217 			SET_GC_MARK(g, GC_MARK_DIRTY);
1218 		else if (!GC_MARK(g))
1219 			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1220 
1221 		/* guard against overflow */
1222 		SET_GC_SECTORS_USED(g, min_t(unsigned,
1223 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
1224 					     MAX_GC_SECTORS_USED));
1225 
1226 		BUG_ON(!GC_SECTORS_USED(g));
1227 	}
1228 
1229 	return stale;
1230 }
1231 
1232 #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
1233 
1234 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1235 {
1236 	unsigned i;
1237 
1238 	for (i = 0; i < KEY_PTRS(k); i++)
1239 		if (ptr_available(c, k, i) &&
1240 		    !ptr_stale(c, k, i)) {
1241 			struct bucket *b = PTR_BUCKET(c, k, i);
1242 
1243 			b->gen = PTR_GEN(k, i);
1244 
1245 			if (level && bkey_cmp(k, &ZERO_KEY))
1246 				b->prio = BTREE_PRIO;
1247 			else if (!level && b->prio == BTREE_PRIO)
1248 				b->prio = INITIAL_PRIO;
1249 		}
1250 
1251 	__bch_btree_mark_key(c, level, k);
1252 }
1253 
1254 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1255 {
1256 	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1257 }
1258 
1259 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1260 {
1261 	uint8_t stale = 0;
1262 	unsigned keys = 0, good_keys = 0;
1263 	struct bkey *k;
1264 	struct btree_iter iter;
1265 	struct bset_tree *t;
1266 
1267 	gc->nodes++;
1268 
1269 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1270 		stale = max(stale, btree_mark_key(b, k));
1271 		keys++;
1272 
1273 		if (bch_ptr_bad(&b->keys, k))
1274 			continue;
1275 
1276 		gc->key_bytes += bkey_u64s(k);
1277 		gc->nkeys++;
1278 		good_keys++;
1279 
1280 		gc->data += KEY_SIZE(k);
1281 	}
1282 
1283 	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1284 		btree_bug_on(t->size &&
1285 			     bset_written(&b->keys, t) &&
1286 			     bkey_cmp(&b->key, &t->end) < 0,
1287 			     b, "found short btree key in gc");
1288 
1289 	if (b->c->gc_always_rewrite)
1290 		return true;
1291 
1292 	if (stale > 10)
1293 		return true;
1294 
1295 	if ((keys - good_keys) * 2 > keys)
1296 		return true;
1297 
1298 	return false;
1299 }
1300 
1301 #define GC_MERGE_NODES	4U
1302 
1303 struct gc_merge_info {
1304 	struct btree	*b;
1305 	unsigned	keys;
1306 };
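
/*
 * btree_gc_recurse() keeps a sliding window of the last GC_MERGE_NODES
 * child nodes visited; btree_gc_coalesce() tries to merge the nodes in the
 * window when their combined keys would fit in fewer nodes.
 */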
1307 
1308 static int bch_btree_insert_node(struct btree *, struct btree_op *,
1309 				 struct keylist *, atomic_t *, struct bkey *);
1310 
1311 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1312 			     struct gc_stat *gc, struct gc_merge_info *r)
1313 {
1314 	unsigned i, nodes = 0, keys = 0, blocks;
1315 	struct btree *new_nodes[GC_MERGE_NODES];
1316 	struct keylist keylist;
1317 	struct closure cl;
1318 	struct bkey *k;
1319 
1320 	bch_keylist_init(&keylist);
1321 
1322 	if (btree_check_reserve(b, NULL))
1323 		return 0;
1324 
1325 	memset(new_nodes, 0, sizeof(new_nodes));
1326 	closure_init_stack(&cl);
1327 
1328 	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1329 		keys += r[nodes++].keys;
1330 
1331 	blocks = btree_default_blocks(b->c) * 2 / 3;
1332 
1333 	if (nodes < 2 ||
1334 	    __set_blocks(b->keys.set[0].data, keys,
1335 			 block_bytes(b->c)) > blocks * (nodes - 1))
1336 		return 0;
1337 
1338 	for (i = 0; i < nodes; i++) {
1339 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1340 		if (IS_ERR_OR_NULL(new_nodes[i]))
1341 			goto out_nocoalesce;
1342 	}
1343 
1344 	/*
1345 	 * We have to check the reserve here, after we've allocated our new
1346 	 * nodes, to make sure the insert below will succeed - we also check
1347 	 * before as an optimization to potentially avoid a bunch of expensive
1348 	 * allocs/sorts
1349 	 */
1350 	if (btree_check_reserve(b, NULL))
1351 		goto out_nocoalesce;
1352 
1353 	for (i = 0; i < nodes; i++)
1354 		mutex_lock(&new_nodes[i]->write_lock);
1355 
1356 	for (i = nodes - 1; i > 0; --i) {
1357 		struct bset *n1 = btree_bset_first(new_nodes[i]);
1358 		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1359 		struct bkey *k, *last = NULL;
1360 
1361 		keys = 0;
1362 
1363 		if (i > 1) {
1364 			for (k = n2->start;
1365 			     k < bset_bkey_last(n2);
1366 			     k = bkey_next(k)) {
1367 				if (__set_blocks(n1, n1->keys + keys +
1368 						 bkey_u64s(k),
1369 						 block_bytes(b->c)) > blocks)
1370 					break;
1371 
1372 				last = k;
1373 				keys += bkey_u64s(k);
1374 			}
1375 		} else {
1376 			/*
1377 			 * This is the last node we keep - we're getting rid of
1378 			 * the node at r[0]. We have to try to fit all of the
1379 			 * remaining keys into this node; we can't guarantee
1380 			 * they will always fit due to rounding and variable
1381 			 * length keys (though that shouldn't happen in
1382 			 * practice)
1383 			 */
1384 			if (__set_blocks(n1, n1->keys + n2->keys,
1385 					 block_bytes(b->c)) >
1386 			    btree_blocks(new_nodes[i]))
1387 				goto out_nocoalesce;
1388 
1389 			keys = n2->keys;
1390 			/* Take the key of the node we're getting rid of */
1391 			last = &r->b->key;
1392 		}
1393 
1394 		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1395 		       btree_blocks(new_nodes[i]));
1396 
1397 		if (last)
1398 			bkey_copy_key(&new_nodes[i]->key, last);
1399 
1400 		memcpy(bset_bkey_last(n1),
1401 		       n2->start,
1402 		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1403 
1404 		n1->keys += keys;
1405 		r[i].keys = n1->keys;
1406 
1407 		memmove(n2->start,
1408 			bset_bkey_idx(n2, keys),
1409 			(void *) bset_bkey_last(n2) -
1410 			(void *) bset_bkey_idx(n2, keys));
1411 
1412 		n2->keys -= keys;
1413 
1414 		if (__bch_keylist_realloc(&keylist,
1415 					  bkey_u64s(&new_nodes[i]->key)))
1416 			goto out_nocoalesce;
1417 
1418 		bch_btree_node_write(new_nodes[i], &cl);
1419 		bch_keylist_add(&keylist, &new_nodes[i]->key);
1420 	}
1421 
1422 	for (i = 0; i < nodes; i++)
1423 		mutex_unlock(&new_nodes[i]->write_lock);
1424 
1425 	closure_sync(&cl);
1426 
1427 	/* We emptied out this node */
1428 	BUG_ON(btree_bset_first(new_nodes[0])->keys);
1429 	btree_node_free(new_nodes[0]);
1430 	rw_unlock(true, new_nodes[0]);
1431 	new_nodes[0] = NULL;
1432 
1433 	for (i = 0; i < nodes; i++) {
1434 		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1435 			goto out_nocoalesce;
1436 
1437 		make_btree_freeing_key(r[i].b, keylist.top);
1438 		bch_keylist_push(&keylist);
1439 	}
1440 
1441 	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1442 	BUG_ON(!bch_keylist_empty(&keylist));
1443 
1444 	for (i = 0; i < nodes; i++) {
1445 		btree_node_free(r[i].b);
1446 		rw_unlock(true, r[i].b);
1447 
1448 		r[i].b = new_nodes[i];
1449 	}
1450 
1451 	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1452 	r[nodes - 1].b = ERR_PTR(-EINTR);
1453 
1454 	trace_bcache_btree_gc_coalesce(nodes);
1455 	gc->nodes--;
1456 
1457 	bch_keylist_free(&keylist);
1458 
1459 	/* Invalidated our iterator */
1460 	return -EINTR;
1461 
1462 out_nocoalesce:
1463 	closure_sync(&cl);
1464 	bch_keylist_free(&keylist);
1465 
1466 	while ((k = bch_keylist_pop(&keylist)))
1467 		if (!bkey_cmp(k, &ZERO_KEY))
1468 			atomic_dec(&b->c->prio_blocked);
1469 
1470 	for (i = 0; i < nodes; i++)
1471 		if (!IS_ERR_OR_NULL(new_nodes[i])) {
1472 			btree_node_free(new_nodes[i]);
1473 			rw_unlock(true, new_nodes[i]);
1474 		}
1475 	return 0;
1476 }
1477 
1478 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1479 				 struct btree *replace)
1480 {
1481 	struct keylist keys;
1482 	struct btree *n;
1483 
1484 	if (btree_check_reserve(b, NULL))
1485 		return 0;
1486 
1487 	n = btree_node_alloc_replacement(replace, NULL);
1488 
1489 	/* recheck reserve after allocating replacement node */
1490 	if (btree_check_reserve(b, NULL)) {
1491 		btree_node_free(n);
1492 		rw_unlock(true, n);
1493 		return 0;
1494 	}
1495 
1496 	bch_btree_node_write_sync(n);
1497 
1498 	bch_keylist_init(&keys);
1499 	bch_keylist_add(&keys, &n->key);
1500 
1501 	make_btree_freeing_key(replace, keys.top);
1502 	bch_keylist_push(&keys);
1503 
1504 	bch_btree_insert_node(b, op, &keys, NULL, NULL);
1505 	BUG_ON(!bch_keylist_empty(&keys));
1506 
1507 	btree_node_free(replace);
1508 	rw_unlock(true, n);
1509 
1510 	/* Invalidated our iterator */
1511 	return -EINTR;
1512 }
1513 
1514 static unsigned btree_gc_count_keys(struct btree *b)
1515 {
1516 	struct bkey *k;
1517 	struct btree_iter iter;
1518 	unsigned ret = 0;
1519 
1520 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1521 		ret += bkey_u64s(k);
1522 
1523 	return ret;
1524 }
1525 
1526 static size_t btree_gc_min_nodes(struct cache_set *c)
1527 {
1528 	size_t min_nodes;
1529 
1530 	/*
1531 	 * Incremental gc pauses for GC_SLEEP_MS (100ms) whenever
1532 	 * front-end I/O arrives.  If each gc pass only processed a
1533 	 * constant number (100) of nodes, gc of a large btree would
1534 	 * take a very long time, and meanwhile the front-end I/Os
1535 	 * would run out of buckets (no new bucket can be allocated
1536 	 * during gc) and be blocked again.
1537 	 * So instead of a constant, the number of nodes processed per
1538 	 * pass scales with the total number of btree nodes: gc is
1539 	 * divided into a constant number (MAX_GC_TIMES, 100) of
1540 	 * passes, so when there are many btree nodes each pass
1541 	 * processes more of them, and otherwise fewer (but never
1542 	 * less than MIN_GC_NODES)
1543 	 */
1544 	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1545 	if (min_nodes < MIN_GC_NODES)
1546 		min_nodes = MIN_GC_NODES;
1547 
1548 	return min_nodes;
1549 }
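
/*
 * For example: with c->gc_stats.nodes == 40000, each pass handles at least
 * 40000 / MAX_GC_TIMES = 400 nodes; with only 2000 nodes, 2000 / 100 = 20
 * would be below MIN_GC_NODES, so each pass still handles at least 100.
 */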
1550 
1551 
1552 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1553 			    struct closure *writes, struct gc_stat *gc)
1554 {
1555 	int ret = 0;
1556 	bool should_rewrite;
1557 	struct bkey *k;
1558 	struct btree_iter iter;
1559 	struct gc_merge_info r[GC_MERGE_NODES];
1560 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1561 
1562 	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1563 
1564 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1565 		i->b = ERR_PTR(-EINTR);
1566 
1567 	while (1) {
1568 		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1569 		if (k) {
1570 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1571 						  true, b);
1572 			if (IS_ERR(r->b)) {
1573 				ret = PTR_ERR(r->b);
1574 				break;
1575 			}
1576 
1577 			r->keys = btree_gc_count_keys(r->b);
1578 
1579 			ret = btree_gc_coalesce(b, op, gc, r);
1580 			if (ret)
1581 				break;
1582 		}
1583 
1584 		if (!last->b)
1585 			break;
1586 
1587 		if (!IS_ERR(last->b)) {
1588 			should_rewrite = btree_gc_mark_node(last->b, gc);
1589 			if (should_rewrite) {
1590 				ret = btree_gc_rewrite_node(b, op, last->b);
1591 				if (ret)
1592 					break;
1593 			}
1594 
1595 			if (last->b->level) {
1596 				ret = btree_gc_recurse(last->b, op, writes, gc);
1597 				if (ret)
1598 					break;
1599 			}
1600 
1601 			bkey_copy_key(&b->c->gc_done, &last->b->key);
1602 
1603 			/*
1604 			 * Must flush leaf nodes before gc ends, since replace
1605 			 * operations aren't journalled
1606 			 */
1607 			mutex_lock(&last->b->write_lock);
1608 			if (btree_node_dirty(last->b))
1609 				bch_btree_node_write(last->b, writes);
1610 			mutex_unlock(&last->b->write_lock);
1611 			rw_unlock(true, last->b);
1612 		}
1613 
1614 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1615 		r->b = NULL;
1616 
1617 		if (atomic_read(&b->c->search_inflight) &&
1618 		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1619 			gc->nodes_pre =  gc->nodes;
1620 			ret = -EAGAIN;
1621 			break;
1622 		}
1623 
1624 		if (need_resched()) {
1625 			ret = -EAGAIN;
1626 			break;
1627 		}
1628 	}
1629 
1630 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1631 		if (!IS_ERR_OR_NULL(i->b)) {
1632 			mutex_lock(&i->b->write_lock);
1633 			if (btree_node_dirty(i->b))
1634 				bch_btree_node_write(i->b, writes);
1635 			mutex_unlock(&i->b->write_lock);
1636 			rw_unlock(true, i->b);
1637 		}
1638 
1639 	return ret;
1640 }
1641 
1642 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1643 			     struct closure *writes, struct gc_stat *gc)
1644 {
1645 	struct btree *n = NULL;
1646 	int ret = 0;
1647 	bool should_rewrite;
1648 
1649 	should_rewrite = btree_gc_mark_node(b, gc);
1650 	if (should_rewrite) {
1651 		n = btree_node_alloc_replacement(b, NULL);
1652 
1653 		if (!IS_ERR_OR_NULL(n)) {
1654 			bch_btree_node_write_sync(n);
1655 
1656 			bch_btree_set_root(n);
1657 			btree_node_free(b);
1658 			rw_unlock(true, n);
1659 
1660 			return -EINTR;
1661 		}
1662 	}
1663 
1664 	__bch_btree_mark_key(b->c, b->level + 1, &b->key);
1665 
1666 	if (b->level) {
1667 		ret = btree_gc_recurse(b, op, writes, gc);
1668 		if (ret)
1669 			return ret;
1670 	}
1671 
1672 	bkey_copy_key(&b->c->gc_done, &b->key);
1673 
1674 	return ret;
1675 }
1676 
1677 static void btree_gc_start(struct cache_set *c)
1678 {
1679 	struct cache *ca;
1680 	struct bucket *b;
1681 	unsigned i;
1682 
1683 	if (!c->gc_mark_valid)
1684 		return;
1685 
1686 	mutex_lock(&c->bucket_lock);
1687 
1688 	c->gc_mark_valid = 0;
1689 	c->gc_done = ZERO_KEY;
1690 
1691 	for_each_cache(ca, c, i)
1692 		for_each_bucket(b, ca) {
1693 			b->last_gc = b->gen;
1694 			if (!atomic_read(&b->pin)) {
1695 				SET_GC_MARK(b, 0);
1696 				SET_GC_SECTORS_USED(b, 0);
1697 			}
1698 		}
1699 
1700 	mutex_unlock(&c->bucket_lock);
1701 }
1702 
1703 static void bch_btree_gc_finish(struct cache_set *c)
1704 {
1705 	struct bucket *b;
1706 	struct cache *ca;
1707 	unsigned i;
1708 
1709 	mutex_lock(&c->bucket_lock);
1710 
1711 	set_gc_sectors(c);
1712 	c->gc_mark_valid = 1;
1713 	c->need_gc	= 0;
1714 
1715 	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1716 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1717 			    GC_MARK_METADATA);
1718 
1719 	/* don't reclaim buckets to which writeback keys point */
1720 	rcu_read_lock();
1721 	for (i = 0; i < c->devices_max_used; i++) {
1722 		struct bcache_device *d = c->devices[i];
1723 		struct cached_dev *dc;
1724 		struct keybuf_key *w, *n;
1725 		unsigned j;
1726 
1727 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1728 			continue;
1729 		dc = container_of(d, struct cached_dev, disk);
1730 
1731 		spin_lock(&dc->writeback_keys.lock);
1732 		rbtree_postorder_for_each_entry_safe(w, n,
1733 					&dc->writeback_keys.keys, node)
1734 			for (j = 0; j < KEY_PTRS(&w->key); j++)
1735 				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1736 					    GC_MARK_DIRTY);
1737 		spin_unlock(&dc->writeback_keys.lock);
1738 	}
1739 	rcu_read_unlock();
1740 
1741 	c->avail_nbuckets = 0;
1742 	for_each_cache(ca, c, i) {
1743 		uint64_t *i;
1744 
1745 		ca->invalidate_needs_gc = 0;
1746 
1747 		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1748 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1749 
1750 		for (i = ca->prio_buckets;
1751 		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1752 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1753 
1754 		for_each_bucket(b, ca) {
1755 			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
1756 
1757 			if (atomic_read(&b->pin))
1758 				continue;
1759 
1760 			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1761 
1762 			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1763 				c->avail_nbuckets++;
1764 		}
1765 	}
1766 
1767 	mutex_unlock(&c->bucket_lock);
1768 }
1769 
1770 static void bch_btree_gc(struct cache_set *c)
1771 {
1772 	int ret;
1773 	struct gc_stat stats;
1774 	struct closure writes;
1775 	struct btree_op op;
1776 	uint64_t start_time = local_clock();
1777 
1778 	trace_bcache_gc_start(c);
1779 
1780 	memset(&stats, 0, sizeof(struct gc_stat));
1781 	closure_init_stack(&writes);
1782 	bch_btree_op_init(&op, SHRT_MAX);
1783 
1784 	btree_gc_start(c);
1785 
1786 	/* if CACHE_SET_IO_DISABLE is set, the gc thread should stop too */
1787 	do {
1788 		ret = btree_root(gc_root, c, &op, &writes, &stats);
1789 		closure_sync(&writes);
1790 		cond_resched();
1791 
1792 		if (ret == -EAGAIN)
1793 			schedule_timeout_interruptible(msecs_to_jiffies
1794 						       (GC_SLEEP_MS));
1795 		else if (ret)
1796 			pr_warn("gc failed!");
1797 	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1798 
1799 	bch_btree_gc_finish(c);
1800 	wake_up_allocators(c);
1801 
1802 	bch_time_stats_update(&c->btree_gc_time, start_time);
1803 
1804 	stats.key_bytes *= sizeof(uint64_t);
1805 	stats.data	<<= 9;
1806 	bch_update_bucket_in_use(c, &stats);
1807 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1808 
1809 	trace_bcache_gc_end(c);
1810 
1811 	bch_moving_gc(c);
1812 }
1813 
1814 static bool gc_should_run(struct cache_set *c)
1815 {
1816 	struct cache *ca;
1817 	unsigned i;
1818 
1819 	for_each_cache(ca, c, i)
1820 		if (ca->invalidate_needs_gc)
1821 			return true;
1822 
1823 	if (atomic_read(&c->sectors_to_gc) < 0)
1824 		return true;
1825 
1826 	return false;
1827 }
1828 
1829 static int bch_gc_thread(void *arg)
1830 {
1831 	struct cache_set *c = arg;
1832 
1833 	while (1) {
1834 		wait_event_interruptible(c->gc_wait,
1835 			   kthread_should_stop() ||
1836 			   test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1837 			   gc_should_run(c));
1838 
1839 		if (kthread_should_stop() ||
1840 		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1841 			break;
1842 
1843 		set_gc_sectors(c);
1844 		bch_btree_gc(c);
1845 	}
1846 
1847 	wait_for_kthread_stop();
1848 	return 0;
1849 }
1850 
1851 int bch_gc_thread_start(struct cache_set *c)
1852 {
1853 	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1854 	return PTR_ERR_OR_ZERO(c->gc_thread);
1855 }
1856 
1857 /* Initial partial gc */
1858 
1859 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1860 {
1861 	int ret = 0;
1862 	struct bkey *k, *p = NULL;
1863 	struct btree_iter iter;
1864 
1865 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1866 		bch_initial_mark_key(b->c, b->level, k);
1867 
1868 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
1869 
1870 	if (b->level) {
1871 		bch_btree_iter_init(&b->keys, &iter, NULL);
1872 
1873 		do {
1874 			k = bch_btree_iter_next_filter(&iter, &b->keys,
1875 						       bch_ptr_bad);
1876 			if (k) {
1877 				btree_node_prefetch(b, k);
1878 				/*
1879 				 * initialize c->gc_stats.nodes
1880 				 * for incremental GC
1881 				 */
1882 				b->c->gc_stats.nodes++;
1883 			}
1884 
1885 			if (p)
1886 				ret = btree(check_recurse, p, b, op);
1887 
1888 			p = k;
1889 		} while (p && !ret);
1890 	}
1891 
1892 	return ret;
1893 }
1894 
1895 int bch_btree_check(struct cache_set *c)
1896 {
1897 	struct btree_op op;
1898 
1899 	bch_btree_op_init(&op, SHRT_MAX);
1900 
1901 	return btree_root(check_recurse, c, &op);
1902 }
1903 
1904 void bch_initial_gc_finish(struct cache_set *c)
1905 {
1906 	struct cache *ca;
1907 	struct bucket *b;
1908 	unsigned i;
1909 
1910 	bch_btree_gc_finish(c);
1911 
1912 	mutex_lock(&c->bucket_lock);
1913 
1914 	/*
1915 	 * We need to put some unused buckets directly on the prio freelist in
1916 	 * order to get the allocator thread started - it needs freed buckets in
1917 	 * order to rewrite the prios and gens, and it needs to rewrite prios
1918 	 * and gens in order to free buckets.
1919 	 *
1920 	 * This is only safe for buckets that have no live data in them, and
1921 	 * there should always be some of those.
1922 	 */
1923 	for_each_cache(ca, c, i) {
1924 		for_each_bucket(b, ca) {
1925 			if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1926 			    fifo_full(&ca->free[RESERVE_BTREE]))
1927 				break;
1928 
1929 			if (bch_can_invalidate_bucket(ca, b) &&
1930 			    !GC_MARK(b)) {
1931 				__bch_invalidate_one_bucket(ca, b);
1932 				if (!fifo_push(&ca->free[RESERVE_PRIO],
1933 				   b - ca->buckets))
1934 					fifo_push(&ca->free[RESERVE_BTREE],
1935 						  b - ca->buckets);
1936 			}
1937 		}
1938 	}
1939 
1940 	mutex_unlock(&c->bucket_lock);
1941 }
1942 
1943 /* Btree insertion */
1944 
1945 static bool btree_insert_key(struct btree *b, struct bkey *k,
1946 			     struct bkey *replace_key)
1947 {
1948 	unsigned status;
1949 
1950 	BUG_ON(bkey_cmp(k, &b->key) > 0);
1951 
1952 	status = bch_btree_insert_key(&b->keys, k, replace_key);
1953 	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1954 		bch_check_keys(&b->keys, "%u for %s", status,
1955 			       replace_key ? "replace" : "insert");
1956 
1957 		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1958 					      status);
1959 		return true;
1960 	} else
1961 		return false;
1962 }
1963 
1964 static size_t insert_u64s_remaining(struct btree *b)
1965 {
1966 	long ret = bch_btree_keys_u64s_remaining(&b->keys);
1967 
1968 	/*
1969 	 * Might land in the middle of an existing extent and have to split it
1970 	 */
1971 	if (b->keys.ops->is_extents)
1972 		ret -= KEY_MAX_U64S;
1973 
1974 	return max(ret, 0L);
1975 }
1976 
1977 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1978 				  struct keylist *insert_keys,
1979 				  struct bkey *replace_key)
1980 {
1981 	bool ret = false;
1982 	int oldsize = bch_count_data(&b->keys);
1983 
1984 	while (!bch_keylist_empty(insert_keys)) {
1985 		struct bkey *k = insert_keys->keys;
1986 
1987 		if (bkey_u64s(k) > insert_u64s_remaining(b))
1988 			break;
1989 
1990 		if (bkey_cmp(k, &b->key) <= 0) {
1991 			if (!b->level)
1992 				bkey_put(b->c, k);
1993 
1994 			ret |= btree_insert_key(b, k, replace_key);
1995 			bch_keylist_pop_front(insert_keys);
1996 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1997 			BKEY_PADDED(key) temp;
1998 			bkey_copy(&temp.key, insert_keys->keys);
1999 
2000 			bch_cut_back(&b->key, &temp.key);
2001 			bch_cut_front(&b->key, insert_keys->keys);
2002 
2003 			ret |= btree_insert_key(b, &temp.key, replace_key);
2004 			break;
2005 		} else {
2006 			break;
2007 		}
2008 	}
2009 
2010 	if (!ret)
2011 		op->insert_collision = true;
2012 
2013 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2014 
2015 	BUG_ON(bch_count_data(&b->keys) < oldsize);
2016 	return ret;
2017 }
2018 
2019 static int btree_split(struct btree *b, struct btree_op *op,
2020 		       struct keylist *insert_keys,
2021 		       struct bkey *replace_key)
2022 {
2023 	bool split;
2024 	struct btree *n1, *n2 = NULL, *n3 = NULL;
2025 	uint64_t start_time = local_clock();
2026 	struct closure cl;
2027 	struct keylist parent_keys;
2028 
2029 	closure_init_stack(&cl);
2030 	bch_keylist_init(&parent_keys);
2031 
2032 	if (btree_check_reserve(b, op)) {
2033 		if (!b->level)
2034 			return -EINTR;
2035 		else
2036 			WARN(1, "insufficient reserve for split\n");
2037 	}
2038 
2039 	n1 = btree_node_alloc_replacement(b, op);
2040 	if (IS_ERR(n1))
2041 		goto err;
2042 
2043 	split = set_blocks(btree_bset_first(n1),
2044 			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2045 
2046 	if (split) {
2047 		unsigned keys = 0;
2048 
2049 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2050 
2051 		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2052 		if (IS_ERR(n2))
2053 			goto err_free1;
2054 
2055 		if (!b->parent) {
2056 			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2057 			if (IS_ERR(n3))
2058 				goto err_free2;
2059 		}
2060 
2061 		mutex_lock(&n1->write_lock);
2062 		mutex_lock(&n2->write_lock);
2063 
2064 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2065 
2066 		/*
2067 		 * Has to be a linear search because we don't have an auxiliary
2068 		 * search tree yet
2069 		 */
2070 
2071 		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2072 			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2073 							keys));
2074 
2075 		bkey_copy_key(&n1->key,
2076 			      bset_bkey_idx(btree_bset_first(n1), keys));
2077 		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2078 
2079 		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2080 		btree_bset_first(n1)->keys = keys;
2081 
2082 		memcpy(btree_bset_first(n2)->start,
2083 		       bset_bkey_last(btree_bset_first(n1)),
2084 		       btree_bset_first(n2)->keys * sizeof(uint64_t));
2085 
2086 		bkey_copy_key(&n2->key, &b->key);
2087 
2088 		bch_keylist_add(&parent_keys, &n2->key);
2089 		bch_btree_node_write(n2, &cl);
2090 		mutex_unlock(&n2->write_lock);
2091 		rw_unlock(true, n2);
2092 	} else {
2093 		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2094 
2095 		mutex_lock(&n1->write_lock);
2096 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2097 	}
2098 
2099 	bch_keylist_add(&parent_keys, &n1->key);
2100 	bch_btree_node_write(n1, &cl);
2101 	mutex_unlock(&n1->write_lock);
2102 
2103 	if (n3) {
2104 		/* Depth increases, make a new root */
2105 		mutex_lock(&n3->write_lock);
2106 		bkey_copy_key(&n3->key, &MAX_KEY);
2107 		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2108 		bch_btree_node_write(n3, &cl);
2109 		mutex_unlock(&n3->write_lock);
2110 
2111 		closure_sync(&cl);
2112 		bch_btree_set_root(n3);
2113 		rw_unlock(true, n3);
2114 	} else if (!b->parent) {
2115 		/* Root filled up but didn't need to be split */
2116 		closure_sync(&cl);
2117 		bch_btree_set_root(n1);
2118 	} else {
2119 		/* Split a non root node */
2120 		closure_sync(&cl);
2121 		make_btree_freeing_key(b, parent_keys.top);
2122 		bch_keylist_push(&parent_keys);
2123 
2124 		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2125 		BUG_ON(!bch_keylist_empty(&parent_keys));
2126 	}
2127 
2128 	btree_node_free(b);
2129 	rw_unlock(true, n1);
2130 
2131 	bch_time_stats_update(&b->c->btree_split_time, start_time);
2132 
2133 	return 0;
2134 err_free2:
2135 	bkey_put(b->c, &n2->key);
2136 	btree_node_free(n2);
2137 	rw_unlock(true, n2);
2138 err_free1:
2139 	bkey_put(b->c, &n1->key);
2140 	btree_node_free(n1);
2141 	rw_unlock(true, n1);
2142 err:
2143 	WARN(1, "bcache: btree split failed (level %u)\n", b->level);
2144 
2145 	if (n3 == ERR_PTR(-EAGAIN) ||
2146 	    n2 == ERR_PTR(-EAGAIN) ||
2147 	    n1 == ERR_PTR(-EAGAIN))
2148 		return -EAGAIN;
2149 
2150 	return -ENOMEM;
2151 }
2152 
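/*
 * Insert @insert_keys into @b in place if they fit, otherwise fall back to
 * btree_split(). Leaf insertions are marked dirty against @journal_ref;
 * interior node insertions are written out immediately. Returns 0 on
 * success, -EAGAIN if a split is needed but can't safely be done in this
 * context, or -EINTR if the caller must retry with a write lock further up
 * the tree.
 */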
2153 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2154 				 struct keylist *insert_keys,
2155 				 atomic_t *journal_ref,
2156 				 struct bkey *replace_key)
2157 {
2158 	struct closure cl;
2159 
2160 	BUG_ON(b->level && replace_key);
2161 
2162 	closure_init_stack(&cl);
2163 
2164 	mutex_lock(&b->write_lock);
2165 
2166 	if (write_block(b) != btree_bset_last(b) &&
2167 	    b->keys.last_set_unwritten)
2168 		bch_btree_init_next(b); /* just wrote a set */
2169 
2170 	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2171 		mutex_unlock(&b->write_lock);
2172 		goto split;
2173 	}
2174 
2175 	BUG_ON(write_block(b) != btree_bset_last(b));
2176 
2177 	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2178 		if (!b->level)
2179 			bch_btree_leaf_dirty(b, journal_ref);
2180 		else
2181 			bch_btree_node_write(b, &cl);
2182 	}
2183 
2184 	mutex_unlock(&b->write_lock);
2185 
2186 	/* wait for btree node write if necessary, after unlock */
2187 	closure_sync(&cl);
2188 
2189 	return 0;
2190 split:
2191 	if (current->bio_list) {
2192 		op->lock = b->c->root->level + 1;
2193 		return -EAGAIN;
2194 	} else if (op->lock <= b->c->root->level) {
2195 		op->lock = b->c->root->level + 1;
2196 		return -EINTR;
2197 	} else {
2198 		/* Invalidated all iterators */
2199 		int ret = btree_split(b, op, insert_keys, replace_key);
2200 
2201 		if (bch_keylist_empty(insert_keys))
2202 			return 0;
2203 		else if (!ret)
2204 			return -EINTR;
2205 		return ret;
2206 	}
2207 }
2208 
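/*
 * Insert @check_key as a placeholder with a single random pointer to
 * PTR_CHECK_DEV, so a later replace-style insert against it fails if the
 * range was modified in the meantime. Upgrades the node's read lock to a
 * write lock when op->lock == -1, bailing out if the node changed under us.
 */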
2209 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2210 			       struct bkey *check_key)
2211 {
2212 	int ret = -EINTR;
2213 	uint64_t btree_ptr = b->key.ptr[0];
2214 	unsigned long seq = b->seq;
2215 	struct keylist insert;
2216 	bool upgrade = op->lock == -1;
2217 
2218 	bch_keylist_init(&insert);
2219 
2220 	if (upgrade) {
2221 		rw_unlock(false, b);
2222 		rw_lock(true, b, b->level);
2223 
2224 		if (b->key.ptr[0] != btree_ptr ||
2225 		    b->seq != seq + 1) {
2226 			op->lock = b->level;
2227 			goto out;
2228 		}
2229 	}
2230 
2231 	SET_KEY_PTRS(check_key, 1);
2232 	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2233 
2234 	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2235 
2236 	bch_keylist_add(&insert, check_key);
2237 
2238 	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2239 
2240 	BUG_ON(!ret && !bch_keylist_empty(&insert));
2241 out:
2242 	if (upgrade)
2243 		downgrade_write(&b->lock);
2244 	return ret;
2245 }
2246 
2247 struct btree_insert_op {
2248 	struct btree_op	op;
2249 	struct keylist	*keys;
2250 	atomic_t	*journal_ref;
2251 	struct bkey	*replace_key;
2252 };
2253 
2254 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2255 {
2256 	struct btree_insert_op *op = container_of(b_op,
2257 					struct btree_insert_op, op);
2258 
2259 	int ret = bch_btree_insert_node(b, &op->op, op->keys,
2260 					op->journal_ref, op->replace_key);
2261 	if (ret && !bch_keylist_empty(op->keys))
2262 		return ret;
2263 	else
2264 		return MAP_DONE;
2265 }
2266 
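/*
 * Top level insert: map over the leaf nodes covered by @keys and insert
 * into each until the keylist is empty. On error the remaining keys are
 * dropped; a failed replace returns -ESRCH.
 */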
2267 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2268 		     atomic_t *journal_ref, struct bkey *replace_key)
2269 {
2270 	struct btree_insert_op op;
2271 	int ret = 0;
2272 
2273 	BUG_ON(current->bio_list);
2274 	BUG_ON(bch_keylist_empty(keys));
2275 
2276 	bch_btree_op_init(&op.op, 0);
2277 	op.keys		= keys;
2278 	op.journal_ref	= journal_ref;
2279 	op.replace_key	= replace_key;
2280 
2281 	while (!ret && !bch_keylist_empty(keys)) {
2282 		op.op.lock = 0;
2283 		ret = bch_btree_map_leaf_nodes(&op.op, c,
2284 					       &START_KEY(keys->keys),
2285 					       btree_insert_fn);
2286 	}
2287 
2288 	if (ret) {
2289 		struct bkey *k;
2290 
2291 		pr_err("error %i\n", ret);
2292 
2293 		while ((k = bch_keylist_pop(keys)))
2294 			bkey_put(c, k);
2295 	} else if (op.op.insert_collision)
2296 		ret = -ESRCH;
2297 
2298 	return ret;
2299 }
2300 
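/*
 * Make @b the new root: remove it from the btree node LRU list, point
 * c->root at it and issue a journal meta write so the new root location is
 * persisted.
 */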
2301 void bch_btree_set_root(struct btree *b)
2302 {
2303 	unsigned i;
2304 	struct closure cl;
2305 
2306 	closure_init_stack(&cl);
2307 
2308 	trace_bcache_btree_set_root(b);
2309 
2310 	BUG_ON(!b->written);
2311 
2312 	for (i = 0; i < KEY_PTRS(&b->key); i++)
2313 		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2314 
2315 	mutex_lock(&b->c->bucket_lock);
2316 	list_del_init(&b->list);
2317 	mutex_unlock(&b->c->bucket_lock);
2318 
2319 	b->c->root = b;
2320 
2321 	bch_journal_meta(b->c, &cl);
2322 	closure_sync(&cl);
2323 }
2324 
2325 /* Map across nodes or keys */
2326 
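/*
 * Recursively walk the btree starting from @from, calling @fn on each node:
 * always on leaves, and on interior nodes only when @flags is
 * MAP_ALL_NODES. Stops early if @fn returns anything other than
 * MAP_CONTINUE.
 */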
2327 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2328 				       struct bkey *from,
2329 				       btree_map_nodes_fn *fn, int flags)
2330 {
2331 	int ret = MAP_CONTINUE;
2332 
2333 	if (b->level) {
2334 		struct bkey *k;
2335 		struct btree_iter iter;
2336 
2337 		bch_btree_iter_init(&b->keys, &iter, from);
2338 
2339 		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2340 						       bch_ptr_bad))) {
2341 			ret = btree(map_nodes_recurse, k, b,
2342 				    op, from, fn, flags);
2343 			from = NULL;
2344 
2345 			if (ret != MAP_CONTINUE)
2346 				return ret;
2347 		}
2348 	}
2349 
2350 	if (!b->level || flags == MAP_ALL_NODES)
2351 		ret = fn(op, b);
2352 
2353 	return ret;
2354 }
2355 
2356 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2357 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
2358 {
2359 	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2360 }
2361 
2362 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2363 				      struct bkey *from, btree_map_keys_fn *fn,
2364 				      int flags)
2365 {
2366 	int ret = MAP_CONTINUE;
2367 	struct bkey *k;
2368 	struct btree_iter iter;
2369 
2370 	bch_btree_iter_init(&b->keys, &iter, from);
2371 
2372 	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2373 		ret = !b->level
2374 			? fn(op, b, k)
2375 			: btree(map_keys_recurse, k, b, op, from, fn, flags);
2376 		from = NULL;
2377 
2378 		if (ret != MAP_CONTINUE)
2379 			return ret;
2380 	}
2381 
2382 	if (!b->level && (flags & MAP_END_KEY))
2383 		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2384 				     KEY_OFFSET(&b->key), 0));
2385 
2386 	return ret;
2387 }
2388 
2389 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2390 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
2391 {
2392 	return btree_root(map_keys_recurse, c, op, from, fn, flags);
2393 }
2394 
2395 /* Keybuf code */
2396 
2397 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2398 {
2399 	/* Overlapping keys compare equal */
2400 	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2401 		return -1;
2402 	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2403 		return 1;
2404 	return 0;
2405 }
2406 
2407 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2408 					    struct keybuf_key *r)
2409 {
2410 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2411 }
2412 
2413 struct refill {
2414 	struct btree_op	op;
2415 	unsigned	nr_found;
2416 	struct keybuf	*buf;
2417 	struct bkey	*end;
2418 	keybuf_pred_fn	*pred;
2419 };
2420 
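/*
 * map_keys callback for bch_refill_keybuf(): buffer keys matching
 * refill->pred until we reach refill->end or run out of free keybuf_keys,
 * recording how far we've scanned in buf->last_scanned.
 */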
2421 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2422 			    struct bkey *k)
2423 {
2424 	struct refill *refill = container_of(op, struct refill, op);
2425 	struct keybuf *buf = refill->buf;
2426 	int ret = MAP_CONTINUE;
2427 
2428 	if (bkey_cmp(k, refill->end) >= 0) {
2429 		ret = MAP_DONE;
2430 		goto out;
2431 	}
2432 
2433 	if (!KEY_SIZE(k)) /* end key */
2434 		goto out;
2435 
2436 	if (refill->pred(buf, k)) {
2437 		struct keybuf_key *w;
2438 
2439 		spin_lock(&buf->lock);
2440 
2441 		w = array_alloc(&buf->freelist);
2442 		if (!w) {
2443 			spin_unlock(&buf->lock);
2444 			return MAP_DONE;
2445 		}
2446 
2447 		w->private = NULL;
2448 		bkey_copy(&w->key, k);
2449 
2450 		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2451 			array_free(&buf->freelist, w);
2452 		else
2453 			refill->nr_found++;
2454 
2455 		if (array_freelist_empty(&buf->freelist))
2456 			ret = MAP_DONE;
2457 
2458 		spin_unlock(&buf->lock);
2459 	}
2460 out:
2461 	buf->last_scanned = *k;
2462 	return ret;
2463 }
2464 
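/*
 * Scan the btree from buf->last_scanned towards @end for keys matching
 * @pred and add them to @buf, then record the range of keys actually
 * buffered in buf->start and buf->end.
 */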
2465 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2466 		       struct bkey *end, keybuf_pred_fn *pred)
2467 {
2468 	struct bkey start = buf->last_scanned;
2469 	struct refill refill;
2470 
2471 	cond_resched();
2472 
2473 	bch_btree_op_init(&refill.op, -1);
2474 	refill.nr_found	= 0;
2475 	refill.buf	= buf;
2476 	refill.end	= end;
2477 	refill.pred	= pred;
2478 
2479 	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2480 			   refill_keybuf_fn, MAP_END_KEY);
2481 
2482 	trace_bcache_keyscan(refill.nr_found,
2483 			     KEY_INODE(&start), KEY_OFFSET(&start),
2484 			     KEY_INODE(&buf->last_scanned),
2485 			     KEY_OFFSET(&buf->last_scanned));
2486 
2487 	spin_lock(&buf->lock);
2488 
2489 	if (!RB_EMPTY_ROOT(&buf->keys)) {
2490 		struct keybuf_key *w;
2491 		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2492 		buf->start	= START_KEY(&w->key);
2493 
2494 		w = RB_LAST(&buf->keys, struct keybuf_key, node);
2495 		buf->end	= w->key;
2496 	} else {
2497 		buf->start	= MAX_KEY;
2498 		buf->end	= MAX_KEY;
2499 	}
2500 
2501 	spin_unlock(&buf->lock);
2502 }
2503 
2504 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2505 {
2506 	rb_erase(&w->node, &buf->keys);
2507 	array_free(&buf->freelist, w);
2508 }
2509 
2510 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2511 {
2512 	spin_lock(&buf->lock);
2513 	__bch_keybuf_del(buf, w);
2514 	spin_unlock(&buf->lock);
2515 }
2516 
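/*
 * Returns true if any key in @buf overlapping [start, end) is currently in
 * use (w->private set); overlapping keys that aren't in use are removed
 * from the buffer.
 */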
2517 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2518 				  struct bkey *end)
2519 {
2520 	bool ret = false;
2521 	struct keybuf_key *p, *w, s;
2522 	s.key = *start;
2523 
2524 	if (bkey_cmp(end, &buf->start) <= 0 ||
2525 	    bkey_cmp(start, &buf->end) >= 0)
2526 		return false;
2527 
2528 	spin_lock(&buf->lock);
2529 	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2530 
2531 	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2532 		p = w;
2533 		w = RB_NEXT(w, node);
2534 
2535 		if (p->private)
2536 			ret = true;
2537 		else
2538 			__bch_keybuf_del(buf, p);
2539 	}
2540 
2541 	spin_unlock(&buf->lock);
2542 	return ret;
2543 }
2544 
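/* Return the first buffered key not already in use, marking it in use. */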
2545 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2546 {
2547 	struct keybuf_key *w;
2548 	spin_lock(&buf->lock);
2549 
2550 	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2551 
2552 	while (w && w->private)
2553 		w = RB_NEXT(w, node);
2554 
2555 	if (w)
2556 		w->private = ERR_PTR(-EINTR);
2557 
2558 	spin_unlock(&buf->lock);
2559 	return w;
2560 }
2561 
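/*
 * Like bch_keybuf_next(), but refills @buf from the btree as needed;
 * returns NULL once the scan has reached @end.
 */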
2562 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2563 					  struct keybuf *buf,
2564 					  struct bkey *end,
2565 					  keybuf_pred_fn *pred)
2566 {
2567 	struct keybuf_key *ret;
2568 
2569 	while (1) {
2570 		ret = bch_keybuf_next(buf);
2571 		if (ret)
2572 			break;
2573 
2574 		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2575 			pr_debug("scan finished\n");
2576 			break;
2577 		}
2578 
2579 		bch_refill_keybuf(c, buf, end, pred);
2580 	}
2581 
2582 	return ret;
2583 }
2584 
2585 void bch_keybuf_init(struct keybuf *buf)
2586 {
2587 	buf->last_scanned	= MAX_KEY;
2588 	buf->keys		= RB_ROOT;
2589 
2590 	spin_lock_init(&buf->lock);
2591 	array_allocator_init(&buf->freelist);
2592 }
2593