xref: /linux/drivers/md/bcache/btree.c (revision c06944560a562828d507166b4f87c01c367cc9c1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This currently is just used as
12  * an LRU but in the future should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection is run, we resort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22  */
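/*
 * Editor's note, an illustrative sketch (not upstream code): "freeing is
 * accomplished by incrementing the counter" means each bucket carries an
 * 8 bit generation number and every pointer records the generation it was
 * allocated under; bumping the bucket's gen invalidates all of its pointers
 * at once, and garbage collection later drops keys whose recorded gen lags
 * behind. Roughly, with hypothetical example_* names and wraparound ignored:
 *
 *	struct example_bucket { uint8_t gen; };
 *
 *	static inline bool example_ptr_stale(const struct example_bucket *g,
 *					     uint8_t key_gen)
 *	{
 *		return g->gen != key_gen;
 *	}
 *
 * The real checks are ptr_stale()/gen_after(), which also handle wraparound.
 */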
23 
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28 
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 #include <linux/delay.h>
39 #include <linux/sort.h>
40 #include <trace/events/bcache.h>
41 
42 /*
43  * Todo:
44  * register_bcache: Return errors out to userspace correctly
45  *
46  * Writeback: don't undirty key until after a cache flush
47  *
48  * Create an iterator for key pointers
49  *
50  * On btree write error, mark bucket such that it won't be freed from the cache
51  *
52  * Journalling:
53  *   Check for bad keys in replay
54  *   Propagate barriers
55  *   Refcount journal entries in journal_replay
56  *
57  * Garbage collection:
58  *   Finish incremental gc
59  *   Gc should free old UUIDs, data for invalid UUIDs
60  *
61  * Provide a way to list backing device UUIDs we have data cached for, and
62  * probably how long it's been since we've seen them, and a way to invalidate
63  * dirty data for devices that will never be attached again
64  *
65  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
66  * that based on that and how much dirty data we have we can keep writeback
67  * from being starved
68  *
69  * Add a tracepoint or somesuch to watch for writeback starvation
70  *
71  * When btree depth > 1 and splitting an interior node, we have to make sure
72  * alloc_bucket() cannot fail. This should be true but is not completely
73  * obvious.
74  *
75  * Plugging?
76  *
77  * If data write is less than hard sector size of ssd, round up offset in open
78  * bucket to the next whole sector
79  *
80  * Superblock needs to be fleshed out for multiple cache devices
81  *
82  * Add a sysfs tunable for the number of writeback IOs in flight
83  *
84  * Add a sysfs tunable for the number of open data buckets
85  *
86  * IO tracking: Can we track when one process is doing io on behalf of another?
87  * IO tracking: Don't use just an average, weigh more recent stuff higher
88  *
89  * Test module load/unload
90  */
91 
92 #define MAX_GC_TIMES		100
93 #define MIN_GC_NODES		100
94 #define GC_SLEEP_MS		100
95 
96 #define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
97 
98 #define PTR_HASH(c, k)							\
99 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
100 
101 static struct workqueue_struct *btree_io_wq;
102 
103 #define insert_lock(s, b)	((b)->level <= (s)->lock)
104 
105 
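/*
 * Address, within the node's in-memory buffer, of the first block that has
 * not yet been written to disk, i.e. where the next bset starts.
 */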
106 static inline struct bset *write_block(struct btree *b)
107 {
108 	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
109 }
110 
111 static void bch_btree_init_next(struct btree *b)
112 {
113 	/* If not a leaf node, always sort */
114 	if (b->level && b->keys.nsets)
115 		bch_btree_sort(&b->keys, &b->c->sort);
116 	else
117 		bch_btree_sort_lazy(&b->keys, &b->c->sort);
118 
119 	if (b->written < btree_blocks(b))
120 		bch_bset_init_next(&b->keys, write_block(b),
121 				   bset_magic(&b->c->cache->sb));
122 
123 }
124 
125 /* Btree key manipulation */
126 
127 void bkey_put(struct cache_set *c, struct bkey *k)
128 {
129 	unsigned int i;
130 
131 	for (i = 0; i < KEY_PTRS(k); i++)
132 		if (ptr_available(c, k, i))
133 			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
134 }
135 
136 /* Btree IO */
137 
138 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
139 {
140 	uint64_t crc = b->key.ptr[0];
141 	void *data = (void *) i + 8, *end = bset_bkey_last(i);
142 
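	/*
	 * Checksum everything after the bset's own 8 byte csum field (the
	 * "+ 8" above), seeded with the node's first pointer.
	 */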
143 	crc = crc64_be(crc, data, end - data);
144 	return crc ^ 0xffffffffffffffffULL;
145 }
146 
147 void bch_btree_node_read_done(struct btree *b)
148 {
149 	const char *err = "bad btree header";
150 	struct bset *i = btree_bset_first(b);
151 	struct btree_iter *iter;
152 
153 	/*
154 	 * c->fill_iter can allocate an iterator with more memory space
155 	 * than the static MAX_BSETS would allow.
156 	 * See the comment around cache_set->fill_iter.
157 	 */
158 	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
159 	iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
160 	iter->used = 0;
161 
162 #ifdef CONFIG_BCACHE_DEBUG
163 	iter->b = &b->keys;
164 #endif
165 
166 	if (!i->seq)
167 		goto err;
168 
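	/*
	 * Walk the bsets that were written to this node: consecutive written
	 * bsets all carry the seq of the first one, so stop at the first
	 * block whose seq differs (unwritten space) or when the node's
	 * blocks are exhausted.
	 */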
169 	for (;
170 	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
171 	     i = write_block(b)) {
172 		err = "unsupported bset version";
173 		if (i->version > BCACHE_BSET_VERSION)
174 			goto err;
175 
176 		err = "bad btree header";
177 		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
178 		    btree_blocks(b))
179 			goto err;
180 
181 		err = "bad magic";
182 		if (i->magic != bset_magic(&b->c->cache->sb))
183 			goto err;
184 
185 		err = "bad checksum";
186 		switch (i->version) {
187 		case 0:
188 			if (i->csum != csum_set(i))
189 				goto err;
190 			break;
191 		case BCACHE_BSET_VERSION:
192 			if (i->csum != btree_csum_set(b, i))
193 				goto err;
194 			break;
195 		}
196 
197 		err = "empty set";
198 		if (i != b->keys.set[0].data && !i->keys)
199 			goto err;
200 
201 		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
202 
203 		b->written += set_blocks(i, block_bytes(b->c->cache));
204 	}
205 
206 	err = "corrupted btree";
207 	for (i = write_block(b);
208 	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
209 	     i = ((void *) i) + block_bytes(b->c->cache))
210 		if (i->seq == b->keys.set[0].data->seq)
211 			goto err;
212 
213 	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
214 
215 	i = b->keys.set[0].data;
216 	err = "short btree key";
217 	if (b->keys.set[0].size &&
218 	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
219 		goto err;
220 
221 	if (b->written < btree_blocks(b))
222 		bch_bset_init_next(&b->keys, write_block(b),
223 				   bset_magic(&b->c->cache->sb));
224 out:
225 	mempool_free(iter, &b->c->fill_iter);
226 	return;
227 err:
228 	set_btree_node_io_error(b);
229 	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
230 			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
231 			    bset_block_offset(b, i), i->keys);
232 	goto out;
233 }
234 
235 static void btree_node_read_endio(struct bio *bio)
236 {
237 	struct closure *cl = bio->bi_private;
238 
239 	closure_put(cl);
240 }
241 
242 static void bch_btree_node_read(struct btree *b)
243 {
244 	uint64_t start_time = local_clock();
245 	struct closure cl;
246 	struct bio *bio;
247 
248 	trace_bcache_btree_read(b);
249 
250 	closure_init_stack(&cl);
251 
252 	bio = bch_bbio_alloc(b->c);
253 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
254 	bio->bi_end_io	= btree_node_read_endio;
255 	bio->bi_private	= &cl;
256 	bio->bi_opf = REQ_OP_READ | REQ_META;
257 
258 	bch_bio_map(bio, b->keys.set[0].data);
259 
260 	bch_submit_bbio(bio, b->c, &b->key, 0);
261 	closure_sync(&cl);
262 
263 	if (bio->bi_status)
264 		set_btree_node_io_error(b);
265 
266 	bch_bbio_free(bio, b->c);
267 
268 	if (btree_node_io_error(b))
269 		goto err;
270 
271 	bch_btree_node_read_done(b);
272 	bch_time_stats_update(&b->c->btree_read_time, start_time);
273 
274 	return;
275 err:
276 	bch_cache_set_error(b->c, "io error reading bucket %zu",
277 			    PTR_BUCKET_NR(b->c, &b->key, 0));
278 }
279 
280 static void btree_complete_write(struct btree *b, struct btree_write *w)
281 {
282 	if (w->prio_blocked &&
283 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
284 		wake_up_allocators(b->c);
285 
286 	if (w->journal) {
287 		atomic_dec_bug(w->journal);
288 		__closure_wake_up(&b->c->journal.wait);
289 	}
290 
291 	w->prio_blocked	= 0;
292 	w->journal	= NULL;
293 }
294 
295 static CLOSURE_CALLBACK(btree_node_write_unlock)
296 {
297 	closure_type(b, struct btree, io);
298 
299 	up(&b->io_mutex);
300 }
301 
302 static CLOSURE_CALLBACK(__btree_node_write_done)
303 {
304 	closure_type(b, struct btree, io);
305 	struct btree_write *w = btree_prev_write(b);
306 
307 	bch_bbio_free(b->bio, b->c);
308 	b->bio = NULL;
309 	btree_complete_write(b, w);
310 
311 	if (btree_node_dirty(b))
312 		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
313 
314 	closure_return_with_destructor(cl, btree_node_write_unlock);
315 }
316 
317 static CLOSURE_CALLBACK(btree_node_write_done)
318 {
319 	closure_type(b, struct btree, io);
320 
321 	bio_free_pages(b->bio);
322 	__btree_node_write_done(&cl->work);
323 }
324 
325 static void btree_node_write_endio(struct bio *bio)
326 {
327 	struct closure *cl = bio->bi_private;
328 	struct btree *b = container_of(cl, struct btree, io);
329 
330 	if (bio->bi_status)
331 		set_btree_node_io_error(b);
332 
333 	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
334 	closure_put(cl);
335 }
336 
337 static void do_btree_node_write(struct btree *b)
338 {
339 	struct closure *cl = &b->io;
340 	struct bset *i = btree_bset_last(b);
341 	BKEY_PADDED(key) k;
342 
343 	i->version	= BCACHE_BSET_VERSION;
344 	i->csum		= btree_csum_set(b, i);
345 
346 	BUG_ON(b->bio);
347 	b->bio = bch_bbio_alloc(b->c);
348 
349 	b->bio->bi_end_io	= btree_node_write_endio;
350 	b->bio->bi_private	= cl;
351 	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c->cache));
352 	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
353 	bch_bio_map(b->bio, i);
354 
355 	/*
356 	 * If we're appending to a leaf node, we don't technically need FUA -
357 	 * this write just needs to be persisted before the next journal write,
358 	 * which will be marked FLUSH|FUA.
359 	 *
360 	 * Similarly if we're writing a new btree root - the pointer is going to
361 	 * be in the next journal entry.
362 	 *
363 	 * But if we're writing a new btree node (that isn't a root) or
364 	 * appending to a non leaf btree node, we need either FUA or a flush
365 	 * when we write the parent with the new pointer. FUA is cheaper than a
366 	 * flush, and writes appending to leaf nodes aren't blocking anything so
367 	 * just make all btree node writes FUA to keep things sane.
368 	 */
369 
370 	bkey_copy(&k.key, &b->key);
371 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
372 		       bset_sector_offset(&b->keys, i));
373 
374 	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
375 		struct bio_vec *bv;
376 		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
377 		struct bvec_iter_all iter_all;
378 
379 		bio_for_each_segment_all(bv, b->bio, iter_all) {
380 			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
381 			addr += PAGE_SIZE;
382 		}
383 
384 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
385 
386 		continue_at(cl, btree_node_write_done, NULL);
387 	} else {
388 		/*
389 		 * No problem for multipage bvec since the bio is
390 		 * just allocated
391 		 */
392 		b->bio->bi_vcnt = 0;
393 		bch_bio_map(b->bio, i);
394 
395 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
396 
397 		closure_sync(cl);
398 		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
399 	}
400 }
401 
402 void __bch_btree_node_write(struct btree *b, struct closure *parent)
403 {
404 	struct bset *i = btree_bset_last(b);
405 
406 	lockdep_assert_held(&b->write_lock);
407 
408 	trace_bcache_btree_write(b);
409 
410 	BUG_ON(current->bio_list);
411 	BUG_ON(b->written >= btree_blocks(b));
412 	BUG_ON(b->written && !i->keys);
413 	BUG_ON(btree_bset_first(b)->seq != i->seq);
414 	bch_check_keys(&b->keys, "writing");
415 
416 	cancel_delayed_work(&b->work);
417 
418 	/* If caller isn't waiting for write, parent refcount is cache set */
419 	down(&b->io_mutex);
420 	closure_init(&b->io, parent ?: &b->c->cl);
421 
422 	clear_bit(BTREE_NODE_dirty,	 &b->flags);
423 	change_bit(BTREE_NODE_write_idx, &b->flags);
424 
425 	do_btree_node_write(b);
426 
427 	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
428 			&b->c->cache->btree_sectors_written);
429 
430 	b->written += set_blocks(i, block_bytes(b->c->cache));
431 }
432 
433 void bch_btree_node_write(struct btree *b, struct closure *parent)
434 {
435 	unsigned int nsets = b->keys.nsets;
436 
437 	lockdep_assert_held(&b->lock);
438 
439 	__bch_btree_node_write(b, parent);
440 
441 	/*
442 	 * do verify if there was more than one set initially (i.e. we did a
443 	 * sort) and we sorted down to a single set:
444 	 */
445 	if (nsets && !b->keys.nsets)
446 		bch_btree_verify(b);
447 
448 	bch_btree_init_next(b);
449 }
450 
451 static void bch_btree_node_write_sync(struct btree *b)
452 {
453 	struct closure cl;
454 
455 	closure_init_stack(&cl);
456 
457 	mutex_lock(&b->write_lock);
458 	bch_btree_node_write(b, &cl);
459 	mutex_unlock(&b->write_lock);
460 
461 	closure_sync(&cl);
462 }
463 
464 static void btree_node_write_work(struct work_struct *w)
465 {
466 	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
467 
468 	mutex_lock(&b->write_lock);
469 	if (btree_node_dirty(b))
470 		__bch_btree_node_write(b, NULL);
471 	mutex_unlock(&b->write_lock);
472 }
473 
474 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
475 {
476 	struct bset *i = btree_bset_last(b);
477 	struct btree_write *w = btree_current_write(b);
478 
479 	lockdep_assert_held(&b->write_lock);
480 
481 	BUG_ON(!b->written);
482 	BUG_ON(!i->keys);
483 
484 	if (!btree_node_dirty(b))
485 		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
486 
487 	set_btree_node_dirty(b);
488 
489 	/*
490 	 * w->journal is always the oldest journal pin of all bkeys
491 	 * in the leaf node, to make sure the oldest jset seq won't
492 	 * be increased before this btree node is flushed.
493 	 */
494 	if (journal_ref) {
495 		if (w->journal &&
496 		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
497 			atomic_dec_bug(w->journal);
498 			w->journal = NULL;
499 		}
500 
501 		if (!w->journal) {
502 			w->journal = journal_ref;
503 			atomic_inc(w->journal);
504 		}
505 	}
506 
507 	/* Force write if set is too big */
508 	if (set_bytes(i) > PAGE_SIZE - 48 &&
509 	    !current->bio_list)
510 		bch_btree_node_write(b, NULL);
511 }
512 
513 /*
514  * Btree in memory cache - allocation/freeing
515  * mca -> memory cache
516  */
517 
518 #define mca_reserve(c)	(((!IS_ERR_OR_NULL(c->root) && c->root->level) \
519 			  ? c->root->level : 1) * 8 + 16)
520 #define mca_can_free(c)						\
521 	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
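/*
 * Worked example: with the root at level 2, mca_reserve() holds back
 * 2 * 8 + 16 = 32 nodes; before a root exists (or with a single-level tree)
 * the reserve is 1 * 8 + 16 = 24. mca_can_free() is whatever the cache
 * currently holds beyond that reserve, i.e. what the shrinker may reclaim.
 */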
522 
523 static void mca_data_free(struct btree *b)
524 {
525 	BUG_ON(b->io_mutex.count != 1);
526 
527 	bch_btree_keys_free(&b->keys);
528 
529 	b->c->btree_cache_used--;
530 	list_move(&b->list, &b->c->btree_cache_freed);
531 }
532 
533 static void mca_bucket_free(struct btree *b)
534 {
535 	BUG_ON(btree_node_dirty(b));
536 
537 	b->key.ptr[0] = 0;
538 	hlist_del_init_rcu(&b->hash);
539 	list_move(&b->list, &b->c->btree_cache_freeable);
540 }
541 
542 static unsigned int btree_order(struct bkey *k)
543 {
544 	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
545 }
546 
547 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
548 {
549 	if (!bch_btree_keys_alloc(&b->keys,
550 				  max_t(unsigned int,
551 					ilog2(b->c->btree_pages),
552 					btree_order(k)),
553 				  gfp)) {
554 		b->c->btree_cache_used++;
555 		list_move(&b->list, &b->c->btree_cache);
556 	} else {
557 		list_move(&b->list, &b->c->btree_cache_freed);
558 	}
559 }
560 
561 #ifdef CONFIG_PROVE_LOCKING
562 static int btree_lock_cmp_fn(const struct lockdep_map *_a,
563 			     const struct lockdep_map *_b)
564 {
565 	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
566 	const struct btree *b = container_of(_b, struct btree, lock.dep_map);
567 
568 	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
569 }
570 
571 static void btree_lock_print_fn(const struct lockdep_map *map)
572 {
573 	const struct btree *b = container_of(map, struct btree, lock.dep_map);
574 
575 	printk(KERN_CONT " l=%u %llu:%llu", b->level,
576 	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
577 }
578 #endif
579 
580 static struct btree *mca_bucket_alloc(struct cache_set *c,
581 				      struct bkey *k, gfp_t gfp)
582 {
583 	/*
584 	 * kzalloc() is necessary here for initialization,
585 	 * see code comments in bch_btree_keys_init().
586 	 */
587 	struct btree *b = kzalloc(sizeof(struct btree), gfp);
588 
589 	if (!b)
590 		return NULL;
591 
592 	init_rwsem(&b->lock);
593 	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
594 	mutex_init(&b->write_lock);
595 	lockdep_set_novalidate_class(&b->write_lock);
596 	INIT_LIST_HEAD(&b->list);
597 	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
598 	b->c = c;
599 	sema_init(&b->io_mutex, 1);
600 
601 	mca_data_alloc(b, k, gfp);
602 	return b;
603 }
604 
605 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
606 {
607 	struct closure cl;
608 
609 	closure_init_stack(&cl);
610 	lockdep_assert_held(&b->c->bucket_lock);
611 
612 	if (!down_write_trylock(&b->lock))
613 		return -ENOMEM;
614 
615 	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
616 
617 	if (b->keys.page_order < min_order)
618 		goto out_unlock;
619 
620 	if (!flush) {
621 		if (btree_node_dirty(b))
622 			goto out_unlock;
623 
624 		if (down_trylock(&b->io_mutex))
625 			goto out_unlock;
626 		up(&b->io_mutex);
627 	}
628 
629 retry:
630 	/*
631 	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
632 	 * __bch_btree_node_write(). To avoid an extra flush, acquire
633 	 * b->write_lock before checking BTREE_NODE_dirty bit.
634 	 */
635 	mutex_lock(&b->write_lock);
636 	/*
637 	 * If this btree node is selected in btree_flush_write() by journal
638 	 * code, delay and retry until the node is flushed by journal code
639 	 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
640 	 */
641 	if (btree_node_journal_flush(b)) {
642 		pr_debug("bnode %p is flushing by journal, retry\n", b);
643 		mutex_unlock(&b->write_lock);
644 		udelay(1);
645 		goto retry;
646 	}
647 
648 	if (btree_node_dirty(b))
649 		__bch_btree_node_write(b, &cl);
650 	mutex_unlock(&b->write_lock);
651 
652 	closure_sync(&cl);
653 
654 	/* wait for any in flight btree write */
655 	down(&b->io_mutex);
656 	up(&b->io_mutex);
657 
658 	return 0;
659 out_unlock:
660 	rw_unlock(true, b);
661 	return -ENOMEM;
662 }
663 
664 static unsigned long bch_mca_scan(struct shrinker *shrink,
665 				  struct shrink_control *sc)
666 {
667 	struct cache_set *c = shrink->private_data;
668 	struct btree *b, *t;
669 	unsigned long i, nr = sc->nr_to_scan;
670 	unsigned long freed = 0;
671 	unsigned int btree_cache_used;
672 
673 	if (c->shrinker_disabled)
674 		return SHRINK_STOP;
675 
676 	if (c->btree_cache_alloc_lock)
677 		return SHRINK_STOP;
678 
679 	/* Return -1 if we can't do anything right now */
680 	if (sc->gfp_mask & __GFP_IO)
681 		mutex_lock(&c->bucket_lock);
682 	else if (!mutex_trylock(&c->bucket_lock))
683 		return -1;
684 
685 	/*
686 	 * It's _really_ critical that we don't free too many btree nodes - we
687 	 * have to always leave ourselves a reserve. The reserve is how we
688 	 * guarantee that allocating memory for a new btree node can always
689 	 * succeed, so that inserting keys into the btree can always succeed and
690 	 * IO can always make forward progress:
691 	 */
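	/*
	 * The shrinker counts objects in pages (see bch_mca_count()), but
	 * nodes are reaped whole, so convert the request to a node count:
	 * e.g. nr_to_scan = 512 with btree_pages = 64 asks for 8 nodes.
	 * Always scan at least one node, never more than mca_can_free().
	 */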
692 	nr /= c->btree_pages;
693 	if (nr == 0)
694 		nr = 1;
695 	nr = min_t(unsigned long, nr, mca_can_free(c));
696 
697 	i = 0;
698 	btree_cache_used = c->btree_cache_used;
699 	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
700 		if (nr <= 0)
701 			goto out;
702 
703 		if (!mca_reap(b, 0, false)) {
704 			mca_data_free(b);
705 			rw_unlock(true, b);
706 			freed++;
707 		}
708 		nr--;
709 		i++;
710 	}
711 
712 	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
713 		if (nr <= 0 || i >= btree_cache_used)
714 			goto out;
715 
716 		if (!mca_reap(b, 0, false)) {
717 			mca_bucket_free(b);
718 			mca_data_free(b);
719 			rw_unlock(true, b);
720 			freed++;
721 		}
722 
723 		nr--;
724 		i++;
725 	}
726 out:
727 	mutex_unlock(&c->bucket_lock);
728 	return freed * c->btree_pages;
729 }
730 
731 static unsigned long bch_mca_count(struct shrinker *shrink,
732 				   struct shrink_control *sc)
733 {
734 	struct cache_set *c = shrink->private_data;
735 
736 	if (c->shrinker_disabled)
737 		return 0;
738 
739 	if (c->btree_cache_alloc_lock)
740 		return 0;
741 
742 	return mca_can_free(c) * c->btree_pages;
743 }
744 
745 void bch_btree_cache_free(struct cache_set *c)
746 {
747 	struct btree *b;
748 	struct closure cl;
749 
750 	closure_init_stack(&cl);
751 
752 	if (c->shrink)
753 		shrinker_free(c->shrink);
754 
755 	mutex_lock(&c->bucket_lock);
756 
757 #ifdef CONFIG_BCACHE_DEBUG
758 	if (c->verify_data)
759 		list_move(&c->verify_data->list, &c->btree_cache);
760 
761 	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
762 #endif
763 
764 	list_splice(&c->btree_cache_freeable,
765 		    &c->btree_cache);
766 
767 	while (!list_empty(&c->btree_cache)) {
768 		b = list_first_entry(&c->btree_cache, struct btree, list);
769 
770 		/*
771 		 * This function is called by cache_set_free(), no I/O
772 		 * request on cache now, it is unnecessary to acquire
773 		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
774 		 */
775 		if (btree_node_dirty(b)) {
776 			btree_complete_write(b, btree_current_write(b));
777 			clear_bit(BTREE_NODE_dirty, &b->flags);
778 		}
779 		mca_data_free(b);
780 	}
781 
782 	while (!list_empty(&c->btree_cache_freed)) {
783 		b = list_first_entry(&c->btree_cache_freed,
784 				     struct btree, list);
785 		list_del(&b->list);
786 		cancel_delayed_work_sync(&b->work);
787 		kfree(b);
788 	}
789 
790 	mutex_unlock(&c->bucket_lock);
791 }
792 
793 int bch_btree_cache_alloc(struct cache_set *c)
794 {
795 	unsigned int i;
796 
797 	for (i = 0; i < mca_reserve(c); i++)
798 		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
799 			return -ENOMEM;
800 
801 	list_splice_init(&c->btree_cache,
802 			 &c->btree_cache_freeable);
803 
804 #ifdef CONFIG_BCACHE_DEBUG
805 	mutex_init(&c->verify_lock);
806 
807 	c->verify_ondisk = (void *)
808 		__get_free_pages(GFP_KERNEL|__GFP_COMP,
809 				 ilog2(meta_bucket_pages(&c->cache->sb)));
810 	if (!c->verify_ondisk) {
811 		/*
812 		 * Don't worry about the mca_reserve() buckets
813 		 * allocated in the previous for-loop; they will be
814 		 * handled properly in bch_cache_set_unregister().
815 		 */
816 		return -ENOMEM;
817 	}
818 
819 	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
820 
821 	if (c->verify_data &&
822 	    c->verify_data->keys.set->data)
823 		list_del_init(&c->verify_data->list);
824 	else
825 		c->verify_data = NULL;
826 #endif
827 
828 	c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid);
829 	if (!c->shrink) {
830 		pr_warn("bcache: %s: could not allocate shrinker\n", __func__);
831 		return 0;
832 	}
833 
834 	c->shrink->count_objects = bch_mca_count;
835 	c->shrink->scan_objects = bch_mca_scan;
836 	c->shrink->seeks = 4;
837 	c->shrink->batch = c->btree_pages * 2;
838 	c->shrink->private_data = c;
839 
840 	shrinker_register(c->shrink);
841 
842 	return 0;
843 }
844 
845 /* Btree in memory cache - hash table */
846 
847 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
848 {
849 	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
850 }
851 
852 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
853 {
854 	struct btree *b;
855 
856 	rcu_read_lock();
857 	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
858 		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
859 			goto out;
860 	b = NULL;
861 out:
862 	rcu_read_unlock();
863 	return b;
864 }
865 
866 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
867 {
868 	spin_lock(&c->btree_cannibalize_lock);
869 	if (likely(c->btree_cache_alloc_lock == NULL)) {
870 		c->btree_cache_alloc_lock = current;
871 	} else if (c->btree_cache_alloc_lock != current) {
872 		if (op)
873 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
874 					TASK_UNINTERRUPTIBLE);
875 		spin_unlock(&c->btree_cannibalize_lock);
876 		return -EINTR;
877 	}
878 	spin_unlock(&c->btree_cannibalize_lock);
879 
880 	return 0;
881 }
882 
883 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
884 				     struct bkey *k)
885 {
886 	struct btree *b;
887 
888 	trace_bcache_btree_cache_cannibalize(c);
889 
890 	if (mca_cannibalize_lock(c, op))
891 		return ERR_PTR(-EINTR);
892 
893 	list_for_each_entry_reverse(b, &c->btree_cache, list)
894 		if (!mca_reap(b, btree_order(k), false))
895 			return b;
896 
897 	list_for_each_entry_reverse(b, &c->btree_cache, list)
898 		if (!mca_reap(b, btree_order(k), true))
899 			return b;
900 
901 	WARN(1, "btree cache cannibalize failed\n");
902 	return ERR_PTR(-ENOMEM);
903 }
904 
905 /*
906  * We can only have one thread cannibalizing other cached btree nodes at a time,
907  * or we'll deadlock. We use an open-coded mutex to ensure that, which
908  * mca_cannibalize_lock() takes. This means every time we unlock the root of
909  * the btree, we need to release this lock if we have it held.
910  */
911 void bch_cannibalize_unlock(struct cache_set *c)
912 {
913 	spin_lock(&c->btree_cannibalize_lock);
914 	if (c->btree_cache_alloc_lock == current) {
915 		c->btree_cache_alloc_lock = NULL;
916 		wake_up(&c->btree_cache_wait);
917 	}
918 	spin_unlock(&c->btree_cannibalize_lock);
919 }
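/*
 * Editor's note, a hedged sketch of the expected pairing (illustrative, not a
 * prescribed API): a traversal that may cannibalize acquires the lock through
 * mca_cannibalize_lock() (reached from mca_alloc() or btree_check_reserve())
 * and, once it no longer holds the btree root, calls bch_cannibalize_unlock()
 * before finishing its wait entry, roughly:
 *
 *	ret = bcache_btree_root(some_callback, c, &op, ...);
 *	bch_cannibalize_unlock(c);
 *	finish_wait(&c->btree_cache_wait, &op.wait);
 *
 * where some_callback is a placeholder name for the per-operation callback.
 */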
920 
921 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
922 			       struct bkey *k, int level)
923 {
924 	struct btree *b;
925 
926 	BUG_ON(current->bio_list);
927 
928 	lockdep_assert_held(&c->bucket_lock);
929 
930 	if (mca_find(c, k))
931 		return NULL;
932 
933 	/* btree_node_free() doesn't free memory; it sticks the node on the end of
934 	 * the list. Check if there are any freed nodes there:
935 	 */
936 	list_for_each_entry(b, &c->btree_cache_freeable, list)
937 		if (!mca_reap(b, btree_order(k), false))
938 			goto out;
939 
940 	/* We never free struct btree itself, just the memory that holds the on
941 	 * disk node. Check the freed list before allocating a new one:
942 	 */
943 	list_for_each_entry(b, &c->btree_cache_freed, list)
944 		if (!mca_reap(b, 0, false)) {
945 			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
946 			if (!b->keys.set[0].data)
947 				goto err;
948 			else
949 				goto out;
950 		}
951 
952 	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
953 	if (!b)
954 		goto err;
955 
956 	BUG_ON(!down_write_trylock(&b->lock));
957 	if (!b->keys.set->data)
958 		goto err;
959 out:
960 	BUG_ON(b->io_mutex.count != 1);
961 
962 	bkey_copy(&b->key, k);
963 	list_move(&b->list, &c->btree_cache);
964 	hlist_del_init_rcu(&b->hash);
965 	hlist_add_head_rcu(&b->hash, mca_hash(c, k));
966 
967 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
968 	b->parent	= (void *) ~0UL;
969 	b->flags	= 0;
970 	b->written	= 0;
971 	b->level	= level;
972 
973 	if (!b->level)
974 		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
975 				    &b->c->expensive_debug_checks);
976 	else
977 		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
978 				    &b->c->expensive_debug_checks);
979 
980 	return b;
981 err:
982 	if (b)
983 		rw_unlock(true, b);
984 
985 	b = mca_cannibalize(c, op, k);
986 	if (!IS_ERR(b))
987 		goto out;
988 
989 	return b;
990 }
991 
992 /*
993  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
994  * in from disk if necessary.
995  *
996  * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
997  *
998  * The btree node will have either a read or a write lock held, depending on
999  * level and op->lock.
1000  *
1001  * Note: Only an error code or a btree pointer will be returned; it is
1002  *       unnecessary for callers to check for a NULL pointer.
1003  */
1004 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
1005 				 struct bkey *k, int level, bool write,
1006 				 struct btree *parent)
1007 {
1008 	int i = 0;
1009 	struct btree *b;
1010 
1011 	BUG_ON(level < 0);
1012 retry:
1013 	b = mca_find(c, k);
1014 
1015 	if (!b) {
1016 		if (current->bio_list)
1017 			return ERR_PTR(-EAGAIN);
1018 
1019 		mutex_lock(&c->bucket_lock);
1020 		b = mca_alloc(c, op, k, level);
1021 		mutex_unlock(&c->bucket_lock);
1022 
1023 		if (!b)
1024 			goto retry;
1025 		if (IS_ERR(b))
1026 			return b;
1027 
1028 		bch_btree_node_read(b);
1029 
1030 		if (!write)
1031 			downgrade_write(&b->lock);
1032 	} else {
1033 		rw_lock(write, b, level);
1034 		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1035 			rw_unlock(write, b);
1036 			goto retry;
1037 		}
1038 		BUG_ON(b->level != level);
1039 	}
1040 
1041 	if (btree_node_io_error(b)) {
1042 		rw_unlock(write, b);
1043 		return ERR_PTR(-EIO);
1044 	}
1045 
1046 	BUG_ON(!b->written);
1047 
1048 	b->parent = parent;
1049 
1050 	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1051 		prefetch(b->keys.set[i].tree);
1052 		prefetch(b->keys.set[i].data);
1053 	}
1054 
1055 	for (; i <= b->keys.nsets; i++)
1056 		prefetch(b->keys.set[i].data);
1057 
1058 	return b;
1059 }
1060 
1061 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1062 {
1063 	struct btree *b;
1064 
1065 	mutex_lock(&parent->c->bucket_lock);
1066 	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1067 	mutex_unlock(&parent->c->bucket_lock);
1068 
1069 	if (!IS_ERR_OR_NULL(b)) {
1070 		b->parent = parent;
1071 		bch_btree_node_read(b);
1072 		rw_unlock(true, b);
1073 	}
1074 }
1075 
1076 /* Btree alloc */
1077 
1078 static void btree_node_free(struct btree *b)
1079 {
1080 	trace_bcache_btree_node_free(b);
1081 
1082 	BUG_ON(b == b->c->root);
1083 
1084 retry:
1085 	mutex_lock(&b->write_lock);
1086 	/*
1087 	 * If the btree node is selected and flushing in btree_flush_write(),
1088 	 * delay and retry until the BTREE_NODE_journal_flush bit cleared,
1089 	 * then it is safe to free the btree node here. Otherwise this btree
1090 	 * node will be in race condition.
1091 	 */
1092 	if (btree_node_journal_flush(b)) {
1093 		mutex_unlock(&b->write_lock);
1094 		pr_debug("bnode %p journal_flush set, retry\n", b);
1095 		udelay(1);
1096 		goto retry;
1097 	}
1098 
1099 	if (btree_node_dirty(b)) {
1100 		btree_complete_write(b, btree_current_write(b));
1101 		clear_bit(BTREE_NODE_dirty, &b->flags);
1102 	}
1103 
1104 	mutex_unlock(&b->write_lock);
1105 
1106 	cancel_delayed_work(&b->work);
1107 
1108 	mutex_lock(&b->c->bucket_lock);
1109 	bch_bucket_free(b->c, &b->key);
1110 	mca_bucket_free(b);
1111 	mutex_unlock(&b->c->bucket_lock);
1112 }
1113 
1114 /*
1115  * Only an error code or a btree pointer will be returned; it is unnecessary
1116  * for callers to check for a NULL pointer.
1117  */
1118 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1119 				     int level, bool wait,
1120 				     struct btree *parent)
1121 {
1122 	BKEY_PADDED(key) k;
1123 	struct btree *b;
1124 
1125 	mutex_lock(&c->bucket_lock);
1126 retry:
1127 	/* return ERR_PTR(-EAGAIN) when it fails */
1128 	b = ERR_PTR(-EAGAIN);
1129 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
1130 		goto err;
1131 
1132 	bkey_put(c, &k.key);
1133 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1134 
1135 	b = mca_alloc(c, op, &k.key, level);
1136 	if (IS_ERR(b))
1137 		goto err_free;
1138 
1139 	if (!b) {
1140 		cache_bug(c,
1141 			"Tried to allocate bucket that was in btree cache");
1142 		goto retry;
1143 	}
1144 
1145 	b->parent = parent;
1146 	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1147 
1148 	mutex_unlock(&c->bucket_lock);
1149 
1150 	trace_bcache_btree_node_alloc(b);
1151 	return b;
1152 err_free:
1153 	bch_bucket_free(c, &k.key);
1154 err:
1155 	mutex_unlock(&c->bucket_lock);
1156 
1157 	trace_bcache_btree_node_alloc_fail(c);
1158 	return b;
1159 }
1160 
1161 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1162 					  struct btree_op *op, int level,
1163 					  struct btree *parent)
1164 {
1165 	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1166 }
1167 
1168 static struct btree *btree_node_alloc_replacement(struct btree *b,
1169 						  struct btree_op *op)
1170 {
1171 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1172 
1173 	if (!IS_ERR(n)) {
1174 		mutex_lock(&n->write_lock);
1175 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1176 		bkey_copy_key(&n->key, &b->key);
1177 		mutex_unlock(&n->write_lock);
1178 	}
1179 
1180 	return n;
1181 }
1182 
1183 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1184 {
1185 	unsigned int i;
1186 
1187 	mutex_lock(&b->c->bucket_lock);
1188 
1189 	atomic_inc(&b->c->prio_blocked);
1190 
1191 	bkey_copy(k, &b->key);
1192 	bkey_copy_key(k, &ZERO_KEY);
1193 
1194 	for (i = 0; i < KEY_PTRS(k); i++)
1195 		SET_PTR_GEN(k, i,
1196 			    bch_inc_gen(b->c->cache,
1197 					PTR_BUCKET(b->c, &b->key, i)));
1198 
1199 	mutex_unlock(&b->c->bucket_lock);
1200 }
1201 
1202 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1203 {
1204 	struct cache_set *c = b->c;
1205 	struct cache *ca = c->cache;
1206 	unsigned int reserve = (c->root->level - b->level) * 2 + 1;
1207 
1208 	mutex_lock(&c->bucket_lock);
1209 
1210 	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1211 		if (op)
1212 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
1213 					TASK_UNINTERRUPTIBLE);
1214 		mutex_unlock(&c->bucket_lock);
1215 		return -EINTR;
1216 	}
1217 
1218 	mutex_unlock(&c->bucket_lock);
1219 
1220 	return mca_cannibalize_lock(b->c, op);
1221 }
1222 
1223 /* Garbage collection */
1224 
1225 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1226 				    struct bkey *k)
1227 {
1228 	uint8_t stale = 0;
1229 	unsigned int i;
1230 	struct bucket *g;
1231 
1232 	/*
1233 	 * ptr_invalid() can't return true for the keys that mark btree nodes as
1234 	 * freed, but since ptr_bad() returns true we'll never actually use them
1235 	 * for anything and thus we don't want to mark their pointers here
1236 	 */
1237 	if (!bkey_cmp(k, &ZERO_KEY))
1238 		return stale;
1239 
1240 	for (i = 0; i < KEY_PTRS(k); i++) {
1241 		if (!ptr_available(c, k, i))
1242 			continue;
1243 
1244 		g = PTR_BUCKET(c, k, i);
1245 
1246 		if (gen_after(g->last_gc, PTR_GEN(k, i)))
1247 			g->last_gc = PTR_GEN(k, i);
1248 
1249 		if (ptr_stale(c, k, i)) {
1250 			stale = max(stale, ptr_stale(c, k, i));
1251 			continue;
1252 		}
1253 
1254 		cache_bug_on(GC_MARK(g) &&
1255 			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1256 			     c, "inconsistent ptrs: mark = %llu, level = %i",
1257 			     GC_MARK(g), level);
1258 
1259 		if (level)
1260 			SET_GC_MARK(g, GC_MARK_METADATA);
1261 		else if (KEY_DIRTY(k))
1262 			SET_GC_MARK(g, GC_MARK_DIRTY);
1263 		else if (!GC_MARK(g))
1264 			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1265 
1266 		/* guard against overflow */
1267 		SET_GC_SECTORS_USED(g, min_t(unsigned int,
1268 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
1269 					     MAX_GC_SECTORS_USED));
1270 
1271 		BUG_ON(!GC_SECTORS_USED(g));
1272 	}
1273 
1274 	return stale;
1275 }
1276 
1277 #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
1278 
1279 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1280 {
1281 	unsigned int i;
1282 
1283 	for (i = 0; i < KEY_PTRS(k); i++)
1284 		if (ptr_available(c, k, i) &&
1285 		    !ptr_stale(c, k, i)) {
1286 			struct bucket *b = PTR_BUCKET(c, k, i);
1287 
1288 			b->gen = PTR_GEN(k, i);
1289 
1290 			if (level && bkey_cmp(k, &ZERO_KEY))
1291 				b->prio = BTREE_PRIO;
1292 			else if (!level && b->prio == BTREE_PRIO)
1293 				b->prio = INITIAL_PRIO;
1294 		}
1295 
1296 	__bch_btree_mark_key(c, level, k);
1297 }
1298 
1299 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1300 {
1301 	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1302 }
1303 
1304 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1305 {
1306 	uint8_t stale = 0;
1307 	unsigned int keys = 0, good_keys = 0;
1308 	struct bkey *k;
1309 	struct btree_iter_stack iter;
1310 	struct bset_tree *t;
1311 
1312 	gc->nodes++;
1313 
1314 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1315 		stale = max(stale, btree_mark_key(b, k));
1316 		keys++;
1317 
1318 		if (bch_ptr_bad(&b->keys, k))
1319 			continue;
1320 
1321 		gc->key_bytes += bkey_u64s(k);
1322 		gc->nkeys++;
1323 		good_keys++;
1324 
1325 		gc->data += KEY_SIZE(k);
1326 	}
1327 
1328 	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1329 		btree_bug_on(t->size &&
1330 			     bset_written(&b->keys, t) &&
1331 			     bkey_cmp(&b->key, &t->end) < 0,
1332 			     b, "found short btree key in gc");
1333 
1334 	if (b->c->gc_always_rewrite)
1335 		return true;
1336 
1337 	if (stale > 10)
1338 		return true;
1339 
1340 	if ((keys - good_keys) * 2 > keys)
1341 		return true;
1342 
1343 	return false;
1344 }
1345 
1346 #define GC_MERGE_NODES	4U
1347 
1348 struct gc_merge_info {
1349 	struct btree	*b;
1350 	unsigned int	keys;
1351 };
1352 
1353 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1354 				 struct keylist *insert_keys,
1355 				 atomic_t *journal_ref,
1356 				 struct bkey *replace_key);
1357 
1358 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1359 			     struct gc_stat *gc, struct gc_merge_info *r)
1360 {
1361 	unsigned int i, nodes = 0, keys = 0, blocks;
1362 	struct btree *new_nodes[GC_MERGE_NODES];
1363 	struct keylist keylist;
1364 	struct closure cl;
1365 	struct bkey *k;
1366 
1367 	bch_keylist_init(&keylist);
1368 
1369 	if (btree_check_reserve(b, NULL))
1370 		return 0;
1371 
1372 	memset(new_nodes, 0, sizeof(new_nodes));
1373 	closure_init_stack(&cl);
1374 
1375 	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1376 		keys += r[nodes++].keys;
1377 
1378 	blocks = btree_default_blocks(b->c) * 2 / 3;
1379 
1380 	if (nodes < 2 ||
1381 	    __set_blocks(b->keys.set[0].data, keys,
1382 			 block_bytes(b->c->cache)) > blocks * (nodes - 1))
1383 		return 0;
1384 
1385 	for (i = 0; i < nodes; i++) {
1386 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1387 		if (IS_ERR(new_nodes[i]))
1388 			goto out_nocoalesce;
1389 	}
1390 
1391 	/*
1392 	 * We have to check the reserve here, after we've allocated our new
1393 	 * nodes, to make sure the insert below will succeed - we also check
1394 	 * before as an optimization to potentially avoid a bunch of expensive
1395 	 * allocs/sorts
1396 	 */
1397 	if (btree_check_reserve(b, NULL))
1398 		goto out_nocoalesce;
1399 
1400 	for (i = 0; i < nodes; i++)
1401 		mutex_lock(&new_nodes[i]->write_lock);
1402 
1403 	for (i = nodes - 1; i > 0; --i) {
1404 		struct bset *n1 = btree_bset_first(new_nodes[i]);
1405 		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1406 		struct bkey *k, *last = NULL;
1407 
1408 		keys = 0;
1409 
1410 		if (i > 1) {
1411 			for (k = n2->start;
1412 			     k < bset_bkey_last(n2);
1413 			     k = bkey_next(k)) {
1414 				if (__set_blocks(n1, n1->keys + keys +
1415 						 bkey_u64s(k),
1416 						 block_bytes(b->c->cache)) > blocks)
1417 					break;
1418 
1419 				last = k;
1420 				keys += bkey_u64s(k);
1421 			}
1422 		} else {
1423 			/*
1424 			 * Last node we're not getting rid of - we're getting
1425 			 * rid of the node at r[0]. Have to try and fit all of
1426 			 * the remaining keys into this node; we can't ensure
1427 			 * they will always fit due to rounding and variable
1428 			 * length keys (shouldn't be possible in practice,
1429 			 * though)
1430 			 */
1431 			if (__set_blocks(n1, n1->keys + n2->keys,
1432 					 block_bytes(b->c->cache)) >
1433 			    btree_blocks(new_nodes[i]))
1434 				goto out_unlock_nocoalesce;
1435 
1436 			keys = n2->keys;
1437 			/* Take the key of the node we're getting rid of */
1438 			last = &r->b->key;
1439 		}
1440 
1441 		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1442 		       btree_blocks(new_nodes[i]));
1443 
1444 		if (last)
1445 			bkey_copy_key(&new_nodes[i]->key, last);
1446 
1447 		memcpy(bset_bkey_last(n1),
1448 		       n2->start,
1449 		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1450 
1451 		n1->keys += keys;
1452 		r[i].keys = n1->keys;
1453 
1454 		memmove(n2->start,
1455 			bset_bkey_idx(n2, keys),
1456 			(void *) bset_bkey_last(n2) -
1457 			(void *) bset_bkey_idx(n2, keys));
1458 
1459 		n2->keys -= keys;
1460 
1461 		if (__bch_keylist_realloc(&keylist,
1462 					  bkey_u64s(&new_nodes[i]->key)))
1463 			goto out_unlock_nocoalesce;
1464 
1465 		bch_btree_node_write(new_nodes[i], &cl);
1466 		bch_keylist_add(&keylist, &new_nodes[i]->key);
1467 	}
1468 
1469 	for (i = 0; i < nodes; i++)
1470 		mutex_unlock(&new_nodes[i]->write_lock);
1471 
1472 	closure_sync(&cl);
1473 
1474 	/* We emptied out this node */
1475 	BUG_ON(btree_bset_first(new_nodes[0])->keys);
1476 	btree_node_free(new_nodes[0]);
1477 	rw_unlock(true, new_nodes[0]);
1478 	new_nodes[0] = NULL;
1479 
1480 	for (i = 0; i < nodes; i++) {
1481 		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1482 			goto out_nocoalesce;
1483 
1484 		make_btree_freeing_key(r[i].b, keylist.top);
1485 		bch_keylist_push(&keylist);
1486 	}
1487 
1488 	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1489 	BUG_ON(!bch_keylist_empty(&keylist));
1490 
1491 	for (i = 0; i < nodes; i++) {
1492 		btree_node_free(r[i].b);
1493 		rw_unlock(true, r[i].b);
1494 
1495 		r[i].b = new_nodes[i];
1496 	}
1497 
1498 	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1499 	r[nodes - 1].b = ERR_PTR(-EINTR);
1500 
1501 	trace_bcache_btree_gc_coalesce(nodes);
1502 	gc->nodes--;
1503 
1504 	bch_keylist_free(&keylist);
1505 
1506 	/* Invalidated our iterator */
1507 	return -EINTR;
1508 
1509 out_unlock_nocoalesce:
1510 	for (i = 0; i < nodes; i++)
1511 		mutex_unlock(&new_nodes[i]->write_lock);
1512 
1513 out_nocoalesce:
1514 	closure_sync(&cl);
1515 
1516 	while ((k = bch_keylist_pop(&keylist)))
1517 		if (!bkey_cmp(k, &ZERO_KEY))
1518 			atomic_dec(&b->c->prio_blocked);
1519 	bch_keylist_free(&keylist);
1520 
1521 	for (i = 0; i < nodes; i++)
1522 		if (!IS_ERR_OR_NULL(new_nodes[i])) {
1523 			btree_node_free(new_nodes[i]);
1524 			rw_unlock(true, new_nodes[i]);
1525 		}
1526 	return 0;
1527 }
1528 
1529 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1530 				 struct btree *replace)
1531 {
1532 	struct keylist keys;
1533 	struct btree *n;
1534 
1535 	if (btree_check_reserve(b, NULL))
1536 		return 0;
1537 
1538 	n = btree_node_alloc_replacement(replace, NULL);
1539 	if (IS_ERR(n))
1540 		return 0;
1541 
1542 	/* recheck reserve after allocating replacement node */
1543 	if (btree_check_reserve(b, NULL)) {
1544 		btree_node_free(n);
1545 		rw_unlock(true, n);
1546 		return 0;
1547 	}
1548 
1549 	bch_btree_node_write_sync(n);
1550 
1551 	bch_keylist_init(&keys);
1552 	bch_keylist_add(&keys, &n->key);
1553 
1554 	make_btree_freeing_key(replace, keys.top);
1555 	bch_keylist_push(&keys);
1556 
1557 	bch_btree_insert_node(b, op, &keys, NULL, NULL);
1558 	BUG_ON(!bch_keylist_empty(&keys));
1559 
1560 	btree_node_free(replace);
1561 	rw_unlock(true, n);
1562 
1563 	/* Invalidated our iterator */
1564 	return -EINTR;
1565 }
1566 
1567 static unsigned int btree_gc_count_keys(struct btree *b)
1568 {
1569 	struct bkey *k;
1570 	struct btree_iter_stack iter;
1571 	unsigned int ret = 0;
1572 
1573 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1574 		ret += bkey_u64s(k);
1575 
1576 	return ret;
1577 }
1578 
1579 static size_t btree_gc_min_nodes(struct cache_set *c)
1580 {
1581 	size_t min_nodes;
1582 
1583 	/*
1584 	 * Incremental GC pauses for 100ms whenever front-end I/O
1585 	 * arrives, so if GC only processed a constant number (100)
1586 	 * of nodes each time it ran, a large btree would make a GC
1587 	 * pass last a very long time, and the front-end I/Os could
1588 	 * run out of buckets (no new bucket can be allocated while
1589 	 * GC runs) and be blocked again.
1590 	 * So rather than a fixed batch, GC scales the batch size
1591 	 * with the number of btree nodes: the whole pass is divided
1592 	 * into a constant number (100) of steps, so when there are
1593 	 * many btree nodes GC processes more nodes per step, and
1594 	 * when there are few it processes fewer per step
1595 	 * (but never fewer than MIN_GC_NODES).
1596 	 */
1597 	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1598 	if (min_nodes < MIN_GC_NODES)
1599 		min_nodes = MIN_GC_NODES;
1600 
1601 	return min_nodes;
1602 }
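/*
 * Worked example: with c->gc_stats.nodes = 80000 and MAX_GC_TIMES = 100,
 * each incremental GC step covers at least 800 nodes before it may yield;
 * with only 5000 nodes the MIN_GC_NODES floor of 100 applies instead.
 */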
1603 
1604 
1605 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1606 			    struct closure *writes, struct gc_stat *gc)
1607 {
1608 	int ret = 0;
1609 	bool should_rewrite;
1610 	struct bkey *k;
1611 	struct btree_iter_stack iter;
1612 	struct gc_merge_info r[GC_MERGE_NODES];
1613 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1614 
1615 	bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
1616 
1617 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1618 		i->b = ERR_PTR(-EINTR);
1619 
1620 	while (1) {
1621 		k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
1622 					       bch_ptr_bad);
1623 		if (k) {
1624 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1625 						  true, b);
1626 			if (IS_ERR(r->b)) {
1627 				ret = PTR_ERR(r->b);
1628 				break;
1629 			}
1630 
1631 			r->keys = btree_gc_count_keys(r->b);
1632 
1633 			ret = btree_gc_coalesce(b, op, gc, r);
1634 			if (ret)
1635 				break;
1636 		}
1637 
1638 		if (!last->b)
1639 			break;
1640 
1641 		if (!IS_ERR(last->b)) {
1642 			should_rewrite = btree_gc_mark_node(last->b, gc);
1643 			if (should_rewrite) {
1644 				ret = btree_gc_rewrite_node(b, op, last->b);
1645 				if (ret)
1646 					break;
1647 			}
1648 
1649 			if (last->b->level) {
1650 				ret = btree_gc_recurse(last->b, op, writes, gc);
1651 				if (ret)
1652 					break;
1653 			}
1654 
1655 			bkey_copy_key(&b->c->gc_done, &last->b->key);
1656 
1657 			/*
1658 			 * Must flush leaf nodes before gc ends, since replace
1659 			 * operations aren't journalled
1660 			 */
1661 			mutex_lock(&last->b->write_lock);
1662 			if (btree_node_dirty(last->b))
1663 				bch_btree_node_write(last->b, writes);
1664 			mutex_unlock(&last->b->write_lock);
1665 			rw_unlock(true, last->b);
1666 		}
1667 
1668 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1669 		r->b = NULL;
1670 
1671 		if (atomic_read(&b->c->search_inflight) &&
1672 		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1673 			gc->nodes_pre =  gc->nodes;
1674 			ret = -EAGAIN;
1675 			break;
1676 		}
1677 
1678 		if (need_resched()) {
1679 			ret = -EAGAIN;
1680 			break;
1681 		}
1682 	}
1683 
1684 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1685 		if (!IS_ERR_OR_NULL(i->b)) {
1686 			mutex_lock(&i->b->write_lock);
1687 			if (btree_node_dirty(i->b))
1688 				bch_btree_node_write(i->b, writes);
1689 			mutex_unlock(&i->b->write_lock);
1690 			rw_unlock(true, i->b);
1691 		}
1692 
1693 	return ret;
1694 }
1695 
1696 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1697 			     struct closure *writes, struct gc_stat *gc)
1698 {
1699 	struct btree *n = NULL;
1700 	int ret = 0;
1701 	bool should_rewrite;
1702 
1703 	should_rewrite = btree_gc_mark_node(b, gc);
1704 	if (should_rewrite) {
1705 		n = btree_node_alloc_replacement(b, NULL);
1706 
1707 		if (!IS_ERR(n)) {
1708 			bch_btree_node_write_sync(n);
1709 
1710 			bch_btree_set_root(n);
1711 			btree_node_free(b);
1712 			rw_unlock(true, n);
1713 
1714 			return -EINTR;
1715 		}
1716 	}
1717 
1718 	__bch_btree_mark_key(b->c, b->level + 1, &b->key);
1719 
1720 	if (b->level) {
1721 		ret = btree_gc_recurse(b, op, writes, gc);
1722 		if (ret)
1723 			return ret;
1724 	}
1725 
1726 	bkey_copy_key(&b->c->gc_done, &b->key);
1727 
1728 	return ret;
1729 }
1730 
1731 static void btree_gc_start(struct cache_set *c)
1732 {
1733 	struct cache *ca;
1734 	struct bucket *b;
1735 
1736 	if (!c->gc_mark_valid)
1737 		return;
1738 
1739 	mutex_lock(&c->bucket_lock);
1740 
1741 	c->gc_done = ZERO_KEY;
1742 
1743 	ca = c->cache;
1744 	for_each_bucket(b, ca) {
1745 		b->last_gc = b->gen;
1746 		if (bch_can_invalidate_bucket(ca, b))
1747 			b->reclaimable_in_gc = 1;
1748 		if (!atomic_read(&b->pin)) {
1749 			SET_GC_MARK(b, 0);
1750 			SET_GC_SECTORS_USED(b, 0);
1751 		}
1752 	}
1753 
1754 	c->gc_mark_valid = 0;
1755 	mutex_unlock(&c->bucket_lock);
1756 }
1757 
1758 static void bch_btree_gc_finish(struct cache_set *c)
1759 {
1760 	struct bucket *b;
1761 	struct cache *ca;
1762 	unsigned int i, j;
1763 	uint64_t *k;
1764 
1765 	mutex_lock(&c->bucket_lock);
1766 
1767 	set_gc_sectors(c);
1768 	c->gc_mark_valid = 1;
1769 	c->need_gc	= 0;
1770 
1771 	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1772 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1773 			    GC_MARK_METADATA);
1774 
1775 	/* don't reclaim buckets to which writeback keys point */
1776 	rcu_read_lock();
1777 	for (i = 0; i < c->devices_max_used; i++) {
1778 		struct bcache_device *d = c->devices[i];
1779 		struct cached_dev *dc;
1780 		struct keybuf_key *w, *n;
1781 
1782 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1783 			continue;
1784 		dc = container_of(d, struct cached_dev, disk);
1785 
1786 		spin_lock(&dc->writeback_keys.lock);
1787 		rbtree_postorder_for_each_entry_safe(w, n,
1788 					&dc->writeback_keys.keys, node)
1789 			for (j = 0; j < KEY_PTRS(&w->key); j++)
1790 				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1791 					    GC_MARK_DIRTY);
1792 		spin_unlock(&dc->writeback_keys.lock);
1793 	}
1794 	rcu_read_unlock();
1795 
1796 	c->avail_nbuckets = 0;
1797 
1798 	ca = c->cache;
1799 	ca->invalidate_needs_gc = 0;
1800 
1801 	for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1802 		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1803 
1804 	for (k = ca->prio_buckets;
1805 	     k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
1806 		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1807 
1808 	for_each_bucket(b, ca) {
1809 		c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
1810 
1811 		if (b->reclaimable_in_gc)
1812 			b->reclaimable_in_gc = 0;
1813 
1814 		if (atomic_read(&b->pin))
1815 			continue;
1816 
1817 		BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1818 
1819 		if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1820 			c->avail_nbuckets++;
1821 	}
1822 
1823 	mutex_unlock(&c->bucket_lock);
1824 }
1825 
1826 static void bch_btree_gc(struct cache_set *c)
1827 {
1828 	int ret;
1829 	struct gc_stat stats;
1830 	struct closure writes;
1831 	struct btree_op op;
1832 	uint64_t start_time = local_clock();
1833 
1834 	trace_bcache_gc_start(c);
1835 
1836 	memset(&stats, 0, sizeof(struct gc_stat));
1837 	closure_init_stack(&writes);
1838 	bch_btree_op_init(&op, SHRT_MAX);
1839 
1840 	btree_gc_start(c);
1841 
1842 	/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1843 	do {
1844 		ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1845 		closure_sync(&writes);
1846 		cond_resched();
1847 
1848 		if (ret == -EAGAIN)
1849 			schedule_timeout_interruptible(msecs_to_jiffies
1850 						       (GC_SLEEP_MS));
1851 		else if (ret)
1852 			pr_warn("gc failed!\n");
1853 	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1854 
1855 	bch_btree_gc_finish(c);
1856 	wake_up_allocators(c);
1857 
1858 	bch_time_stats_update(&c->btree_gc_time, start_time);
1859 
1860 	stats.key_bytes *= sizeof(uint64_t);
1861 	stats.data	<<= 9;
1862 	bch_update_bucket_in_use(c, &stats);
1863 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1864 
1865 	trace_bcache_gc_end(c);
1866 
1867 	bch_moving_gc(c);
1868 }
1869 
1870 static bool gc_should_run(struct cache_set *c)
1871 {
1872 	struct cache *ca = c->cache;
1873 
1874 	if (ca->invalidate_needs_gc)
1875 		return true;
1876 
1877 	if (atomic_read(&c->sectors_to_gc) < 0)
1878 		return true;
1879 
1880 	return false;
1881 }
1882 
1883 static int bch_gc_thread(void *arg)
1884 {
1885 	struct cache_set *c = arg;
1886 
1887 	while (1) {
1888 		wait_event_interruptible(c->gc_wait,
1889 			   kthread_should_stop() ||
1890 			   test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1891 			   gc_should_run(c));
1892 
1893 		if (kthread_should_stop() ||
1894 		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1895 			break;
1896 
1897 		set_gc_sectors(c);
1898 		bch_btree_gc(c);
1899 	}
1900 
1901 	wait_for_kthread_stop();
1902 	return 0;
1903 }
1904 
1905 int bch_gc_thread_start(struct cache_set *c)
1906 {
1907 	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1908 	return PTR_ERR_OR_ZERO(c->gc_thread);
1909 }
1910 
1911 /* Initial partial gc */
1912 
1913 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1914 {
1915 	int ret = 0;
1916 	struct bkey *k, *p = NULL;
1917 	struct btree_iter_stack iter;
1918 
1919 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1920 		bch_initial_mark_key(b->c, b->level, k);
1921 
1922 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
1923 
1924 	if (b->level) {
1925 		bch_btree_iter_stack_init(&b->keys, &iter, NULL);
1926 
1927 		do {
1928 			k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
1929 						       bch_ptr_bad);
1930 			if (k) {
1931 				btree_node_prefetch(b, k);
1932 				/*
1933 				 * initialize c->gc_stats.nodes
1934 				 * for incremental GC
1935 				 */
1936 				b->c->gc_stats.nodes++;
1937 			}
1938 
1939 			if (p)
1940 				ret = bcache_btree(check_recurse, p, b, op);
1941 
1942 			p = k;
1943 		} while (p && !ret);
1944 	}
1945 
1946 	return ret;
1947 }
1948 
1949 
1950 static int bch_btree_check_thread(void *arg)
1951 {
1952 	int ret;
1953 	struct btree_check_info *info = arg;
1954 	struct btree_check_state *check_state = info->state;
1955 	struct cache_set *c = check_state->c;
1956 	struct btree_iter_stack iter;
1957 	struct bkey *k, *p;
1958 	int cur_idx, prev_idx, skip_nr;
1959 
1960 	k = p = NULL;
1961 	cur_idx = prev_idx = 0;
1962 	ret = 0;
1963 
1964 	/* root node keys are checked before the check threads are created */
1965 	bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
1966 	k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
1967 	BUG_ON(!k);
1968 
1969 	p = k;
1970 	while (k) {
1971 		/*
1972 		 * Fetch a root node key index, skip the keys which
1973 		 * should be fetched by other threads, then check the
1974 		 * sub-tree indexed by the fetched key.
1975 		 */
1976 		spin_lock(&check_state->idx_lock);
1977 		cur_idx = check_state->key_idx;
1978 		check_state->key_idx++;
1979 		spin_unlock(&check_state->idx_lock);
1980 
1981 		skip_nr = cur_idx - prev_idx;
1982 
1983 		while (skip_nr) {
1984 			k = bch_btree_iter_next_filter(&iter.iter,
1985 						       &c->root->keys,
1986 						       bch_ptr_bad);
1987 			if (k)
1988 				p = k;
1989 			else {
1990 				/*
1991 				 * No more keys to check in the root node;
1992 				 * the current checking threads are enough,
1993 				 * so stop creating more.
1994 				 */
1995 				atomic_set(&check_state->enough, 1);
1996 				/* Update check_state->enough earlier */
1997 				smp_mb__after_atomic();
1998 				goto out;
1999 			}
2000 			skip_nr--;
2001 			cond_resched();
2002 		}
2003 
2004 		if (p) {
2005 			struct btree_op op;
2006 
2007 			btree_node_prefetch(c->root, p);
2008 			c->gc_stats.nodes++;
2009 			bch_btree_op_init(&op, 0);
2010 			ret = bcache_btree(check_recurse, p, c->root, &op);
2011 			/*
2012 			 * The op may have been added to the cache_set's
2013 			 * btree_cache_wait in mca_cannibalize(); make sure it
2014 			 * is removed from that list and btree_cache_alloc_lock
2015 			 * is released before the op's memory is freed.
2016 			 * Otherwise the btree_cache_wait list would be corrupted.
2017 			 */
2018 			bch_cannibalize_unlock(c);
2019 			finish_wait(&c->btree_cache_wait, &(&op)->wait);
2020 			if (ret)
2021 				goto out;
2022 		}
2023 		p = NULL;
2024 		prev_idx = cur_idx;
2025 		cond_resched();
2026 	}
2027 
2028 out:
2029 	info->result = ret;
2030 	/* update check_state->started among all CPUs */
2031 	smp_mb__before_atomic();
2032 	if (atomic_dec_and_test(&check_state->started))
2033 		wake_up(&check_state->wait);
2034 
2035 	return ret;
2036 }
2037 
2038 
2039 
2040 static int bch_btree_chkthread_nr(void)
2041 {
2042 	int n = num_online_cpus()/2;
2043 
2044 	if (n == 0)
2045 		n = 1;
2046 	else if (n > BCH_BTR_CHKTHREAD_MAX)
2047 		n = BCH_BTR_CHKTHREAD_MAX;
2048 
2049 	return n;
2050 }
2051 
2052 int bch_btree_check(struct cache_set *c)
2053 {
2054 	int ret = 0;
2055 	int i;
2056 	struct bkey *k = NULL;
2057 	struct btree_iter_stack iter;
2058 	struct btree_check_state check_state;
2059 
2060 	/* check and mark root node keys */
2061 	for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2062 		bch_initial_mark_key(c, c->root->level, k);
2063 
2064 	bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
2065 
2066 	if (c->root->level == 0)
2067 		return 0;
2068 
2069 	memset(&check_state, 0, sizeof(struct btree_check_state));
2070 	check_state.c = c;
2071 	check_state.total_threads = bch_btree_chkthread_nr();
2072 	check_state.key_idx = 0;
2073 	spin_lock_init(&check_state.idx_lock);
2074 	atomic_set(&check_state.started, 0);
2075 	atomic_set(&check_state.enough, 0);
2076 	init_waitqueue_head(&check_state.wait);
2077 
2078 	rw_lock(0, c->root, c->root->level);
2079 	/*
2080 	 * Run multiple threads to check btree nodes in parallel. If
2081 	 * check_state.enough is non-zero, the currently running check
2082 	 * threads are already sufficient and it is unnecessary to create
2083 	 * more.
2084 	 */
2085 	for (i = 0; i < check_state.total_threads; i++) {
2086 		/* fetch latest check_state.enough earlier */
2087 		smp_mb__before_atomic();
2088 		if (atomic_read(&check_state.enough))
2089 			break;
2090 
2091 		check_state.infos[i].result = 0;
2092 		check_state.infos[i].state = &check_state;
2093 
2094 		check_state.infos[i].thread =
2095 			kthread_run(bch_btree_check_thread,
2096 				    &check_state.infos[i],
2097 				    "bch_btrchk[%d]", i);
2098 		if (IS_ERR(check_state.infos[i].thread)) {
2099 			pr_err("fails to run thread bch_btrchk[%d]\n", i);
2100 			for (--i; i >= 0; i--)
2101 				kthread_stop(check_state.infos[i].thread);
2102 			ret = -ENOMEM;
2103 			goto out;
2104 		}
2105 		atomic_inc(&check_state.started);
2106 	}
2107 
2108 	/*
2109 	 * Must wait for all threads to stop.
2110 	 */
2111 	wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
2112 
2113 	for (i = 0; i < check_state.total_threads; i++) {
2114 		if (check_state.infos[i].result) {
2115 			ret = check_state.infos[i].result;
2116 			goto out;
2117 		}
2118 	}
2119 
2120 out:
2121 	rw_unlock(0, c->root);
2122 	return ret;
2123 }
2124 
2125 void bch_initial_gc_finish(struct cache_set *c)
2126 {
2127 	struct cache *ca = c->cache;
2128 	struct bucket *b;
2129 
2130 	bch_btree_gc_finish(c);
2131 
2132 	mutex_lock(&c->bucket_lock);
2133 
2134 	/*
2135 	 * We need to put some unused buckets directly on the prio freelist in
2136 	 * order to get the allocator thread started - it needs freed buckets in
2137 	 * order to rewrite the prios and gens, and it needs to rewrite prios
2138 	 * and gens in order to free buckets.
2139 	 *
2140 	 * This is only safe for buckets that have no live data in them, which
2141 	 * there should always be some of.
2142 	 */
2143 	for_each_bucket(b, ca) {
2144 		if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2145 		    fifo_full(&ca->free[RESERVE_BTREE]))
2146 			break;
2147 
2148 		if (bch_can_invalidate_bucket(ca, b) &&
2149 		    !GC_MARK(b)) {
2150 			__bch_invalidate_one_bucket(ca, b);
2151 			if (!fifo_push(&ca->free[RESERVE_PRIO],
2152 			   b - ca->buckets))
2153 				fifo_push(&ca->free[RESERVE_BTREE],
2154 					  b - ca->buckets);
2155 		}
2156 	}
2157 
2158 	mutex_unlock(&c->bucket_lock);
2159 }
2160 
2161 /* Btree insertion */
2162 
2163 static bool btree_insert_key(struct btree *b, struct bkey *k,
2164 			     struct bkey *replace_key)
2165 {
2166 	unsigned int status;
2167 
2168 	BUG_ON(bkey_cmp(k, &b->key) > 0);
2169 
2170 	status = bch_btree_insert_key(&b->keys, k, replace_key);
2171 	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2172 		bch_check_keys(&b->keys, "%u for %s", status,
2173 			       replace_key ? "replace" : "insert");
2174 
2175 		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2176 					      status);
2177 		return true;
2178 	} else
2179 		return false;
2180 }
2181 
2182 static size_t insert_u64s_remaining(struct btree *b)
2183 {
2184 	long ret = bch_btree_keys_u64s_remaining(&b->keys);
2185 
2186 	/*
2187 	 * Might land in the middle of an existing extent and have to split it
2188 	 */
2189 	if (b->keys.ops->is_extents)
2190 		ret -= KEY_MAX_U64S;
2191 
2192 	return max(ret, 0L);
2193 }
2194 
2195 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2196 				  struct keylist *insert_keys,
2197 				  struct bkey *replace_key)
2198 {
2199 	bool ret = false;
2200 	int oldsize = bch_count_data(&b->keys);
2201 
2202 	while (!bch_keylist_empty(insert_keys)) {
2203 		struct bkey *k = insert_keys->keys;
2204 
2205 		if (bkey_u64s(k) > insert_u64s_remaining(b))
2206 			break;
2207 
2208 		if (bkey_cmp(k, &b->key) <= 0) {
2209 			if (!b->level)
2210 				bkey_put(b->c, k);
2211 
2212 			ret |= btree_insert_key(b, k, replace_key);
2213 			bch_keylist_pop_front(insert_keys);
2214 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
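			/* key straddles the end of this node: insert the part that fits, leave the rest for the next node */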
2215 			BKEY_PADDED(key) temp;
2216 			bkey_copy(&temp.key, insert_keys->keys);
2217 
2218 			bch_cut_back(&b->key, &temp.key);
2219 			bch_cut_front(&b->key, insert_keys->keys);
2220 
2221 			ret |= btree_insert_key(b, &temp.key, replace_key);
2222 			break;
2223 		} else {
2224 			break;
2225 		}
2226 	}
2227 
2228 	if (!ret)
2229 		op->insert_collision = true;
2230 
2231 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2232 
2233 	BUG_ON(bch_count_data(&b->keys) < oldsize);
2234 	return ret;
2235 }
2236 
2237 static int btree_split(struct btree *b, struct btree_op *op,
2238 		       struct keylist *insert_keys,
2239 		       struct bkey *replace_key)
2240 {
2241 	bool split;
2242 	struct btree *n1, *n2 = NULL, *n3 = NULL;
2243 	uint64_t start_time = local_clock();
2244 	struct closure cl;
2245 	struct keylist parent_keys;
2246 
2247 	closure_init_stack(&cl);
2248 	bch_keylist_init(&parent_keys);
2249 
2250 	if (btree_check_reserve(b, op)) {
2251 		if (!b->level)
2252 			return -EINTR;
2253 		else
2254 			WARN(1, "insufficient reserve for split\n");
2255 	}
2256 
2257 	n1 = btree_node_alloc_replacement(b, op);
2258 	if (IS_ERR(n1))
2259 		goto err;
2260 
2261 	split = set_blocks(btree_bset_first(n1),
2262 			   block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
2263 
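	/* split only if the coalesced node would be more than 4/5 full; otherwise just rewrite it compacted */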
2264 	if (split) {
2265 		unsigned int keys = 0;
2266 
2267 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2268 
2269 		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2270 		if (IS_ERR(n2))
2271 			goto err_free1;
2272 
2273 		if (!b->parent) {
2274 			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2275 			if (IS_ERR(n3))
2276 				goto err_free2;
2277 		}
2278 
2279 		mutex_lock(&n1->write_lock);
2280 		mutex_lock(&n2->write_lock);
2281 
2282 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2283 
2284 		/*
2285 		 * Has to be a linear search because we don't have an auxiliary
2286 		 * search tree yet
2287 		 */
2288 
2289 		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2290 			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2291 							keys));
2292 
2293 		bkey_copy_key(&n1->key,
2294 			      bset_bkey_idx(btree_bset_first(n1), keys));
2295 		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2296 
2297 		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2298 		btree_bset_first(n1)->keys = keys;
2299 
2300 		memcpy(btree_bset_first(n2)->start,
2301 		       bset_bkey_last(btree_bset_first(n1)),
2302 		       btree_bset_first(n2)->keys * sizeof(uint64_t));
2303 
2304 		bkey_copy_key(&n2->key, &b->key);
2305 
2306 		bch_keylist_add(&parent_keys, &n2->key);
2307 		bch_btree_node_write(n2, &cl);
2308 		mutex_unlock(&n2->write_lock);
2309 		rw_unlock(true, n2);
2310 	} else {
2311 		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2312 
2313 		mutex_lock(&n1->write_lock);
2314 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2315 	}
2316 
2317 	bch_keylist_add(&parent_keys, &n1->key);
2318 	bch_btree_node_write(n1, &cl);
2319 	mutex_unlock(&n1->write_lock);
2320 
2321 	if (n3) {
2322 		/* Depth increases, make a new root */
2323 		mutex_lock(&n3->write_lock);
2324 		bkey_copy_key(&n3->key, &MAX_KEY);
2325 		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2326 		bch_btree_node_write(n3, &cl);
2327 		mutex_unlock(&n3->write_lock);
2328 
2329 		closure_sync(&cl);
2330 		bch_btree_set_root(n3);
2331 		rw_unlock(true, n3);
2332 	} else if (!b->parent) {
2333 		/* Root filled up but didn't need to be split */
2334 		closure_sync(&cl);
2335 		bch_btree_set_root(n1);
2336 	} else {
2337 		/* Split a non root node */
2338 		closure_sync(&cl);
2339 		make_btree_freeing_key(b, parent_keys.top);
2340 		bch_keylist_push(&parent_keys);
2341 
2342 		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2343 		BUG_ON(!bch_keylist_empty(&parent_keys));
2344 	}
2345 
2346 	btree_node_free(b);
2347 	rw_unlock(true, n1);
2348 
2349 	bch_time_stats_update(&b->c->btree_split_time, start_time);
2350 
2351 	return 0;
2352 err_free2:
2353 	bkey_put(b->c, &n2->key);
2354 	btree_node_free(n2);
2355 	rw_unlock(true, n2);
2356 err_free1:
2357 	bkey_put(b->c, &n1->key);
2358 	btree_node_free(n1);
2359 	rw_unlock(true, n1);
2360 err:
2361 	WARN(1, "bcache: btree split failed (level %u)", b->level);
2362 
2363 	if (n3 == ERR_PTR(-EAGAIN) ||
2364 	    n2 == ERR_PTR(-EAGAIN) ||
2365 	    n1 == ERR_PTR(-EAGAIN))
2366 		return -EAGAIN;
2367 
2368 	return -ENOMEM;
2369 }
2370 
2371 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2372 				 struct keylist *insert_keys,
2373 				 atomic_t *journal_ref,
2374 				 struct bkey *replace_key)
2375 {
2376 	struct closure cl;
2377 
2378 	BUG_ON(b->level && replace_key);
2379 
2380 	closure_init_stack(&cl);
2381 
2382 	mutex_lock(&b->write_lock);
2383 
2384 	if (write_block(b) != btree_bset_last(b) &&
2385 	    b->keys.last_set_unwritten)
2386 		bch_btree_init_next(b); /* just wrote a set */
2387 
2388 	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2389 		mutex_unlock(&b->write_lock);
2390 		goto split;
2391 	}
2392 
2393 	BUG_ON(write_block(b) != btree_bset_last(b));
2394 
2395 	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2396 		if (!b->level)
2397 			bch_btree_leaf_dirty(b, journal_ref);
2398 		else
2399 			bch_btree_node_write(b, &cl);
2400 	}
2401 
2402 	mutex_unlock(&b->write_lock);
2403 
2404 	/* wait for btree node write if necessary, after unlock */
2405 	closure_sync(&cl);
2406 
2407 	return 0;
2408 split:
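	/*
	 * Keys don't fit, so the node has to be split.  If we're inside a bio
	 * submission path (current->bio_list) we must not block here, so back
	 * off with -EAGAIN; if we don't hold locks high enough up the tree,
	 * ask for them and retry with -EINTR; otherwise split right here.
	 */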
2409 	if (current->bio_list) {
2410 		op->lock = b->c->root->level + 1;
2411 		return -EAGAIN;
2412 	} else if (op->lock <= b->c->root->level) {
2413 		op->lock = b->c->root->level + 1;
2414 		return -EINTR;
2415 	} else {
2416 		/* Invalidated all iterators */
2417 		int ret = btree_split(b, op, insert_keys, replace_key);
2418 
2419 		if (bch_keylist_empty(insert_keys))
2420 			return 0;
2421 		else if (!ret)
2422 			return -EINTR;
2423 		return ret;
2424 	}
2425 }
2426 
2427 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2428 			       struct bkey *check_key)
2429 {
2430 	int ret = -EINTR;
2431 	uint64_t btree_ptr = b->key.ptr[0];
2432 	unsigned long seq = b->seq;
2433 	struct keylist insert;
2434 	bool upgrade = op->lock == -1;
2435 
2436 	bch_keylist_init(&insert);
2437 
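	/* op->lock == -1 means only a read lock is held: upgrade to a write
	 * lock and verify the node wasn't freed or rewritten in the meantime */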
2438 	if (upgrade) {
2439 		rw_unlock(false, b);
2440 		rw_lock(true, b, b->level);
2441 
2442 		if (b->key.ptr[0] != btree_ptr ||
2443 		    b->seq != seq + 1) {
2444 			op->lock = b->level;
2445 			goto out;
2446 		}
2447 	}
2448 
2449 	SET_KEY_PTRS(check_key, 1);
2450 	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2451 
2452 	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2453 
2454 	bch_keylist_add(&insert, check_key);
2455 
2456 	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2457 
2458 	BUG_ON(!ret && !bch_keylist_empty(&insert));
2459 out:
2460 	if (upgrade)
2461 		downgrade_write(&b->lock);
2462 	return ret;
2463 }
2464 
2465 struct btree_insert_op {
2466 	struct btree_op	op;
2467 	struct keylist	*keys;
2468 	atomic_t	*journal_ref;
2469 	struct bkey	*replace_key;
2470 };
2471 
2472 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2473 {
2474 	struct btree_insert_op *op = container_of(b_op,
2475 					struct btree_insert_op, op);
2476 
2477 	int ret = bch_btree_insert_node(b, &op->op, op->keys,
2478 					op->journal_ref, op->replace_key);
2479 	if (ret && !bch_keylist_empty(op->keys))
2480 		return ret;
2481 	else
2482 		return MAP_DONE;
2483 }
2484 
2485 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2486 		     atomic_t *journal_ref, struct bkey *replace_key)
2487 {
2488 	struct btree_insert_op op;
2489 	int ret = 0;
2490 
2491 	BUG_ON(current->bio_list);
2492 	BUG_ON(bch_keylist_empty(keys));
2493 
2494 	bch_btree_op_init(&op.op, 0);
2495 	op.keys		= keys;
2496 	op.journal_ref	= journal_ref;
2497 	op.replace_key	= replace_key;
2498 
2499 	while (!ret && !bch_keylist_empty(keys)) {
2500 		op.op.lock = 0;
2501 		ret = bch_btree_map_leaf_nodes(&op.op, c,
2502 					       &START_KEY(keys->keys),
2503 					       btree_insert_fn);
2504 	}
2505 
2506 	if (ret) {
2507 		struct bkey *k;
2508 
2509 		pr_err("error %i\n", ret);
2510 
2511 		while ((k = bch_keylist_pop(keys)))
2512 			bkey_put(c, k);
2513 	} else if (op.op.insert_collision)
2514 		ret = -ESRCH;
2515 
2516 	return ret;
2517 }
2518 
2519 void bch_btree_set_root(struct btree *b)
2520 {
2521 	unsigned int i;
2522 	struct closure cl;
2523 
2524 	closure_init_stack(&cl);
2525 
2526 	trace_bcache_btree_set_root(b);
2527 
2528 	BUG_ON(!b->written);
2529 
2530 	for (i = 0; i < KEY_PTRS(&b->key); i++)
2531 		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2532 
2533 	mutex_lock(&b->c->bucket_lock);
2534 	list_del_init(&b->list);
2535 	mutex_unlock(&b->c->bucket_lock);
2536 
2537 	b->c->root = b;
2538 
2539 	bch_journal_meta(b->c, &cl);
2540 	closure_sync(&cl);
2541 }
2542 
2543 /* Map across nodes or keys */
2544 
2545 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2546 				       struct bkey *from,
2547 				       btree_map_nodes_fn *fn, int flags)
2548 {
2549 	int ret = MAP_CONTINUE;
2550 
2551 	if (b->level) {
2552 		struct bkey *k;
2553 		struct btree_iter_stack iter;
2554 
2555 		bch_btree_iter_stack_init(&b->keys, &iter, from);
2556 
2557 		while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
2558 						       bch_ptr_bad))) {
2559 			ret = bcache_btree(map_nodes_recurse, k, b,
2560 				    op, from, fn, flags);
2561 			from = NULL;
2562 
2563 			if (ret != MAP_CONTINUE)
2564 				return ret;
2565 		}
2566 	}
2567 
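	/* fn is called on every leaf; on interior nodes only when MAP_ALL_NODES is requested */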
2568 	if (!b->level || flags == MAP_ALL_NODES)
2569 		ret = fn(op, b);
2570 
2571 	return ret;
2572 }
2573 
2574 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2575 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
2576 {
2577 	return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
2578 }
2579 
2580 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2581 				      struct bkey *from, btree_map_keys_fn *fn,
2582 				      int flags)
2583 {
2584 	int ret = MAP_CONTINUE;
2585 	struct bkey *k;
2586 	struct btree_iter_stack iter;
2587 
2588 	bch_btree_iter_stack_init(&b->keys, &iter, from);
2589 
2590 	while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
2591 					       bch_ptr_bad))) {
2592 		ret = !b->level
2593 			? fn(op, b, k)
2594 			: bcache_btree(map_keys_recurse, k,
2595 				       b, op, from, fn, flags);
2596 		from = NULL;
2597 
2598 		if (ret != MAP_CONTINUE)
2599 			return ret;
2600 	}
2601 
2602 	if (!b->level && (flags & MAP_END_KEY))
2603 		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2604 				     KEY_OFFSET(&b->key), 0));
2605 
2606 	return ret;
2607 }
2608 
2609 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2610 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
2611 {
2612 	return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
2613 }
2614 
2615 /* Keybuf code */
2616 
2617 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2618 {
2619 	/* Overlapping keys compare equal */
2620 	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2621 		return -1;
2622 	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2623 		return 1;
2624 	return 0;
2625 }
2626 
2627 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2628 					    struct keybuf_key *r)
2629 {
2630 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2631 }
2632 
2633 struct refill {
2634 	struct btree_op	op;
2635 	unsigned int	nr_found;
2636 	struct keybuf	*buf;
2637 	struct bkey	*end;
2638 	keybuf_pred_fn	*pred;
2639 };
2640 
2641 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2642 			    struct bkey *k)
2643 {
2644 	struct refill *refill = container_of(op, struct refill, op);
2645 	struct keybuf *buf = refill->buf;
2646 	int ret = MAP_CONTINUE;
2647 
2648 	if (bkey_cmp(k, refill->end) > 0) {
2649 		ret = MAP_DONE;
2650 		goto out;
2651 	}
2652 
2653 	if (!KEY_SIZE(k)) /* end key */
2654 		goto out;
2655 
2656 	if (refill->pred(buf, k)) {
2657 		struct keybuf_key *w;
2658 
2659 		spin_lock(&buf->lock);
2660 
2661 		w = array_alloc(&buf->freelist);
2662 		if (!w) {
2663 			spin_unlock(&buf->lock);
2664 			return MAP_DONE;
2665 		}
2666 
2667 		w->private = NULL;
2668 		bkey_copy(&w->key, k);
2669 
2670 		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2671 			array_free(&buf->freelist, w);
2672 		else
2673 			refill->nr_found++;
2674 
2675 		if (array_freelist_empty(&buf->freelist))
2676 			ret = MAP_DONE;
2677 
2678 		spin_unlock(&buf->lock);
2679 	}
2680 out:
2681 	buf->last_scanned = *k;
2682 	return ret;
2683 }
2684 
2685 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2686 		       struct bkey *end, keybuf_pred_fn *pred)
2687 {
2688 	struct bkey start = buf->last_scanned;
2689 	struct refill refill;
2690 
2691 	cond_resched();
2692 
2693 	bch_btree_op_init(&refill.op, -1);
2694 	refill.nr_found	= 0;
2695 	refill.buf	= buf;
2696 	refill.end	= end;
2697 	refill.pred	= pred;
2698 
2699 	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2700 			   refill_keybuf_fn, MAP_END_KEY);
2701 
2702 	trace_bcache_keyscan(refill.nr_found,
2703 			     KEY_INODE(&start), KEY_OFFSET(&start),
2704 			     KEY_INODE(&buf->last_scanned),
2705 			     KEY_OFFSET(&buf->last_scanned));
2706 
2707 	spin_lock(&buf->lock);
2708 
2709 	if (!RB_EMPTY_ROOT(&buf->keys)) {
2710 		struct keybuf_key *w;
2711 
2712 		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2713 		buf->start	= START_KEY(&w->key);
2714 
2715 		w = RB_LAST(&buf->keys, struct keybuf_key, node);
2716 		buf->end	= w->key;
2717 	} else {
2718 		buf->start	= MAX_KEY;
2719 		buf->end	= MAX_KEY;
2720 	}
2721 
2722 	spin_unlock(&buf->lock);
2723 }
2724 
2725 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2726 {
2727 	rb_erase(&w->node, &buf->keys);
2728 	array_free(&buf->freelist, w);
2729 }
2730 
2731 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2732 {
2733 	spin_lock(&buf->lock);
2734 	__bch_keybuf_del(buf, w);
2735 	spin_unlock(&buf->lock);
2736 }
2737 
2738 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2739 				  struct bkey *end)
2740 {
2741 	bool ret = false;
2742 	struct keybuf_key *p, *w, s;
2743 
2744 	s.key = *start;
2745 
2746 	if (bkey_cmp(end, &buf->start) <= 0 ||
2747 	    bkey_cmp(start, &buf->end) >= 0)
2748 		return false;
2749 
2750 	spin_lock(&buf->lock);
2751 	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2752 
2753 	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2754 		p = w;
2755 		w = RB_NEXT(w, node);
2756 
2757 		if (p->private)
2758 			ret = true;
2759 		else
2760 			__bch_keybuf_del(buf, p);
2761 	}
2762 
2763 	spin_unlock(&buf->lock);
2764 	return ret;
2765 }
2766 
2767 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2768 {
2769 	struct keybuf_key *w;
2770 
2771 	spin_lock(&buf->lock);
2772 
2773 	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2774 
2775 	while (w && w->private)
2776 		w = RB_NEXT(w, node);
2777 
2778 	if (w)
2779 		w->private = ERR_PTR(-EINTR);
2780 
2781 	spin_unlock(&buf->lock);
2782 	return w;
2783 }
2784 
2785 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2786 					  struct keybuf *buf,
2787 					  struct bkey *end,
2788 					  keybuf_pred_fn *pred)
2789 {
2790 	struct keybuf_key *ret;
2791 
2792 	while (1) {
2793 		ret = bch_keybuf_next(buf);
2794 		if (ret)
2795 			break;
2796 
2797 		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2798 			pr_debug("scan finished\n");
2799 			break;
2800 		}
2801 
2802 		bch_refill_keybuf(c, buf, end, pred);
2803 	}
2804 
2805 	return ret;
2806 }
2807 
2808 void bch_keybuf_init(struct keybuf *buf)
2809 {
2810 	buf->last_scanned	= MAX_KEY;
2811 	buf->keys		= RB_ROOT;
2812 
2813 	spin_lock_init(&buf->lock);
2814 	array_allocator_init(&buf->freelist);
2815 }
2816 
2817 void bch_btree_exit(void)
2818 {
2819 	if (btree_io_wq)
2820 		destroy_workqueue(btree_io_wq);
2821 }
2822 
2823 int __init bch_btree_init(void)
2824 {
2825 	btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
2826 	if (!btree_io_wq)
2827 		return -ENOMEM;
2828 
2829 	return 0;
2830 }
2831