1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4 *
5 * Uses a block device as cache for other block devices; optimized for SSDs.
6 * All allocation is done in buckets, which should match the erase block size
7 * of the device.
8 *
9 * Buckets containing cached data are kept on a heap sorted by priority;
10 * bucket priority is increased on cache hit, and periodically all the buckets
11 * on the heap have their priority scaled down. This currently is just used as
12 * an LRU but in the future should allow for more intelligent heuristics.
13 *
14 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15 * counter. Garbage collection is used to remove stale pointers.
16 *
17 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18 * as keys are inserted we only sort the pages that have not yet been written.
19 * When garbage collection is run, we resort the entire node.
20 *
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22 */
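/*
 * Rough sketch of the generation scheme described above: a pointer records
 * the bucket generation it was created with, and is considered stale once
 * the bucket's generation has advanced past it, approximately
 *
 *	stale = gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 *
 * which is roughly what ptr_stale() checks; garbage collection then drops
 * such stale pointers.
 */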
23
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 #include <linux/delay.h>
39 #include <trace/events/bcache.h>
40
41 /*
42 * Todo:
43 * register_bcache: Return errors out to userspace correctly
44 *
45 * Writeback: don't undirty key until after a cache flush
46 *
47 * Create an iterator for key pointers
48 *
49 * On btree write error, mark bucket such that it won't be freed from the cache
50 *
51 * Journalling:
52 * Check for bad keys in replay
53 * Propagate barriers
54 * Refcount journal entries in journal_replay
55 *
56 * Garbage collection:
57 * Finish incremental gc
58 * Gc should free old UUIDs, data for invalid UUIDs
59 *
60 * Provide a way to list backing device UUIDs we have data cached for, and
61 * probably how long it's been since we've seen them, and a way to invalidate
62 * dirty data for devices that will never be attached again
63 *
64 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65 * that based on that and how much dirty data we have we can keep writeback
66 * from being starved
67 *
68 * Add a tracepoint or somesuch to watch for writeback starvation
69 *
70 * When btree depth > 1 and splitting an interior node, we have to make sure
71 * alloc_bucket() cannot fail. This should be true but is not completely
72 * obvious.
73 *
74 * Plugging?
75 *
76 * If data write is less than hard sector size of ssd, round up offset in open
77 * bucket to the next whole sector
78 *
79 * Superblock needs to be fleshed out for multiple cache devices
80 *
81 * Add a sysfs tunable for the number of writeback IOs in flight
82 *
83 * Add a sysfs tunable for the number of open data buckets
84 *
85 * IO tracking: Can we track when one process is doing io on behalf of another?
86 * IO tracking: Don't use just an average, weigh more recent stuff higher
87 *
88 * Test module load/unload
89 */
90
91 #define MAX_NEED_GC 64
92 #define MAX_SAVE_PRIO 72
93 #define MAX_GC_TIMES 100
94 #define MIN_GC_NODES 100
95 #define GC_SLEEP_MS 100
96
97 #define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
98
99 #define PTR_HASH(c, k) \
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
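/*
 * PTR_HASH() is the lookup identity for the in-memory btree node cache
 * (see mca_hash()/mca_find() below): the bucket number of the node's first
 * pointer, mixed with that pointer's generation so a cached node whose
 * bucket has since been reused doesn't match the new key.
 */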
101
102 static struct workqueue_struct *btree_io_wq;
103
104 #define insert_lock(s, b) ((b)->level <= (s)->lock)
105
106
107 static inline struct bset *write_block(struct btree *b)
108 {
109 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
110 }
111
112 static void bch_btree_init_next(struct btree *b)
113 {
114 /* If not a leaf node, always sort */
115 if (b->level && b->keys.nsets)
116 bch_btree_sort(&b->keys, &b->c->sort);
117 else
118 bch_btree_sort_lazy(&b->keys, &b->c->sort);
119
120 if (b->written < btree_blocks(b))
121 bch_bset_init_next(&b->keys, write_block(b),
122 bset_magic(&b->c->cache->sb));
123
124 }
125
126 /* Btree key manipulation */
127
128 void bkey_put(struct cache_set *c, struct bkey *k)
129 {
130 unsigned int i;
131
132 for (i = 0; i < KEY_PTRS(k); i++)
133 if (ptr_available(c, k, i))
134 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
135 }
136
137 /* Btree IO */
138
139 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
140 {
141 uint64_t crc = b->key.ptr[0];
142 void *data = (void *) i + 8, *end = bset_bkey_last(i);
143
144 crc = crc64_be(crc, data, end - data);
145 return crc ^ 0xffffffffffffffffULL;
146 }
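/*
 * Note: the checksum above is seeded with the node's first pointer and
 * covers the bset starting just past the 64-bit csum field (the "+ 8"),
 * so the same data checksummed for a different bucket yields a different
 * value.
 */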
147
148 void bch_btree_node_read_done(struct btree *b)
149 {
150 const char *err = "bad btree header";
151 struct bset *i = btree_bset_first(b);
152 struct btree_iter iter;
153
154 /*
155 * c->fill_iter can allocate an iterator with more memory space
156 * than static MAX_BSETS.
157 * See the comment around cache_set->fill_iter.
158 */
159 iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
160 iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
161 iter.heap.nr = 0;
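/*
 * Worked example with hypothetical geometry: a 1024-sector bucket with
 * 8-sector blocks gives the heap 128 slots, i.e. room for one bset per
 * block in the node, which can be far more than the static MAX_BSETS.
 */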
162
163 #ifdef CONFIG_BCACHE_DEBUG
164 iter.b = &b->keys;
165 #endif
166
167 if (!i->seq)
168 goto err;
169
170 for (;
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
172 i = write_block(b)) {
173 err = "unsupported bset version";
174 if (i->version > BCACHE_BSET_VERSION)
175 goto err;
176
177 err = "bad btree header";
178 if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
179 btree_blocks(b))
180 goto err;
181
182 err = "bad magic";
183 if (i->magic != bset_magic(&b->c->cache->sb))
184 goto err;
185
186 err = "bad checksum";
187 switch (i->version) {
188 case 0:
189 if (i->csum != csum_set(i))
190 goto err;
191 break;
192 case BCACHE_BSET_VERSION:
193 if (i->csum != btree_csum_set(b, i))
194 goto err;
195 break;
196 }
197
198 err = "empty set";
199 if (i != b->keys.set[0].data && !i->keys)
200 goto err;
201
202 bch_btree_iter_push(&iter, i->start, bset_bkey_last(i));
203
204 b->written += set_blocks(i, block_bytes(b->c->cache));
205 }
206
207 err = "corrupted btree";
208 for (i = write_block(b);
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
210 i = ((void *) i) + block_bytes(b->c->cache))
211 if (i->seq == b->keys.set[0].data->seq)
212 goto err;
213
214 bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort);
215
216 i = b->keys.set[0].data;
217 err = "short btree key";
218 if (b->keys.set[0].size &&
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
220 goto err;
221
222 if (b->written < btree_blocks(b))
223 bch_bset_init_next(&b->keys, write_block(b),
224 bset_magic(&b->c->cache->sb));
225 out:
226 mempool_free(iter.heap.data, &b->c->fill_iter);
227 return;
228 err:
229 set_btree_node_io_error(b);
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
231 err, PTR_BUCKET_NR(b->c, &b->key, 0),
232 bset_block_offset(b, i), i->keys);
233 goto out;
234 }
235
236 static void btree_node_read_endio(struct bio *bio)
237 {
238 struct closure *cl = bio->bi_private;
239
240 closure_put(cl);
241 }
242
243 static void bch_btree_node_read(struct btree *b)
244 {
245 uint64_t start_time = local_clock();
246 struct closure cl;
247 struct bio *bio;
248
249 trace_bcache_btree_read(b);
250
251 closure_init_stack(&cl);
252
253 bio = bch_bbio_alloc(b->c);
254 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
255 bio->bi_end_io = btree_node_read_endio;
256 bio->bi_private = &cl;
257 bio->bi_opf = REQ_OP_READ | REQ_META;
258
259 bch_bio_map(bio, b->keys.set[0].data);
260
261 bch_submit_bbio(bio, b->c, &b->key, 0);
262 closure_sync(&cl);
263
264 if (bio->bi_status)
265 set_btree_node_io_error(b);
266
267 bch_bbio_free(bio, b->c);
268
269 if (btree_node_io_error(b))
270 goto err;
271
272 bch_btree_node_read_done(b);
273 bch_time_stats_update(&b->c->btree_read_time, start_time);
274
275 return;
276 err:
277 bch_cache_set_error(b->c, "io error reading bucket %zu",
278 PTR_BUCKET_NR(b->c, &b->key, 0));
279 }
280
281 static void btree_complete_write(struct btree *b, struct btree_write *w)
282 {
283 if (w->prio_blocked &&
284 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
285 wake_up_allocators(b->c);
286
287 if (w->journal) {
288 atomic_dec_bug(w->journal);
289 __closure_wake_up(&b->c->journal.wait);
290 }
291
292 w->prio_blocked = 0;
293 w->journal = NULL;
294 }
295
296 static CLOSURE_CALLBACK(btree_node_write_unlock)
297 {
298 closure_type(b, struct btree, io);
299
300 up(&b->io_mutex);
301 }
302
303 static CLOSURE_CALLBACK(__btree_node_write_done)
304 {
305 closure_type(b, struct btree, io);
306 struct btree_write *w = btree_prev_write(b);
307
308 bch_bbio_free(b->bio, b->c);
309 b->bio = NULL;
310 btree_complete_write(b, w);
311
312 if (btree_node_dirty(b))
313 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
314
315 closure_return_with_destructor(cl, btree_node_write_unlock);
316 }
317
318 static CLOSURE_CALLBACK(btree_node_write_done)
319 {
320 closure_type(b, struct btree, io);
321
322 bio_free_pages(b->bio);
323 __btree_node_write_done(&cl->work);
324 }
325
326 static void btree_node_write_endio(struct bio *bio)
327 {
328 struct closure *cl = bio->bi_private;
329 struct btree *b = container_of(cl, struct btree, io);
330
331 if (bio->bi_status)
332 set_btree_node_io_error(b);
333
334 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
335 closure_put(cl);
336 }
337
338 static void do_btree_node_write(struct btree *b)
339 {
340 struct closure *cl = &b->io;
341 struct bset *i = btree_bset_last(b);
342 BKEY_PADDED(key) k;
343
344 i->version = BCACHE_BSET_VERSION;
345 i->csum = btree_csum_set(b, i);
346
347 BUG_ON(b->bio);
348 b->bio = bch_bbio_alloc(b->c);
349
350 b->bio->bi_end_io = btree_node_write_endio;
351 b->bio->bi_private = cl;
352 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
353 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
354 bch_bio_map(b->bio, i);
355
356 /*
357 * If we're appending to a leaf node, we don't technically need FUA -
358 * this write just needs to be persisted before the next journal write,
359 * which will be marked FLUSH|FUA.
360 *
361 * Similarly if we're writing a new btree root - the pointer is going to
362 * be in the next journal entry.
363 *
364 * But if we're writing a new btree node (that isn't a root) or
365 * appending to a non leaf btree node, we need either FUA or a flush
366 * when we write the parent with the new pointer. FUA is cheaper than a
367 * flush, and writes appending to leaf nodes aren't blocking anything so
368 * just make all btree node writes FUA to keep things sane.
369 */
370
371 bkey_copy(&k.key, &b->key);
372 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
373 bset_sector_offset(&b->keys, i));
374
375 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
376 struct bio_vec *bv;
377 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
378 struct bvec_iter_all iter_all;
379
380 bio_for_each_segment_all(bv, b->bio, iter_all) {
381 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
382 addr += PAGE_SIZE;
383 }
384
385 bch_submit_bbio(b->bio, b->c, &k.key, 0);
386
387 continue_at(cl, btree_node_write_done, NULL);
388 } else {
389 /*
390 * No problem for multipage bvec since the bio is
391 * just allocated
392 */
393 b->bio->bi_vcnt = 0;
394 bch_bio_map(b->bio, i);
395
396 bch_submit_bbio(b->bio, b->c, &k.key, 0);
397
398 closure_sync(cl);
399 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
400 }
401 }
402
403 void __bch_btree_node_write(struct btree *b, struct closure *parent)
404 {
405 struct bset *i = btree_bset_last(b);
406
407 lockdep_assert_held(&b->write_lock);
408
409 trace_bcache_btree_write(b);
410
411 BUG_ON(current->bio_list);
412 BUG_ON(b->written >= btree_blocks(b));
413 BUG_ON(b->written && !i->keys);
414 BUG_ON(btree_bset_first(b)->seq != i->seq);
415 bch_check_keys(&b->keys, "writing");
416
417 cancel_delayed_work(&b->work);
418
419 /* If caller isn't waiting for write, parent refcount is cache set */
420 down(&b->io_mutex);
421 closure_init(&b->io, parent ?: &b->c->cl);
422
423 clear_bit(BTREE_NODE_dirty, &b->flags);
424 change_bit(BTREE_NODE_write_idx, &b->flags);
425
426 do_btree_node_write(b);
427
428 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
429 &b->c->cache->btree_sectors_written);
430
431 b->written += set_blocks(i, block_bytes(b->c->cache));
432 }
433
434 void bch_btree_node_write(struct btree *b, struct closure *parent)
435 {
436 unsigned int nsets = b->keys.nsets;
437
438 lockdep_assert_held(&b->lock);
439
440 __bch_btree_node_write(b, parent);
441
442 /*
443 * do verify if there was more than one set initially (i.e. we did a
444 * sort) and we sorted down to a single set:
445 */
446 if (nsets && !b->keys.nsets)
447 bch_btree_verify(b);
448
449 bch_btree_init_next(b);
450 }
451
452 static void bch_btree_node_write_sync(struct btree *b)
453 {
454 struct closure cl;
455
456 closure_init_stack(&cl);
457
458 mutex_lock(&b->write_lock);
459 bch_btree_node_write(b, &cl);
460 mutex_unlock(&b->write_lock);
461
462 closure_sync(&cl);
463 }
464
465 static void btree_node_write_work(struct work_struct *w)
466 {
467 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
468
469 mutex_lock(&b->write_lock);
470 if (btree_node_dirty(b))
471 __bch_btree_node_write(b, NULL);
472 mutex_unlock(&b->write_lock);
473 }
474
475 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
476 {
477 struct bset *i = btree_bset_last(b);
478 struct btree_write *w = btree_current_write(b);
479
480 lockdep_assert_held(&b->write_lock);
481
482 BUG_ON(!b->written);
483 BUG_ON(!i->keys);
484
485 if (!btree_node_dirty(b))
486 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
487
488 set_btree_node_dirty(b);
489
490 /*
491 * w->journal is always the oldest journal pin of all bkeys
492 * in the leaf node, to make sure the oldest jset seq won't
493 * be increased before this btree node is flushed.
494 */
495 if (journal_ref) {
496 if (w->journal &&
497 journal_pin_cmp(b->c, w->journal, journal_ref)) {
498 atomic_dec_bug(w->journal);
499 w->journal = NULL;
500 }
501
502 if (!w->journal) {
503 w->journal = journal_ref;
504 atomic_inc(w->journal);
505 }
506 }
507
508 /* Force write if set is too big */
509 if (set_bytes(i) > PAGE_SIZE - 48 &&
510 !current->bio_list)
511 bch_btree_node_write(b, NULL);
512 }
513
514 /*
515 * Btree in memory cache - allocation/freeing
516 * mca -> memory cache
517 */
518
519 #define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \
520 ? c->root->level : 1) * 8 + 16)
521 #define mca_can_free(c) \
522 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
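/*
 * In other words: keep roughly eight cached nodes per level of the current
 * tree depth plus sixteen spare as a reserve the shrinker must never touch;
 * mca_can_free() is whatever the cache holds beyond that reserve.
 */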
523
524 static void mca_data_free(struct btree *b)
525 {
526 BUG_ON(b->io_mutex.count != 1);
527
528 bch_btree_keys_free(&b->keys);
529
530 b->c->btree_cache_used--;
531 list_move(&b->list, &b->c->btree_cache_freed);
532 }
533
534 static void mca_bucket_free(struct btree *b)
535 {
536 BUG_ON(btree_node_dirty(b));
537
538 b->key.ptr[0] = 0;
539 hlist_del_init_rcu(&b->hash);
540 list_move(&b->list, &b->c->btree_cache_freeable);
541 }
542
543 static unsigned int btree_order(struct bkey *k)
544 {
545 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
546 }
547
548 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
549 {
550 if (!bch_btree_keys_alloc(&b->keys,
551 max_t(unsigned int,
552 ilog2(b->c->btree_pages),
553 btree_order(k)),
554 gfp)) {
555 b->c->btree_cache_used++;
556 list_move(&b->list, &b->c->btree_cache);
557 } else {
558 list_move(&b->list, &b->c->btree_cache_freed);
559 }
560 }
561
562 #define cmp_int(l, r) ((l > r) - (l < r))
563
564 #ifdef CONFIG_PROVE_LOCKING
565 static int btree_lock_cmp_fn(const struct lockdep_map *_a,
566 const struct lockdep_map *_b)
567 {
568 const struct btree *a = container_of(_a, struct btree, lock.dep_map);
569 const struct btree *b = container_of(_b, struct btree, lock.dep_map);
570
571 return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
572 }
573
574 static void btree_lock_print_fn(const struct lockdep_map *map)
575 {
576 const struct btree *b = container_of(map, struct btree, lock.dep_map);
577
578 printk(KERN_CONT " l=%u %llu:%llu", b->level,
579 KEY_INODE(&b->key), KEY_OFFSET(&b->key));
580 }
581 #endif
582
583 static struct btree *mca_bucket_alloc(struct cache_set *c,
584 struct bkey *k, gfp_t gfp)
585 {
586 /*
587 * kzalloc() is necessary here for initialization,
588 * see code comments in bch_btree_keys_init().
589 */
590 struct btree *b = kzalloc(sizeof(struct btree), gfp);
591
592 if (!b)
593 return NULL;
594
595 init_rwsem(&b->lock);
596 lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
597 mutex_init(&b->write_lock);
598 lockdep_set_novalidate_class(&b->write_lock);
599 INIT_LIST_HEAD(&b->list);
600 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
601 b->c = c;
602 sema_init(&b->io_mutex, 1);
603
604 mca_data_alloc(b, k, gfp);
605 return b;
606 }
607
608 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
609 {
610 struct closure cl;
611
612 closure_init_stack(&cl);
613 lockdep_assert_held(&b->c->bucket_lock);
614
615 if (!down_write_trylock(&b->lock))
616 return -ENOMEM;
617
618 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
619
620 if (b->keys.page_order < min_order)
621 goto out_unlock;
622
623 if (!flush) {
624 if (btree_node_dirty(b))
625 goto out_unlock;
626
627 if (down_trylock(&b->io_mutex))
628 goto out_unlock;
629 up(&b->io_mutex);
630 }
631
632 retry:
633 /*
634 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
635 * __bch_btree_node_write(). To avoid an extra flush, acquire
636 * b->write_lock before checking BTREE_NODE_dirty bit.
637 */
638 mutex_lock(&b->write_lock);
639 /*
640 * If this btree node is selected in btree_flush_write() by journal
641 * code, delay and retry until the node is flushed by journal code
642 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
643 */
644 if (btree_node_journal_flush(b)) {
645 pr_debug("bnode %p is flushing by journal, retry\n", b);
646 mutex_unlock(&b->write_lock);
647 udelay(1);
648 goto retry;
649 }
650
651 if (btree_node_dirty(b))
652 __bch_btree_node_write(b, &cl);
653 mutex_unlock(&b->write_lock);
654
655 closure_sync(&cl);
656
657 /* wait for any in flight btree write */
658 down(&b->io_mutex);
659 up(&b->io_mutex);
660
661 return 0;
662 out_unlock:
663 rw_unlock(true, b);
664 return -ENOMEM;
665 }
666
667 static unsigned long bch_mca_scan(struct shrinker *shrink,
668 struct shrink_control *sc)
669 {
670 struct cache_set *c = shrink->private_data;
671 struct btree *b, *t;
672 unsigned long i, nr = sc->nr_to_scan;
673 unsigned long freed = 0;
674 unsigned int btree_cache_used;
675
676 if (c->shrinker_disabled)
677 return SHRINK_STOP;
678
679 if (c->btree_cache_alloc_lock)
680 return SHRINK_STOP;
681
682 /* Return -1 if we can't do anything right now */
683 if (sc->gfp_mask & __GFP_IO)
684 mutex_lock(&c->bucket_lock);
685 else if (!mutex_trylock(&c->bucket_lock))
686 return -1;
687
688 /*
689 * It's _really_ critical that we don't free too many btree nodes - we
690 * have to always leave ourselves a reserve. The reserve is how we
691 * guarantee that allocating memory for a new btree node can always
692 * succeed, so that inserting keys into the btree can always succeed and
693 * IO can always make forward progress:
694 */
695 nr /= c->btree_pages;
696 if (nr == 0)
697 nr = 1;
698 nr = min_t(unsigned long, nr, mca_can_free(c));
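/*
 * sc->nr_to_scan is in the units bch_mca_count() reports, i.e. pages;
 * dividing by btree_pages converts it to whole btree nodes. For example
 * (hypothetical numbers), nr_to_scan = 256 with btree_pages = 64 asks us
 * to reap 4 nodes, clamped to mca_can_free().
 */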
699
700 i = 0;
701 btree_cache_used = c->btree_cache_used;
702 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
703 if (nr <= 0)
704 goto out;
705
706 if (!mca_reap(b, 0, false)) {
707 mca_data_free(b);
708 rw_unlock(true, b);
709 freed++;
710 }
711 nr--;
712 i++;
713 }
714
715 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
716 if (nr <= 0 || i >= btree_cache_used)
717 goto out;
718
719 if (!mca_reap(b, 0, false)) {
720 mca_bucket_free(b);
721 mca_data_free(b);
722 rw_unlock(true, b);
723 freed++;
724 }
725
726 nr--;
727 i++;
728 }
729 out:
730 mutex_unlock(&c->bucket_lock);
731 return freed * c->btree_pages;
732 }
733
734 static unsigned long bch_mca_count(struct shrinker *shrink,
735 struct shrink_control *sc)
736 {
737 struct cache_set *c = shrink->private_data;
738
739 if (c->shrinker_disabled)
740 return 0;
741
742 if (c->btree_cache_alloc_lock)
743 return 0;
744
745 return mca_can_free(c) * c->btree_pages;
746 }
747
748 void bch_btree_cache_free(struct cache_set *c)
749 {
750 struct btree *b;
751 struct closure cl;
752
753 closure_init_stack(&cl);
754
755 if (c->shrink)
756 shrinker_free(c->shrink);
757
758 mutex_lock(&c->bucket_lock);
759
760 #ifdef CONFIG_BCACHE_DEBUG
761 if (c->verify_data)
762 list_move(&c->verify_data->list, &c->btree_cache);
763
764 free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
765 #endif
766
767 list_splice(&c->btree_cache_freeable,
768 &c->btree_cache);
769
770 while (!list_empty(&c->btree_cache)) {
771 b = list_first_entry(&c->btree_cache, struct btree, list);
772
773 /*
774 * This function is called by cache_set_free(), no I/O
775 * request on cache now, it is unnecessary to acquire
776 * b->write_lock before clearing BTREE_NODE_dirty anymore.
777 */
778 if (btree_node_dirty(b)) {
779 btree_complete_write(b, btree_current_write(b));
780 clear_bit(BTREE_NODE_dirty, &b->flags);
781 }
782 mca_data_free(b);
783 }
784
785 while (!list_empty(&c->btree_cache_freed)) {
786 b = list_first_entry(&c->btree_cache_freed,
787 struct btree, list);
788 list_del(&b->list);
789 cancel_delayed_work_sync(&b->work);
790 kfree(b);
791 }
792
793 mutex_unlock(&c->bucket_lock);
794 }
795
796 int bch_btree_cache_alloc(struct cache_set *c)
797 {
798 unsigned int i;
799
800 for (i = 0; i < mca_reserve(c); i++)
801 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
802 return -ENOMEM;
803
804 list_splice_init(&c->btree_cache,
805 &c->btree_cache_freeable);
806
807 #ifdef CONFIG_BCACHE_DEBUG
808 mutex_init(&c->verify_lock);
809
810 c->verify_ondisk = (void *)
811 __get_free_pages(GFP_KERNEL|__GFP_COMP,
812 ilog2(meta_bucket_pages(&c->cache->sb)));
813 if (!c->verify_ondisk) {
814 /*
815 * Don't worry about the mca_reserve() buckets
816 * allocated in previous for-loop, they will be
817 * handled properly in bch_cache_set_unregister().
818 */
819 return -ENOMEM;
820 }
821
822 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
823
824 if (c->verify_data &&
825 c->verify_data->keys.set->data)
826 list_del_init(&c->verify_data->list);
827 else
828 c->verify_data = NULL;
829 #endif
830
831 c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid);
832 if (!c->shrink) {
833 pr_warn("bcache: %s: could not allocate shrinker\n", __func__);
834 return 0;
835 }
836
837 c->shrink->count_objects = bch_mca_count;
838 c->shrink->scan_objects = bch_mca_scan;
839 c->shrink->seeks = 4;
840 c->shrink->batch = c->btree_pages * 2;
841 c->shrink->private_data = c;
842
843 shrinker_register(c->shrink);
844
845 return 0;
846 }
847
848 /* Btree in memory cache - hash table */
849
850 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
851 {
852 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
853 }
854
855 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
856 {
857 struct btree *b;
858
859 rcu_read_lock();
860 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
861 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
862 goto out;
863 b = NULL;
864 out:
865 rcu_read_unlock();
866 return b;
867 }
868
869 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
870 {
871 spin_lock(&c->btree_cannibalize_lock);
872 if (likely(c->btree_cache_alloc_lock == NULL)) {
873 c->btree_cache_alloc_lock = current;
874 } else if (c->btree_cache_alloc_lock != current) {
875 if (op)
876 prepare_to_wait(&c->btree_cache_wait, &op->wait,
877 TASK_UNINTERRUPTIBLE);
878 spin_unlock(&c->btree_cannibalize_lock);
879 return -EINTR;
880 }
881 spin_unlock(&c->btree_cannibalize_lock);
882
883 return 0;
884 }
885
886 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
887 struct bkey *k)
888 {
889 struct btree *b;
890
891 trace_bcache_btree_cache_cannibalize(c);
892
893 if (mca_cannibalize_lock(c, op))
894 return ERR_PTR(-EINTR);
895
896 list_for_each_entry_reverse(b, &c->btree_cache, list)
897 if (!mca_reap(b, btree_order(k), false))
898 return b;
899
900 list_for_each_entry_reverse(b, &c->btree_cache, list)
901 if (!mca_reap(b, btree_order(k), true))
902 return b;
903
904 WARN(1, "btree cache cannibalize failed\n");
905 return ERR_PTR(-ENOMEM);
906 }
907
908 /*
909 * We can only have one thread cannibalizing other cached btree nodes at a time,
910 * or we'll deadlock. We use an open coded mutex to ensure that, which a
911 * cannibalize_bucket() will take. This means every time we unlock the root of
912 * the btree, we need to release this lock if we have it held.
913 */
914 void bch_cannibalize_unlock(struct cache_set *c)
915 {
916 spin_lock(&c->btree_cannibalize_lock);
917 if (c->btree_cache_alloc_lock == current) {
918 c->btree_cache_alloc_lock = NULL;
919 wake_up(&c->btree_cache_wait);
920 }
921 spin_unlock(&c->btree_cannibalize_lock);
922 }
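/*
 * Sketch of the expected pairing, assuming the usual call path: the lock is
 * taken implicitly when mca_alloc() has to fall back to mca_cannibalize();
 * once the caller has unwound back to the btree root it calls
 * bch_cannibalize_unlock() (bch_btree_check_thread() below is one example).
 */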
923
924 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
925 struct bkey *k, int level)
926 {
927 struct btree *b;
928
929 BUG_ON(current->bio_list);
930
931 lockdep_assert_held(&c->bucket_lock);
932
933 if (mca_find(c, k))
934 return NULL;
935
936 /* btree_free() doesn't free memory; it sticks the node on the end of
937 * the list. Check if there are any freed nodes there:
938 */
939 list_for_each_entry(b, &c->btree_cache_freeable, list)
940 if (!mca_reap(b, btree_order(k), false))
941 goto out;
942
943 /* We never free struct btree itself, just the memory that holds the on
944 * disk node. Check the freed list before allocating a new one:
945 */
946 list_for_each_entry(b, &c->btree_cache_freed, list)
947 if (!mca_reap(b, 0, false)) {
948 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
949 if (!b->keys.set[0].data)
950 goto err;
951 else
952 goto out;
953 }
954
955 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
956 if (!b)
957 goto err;
958
959 BUG_ON(!down_write_trylock(&b->lock));
960 if (!b->keys.set->data)
961 goto err;
962 out:
963 BUG_ON(b->io_mutex.count != 1);
964
965 bkey_copy(&b->key, k);
966 list_move(&b->list, &c->btree_cache);
967 hlist_del_init_rcu(&b->hash);
968 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
969
970 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
971 b->parent = (void *) ~0UL;
972 b->flags = 0;
973 b->written = 0;
974 b->level = level;
975
976 if (!b->level)
977 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
978 &b->c->expensive_debug_checks);
979 else
980 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
981 &b->c->expensive_debug_checks);
982
983 return b;
984 err:
985 if (b)
986 rw_unlock(true, b);
987
988 b = mca_cannibalize(c, op, k);
989 if (!IS_ERR(b))
990 goto out;
991
992 return b;
993 }
994
995 /*
996 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
997 * in from disk if necessary.
998 *
999 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
1000 *
1001 * The btree node will have either a read or a write lock held, depending on
1002 * level and op->lock.
1003 *
1004 * Note: Only an error code or a btree pointer will be returned; it is
1005 * unnecessary for callers to check for a NULL pointer.
1006 */
1007 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
1008 struct bkey *k, int level, bool write,
1009 struct btree *parent)
1010 {
1011 int i = 0;
1012 struct btree *b;
1013
1014 BUG_ON(level < 0);
1015 retry:
1016 b = mca_find(c, k);
1017
1018 if (!b) {
1019 if (current->bio_list)
1020 return ERR_PTR(-EAGAIN);
1021
1022 mutex_lock(&c->bucket_lock);
1023 b = mca_alloc(c, op, k, level);
1024 mutex_unlock(&c->bucket_lock);
1025
1026 if (!b)
1027 goto retry;
1028 if (IS_ERR(b))
1029 return b;
1030
1031 bch_btree_node_read(b);
1032
1033 if (!write)
1034 downgrade_write(&b->lock);
1035 } else {
1036 rw_lock(write, b, level);
1037 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1038 rw_unlock(write, b);
1039 goto retry;
1040 }
1041 BUG_ON(b->level != level);
1042 }
1043
1044 if (btree_node_io_error(b)) {
1045 rw_unlock(write, b);
1046 return ERR_PTR(-EIO);
1047 }
1048
1049 BUG_ON(!b->written);
1050
1051 b->parent = parent;
1052
1053 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1054 prefetch(b->keys.set[i].tree);
1055 prefetch(b->keys.set[i].data);
1056 }
1057
1058 for (; i <= b->keys.nsets; i++)
1059 prefetch(b->keys.set[i].data);
1060
1061 return b;
1062 }
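/*
 * Minimal usage sketch for a hypothetical caller, per the contract above
 * (an error pointer or a valid node, never NULL):
 *
 *	b = bch_btree_node_get(c, op, k, level, write, parent);
 *	if (IS_ERR(b))
 *		return PTR_ERR(b);
 *	...
 *	rw_unlock(write, b);
 */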
1063
1064 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1065 {
1066 struct btree *b;
1067
1068 mutex_lock(&parent->c->bucket_lock);
1069 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1070 mutex_unlock(&parent->c->bucket_lock);
1071
1072 if (!IS_ERR_OR_NULL(b)) {
1073 b->parent = parent;
1074 bch_btree_node_read(b);
1075 rw_unlock(true, b);
1076 }
1077 }
1078
1079 /* Btree alloc */
1080
1081 static void btree_node_free(struct btree *b)
1082 {
1083 trace_bcache_btree_node_free(b);
1084
1085 BUG_ON(b == b->c->root);
1086
1087 retry:
1088 mutex_lock(&b->write_lock);
1089 /*
1090 * If the btree node is selected and flushing in btree_flush_write(),
1091 * delay and retry until the BTREE_NODE_journal_flush bit cleared,
1092 * then it is safe to free the btree node here. Otherwise this btree
1093 * node will be in race condition.
1094 */
1095 if (btree_node_journal_flush(b)) {
1096 mutex_unlock(&b->write_lock);
1097 pr_debug("bnode %p journal_flush set, retry\n", b);
1098 udelay(1);
1099 goto retry;
1100 }
1101
1102 if (btree_node_dirty(b)) {
1103 btree_complete_write(b, btree_current_write(b));
1104 clear_bit(BTREE_NODE_dirty, &b->flags);
1105 }
1106
1107 mutex_unlock(&b->write_lock);
1108
1109 cancel_delayed_work(&b->work);
1110
1111 mutex_lock(&b->c->bucket_lock);
1112 bch_bucket_free(b->c, &b->key);
1113 mca_bucket_free(b);
1114 mutex_unlock(&b->c->bucket_lock);
1115 }
1116
1117 /*
1118 * Only an error code or a btree pointer will be returned; it is unnecessary
1119 * for callers to check for a NULL pointer.
1120 */
1121 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1122 int level, bool wait,
1123 struct btree *parent)
1124 {
1125 BKEY_PADDED(key) k;
1126 struct btree *b;
1127
1128 mutex_lock(&c->bucket_lock);
1129 retry:
1130 /* return ERR_PTR(-EAGAIN) when it fails */
1131 b = ERR_PTR(-EAGAIN);
1132 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
1133 goto err;
1134
1135 bkey_put(c, &k.key);
1136 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1137
1138 b = mca_alloc(c, op, &k.key, level);
1139 if (IS_ERR(b))
1140 goto err_free;
1141
1142 if (!b) {
1143 cache_bug(c,
1144 "Tried to allocate bucket that was in btree cache");
1145 goto retry;
1146 }
1147
1148 b->parent = parent;
1149 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1150
1151 mutex_unlock(&c->bucket_lock);
1152
1153 trace_bcache_btree_node_alloc(b);
1154 return b;
1155 err_free:
1156 bch_bucket_free(c, &k.key);
1157 err:
1158 mutex_unlock(&c->bucket_lock);
1159
1160 trace_bcache_btree_node_alloc_fail(c);
1161 return b;
1162 }
1163
1164 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1165 struct btree_op *op, int level,
1166 struct btree *parent)
1167 {
1168 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1169 }
1170
1171 static struct btree *btree_node_alloc_replacement(struct btree *b,
1172 struct btree_op *op)
1173 {
1174 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1175
1176 if (!IS_ERR(n)) {
1177 mutex_lock(&n->write_lock);
1178 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1179 bkey_copy_key(&n->key, &b->key);
1180 mutex_unlock(&n->write_lock);
1181 }
1182
1183 return n;
1184 }
1185
1186 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1187 {
1188 unsigned int i;
1189
1190 mutex_lock(&b->c->bucket_lock);
1191
1192 atomic_inc(&b->c->prio_blocked);
1193
1194 bkey_copy(k, &b->key);
1195 bkey_copy_key(k, &ZERO_KEY);
1196
1197 for (i = 0; i < KEY_PTRS(k); i++)
1198 SET_PTR_GEN(k, i,
1199 bch_inc_gen(b->c->cache,
1200 PTR_BUCKET(b->c, &b->key, i)));
1201
1202 mutex_unlock(&b->c->bucket_lock);
1203 }
1204
1205 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1206 {
1207 struct cache_set *c = b->c;
1208 struct cache *ca = c->cache;
1209 unsigned int reserve = (c->root->level - b->level) * 2 + 1;
1210
1211 mutex_lock(&c->bucket_lock);
1212
1213 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1214 if (op)
1215 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1216 TASK_UNINTERRUPTIBLE);
1217 mutex_unlock(&c->bucket_lock);
1218 return -EINTR;
1219 }
1220
1221 mutex_unlock(&c->bucket_lock);
1222
1223 return mca_cannibalize_lock(b->c, op);
1224 }
1225
1226 /* Garbage collection */
1227
1228 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1229 struct bkey *k)
1230 {
1231 uint8_t stale = 0;
1232 unsigned int i;
1233 struct bucket *g;
1234
1235 /*
1236 * ptr_invalid() can't return true for the keys that mark btree nodes as
1237 * freed, but since ptr_bad() returns true we'll never actually use them
1238 * for anything and thus we don't want to mark their pointers here
1239 */
1240 if (!bkey_cmp(k, &ZERO_KEY))
1241 return stale;
1242
1243 for (i = 0; i < KEY_PTRS(k); i++) {
1244 if (!ptr_available(c, k, i))
1245 continue;
1246
1247 g = PTR_BUCKET(c, k, i);
1248
1249 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1250 g->last_gc = PTR_GEN(k, i);
1251
1252 if (ptr_stale(c, k, i)) {
1253 stale = max(stale, ptr_stale(c, k, i));
1254 continue;
1255 }
1256
1257 cache_bug_on(GC_MARK(g) &&
1258 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1259 c, "inconsistent ptrs: mark = %llu, level = %i",
1260 GC_MARK(g), level);
1261
1262 if (level)
1263 SET_GC_MARK(g, GC_MARK_METADATA);
1264 else if (KEY_DIRTY(k))
1265 SET_GC_MARK(g, GC_MARK_DIRTY);
1266 else if (!GC_MARK(g))
1267 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1268
1269 /* guard against overflow */
1270 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1271 GC_SECTORS_USED(g) + KEY_SIZE(k),
1272 MAX_GC_SECTORS_USED));
1273
1274 BUG_ON(!GC_SECTORS_USED(g));
1275 }
1276
1277 return stale;
1278 }
1279
1280 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1281
1282 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1283 {
1284 unsigned int i;
1285
1286 for (i = 0; i < KEY_PTRS(k); i++)
1287 if (ptr_available(c, k, i) &&
1288 !ptr_stale(c, k, i)) {
1289 struct bucket *b = PTR_BUCKET(c, k, i);
1290
1291 b->gen = PTR_GEN(k, i);
1292
1293 if (level && bkey_cmp(k, &ZERO_KEY))
1294 b->prio = BTREE_PRIO;
1295 else if (!level && b->prio == BTREE_PRIO)
1296 b->prio = INITIAL_PRIO;
1297 }
1298
1299 __bch_btree_mark_key(c, level, k);
1300 }
1301
1302 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1303 {
1304 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1305 }
1306
1307 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1308 {
1309 uint8_t stale = 0;
1310 unsigned int keys = 0, good_keys = 0;
1311 struct bkey *k;
1312 struct btree_iter iter;
1313 struct bset_tree *t;
1314
1315 min_heap_init(&iter.heap, NULL, MAX_BSETS);
1316
1317 gc->nodes++;
1318
1319 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1320 stale = max(stale, btree_mark_key(b, k));
1321 keys++;
1322
1323 if (bch_ptr_bad(&b->keys, k))
1324 continue;
1325
1326 gc->key_bytes += bkey_u64s(k);
1327 gc->nkeys++;
1328 good_keys++;
1329
1330 gc->data += KEY_SIZE(k);
1331 }
1332
1333 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1334 btree_bug_on(t->size &&
1335 bset_written(&b->keys, t) &&
1336 bkey_cmp(&b->key, &t->end) < 0,
1337 b, "found short btree key in gc");
1338
1339 if (b->c->gc_always_rewrite)
1340 return true;
1341
1342 if (stale > 10)
1343 return true;
1344
1345 if ((keys - good_keys) * 2 > keys)
1346 return true;
1347
1348 return false;
1349 }
1350
1351 #define GC_MERGE_NODES 4U
1352
1353 struct gc_merge_info {
1354 struct btree *b;
1355 unsigned int keys;
1356 };
1357
1358 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1359 struct keylist *insert_keys,
1360 atomic_t *journal_ref,
1361 struct bkey *replace_key);
1362
1363 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1364 struct gc_stat *gc, struct gc_merge_info *r)
1365 {
1366 unsigned int i, nodes = 0, keys = 0, blocks;
1367 struct btree *new_nodes[GC_MERGE_NODES];
1368 struct keylist keylist;
1369 struct closure cl;
1370 struct bkey *k;
1371
1372 bch_keylist_init(&keylist);
1373
1374 if (btree_check_reserve(b, NULL))
1375 return 0;
1376
1377 memset(new_nodes, 0, sizeof(new_nodes));
1378 closure_init_stack(&cl);
1379
1380 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1381 keys += r[nodes++].keys;
1382
1383 blocks = btree_default_blocks(b->c) * 2 / 3;
1384
1385 if (nodes < 2 ||
1386 __set_blocks(b->keys.set[0].data, keys,
1387 block_bytes(b->c->cache)) > blocks * (nodes - 1))
1388 return 0;
1389
1390 for (i = 0; i < nodes; i++) {
1391 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1392 if (IS_ERR(new_nodes[i]))
1393 goto out_nocoalesce;
1394 }
1395
1396 /*
1397 * We have to check the reserve here, after we've allocated our new
1398 * nodes, to make sure the insert below will succeed - we also check
1399 * before as an optimization to potentially avoid a bunch of expensive
1400 * allocs/sorts
1401 */
1402 if (btree_check_reserve(b, NULL))
1403 goto out_nocoalesce;
1404
1405 for (i = 0; i < nodes; i++)
1406 mutex_lock(&new_nodes[i]->write_lock);
1407
1408 for (i = nodes - 1; i > 0; --i) {
1409 struct bset *n1 = btree_bset_first(new_nodes[i]);
1410 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1411 struct bkey *k, *last = NULL;
1412
1413 keys = 0;
1414
1415 if (i > 1) {
1416 for (k = n2->start;
1417 k < bset_bkey_last(n2);
1418 k = bkey_next(k)) {
1419 if (__set_blocks(n1, n1->keys + keys +
1420 bkey_u64s(k),
1421 block_bytes(b->c->cache)) > blocks)
1422 break;
1423
1424 last = k;
1425 keys += bkey_u64s(k);
1426 }
1427 } else {
1428 /*
1429 * Last node we're not getting rid of - we're getting
1430 * rid of the node at r[0]. Have to try and fit all of
1431 * the remaining keys into this node; we can't ensure
1432 * they will always fit due to rounding and variable
1433 * length keys (shouldn't be possible in practice,
1434 * though)
1435 */
1436 if (__set_blocks(n1, n1->keys + n2->keys,
1437 block_bytes(b->c->cache)) >
1438 btree_blocks(new_nodes[i]))
1439 goto out_unlock_nocoalesce;
1440
1441 keys = n2->keys;
1442 /* Take the key of the node we're getting rid of */
1443 last = &r->b->key;
1444 }
1445
1446 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1447 btree_blocks(new_nodes[i]));
1448
1449 if (last)
1450 bkey_copy_key(&new_nodes[i]->key, last);
1451
1452 memcpy(bset_bkey_last(n1),
1453 n2->start,
1454 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1455
1456 n1->keys += keys;
1457 r[i].keys = n1->keys;
1458
1459 memmove(n2->start,
1460 bset_bkey_idx(n2, keys),
1461 (void *) bset_bkey_last(n2) -
1462 (void *) bset_bkey_idx(n2, keys));
1463
1464 n2->keys -= keys;
1465
1466 if (__bch_keylist_realloc(&keylist,
1467 bkey_u64s(&new_nodes[i]->key)))
1468 goto out_unlock_nocoalesce;
1469
1470 bch_btree_node_write(new_nodes[i], &cl);
1471 bch_keylist_add(&keylist, &new_nodes[i]->key);
1472 }
1473
1474 for (i = 0; i < nodes; i++)
1475 mutex_unlock(&new_nodes[i]->write_lock);
1476
1477 closure_sync(&cl);
1478
1479 /* We emptied out this node */
1480 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1481 btree_node_free(new_nodes[0]);
1482 rw_unlock(true, new_nodes[0]);
1483 new_nodes[0] = NULL;
1484
1485 for (i = 0; i < nodes; i++) {
1486 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1487 goto out_nocoalesce;
1488
1489 make_btree_freeing_key(r[i].b, keylist.top);
1490 bch_keylist_push(&keylist);
1491 }
1492
1493 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1494 BUG_ON(!bch_keylist_empty(&keylist));
1495
1496 for (i = 0; i < nodes; i++) {
1497 btree_node_free(r[i].b);
1498 rw_unlock(true, r[i].b);
1499
1500 r[i].b = new_nodes[i];
1501 }
1502
1503 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1504 r[nodes - 1].b = ERR_PTR(-EINTR);
1505
1506 trace_bcache_btree_gc_coalesce(nodes);
1507 gc->nodes--;
1508
1509 bch_keylist_free(&keylist);
1510
1511 /* Invalidated our iterator */
1512 return -EINTR;
1513
1514 out_unlock_nocoalesce:
1515 for (i = 0; i < nodes; i++)
1516 mutex_unlock(&new_nodes[i]->write_lock);
1517
1518 out_nocoalesce:
1519 closure_sync(&cl);
1520
1521 while ((k = bch_keylist_pop(&keylist)))
1522 if (!bkey_cmp(k, &ZERO_KEY))
1523 atomic_dec(&b->c->prio_blocked);
1524 bch_keylist_free(&keylist);
1525
1526 for (i = 0; i < nodes; i++)
1527 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1528 btree_node_free(new_nodes[i]);
1529 rw_unlock(true, new_nodes[i]);
1530 }
1531 return 0;
1532 }
1533
1534 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1535 struct btree *replace)
1536 {
1537 struct keylist keys;
1538 struct btree *n;
1539
1540 if (btree_check_reserve(b, NULL))
1541 return 0;
1542
1543 n = btree_node_alloc_replacement(replace, NULL);
1544 if (IS_ERR(n))
1545 return 0;
1546
1547 /* recheck reserve after allocating replacement node */
1548 if (btree_check_reserve(b, NULL)) {
1549 btree_node_free(n);
1550 rw_unlock(true, n);
1551 return 0;
1552 }
1553
1554 bch_btree_node_write_sync(n);
1555
1556 bch_keylist_init(&keys);
1557 bch_keylist_add(&keys, &n->key);
1558
1559 make_btree_freeing_key(replace, keys.top);
1560 bch_keylist_push(&keys);
1561
1562 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1563 BUG_ON(!bch_keylist_empty(&keys));
1564
1565 btree_node_free(replace);
1566 rw_unlock(true, n);
1567
1568 /* Invalidated our iterator */
1569 return -EINTR;
1570 }
1571
1572 static unsigned int btree_gc_count_keys(struct btree *b)
1573 {
1574 struct bkey *k;
1575 struct btree_iter iter;
1576 unsigned int ret = 0;
1577
1578 min_heap_init(&iter.heap, NULL, MAX_BSETS);
1579
1580 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1581 ret += bkey_u64s(k);
1582
1583 return ret;
1584 }
1585
1586 static size_t btree_gc_min_nodes(struct cache_set *c)
1587 {
1588 size_t min_nodes;
1589
1590 /*
1591 * Incremental GC pauses for 100ms whenever front-side
1592 * I/O arrives, so if GC only processed a constant
1593 * number (100) of nodes each time, then with many btree
1594 * nodes GC would take a very long time, and the front
1595 * side I/Os would run out of buckets (since no new
1596 * bucket can be allocated during GC) and be blocked
1597 * again. So instead of a constant batch size, the number
1598 * of nodes processed per pass scales with the total
1599 * number of btree nodes: GC is divided into a constant
1600 * (100) number of passes, so with many btree nodes each
1601 * pass processes more nodes, and with fewer nodes each
1602 * pass processes fewer (but never less than MIN_GC_NODES).
1603 */
1604 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1605 if (min_nodes < MIN_GC_NODES)
1606 min_nodes = MIN_GC_NODES;
1607
1608 return min_nodes;
1609 }
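/*
 * Worked example: with c->gc_stats.nodes around 50000, each pass handles at
 * least 500 nodes before btree_gc_recurse() returns -EAGAIN and sleeps for
 * GC_SLEEP_MS; with only a few thousand nodes the MIN_GC_NODES floor of 100
 * applies instead.
 */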
1610
1611
1612 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1613 struct closure *writes, struct gc_stat *gc)
1614 {
1615 int ret = 0;
1616 bool should_rewrite;
1617 struct bkey *k;
1618 struct btree_iter iter;
1619 struct gc_merge_info r[GC_MERGE_NODES];
1620 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1621
1622 min_heap_init(&iter.heap, NULL, MAX_BSETS);
1623 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1624
1625 for (i = r; i < r + ARRAY_SIZE(r); i++)
1626 i->b = ERR_PTR(-EINTR);
1627
1628 while (1) {
1629 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1630 if (k) {
1631 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1632 true, b);
1633 if (IS_ERR(r->b)) {
1634 ret = PTR_ERR(r->b);
1635 break;
1636 }
1637
1638 r->keys = btree_gc_count_keys(r->b);
1639
1640 ret = btree_gc_coalesce(b, op, gc, r);
1641 if (ret)
1642 break;
1643 }
1644
1645 if (!last->b)
1646 break;
1647
1648 if (!IS_ERR(last->b)) {
1649 should_rewrite = btree_gc_mark_node(last->b, gc);
1650 if (should_rewrite) {
1651 ret = btree_gc_rewrite_node(b, op, last->b);
1652 if (ret)
1653 break;
1654 }
1655
1656 if (last->b->level) {
1657 ret = btree_gc_recurse(last->b, op, writes, gc);
1658 if (ret)
1659 break;
1660 }
1661
1662 bkey_copy_key(&b->c->gc_done, &last->b->key);
1663
1664 /*
1665 * Must flush leaf nodes before gc ends, since replace
1666 * operations aren't journalled
1667 */
1668 mutex_lock(&last->b->write_lock);
1669 if (btree_node_dirty(last->b))
1670 bch_btree_node_write(last->b, writes);
1671 mutex_unlock(&last->b->write_lock);
1672 rw_unlock(true, last->b);
1673 }
1674
1675 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1676 r->b = NULL;
1677
1678 if (atomic_read(&b->c->search_inflight) &&
1679 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1680 gc->nodes_pre = gc->nodes;
1681 ret = -EAGAIN;
1682 break;
1683 }
1684
1685 if (need_resched()) {
1686 ret = -EAGAIN;
1687 break;
1688 }
1689 }
1690
1691 for (i = r; i < r + ARRAY_SIZE(r); i++)
1692 if (!IS_ERR_OR_NULL(i->b)) {
1693 mutex_lock(&i->b->write_lock);
1694 if (btree_node_dirty(i->b))
1695 bch_btree_node_write(i->b, writes);
1696 mutex_unlock(&i->b->write_lock);
1697 rw_unlock(true, i->b);
1698 }
1699
1700 return ret;
1701 }
1702
1703 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1704 struct closure *writes, struct gc_stat *gc)
1705 {
1706 struct btree *n = NULL;
1707 int ret = 0;
1708 bool should_rewrite;
1709
1710 should_rewrite = btree_gc_mark_node(b, gc);
1711 if (should_rewrite) {
1712 n = btree_node_alloc_replacement(b, NULL);
1713
1714 if (!IS_ERR(n)) {
1715 bch_btree_node_write_sync(n);
1716
1717 bch_btree_set_root(n);
1718 btree_node_free(b);
1719 rw_unlock(true, n);
1720
1721 return -EINTR;
1722 }
1723 }
1724
1725 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1726
1727 if (b->level) {
1728 ret = btree_gc_recurse(b, op, writes, gc);
1729 if (ret)
1730 return ret;
1731 }
1732
1733 bkey_copy_key(&b->c->gc_done, &b->key);
1734
1735 return ret;
1736 }
1737
1738 static void btree_gc_start(struct cache_set *c)
1739 {
1740 struct cache *ca;
1741 struct bucket *b;
1742
1743 if (!c->gc_mark_valid)
1744 return;
1745
1746 mutex_lock(&c->bucket_lock);
1747
1748 c->gc_done = ZERO_KEY;
1749
1750 ca = c->cache;
1751 for_each_bucket(b, ca) {
1752 b->last_gc = b->gen;
1753 if (bch_can_invalidate_bucket(ca, b))
1754 b->reclaimable_in_gc = 1;
1755 if (!atomic_read(&b->pin)) {
1756 SET_GC_MARK(b, 0);
1757 SET_GC_SECTORS_USED(b, 0);
1758 }
1759 }
1760
1761 c->gc_mark_valid = 0;
1762 mutex_unlock(&c->bucket_lock);
1763 }
1764
1765 static void bch_btree_gc_finish(struct cache_set *c)
1766 {
1767 struct bucket *b;
1768 struct cache *ca;
1769 unsigned int i, j;
1770 uint64_t *k;
1771
1772 mutex_lock(&c->bucket_lock);
1773
1774 set_gc_sectors(c);
1775 c->gc_mark_valid = 1;
1776 c->need_gc = 0;
1777
1778 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1779 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1780 GC_MARK_METADATA);
1781
1782 /* don't reclaim buckets to which writeback keys point */
1783 rcu_read_lock();
1784 for (i = 0; i < c->devices_max_used; i++) {
1785 struct bcache_device *d = c->devices[i];
1786 struct cached_dev *dc;
1787 struct keybuf_key *w, *n;
1788
1789 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1790 continue;
1791 dc = container_of(d, struct cached_dev, disk);
1792
1793 spin_lock(&dc->writeback_keys.lock);
1794 rbtree_postorder_for_each_entry_safe(w, n,
1795 &dc->writeback_keys.keys, node)
1796 for (j = 0; j < KEY_PTRS(&w->key); j++)
1797 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1798 GC_MARK_DIRTY);
1799 spin_unlock(&dc->writeback_keys.lock);
1800 }
1801 rcu_read_unlock();
1802
1803 c->avail_nbuckets = 0;
1804
1805 ca = c->cache;
1806 ca->invalidate_needs_gc = 0;
1807
1808 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1809 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1810
1811 for (k = ca->prio_buckets;
1812 k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
1813 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1814
1815 for_each_bucket(b, ca) {
1816 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1817
1818 if (b->reclaimable_in_gc)
1819 b->reclaimable_in_gc = 0;
1820
1821 if (atomic_read(&b->pin))
1822 continue;
1823
1824 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1825
1826 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1827 c->avail_nbuckets++;
1828 }
1829
1830 mutex_unlock(&c->bucket_lock);
1831 }
1832
1833 static void bch_btree_gc(struct cache_set *c)
1834 {
1835 int ret;
1836 struct gc_stat stats;
1837 struct closure writes;
1838 struct btree_op op;
1839 uint64_t start_time = local_clock();
1840
1841 trace_bcache_gc_start(c);
1842
1843 memset(&stats, 0, sizeof(struct gc_stat));
1844 closure_init_stack(&writes);
1845 bch_btree_op_init(&op, SHRT_MAX);
1846
1847 btree_gc_start(c);
1848
1849 /* if CACHE_SET_IO_DISABLE is set, the gc thread should stop too */
1850 do {
1851 ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1852 closure_sync(&writes);
1853 cond_resched();
1854
1855 if (ret == -EAGAIN)
1856 schedule_timeout_interruptible(msecs_to_jiffies
1857 (GC_SLEEP_MS));
1858 else if (ret)
1859 pr_warn("gc failed!\n");
1860 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1861
1862 bch_btree_gc_finish(c);
1863 wake_up_allocators(c);
1864
1865 bch_time_stats_update(&c->btree_gc_time, start_time);
1866
1867 stats.key_bytes *= sizeof(uint64_t);
1868 stats.data <<= 9;
1869 bch_update_bucket_in_use(c, &stats);
1870 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1871
1872 trace_bcache_gc_end(c);
1873
1874 bch_moving_gc(c);
1875 }
1876
1877 static bool gc_should_run(struct cache_set *c)
1878 {
1879 struct cache *ca = c->cache;
1880
1881 if (ca->invalidate_needs_gc)
1882 return true;
1883
1884 if (atomic_read(&c->sectors_to_gc) < 0)
1885 return true;
1886
1887 return false;
1888 }
1889
1890 static int bch_gc_thread(void *arg)
1891 {
1892 struct cache_set *c = arg;
1893
1894 while (1) {
1895 wait_event_interruptible(c->gc_wait,
1896 kthread_should_stop() ||
1897 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1898 gc_should_run(c));
1899
1900 if (kthread_should_stop() ||
1901 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1902 break;
1903
1904 set_gc_sectors(c);
1905 bch_btree_gc(c);
1906 }
1907
1908 wait_for_kthread_stop();
1909 return 0;
1910 }
1911
1912 int bch_gc_thread_start(struct cache_set *c)
1913 {
1914 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1915 return PTR_ERR_OR_ZERO(c->gc_thread);
1916 }
1917
1918 /* Initial partial gc */
1919
1920 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1921 {
1922 int ret = 0;
1923 struct bkey *k, *p = NULL;
1924 struct btree_iter iter;
1925
1926 min_heap_init(&iter.heap, NULL, MAX_BSETS);
1927
1928 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1929 bch_initial_mark_key(b->c, b->level, k);
1930
1931 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1932
1933 if (b->level) {
1934 bch_btree_iter_init(&b->keys, &iter, NULL);
1935
1936 do {
1937 k = bch_btree_iter_next_filter(&iter, &b->keys,
1938 bch_ptr_bad);
1939 if (k) {
1940 btree_node_prefetch(b, k);
1941 /*
1942 * initialize c->gc_stats.nodes
1943 * for incremental GC
1944 */
1945 b->c->gc_stats.nodes++;
1946 }
1947
1948 if (p)
1949 ret = bcache_btree(check_recurse, p, b, op);
1950
1951 p = k;
1952 } while (p && !ret);
1953 }
1954
1955 return ret;
1956 }
1957
1958
1959 static int bch_btree_check_thread(void *arg)
1960 {
1961 int ret;
1962 struct btree_check_info *info = arg;
1963 struct btree_check_state *check_state = info->state;
1964 struct cache_set *c = check_state->c;
1965 struct btree_iter iter;
1966 struct bkey *k, *p;
1967 int cur_idx, prev_idx, skip_nr;
1968
1969 k = p = NULL;
1970 cur_idx = prev_idx = 0;
1971 ret = 0;
1972
1973 min_heap_init(&iter.heap, NULL, MAX_BSETS);
1974
1975 /* root node keys are checked before thread created */
1976 bch_btree_iter_init(&c->root->keys, &iter, NULL);
1977 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
1978 BUG_ON(!k);
1979
1980 p = k;
1981 while (k) {
1982 /*
1983 * Fetch a root node key index, skip the keys which
1984 * should be fetched by other threads, then check the
1985 * sub-tree indexed by the fetched key.
1986 */
1987 spin_lock(&check_state->idx_lock);
1988 cur_idx = check_state->key_idx;
1989 check_state->key_idx++;
1990 spin_unlock(&check_state->idx_lock);
1991
1992 skip_nr = cur_idx - prev_idx;
1993
1994 while (skip_nr) {
1995 k = bch_btree_iter_next_filter(&iter,
1996 &c->root->keys,
1997 bch_ptr_bad);
1998 if (k)
1999 p = k;
2000 else {
2001 /*
2002 * No more keys to check in the root node;
2003 * the current checking threads are enough,
2004 * so stop creating more.
2005 */
2006 atomic_set(&check_state->enough, 1);
2007 /* Update check_state->enough earlier */
2008 smp_mb__after_atomic();
2009 goto out;
2010 }
2011 skip_nr--;
2012 cond_resched();
2013 }
2014
2015 if (p) {
2016 struct btree_op op;
2017
2018 btree_node_prefetch(c->root, p);
2019 c->gc_stats.nodes++;
2020 bch_btree_op_init(&op, 0);
2021 ret = bcache_btree(check_recurse, p, c->root, &op);
2022 /*
2023 * The op may have been added to the cache_set's btree_cache_wait
2024 * list in mca_cannibalize(); make sure it is removed from that
2025 * list and that btree_cache_alloc_lock is released before op
2026 * goes out of scope.
2027 * Otherwise the btree_cache_wait list would be corrupted.
2028 */
2029 bch_cannibalize_unlock(c);
2030 finish_wait(&c->btree_cache_wait, &op.wait);
2031 if (ret)
2032 goto out;
2033 }
2034 p = NULL;
2035 prev_idx = cur_idx;
2036 cond_resched();
2037 }
2038
2039 out:
2040 info->result = ret;
2041 /* update check_state->started among all CPUs */
2042 smp_mb__before_atomic();
2043 if (atomic_dec_and_test(&check_state->started))
2044 wake_up(&check_state->wait);
2045
2046 return ret;
2047 }
2048
2049
2050
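/* Use half of the online CPUs, clamped to [1, BCH_BTR_CHKTHREAD_MAX]. */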
2051 static int bch_btree_chkthread_nr(void)
2052 {
2053 int n = num_online_cpus()/2;
2054
2055 if (n == 0)
2056 n = 1;
2057 else if (n > BCH_BTR_CHKTHREAD_MAX)
2058 n = BCH_BTR_CHKTHREAD_MAX;
2059
2060 return n;
2061 }
2062
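/*
 * Check and mark the whole btree when a cache set is registered. The root
 * node's keys are marked here directly; if the tree is more than one level
 * deep, up to bch_btree_chkthread_nr() kthreads are started to walk the
 * sub-trees in parallel, and we wait for all of them before returning the
 * first error seen.
 */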
2063 int bch_btree_check(struct cache_set *c)
2064 {
2065 int ret = 0;
2066 int i;
2067 struct bkey *k = NULL;
2068 struct btree_iter iter;
2069 struct btree_check_state check_state;
2070
2071 min_heap_init(&iter.heap, NULL, MAX_BSETS);
2072
2073 /* check and mark root node keys */
2074 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2075 bch_initial_mark_key(c, c->root->level, k);
2076
2077 bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
2078
2079 if (c->root->level == 0)
2080 return 0;
2081
2082 memset(&check_state, 0, sizeof(struct btree_check_state));
2083 check_state.c = c;
2084 check_state.total_threads = bch_btree_chkthread_nr();
2085 check_state.key_idx = 0;
2086 spin_lock_init(&check_state.idx_lock);
2087 atomic_set(&check_state.started, 0);
2088 atomic_set(&check_state.enough, 0);
2089 init_waitqueue_head(&check_state.wait);
2090
2091 rw_lock(0, c->root, c->root->level);
2092 /*
2093 * Run multiple threads to check btree nodes in parallel.
2094 * If check_state.enough is non-zero, the check threads
2095 * already running are enough; it is unnecessary to create
2096 * more.
2097 */
2098 for (i = 0; i < check_state.total_threads; i++) {
2099 /* fetch latest check_state.enough earlier */
2100 smp_mb__before_atomic();
2101 if (atomic_read(&check_state.enough))
2102 break;
2103
2104 check_state.infos[i].result = 0;
2105 check_state.infos[i].state = &check_state;
2106
2107 check_state.infos[i].thread =
2108 kthread_run(bch_btree_check_thread,
2109 &check_state.infos[i],
2110 "bch_btrchk[%d]", i);
2111 if (IS_ERR(check_state.infos[i].thread)) {
2112 pr_err("fails to run thread bch_btrchk[%d]\n", i);
2113 for (--i; i >= 0; i--)
2114 kthread_stop(check_state.infos[i].thread);
2115 ret = -ENOMEM;
2116 goto out;
2117 }
2118 atomic_inc(&check_state.started);
2119 }
2120
2121 /*
2122 * Must wait for all threads to stop.
2123 */
2124 wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
2125
2126 for (i = 0; i < check_state.total_threads; i++) {
2127 if (check_state.infos[i].result) {
2128 ret = check_state.infos[i].result;
2129 goto out;
2130 }
2131 }
2132
2133 out:
2134 rw_unlock(0, c->root);
2135 return ret;
2136 }
2137
2138 void bch_initial_gc_finish(struct cache_set *c)
2139 {
2140 struct cache *ca = c->cache;
2141 struct bucket *b;
2142
2143 bch_btree_gc_finish(c);
2144
2145 mutex_lock(&c->bucket_lock);
2146
2147 /*
2148 * We need to put some unused buckets directly on the prio freelist in
2149 * order to get the allocator thread started - it needs freed buckets in
2150 * order to rewrite the prios and gens, and it needs to rewrite prios
2151 * and gens in order to free buckets.
2152 *
2153 * This is only safe for buckets that have no live data in them, which
2154 * there should always be some of.
2155 */
2156 for_each_bucket(b, ca) {
2157 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2158 fifo_full(&ca->free[RESERVE_BTREE]))
2159 break;
2160
2161 if (bch_can_invalidate_bucket(ca, b) &&
2162 !GC_MARK(b)) {
2163 __bch_invalidate_one_bucket(ca, b);
2164 if (!fifo_push(&ca->free[RESERVE_PRIO],
2165 b - ca->buckets))
2166 fifo_push(&ca->free[RESERVE_BTREE],
2167 b - ca->buckets);
2168 }
2169 }
2170
2171 mutex_unlock(&c->bucket_lock);
2172 }
2173
2174 /* Btree insertion */
2175
2176 static bool btree_insert_key(struct btree *b, struct bkey *k,
2177 struct bkey *replace_key)
2178 {
2179 unsigned int status;
2180
2181 BUG_ON(bkey_cmp(k, &b->key) > 0);
2182
2183 status = bch_btree_insert_key(&b->keys, k, replace_key);
2184 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2185 bch_check_keys(&b->keys, "%u for %s", status,
2186 replace_key ? "replace" : "insert");
2187
2188 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2189 status);
2190 return true;
2191 } else
2192 return false;
2193 }
2194
2195 static size_t insert_u64s_remaining(struct btree *b)
2196 {
2197 long ret = bch_btree_keys_u64s_remaining(&b->keys);
2198
2199 /*
2200 * Might land in the middle of an existing extent and have to split it
2201 */
2202 if (b->keys.ops->is_extents)
2203 ret -= KEY_MAX_U64S;
2204
2205 return max(ret, 0L);
2206 }
2207
2208 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2209 struct keylist *insert_keys,
2210 struct bkey *replace_key)
2211 {
2212 bool ret = false;
2213 int oldsize = bch_count_data(&b->keys);
2214
2215 while (!bch_keylist_empty(insert_keys)) {
2216 struct bkey *k = insert_keys->keys;
2217
2218 if (bkey_u64s(k) > insert_u64s_remaining(b))
2219 break;
2220
2221 if (bkey_cmp(k, &b->key) <= 0) {
2222 if (!b->level)
2223 bkey_put(b->c, k);
2224
2225 ret |= btree_insert_key(b, k, replace_key);
2226 bch_keylist_pop_front(insert_keys);
2227 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2228 BKEY_PADDED(key) temp;
2229 bkey_copy(&temp.key, insert_keys->keys);
2230
2231 bch_cut_back(&b->key, &temp.key);
2232 bch_cut_front(&b->key, insert_keys->keys);
2233
2234 ret |= btree_insert_key(b, &temp.key, replace_key);
2235 break;
2236 } else {
2237 break;
2238 }
2239 }
2240
2241 if (!ret)
2242 op->insert_collision = true;
2243
2244 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2245
2246 BUG_ON(bch_count_data(&b->keys) < oldsize);
2247 return ret;
2248 }
2249
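/*
 * Rewrite node b into a freshly allocated replacement, inserting insert_keys
 * along the way. If the result would be more than ~4/5 full, the keys are
 * split across two new nodes (plus a new root when b itself was the root);
 * the keys pointing at the new node(s) are then inserted into the parent and
 * the old node is freed. Returns -EINTR/-EAGAIN when the caller must retry.
 */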
2250 static int btree_split(struct btree *b, struct btree_op *op,
2251 struct keylist *insert_keys,
2252 struct bkey *replace_key)
2253 {
2254 bool split;
2255 struct btree *n1, *n2 = NULL, *n3 = NULL;
2256 uint64_t start_time = local_clock();
2257 struct closure cl;
2258 struct keylist parent_keys;
2259
2260 closure_init_stack(&cl);
2261 bch_keylist_init(&parent_keys);
2262
2263 if (btree_check_reserve(b, op)) {
2264 if (!b->level)
2265 return -EINTR;
2266 else
2267 WARN(1, "insufficient reserve for split\n");
2268 }
2269
2270 n1 = btree_node_alloc_replacement(b, op);
2271 if (IS_ERR(n1))
2272 goto err;
2273
2274 split = set_blocks(btree_bset_first(n1),
2275 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
2276
2277 if (split) {
2278 unsigned int keys = 0;
2279
2280 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2281
2282 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2283 if (IS_ERR(n2))
2284 goto err_free1;
2285
2286 if (!b->parent) {
2287 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2288 if (IS_ERR(n3))
2289 goto err_free2;
2290 }
2291
2292 mutex_lock(&n1->write_lock);
2293 mutex_lock(&n2->write_lock);
2294
2295 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2296
2297 /*
2298 * Has to be a linear search because we don't have an auxiliary
2299 * search tree yet
2300 */
2301
2302 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2303 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2304 keys));
2305
2306 bkey_copy_key(&n1->key,
2307 bset_bkey_idx(btree_bset_first(n1), keys));
2308 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2309
2310 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2311 btree_bset_first(n1)->keys = keys;
2312
2313 memcpy(btree_bset_first(n2)->start,
2314 bset_bkey_last(btree_bset_first(n1)),
2315 btree_bset_first(n2)->keys * sizeof(uint64_t));
2316
2317 bkey_copy_key(&n2->key, &b->key);
2318
2319 bch_keylist_add(&parent_keys, &n2->key);
2320 bch_btree_node_write(n2, &cl);
2321 mutex_unlock(&n2->write_lock);
2322 rw_unlock(true, n2);
2323 } else {
2324 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2325
2326 mutex_lock(&n1->write_lock);
2327 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2328 }
2329
2330 bch_keylist_add(&parent_keys, &n1->key);
2331 bch_btree_node_write(n1, &cl);
2332 mutex_unlock(&n1->write_lock);
2333
2334 if (n3) {
2335 /* Depth increases, make a new root */
2336 mutex_lock(&n3->write_lock);
2337 bkey_copy_key(&n3->key, &MAX_KEY);
2338 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2339 bch_btree_node_write(n3, &cl);
2340 mutex_unlock(&n3->write_lock);
2341
2342 closure_sync(&cl);
2343 bch_btree_set_root(n3);
2344 rw_unlock(true, n3);
2345 } else if (!b->parent) {
2346 /* Root filled up but didn't need to be split */
2347 closure_sync(&cl);
2348 bch_btree_set_root(n1);
2349 } else {
2350 /* Split a non root node */
2351 closure_sync(&cl);
2352 make_btree_freeing_key(b, parent_keys.top);
2353 bch_keylist_push(&parent_keys);
2354
2355 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2356 BUG_ON(!bch_keylist_empty(&parent_keys));
2357 }
2358
2359 btree_node_free(b);
2360 rw_unlock(true, n1);
2361
2362 bch_time_stats_update(&b->c->btree_split_time, start_time);
2363
2364 return 0;
2365 err_free2:
2366 bkey_put(b->c, &n2->key);
2367 btree_node_free(n2);
2368 rw_unlock(true, n2);
2369 err_free1:
2370 bkey_put(b->c, &n1->key);
2371 btree_node_free(n1);
2372 rw_unlock(true, n1);
2373 err:
2374 WARN(1, "bcache: btree split failed (level %u)", b->level);
2375
2376 if (n3 == ERR_PTR(-EAGAIN) ||
2377 n2 == ERR_PTR(-EAGAIN) ||
2378 n1 == ERR_PTR(-EAGAIN))
2379 return -EAGAIN;
2380
2381 return -ENOMEM;
2382 }
2383
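/*
 * Insert as many keys as will fit into node b, under b's write lock. If the
 * keys don't fit, the node has to be split; depending on whether we're inside
 * a bio submission (current->bio_list) and on which locks the caller already
 * holds, that is either done right here or signalled back with -EAGAIN or
 * -EINTR so the caller can retry with the proper locks.
 */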
2384 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2385 struct keylist *insert_keys,
2386 atomic_t *journal_ref,
2387 struct bkey *replace_key)
2388 {
2389 struct closure cl;
2390
2391 BUG_ON(b->level && replace_key);
2392
2393 closure_init_stack(&cl);
2394
2395 mutex_lock(&b->write_lock);
2396
2397 if (write_block(b) != btree_bset_last(b) &&
2398 b->keys.last_set_unwritten)
2399 bch_btree_init_next(b); /* just wrote a set */
2400
2401 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2402 mutex_unlock(&b->write_lock);
2403 goto split;
2404 }
2405
2406 BUG_ON(write_block(b) != btree_bset_last(b));
2407
2408 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2409 if (!b->level)
2410 bch_btree_leaf_dirty(b, journal_ref);
2411 else
2412 bch_btree_node_write(b, &cl);
2413 }
2414
2415 mutex_unlock(&b->write_lock);
2416
2417 /* wait for btree node write if necessary, after unlock */
2418 closure_sync(&cl);
2419
2420 return 0;
2421 split:
2422 if (current->bio_list) {
2423 op->lock = b->c->root->level + 1;
2424 return -EAGAIN;
2425 } else if (op->lock <= b->c->root->level) {
2426 op->lock = b->c->root->level + 1;
2427 return -EINTR;
2428 } else {
2429 /* Invalidated all iterators */
2430 int ret = btree_split(b, op, insert_keys, replace_key);
2431
2432 if (bch_keylist_empty(insert_keys))
2433 return 0;
2434 else if (!ret)
2435 return -EINTR;
2436 return ret;
2437 }
2438 }
2439
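/*
 * Insert a "check" key: KEY_PTRS is set to 1 and the pointer is filled with a
 * random value on the special PTR_CHECK_DEV device. If the caller only holds
 * a read lock (op->lock == -1) the node lock is upgraded to a write lock
 * first, and we bail out with -EINTR if the node was rewritten underneath us
 * while the lock was dropped.
 */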
2440 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2441 struct bkey *check_key)
2442 {
2443 int ret = -EINTR;
2444 uint64_t btree_ptr = b->key.ptr[0];
2445 unsigned long seq = b->seq;
2446 struct keylist insert;
2447 bool upgrade = op->lock == -1;
2448
2449 bch_keylist_init(&insert);
2450
2451 if (upgrade) {
2452 rw_unlock(false, b);
2453 rw_lock(true, b, b->level);
2454
2455 if (b->key.ptr[0] != btree_ptr ||
2456 b->seq != seq + 1) {
2457 op->lock = b->level;
2458 goto out;
2459 }
2460 }
2461
2462 SET_KEY_PTRS(check_key, 1);
2463 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2464
2465 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2466
2467 bch_keylist_add(&insert, check_key);
2468
2469 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2470
2471 BUG_ON(!ret && !bch_keylist_empty(&insert));
2472 out:
2473 if (upgrade)
2474 downgrade_write(&b->lock);
2475 return ret;
2476 }
2477
2478 struct btree_insert_op {
2479 struct btree_op op;
2480 struct keylist *keys;
2481 atomic_t *journal_ref;
2482 struct bkey *replace_key;
2483 };
2484
2485 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2486 {
2487 struct btree_insert_op *op = container_of(b_op,
2488 struct btree_insert_op, op);
2489
2490 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2491 op->journal_ref, op->replace_key);
2492 if (ret && !bch_keylist_empty(op->keys))
2493 return ret;
2494 else
2495 return MAP_DONE;
2496 }
2497
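/*
 * Top-level insert: maps over the leaf nodes covered by 'keys' and inserts
 * into each in turn; on error the remaining keys are dropped, and a failed
 * replace (insert_collision) is reported as -ESRCH.
 *
 * Rough usage sketch (illustrative only - the keylist and journal ref come
 * from the caller, and real callers usually pass a journal reference rather
 * than NULL):
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	... bch_keylist_add(&keys, k) for each key to insert ...
 *	ret = bch_btree_insert(c, &keys, NULL, NULL);
 */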
2498 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2499 atomic_t *journal_ref, struct bkey *replace_key)
2500 {
2501 struct btree_insert_op op;
2502 int ret = 0;
2503
2504 BUG_ON(current->bio_list);
2505 BUG_ON(bch_keylist_empty(keys));
2506
2507 bch_btree_op_init(&op.op, 0);
2508 op.keys = keys;
2509 op.journal_ref = journal_ref;
2510 op.replace_key = replace_key;
2511
2512 while (!ret && !bch_keylist_empty(keys)) {
2513 op.op.lock = 0;
2514 ret = bch_btree_map_leaf_nodes(&op.op, c,
2515 &START_KEY(keys->keys),
2516 btree_insert_fn);
2517 }
2518
2519 if (ret) {
2520 struct bkey *k;
2521
2522 pr_err("error %i\n", ret);
2523
2524 while ((k = bch_keylist_pop(keys)))
2525 bkey_put(c, k);
2526 } else if (op.op.insert_collision)
2527 ret = -ESRCH;
2528
2529 return ret;
2530 }
2531
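/*
 * Point the cache set at a new root node and write a journal entry recording
 * the change; the node must already have been written, and its buckets must
 * be btree-priority buckets.
 */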
2532 void bch_btree_set_root(struct btree *b)
2533 {
2534 unsigned int i;
2535 struct closure cl;
2536
2537 closure_init_stack(&cl);
2538
2539 trace_bcache_btree_set_root(b);
2540
2541 BUG_ON(!b->written);
2542
2543 for (i = 0; i < KEY_PTRS(&b->key); i++)
2544 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2545
2546 mutex_lock(&b->c->bucket_lock);
2547 list_del_init(&b->list);
2548 mutex_unlock(&b->c->bucket_lock);
2549
2550 b->c->root = b;
2551
2552 bch_journal_meta(b->c, &cl);
2553 closure_sync(&cl);
2554 }
2555
2556 /* Map across nodes or keys */
2557
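/*
 * Generic btree walkers: call fn on every node (or every key) reachable from
 * 'from', recursing depth-first. fn returns MAP_CONTINUE to keep walking; any
 * other value (MAP_DONE or an error) stops the traversal and is passed back
 * to the caller.
 */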
2558 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2559 struct bkey *from,
2560 btree_map_nodes_fn *fn, int flags)
2561 {
2562 int ret = MAP_CONTINUE;
2563
2564 if (b->level) {
2565 struct bkey *k;
2566 struct btree_iter iter;
2567
2568 min_heap_init(&iter.heap, NULL, MAX_BSETS);
2569 bch_btree_iter_init(&b->keys, &iter, from);
2570
2571 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2572 bch_ptr_bad))) {
2573 ret = bcache_btree(map_nodes_recurse, k, b,
2574 op, from, fn, flags);
2575 from = NULL;
2576
2577 if (ret != MAP_CONTINUE)
2578 return ret;
2579 }
2580 }
2581
2582 if (!b->level || flags == MAP_ALL_NODES)
2583 ret = fn(op, b);
2584
2585 return ret;
2586 }
2587
2588 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2589 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2590 {
2591 return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
2592 }
2593
2594 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2595 struct bkey *from, btree_map_keys_fn *fn,
2596 int flags)
2597 {
2598 int ret = MAP_CONTINUE;
2599 struct bkey *k;
2600 struct btree_iter iter;
2601
2602 min_heap_init(&iter.heap, NULL, MAX_BSETS);
2603 bch_btree_iter_init(&b->keys, &iter, from);
2604
2605 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2606 ret = !b->level
2607 ? fn(op, b, k)
2608 : bcache_btree(map_keys_recurse, k,
2609 b, op, from, fn, flags);
2610 from = NULL;
2611
2612 if (ret != MAP_CONTINUE)
2613 return ret;
2614 }
2615
2616 if (!b->level && (flags & MAP_END_KEY))
2617 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2618 KEY_OFFSET(&b->key), 0));
2619
2620 return ret;
2621 }
2622
2623 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2624 struct bkey *from, btree_map_keys_fn *fn, int flags)
2625 {
2626 return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
2627 }
2628
2629 /* Keybuf code */
2630
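/*
 * Keybufs batch up keys matching a predicate so that users such as writeback
 * and moving gc can issue IO against them. Rough usage sketch (not copied
 * from any caller; 'pred' is a caller-supplied keybuf_pred_fn and 'end' a
 * caller-chosen end key):
 *
 *	struct keybuf buf;
 *	struct keybuf_key *w;
 *
 *	bch_keybuf_init(&buf);
 *	buf.last_scanned = ZERO_KEY;
 *
 *	while ((w = bch_keybuf_next_rescan(c, &buf, end, pred))) {
 *		... issue IO for w->key, tracking it via w->private ...
 *		bch_keybuf_del(&buf, w);
 *	}
 */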
2631 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2632 {
2633 /* Overlapping keys compare equal */
2634 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2635 return -1;
2636 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2637 return 1;
2638 return 0;
2639 }
2640
2641 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2642 struct keybuf_key *r)
2643 {
2644 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2645 }
2646
2647 struct refill {
2648 struct btree_op op;
2649 unsigned int nr_found;
2650 struct keybuf *buf;
2651 struct bkey *end;
2652 keybuf_pred_fn *pred;
2653 };
2654
2655 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2656 struct bkey *k)
2657 {
2658 struct refill *refill = container_of(op, struct refill, op);
2659 struct keybuf *buf = refill->buf;
2660 int ret = MAP_CONTINUE;
2661
2662 if (bkey_cmp(k, refill->end) > 0) {
2663 ret = MAP_DONE;
2664 goto out;
2665 }
2666
2667 if (!KEY_SIZE(k)) /* end key */
2668 goto out;
2669
2670 if (refill->pred(buf, k)) {
2671 struct keybuf_key *w;
2672
2673 spin_lock(&buf->lock);
2674
2675 w = array_alloc(&buf->freelist);
2676 if (!w) {
2677 spin_unlock(&buf->lock);
2678 return MAP_DONE;
2679 }
2680
2681 w->private = NULL;
2682 bkey_copy(&w->key, k);
2683
2684 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2685 array_free(&buf->freelist, w);
2686 else
2687 refill->nr_found++;
2688
2689 if (array_freelist_empty(&buf->freelist))
2690 ret = MAP_DONE;
2691
2692 spin_unlock(&buf->lock);
2693 }
2694 out:
2695 buf->last_scanned = *k;
2696 return ret;
2697 }
2698
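/*
 * Scan the btree from buf->last_scanned towards 'end', adding keys that match
 * pred to the keybuf's rbtree until either 'end' is reached or the freelist
 * runs out, then record the range of keys now held in buf->start/buf->end.
 */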
2699 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2700 struct bkey *end, keybuf_pred_fn *pred)
2701 {
2702 struct bkey start = buf->last_scanned;
2703 struct refill refill;
2704
2705 cond_resched();
2706
2707 bch_btree_op_init(&refill.op, -1);
2708 refill.nr_found = 0;
2709 refill.buf = buf;
2710 refill.end = end;
2711 refill.pred = pred;
2712
2713 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2714 refill_keybuf_fn, MAP_END_KEY);
2715
2716 trace_bcache_keyscan(refill.nr_found,
2717 KEY_INODE(&start), KEY_OFFSET(&start),
2718 KEY_INODE(&buf->last_scanned),
2719 KEY_OFFSET(&buf->last_scanned));
2720
2721 spin_lock(&buf->lock);
2722
2723 if (!RB_EMPTY_ROOT(&buf->keys)) {
2724 struct keybuf_key *w;
2725
2726 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2727 buf->start = START_KEY(&w->key);
2728
2729 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2730 buf->end = w->key;
2731 } else {
2732 buf->start = MAX_KEY;
2733 buf->end = MAX_KEY;
2734 }
2735
2736 spin_unlock(&buf->lock);
2737 }
2738
2739 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2740 {
2741 rb_erase(&w->node, &buf->keys);
2742 array_free(&buf->freelist, w);
2743 }
2744
2745 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2746 {
2747 spin_lock(&buf->lock);
2748 __bch_keybuf_del(buf, w);
2749 spin_unlock(&buf->lock);
2750 }
2751
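/*
 * Returns true if any key overlapping [start, end) is currently in use
 * (w->private set); overlapping keys that are not in use are removed from
 * the buffer.
 */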
2752 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2753 struct bkey *end)
2754 {
2755 bool ret = false;
2756 struct keybuf_key *p, *w, s;
2757
2758 s.key = *start;
2759
2760 if (bkey_cmp(end, &buf->start) <= 0 ||
2761 bkey_cmp(start, &buf->end) >= 0)
2762 return false;
2763
2764 spin_lock(&buf->lock);
2765 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2766
2767 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2768 p = w;
2769 w = RB_NEXT(w, node);
2770
2771 if (p->private)
2772 ret = true;
2773 else
2774 __bch_keybuf_del(buf, p);
2775 }
2776
2777 spin_unlock(&buf->lock);
2778 return ret;
2779 }
2780
2781 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2782 {
2783 struct keybuf_key *w;
2784
2785 spin_lock(&buf->lock);
2786
2787 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2788
2789 while (w && w->private)
2790 w = RB_NEXT(w, node);
2791
2792 if (w)
2793 w->private = ERR_PTR(-EINTR);
2794
2795 spin_unlock(&buf->lock);
2796 return w;
2797 }
2798
2799 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2800 struct keybuf *buf,
2801 struct bkey *end,
2802 keybuf_pred_fn *pred)
2803 {
2804 struct keybuf_key *ret;
2805
2806 while (1) {
2807 ret = bch_keybuf_next(buf);
2808 if (ret)
2809 break;
2810
2811 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2812 pr_debug("scan finished\n");
2813 break;
2814 }
2815
2816 bch_refill_keybuf(c, buf, end, pred);
2817 }
2818
2819 return ret;
2820 }
2821
2822 void bch_keybuf_init(struct keybuf *buf)
2823 {
2824 buf->last_scanned = MAX_KEY;
2825 buf->keys = RB_ROOT;
2826
2827 spin_lock_init(&buf->lock);
2828 array_allocator_init(&buf->freelist);
2829 }
2830
2831 void bch_btree_exit(void)
2832 {
2833 if (btree_io_wq)
2834 destroy_workqueue(btree_io_wq);
2835 }
2836
2837 int __init bch_btree_init(void)
2838 {
2839 btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
2840 if (!btree_io_wq)
2841 return -ENOMEM;
2842
2843 return 0;
2844 }
2845