// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"

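/*
 * Advance iterator set @i to its next key; if that set is exhausted, replace
 * it with the last entry of the iterator's heap and shrink the heap by one.
 */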
static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->heap.data[--iter->heap.nr];
}

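/*
 * min_heap less() callback used when sorting btree node (non-extent) keys:
 * keys are ordered by bkey_cmp(); keys that compare equal are ordered so
 * that the newer set's copy surfaces first.
 */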
static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
{
	struct btree_iter_set *_l = (struct btree_iter_set *)l;
	struct btree_iter_set *_r = (struct btree_iter_set *)r;
	int64_t c = bkey_cmp(_l->k, _r->k);

	return !(c ? c > 0 : _l->k < _r->k);
}

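/*
 * Check each pointer in @k against the cache superblock: the extent must fit
 * within a single bucket, and the bucket number must lie within
 * [first_bucket, nbuckets).
 */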
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
			    bucket < ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}

/* Common among btree and extent ptrs */

static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
				return "bad, length too big";
			if (bucket < ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}

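/*
 * Format @k into @buf as human-readable text:
 * "inode:offset len <size> -> [dev:offset gen <gen>, ...]", followed by
 * " dirty" and/or checksum information when those flags are set.
 */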
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned int i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}

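/*
 * Dump @k to the kernel log: the formatted key, the bucket number of each
 * pointer (plus the bucket's priority when it is in range), and the
 * bch_ptr_status() verdict.
 */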
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
	struct btree *b = container_of(keys, struct btree, keys);
	unsigned int j;
	char buf[80];

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_cont(" %s", buf);

	for (j = 0; j < KEY_PTRS(k); j++) {
		size_t n = PTR_BUCKET_NR(b->c, k, j);

		pr_cont(" bucket %zu", n);
		if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
			pr_cont(" prio %i",
				PTR_BUCKET(b->c, k, j)->prio);
	}

	pr_cont(" %s\n", bch_ptr_status(b->c, k));
}

/* Btree ptrs */

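/*
 * A valid btree node pointer must have at least one pointer, a non-zero
 * size, must not be marked dirty, and must pass __ptr_invalid(); anything
 * else is reported via cache_bug().
 */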
bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_btree_ptr_invalid(b->c, k);
}

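/*
 * Expensive debug check: with the bucket lock held (trylock, so the check is
 * skipped on contention), verify that the key is not dirty and that every
 * bucket it references has BTREE_PRIO and, when GC marks are valid, is
 * marked as metadata.
 */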
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned int i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}

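/*
 * A btree node pointer is "bad" (to be dropped) if it is the zero key, has
 * no pointers, is invalid, or any of its pointers is unavailable or stale;
 * the expensive consistency check above runs only when expensive debug
 * checks are enabled.
 */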
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}

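/*
 * insert_fixup hook for btree node pointers: there are no overlaps to
 * resolve. A key with offset 0 is a freeing key for a just-deleted node, so
 * bump this write's prio_blocked; the matching cache_set counter is dropped
 * when the node write completes, letting the freed buckets be reused.
 */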
static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
				       struct bkey *insert,
				       struct btree_iter *iter,
				       struct bkey *replace_key)
{
	struct btree *b = container_of(bk, struct btree, keys);

	if (!KEY_OFFSET(insert))
		btree_current_write(b)->prio_blocked++;

	return false;
}

const struct btree_keys_ops bch_btree_keys_ops = {
	.sort_cmp	= new_bch_key_sort_cmp,
	.insert_fixup	= bch_btree_ptr_insert_fixup,
	.key_invalid	= bch_btree_ptr_invalid,
	.key_bad	= bch_btree_ptr_bad,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
};

/* Extents */

/*
 * min_heap less() callback for extent keys: the underlying ordering is
 * "l > r, or l == r and l is older than r", inverted here so that the
 * smallest start key - and, among equal keys, the newest - sits at the top
 * of the heap.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */

static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args)
{
	struct btree_iter_set *_l = (struct btree_iter_set *)l;
	struct btree_iter_set *_r = (struct btree_iter_set *)r;
	int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));

	return !(c ? c > 0 : _l->k < _r->k);
}

static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
{
	struct btree_iter_set *_iter1 = iter1;
	struct btree_iter_set *_iter2 = iter2;

	swap(*_iter1, *_iter2);
}

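/*
 * sort_fixup hook for extents: while merge-sorting sets, keys from different
 * sets may overlap. Whenever the top of the heap overlaps the next key, trim
 * the older of the two so the newer data wins; if an older key has to be
 * split around a newer one, the front fragment is copied into @tmp and
 * returned so the caller can emit it first.
 */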
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	const struct min_heap_callbacks callbacks = {
		.less = new_bch_extent_sort_cmp,
		.swp = new_btree_iter_swap,
	};
	while (iter->heap.nr > 1) {
		struct btree_iter_set *top = iter->heap.data, *i = top + 1;

		if (iter->heap.nr > 2 &&
		    !new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
			continue;
		}

		if (top->k > i->k) {
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}

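/*
 * If @k is dirty, subtract @sectors from the backing device's dirty-sector
 * count for the range starting at @offset (used when an existing dirty
 * extent is trimmed or overwritten).
 */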
static void bch_subtract_dirty(struct bkey *k,
			       struct cache_set *c,
			       uint64_t offset,
			       int sectors)
{
	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
					     offset, -sectors);
}

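/*
 * insert_fixup hook for extents: walk the keys overlapping @insert and trim
 * or split them so the new data wins, updating dirty-sector accounting as
 * old data is overwritten. For a cmpxchg-style replace (@replace_key
 * non-NULL), each overlapped key must still match the corresponding part of
 * @replace_key: if nothing matches, return true so the insert is dropped; if
 * only a prefix matches, trim @insert down to the sectors that were found.
 */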
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned int old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);

		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned int i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
					   KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}

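/*
 * An extent key is invalid if it has zero size, if its size is larger than
 * its end offset (it would extend below offset 0), or if any pointer fails
 * the bucket bounds check. A zero-size extent is treated as invalid without
 * complaint; the other cases are reported via cache_bug().
 */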
bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_extent_invalid(b->c, k);
}

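/*
 * Expensive debug check for a single extent pointer: with the bucket lock
 * held (trylock, skipped on contention), the bucket's GC mark must be set,
 * must not be GC_MARK_METADATA, and must be GC_MARK_DIRTY if the key is
 * dirty; the bucket's priority must not be BTREE_PRIO.
 */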
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned int ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    (!GC_MARK(g) ||
		     GC_MARK(g) == GC_MARK_METADATA ||
		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}

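/*
 * An extent is "bad" (its cached data should be ignored) if it has no
 * pointers, is invalid, or any pointer is unavailable or stale. A stale
 * pointer on a dirty key is logged, since dirty data should never go stale,
 * and an excessively stale generation triggers btree_bug_on().
 */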
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i, stale;
	char buf[80];

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	for (i = 0; i < KEY_PTRS(k); i++) {
		stale = ptr_stale(b->c, k, i);

		if (stale && KEY_DIRTY(k)) {
			bch_extent_to_text(buf, sizeof(buf), k);
			pr_info("stale dirty pointer, stale %u, key: %s\n",
				stale, buf);
		}

		btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}

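/*
 * The checksum lives in the u64 slot just past the key's last pointer;
 * merging sums the two checksums and clears bit 63.
 */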
static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}

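/*
 * Try to merge @r into @l when the two extents are adjacent both on the
 * backing device and within the same cache bucket. If the combined size
 * would overflow KEY_SIZE, @l is grown to the maximum and @r trimmed rather
 * than merged; checksums are combined only when both keys carry one.
 * Returns true if @r was fully merged into @l.
 */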
static bool bch_extent_merge(struct btree_keys *bk,
			     struct bkey *l,
			     struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (key_merging_disabled(b->c))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}

const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= new_bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.insert_fixup	= bch_extent_insert_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
	.is_extents	= true,
};