xref: /linux/drivers/md/bcache/bset.c (revision d56d000a1f424aa77538bd5aad18b43037ed20cc)
1 /*
2  * Code for working with individual keys, and sorted sets of keys within a
3  * btree node
4  *
5  * Copyright 2012 Google, Inc.
6  */
7 
8 #include "bcache.h"
9 #include "btree.h"
10 #include "debug.h"
11 
12 #include <linux/random.h>
13 #include <linux/prefetch.h>
14 
15 /* Keylists */
16 
17 int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
18 {
19 	size_t oldsize = bch_keylist_nkeys(l);
20 	size_t newsize = oldsize + 2 + nptrs;
21 	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
22 	uint64_t *new_keys;
23 
24 	/* The journalling code doesn't handle the case where the keys to insert
25 	 * are bigger than an empty write: if we just return -ENOMEM here,
26 	 * bio_insert() and bio_invalidate() will insert the keys created so far
27 	 * and finish the rest when the keylist is empty.
28 	 */
29 	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
30 		return -ENOMEM;
31 
32 	newsize = roundup_pow_of_two(newsize);
33 
34 	if (newsize <= KEYLIST_INLINE ||
35 	    roundup_pow_of_two(oldsize) == newsize)
36 		return 0;
37 
38 	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
39 
40 	if (!new_keys)
41 		return -ENOMEM;
42 
43 	if (!old_keys)
44 		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
45 
46 	l->keys_p = new_keys;
47 	l->top_p = new_keys + oldsize;
48 
49 	return 0;
50 }
51 
52 struct bkey *bch_keylist_pop(struct keylist *l)
53 {
54 	struct bkey *k = l->keys;
55 
56 	if (k == l->top)
57 		return NULL;
58 
59 	while (bkey_next(k) != l->top)
60 		k = bkey_next(k);
61 
62 	return l->top = k;
63 }
64 
65 void bch_keylist_pop_front(struct keylist *l)
66 {
67 	l->top_p -= bkey_u64s(l->keys);
68 
69 	memmove(l->keys,
70 		bkey_next(l->keys),
71 		bch_keylist_bytes(l));
72 }
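
/*
 * Rough usage sketch: callers build a keylist up with bch_keylist_add() (see
 * bset.h), growing it with bch_keylist_realloc() as needed, then drain it
 * from the front as keys are inserted into the btree. insert_one_key() below
 * is only a made-up placeholder for the real insertion path.
 */
#if 0
static void example_keylist_drain(struct keylist *l, struct cache_set *c)
{
	while (!bch_keylist_empty(l)) {
		insert_one_key(c, l->keys);	/* hypothetical helper */
		bch_keylist_pop_front(l);	/* drop the key just consumed */
	}
}
#endif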
73 
74 /* Pointer validation */
75 
76 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
77 {
78 	unsigned i;
79 
80 	for (i = 0; i < KEY_PTRS(k); i++)
81 		if (ptr_available(c, k, i)) {
82 			struct cache *ca = PTR_CACHE(c, k, i);
83 			size_t bucket = PTR_BUCKET_NR(c, k, i);
84 			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
85 
86 			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
87 			    bucket <  ca->sb.first_bucket ||
88 			    bucket >= ca->sb.nbuckets)
89 				return true;
90 		}
91 
92 	return false;
93 }
94 
95 bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
96 {
97 	char buf[80];
98 
99 	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
100 		goto bad;
101 
102 	if (__ptr_invalid(c, k))
103 		goto bad;
104 
105 	return false;
106 bad:
107 	bch_bkey_to_text(buf, sizeof(buf), k);
108 	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
109 	return true;
110 }
111 
112 bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
113 {
114 	char buf[80];
115 
116 	if (!KEY_SIZE(k))
117 		return true;
118 
119 	if (KEY_SIZE(k) > KEY_OFFSET(k))
120 		goto bad;
121 
122 	if (__ptr_invalid(c, k))
123 		goto bad;
124 
125 	return false;
126 bad:
127 	bch_bkey_to_text(buf, sizeof(buf), k);
128 	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
129 	return true;
130 }
131 
132 static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
133 				     unsigned ptr)
134 {
135 	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
136 	char buf[80];
137 
138 	if (mutex_trylock(&b->c->bucket_lock)) {
139 		if (b->level) {
140 			if (KEY_DIRTY(k) ||
141 			    g->prio != BTREE_PRIO ||
142 			    (b->c->gc_mark_valid &&
143 			     GC_MARK(g) != GC_MARK_METADATA))
144 				goto err;
145 
146 		} else {
147 			if (g->prio == BTREE_PRIO)
148 				goto err;
149 
150 			if (KEY_DIRTY(k) &&
151 			    b->c->gc_mark_valid &&
152 			    GC_MARK(g) != GC_MARK_DIRTY)
153 				goto err;
154 		}
155 		mutex_unlock(&b->c->bucket_lock);
156 	}
157 
158 	return false;
159 err:
160 	mutex_unlock(&b->c->bucket_lock);
161 	bch_bkey_to_text(buf, sizeof(buf), k);
162 	btree_bug(b,
163 "inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
164 		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
165 		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
166 	return true;
167 }
168 
169 bool bch_ptr_bad(struct btree *b, const struct bkey *k)
170 {
171 	struct bucket *g;
172 	unsigned i, stale;
173 
174 	if (!bkey_cmp(k, &ZERO_KEY) ||
175 	    !KEY_PTRS(k) ||
176 	    bch_ptr_invalid(b, k))
177 		return true;
178 
179 	for (i = 0; i < KEY_PTRS(k); i++)
180 		if (!ptr_available(b->c, k, i))
181 			return true;
182 
183 	if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
184 		return false;
185 
186 	for (i = 0; i < KEY_PTRS(k); i++) {
187 		g = PTR_BUCKET(b->c, k, i);
188 		stale = ptr_stale(b->c, k, i);
189 
190 		btree_bug_on(stale > 96, b,
191 			     "key too stale: %i, need_gc %u",
192 			     stale, b->c->need_gc);
193 
194 		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
195 			     b, "stale dirty pointer");
196 
197 		if (stale)
198 			return true;
199 
200 		if (expensive_debug_checks(b->c) &&
201 		    ptr_bad_expensive_checks(b, k, i))
202 			return true;
203 	}
204 
205 	return false;
206 }
207 
208 /* Key/pointer manipulation */
209 
210 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
211 			      unsigned i)
212 {
213 	BUG_ON(i > KEY_PTRS(src));
214 
215 	/* Only copy the header, key, and one pointer. */
216 	memcpy(dest, src, 2 * sizeof(uint64_t));
217 	dest->ptr[0] = src->ptr[i];
218 	SET_KEY_PTRS(dest, 1);
219 	/* We didn't copy the checksum so clear that bit. */
220 	SET_KEY_CSUM(dest, 0);
221 }
222 
223 bool __bch_cut_front(const struct bkey *where, struct bkey *k)
224 {
225 	unsigned i, len = 0;
226 
227 	if (bkey_cmp(where, &START_KEY(k)) <= 0)
228 		return false;
229 
230 	if (bkey_cmp(where, k) < 0)
231 		len = KEY_OFFSET(k) - KEY_OFFSET(where);
232 	else
233 		bkey_copy_key(k, where);
234 
235 	for (i = 0; i < KEY_PTRS(k); i++)
236 		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);
237 
238 	BUG_ON(len > KEY_SIZE(k));
239 	SET_KEY_SIZE(k, len);
240 	return true;
241 }
242 
243 bool __bch_cut_back(const struct bkey *where, struct bkey *k)
244 {
245 	unsigned len = 0;
246 
247 	if (bkey_cmp(where, k) >= 0)
248 		return false;
249 
250 	BUG_ON(KEY_INODE(where) != KEY_INODE(k));
251 
252 	if (bkey_cmp(where, &START_KEY(k)) > 0)
253 		len = KEY_OFFSET(where) - KEY_START(k);
254 
255 	bkey_copy_key(k, where);
256 
257 	BUG_ON(len > KEY_SIZE(k));
258 	SET_KEY_SIZE(k, len);
259 	return true;
260 }
261 
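/*
 * When KEY_CSUM() is set, the checksum lives in the u64 slot immediately
 * after the pointers (ptr[KEY_PTRS(k)]). Merging two extents just adds the
 * two checksums, keeping the result within 63 bits.
 */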
262 static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
263 {
264 	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
265 		~((uint64_t)1 << 63);
266 }
267 
268 /* Tries to merge l and r: l should be lower than r.
269  * Returns true if we were able to merge; if we did, l will be the merged
270  * key and r will be untouched.
271  */
272 bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
273 {
274 	unsigned i;
275 
276 	if (key_merging_disabled(b->c))
277 		return false;
278 
279 	if (KEY_PTRS(l) != KEY_PTRS(r) ||
280 	    KEY_DIRTY(l) != KEY_DIRTY(r) ||
281 	    bkey_cmp(l, &START_KEY(r)))
282 		return false;
283 
284 	for (i = 0; i < KEY_PTRS(l); i++)
285 		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
286 		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
287 			return false;
288 
289 	/* Keys with no pointers aren't restricted to one bucket and could
290 	 * overflow KEY_SIZE
291 	 */
292 	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
293 		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
294 		SET_KEY_SIZE(l, USHRT_MAX);
295 
296 		bch_cut_front(l, r);
297 		return false;
298 	}
299 
300 	if (KEY_CSUM(l)) {
301 		if (KEY_CSUM(r))
302 			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
303 		else
304 			SET_KEY_CSUM(l, 0);
305 	}
306 
307 	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
308 	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
309 
310 	return true;
311 }
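
/*
 * A worked example (illustrative numbers, not taken from real data): bcache
 * extent keys are indexed by their end offset, so an extent l with
 * KEY_OFFSET(l) == 1024 and KEY_SIZE(l) == 512 covers sectors [512, 1024),
 * and an extent r with KEY_OFFSET(r) == 1536 and KEY_SIZE(r) == 512 covers
 * [1024, 1536). They can merge when r's data physically follows l's in the
 * same bucket, i.e. PTR_OFFSET(r, i) == PTR_OFFSET(l, i) + KEY_SIZE(l) for
 * each pointer; the merged l then has KEY_OFFSET == 1536, KEY_SIZE == 1024.
 */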
312 
313 /* Binary tree stuff for auxiliary search trees */
314 
315 static unsigned inorder_next(unsigned j, unsigned size)
316 {
317 	if (j * 2 + 1 < size) {
318 		j = j * 2 + 1;
319 
320 		while (j * 2 < size)
321 			j *= 2;
322 	} else
323 		j >>= ffz(j) + 1;
324 
325 	return j;
326 }
327 
328 static unsigned inorder_prev(unsigned j, unsigned size)
329 {
330 	if (j * 2 < size) {
331 		j = j * 2;
332 
333 		while (j * 2 + 1 < size)
334 			j = j * 2 + 1;
335 	} else
336 		j >>= ffs(j);
337 
338 	return j;
339 }
340 
341 /* I have no idea why this code works... and I'm the one who wrote it
342  *
343  * However, I do know what it does:
344  * Given a binary tree constructed in an array (i.e. how you normally implement
345  * a heap), it converts a node in the tree - referenced by array index - to the
346  * index it would have if you did an inorder traversal.
347  *
348  * Also tested for every j and size, up to sizes somewhere around 6 million.
349  *
350  * The binary tree starts at array index 1, not 0
351  * extra is a function of size:
352  *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
353  */
354 static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
355 {
356 	unsigned b = fls(j);
357 	unsigned shift = fls(size - 1) - b;
358 
359 	j  ^= 1U << (b - 1);
360 	j <<= 1;
361 	j  |= 1;
362 	j <<= shift;
363 
364 	if (j > extra)
365 		j -= (j - extra) >> 1;
366 
367 	return j;
368 }
369 
370 static unsigned to_inorder(unsigned j, struct bset_tree *t)
371 {
372 	return __to_inorder(j, t->size, t->extra);
373 }
374 
375 static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
376 {
377 	unsigned shift;
378 
379 	if (j > extra)
380 		j += j - extra;
381 
382 	shift = ffs(j);
383 
384 	j >>= shift;
385 	j  |= roundup_pow_of_two(size) >> shift;
386 
387 	return j;
388 }
389 
390 static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
391 {
392 	return __inorder_to_tree(j, t->size, t->extra);
393 }
394 
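/*
 * A small worked example (illustrative, not from the test below): for
 * size == 7 the tree has nodes 1..6 and extra == (7 - 4) << 1 == 6, and
 * __to_inorder() maps tree index -> inorder index as
 *
 *	1 -> 4,  2 -> 2,  3 -> 6,  4 -> 1,  5 -> 3,  6 -> 5
 *
 * which is exactly the inorder traversal 4, 2, 5, 1, 6, 3 of the heap-ordered
 * array. __inorder_to_tree() is the inverse mapping.
 */
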
395 #if 0
396 void inorder_test(void)
397 {
398 	unsigned long done = 0;
399 	ktime_t start = ktime_get();
400 
401 	for (unsigned size = 2;
402 	     size < 65536000;
403 	     size++) {
404 		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
405 		unsigned i = 1, j = rounddown_pow_of_two(size - 1);
406 
407 		if (!(size % 4096))
408 			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
409 			       done / ktime_us_delta(ktime_get(), start));
410 
411 		while (1) {
412 			if (__inorder_to_tree(i, size, extra) != j)
413 				panic("size %10u j %10u i %10u", size, j, i);
414 
415 			if (__to_inorder(j, size, extra) != i)
416 				panic("size %10u j %10u i %10u", size, j, i);
417 
418 			if (j == rounddown_pow_of_two(size) - 1)
419 				break;
420 
421 			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);
422 
423 			j = inorder_next(j, size);
424 			i++;
425 		}
426 
427 		done += size - 1;
428 	}
429 }
430 #endif
431 
432 /*
433  * Cacheline/offset <-> bkey pointer arithmetic:
434  *
435  * t->tree is a binary search tree in an array; each node corresponds to a key
436  * in one cacheline in t->set (BSET_CACHELINE bytes).
437  *
438  * This means we don't have to store the full index of the key that a node in
439  * the binary tree points to; to_inorder() gives us the cacheline, and then
440  * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
441  *
442  * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
443  * make this work.
444  *
445  * To construct the bfloat for an arbitrary key we need to know what the key
446  * immediately preceding it is: we have to check if the two keys differ in the
447  * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
448  * of the previous key so we can walk backwards to it from t->tree[j]'s key.
449  */
450 
451 static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
452 				      unsigned offset)
453 {
454 	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
455 }
456 
457 static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
458 {
459 	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
460 }
461 
462 static unsigned bkey_to_cacheline_offset(struct bkey *k)
463 {
464 	return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
465 }
466 
467 static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
468 {
469 	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
470 }
471 
472 static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
473 {
474 	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
475 }
476 
477 /*
478  * For the write set - the one we're currently inserting keys into - we don't
479  * maintain a full search tree; we just keep a simple lookup table in t->prev.
480  */
481 static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
482 {
483 	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
484 }
485 
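/*
 * Treat (high, low) as a single 128-bit value and return the 64 bits starting
 * at bit position `shift' (0..63) - i.e. a 128-bit shift right by `shift',
 * keeping the low 64 bits of the result.
 */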
486 static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
487 {
488 	low >>= shift;
489 	low  |= (high << 1) << (63U - shift);
490 	return low;
491 }
492 
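/*
 * Extract the BKEY_MANTISSA_BITS wide slice of the key starting at bit
 * f->exponent, counting up from bit 0 of the key's low word into its high
 * word; this is the approximate value the auxiliary search tree compares
 * against.
 */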
493 static inline unsigned bfloat_mantissa(const struct bkey *k,
494 				       struct bkey_float *f)
495 {
496 	const uint64_t *p = &k->low - (f->exponent >> 6);
497 	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
498 }
499 
500 static void make_bfloat(struct bset_tree *t, unsigned j)
501 {
502 	struct bkey_float *f = &t->tree[j];
503 	struct bkey *m = tree_to_bkey(t, j);
504 	struct bkey *p = tree_to_prev_bkey(t, j);
505 
506 	struct bkey *l = is_power_of_2(j)
507 		? t->data->start
508 		: tree_to_prev_bkey(t, j >> ffs(j));
509 
510 	struct bkey *r = is_power_of_2(j + 1)
511 		? node(t->data, t->data->keys - bkey_u64s(&t->end))
512 		: tree_to_bkey(t, j >> (ffz(j) + 1));
513 
514 	BUG_ON(m < l || m > r);
515 	BUG_ON(bkey_next(p) != m);
516 
517 	if (KEY_INODE(l) != KEY_INODE(r))
518 		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
519 	else
520 		f->exponent = fls64(r->low ^ l->low);
521 
522 	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);
523 
524 	/*
525 	 * Setting f->exponent = 127 flags this node as failed, and causes the
526 	 * lookup code to fall back to comparing against the original key.
527 	 */
528 
529 	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
530 		f->mantissa = bfloat_mantissa(m, f) - 1;
531 	else
532 		f->exponent = 127;
533 }
534 
535 static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
536 {
537 	if (t != b->sets) {
538 		unsigned j = roundup(t[-1].size,
539 				     64 / sizeof(struct bkey_float));
540 
541 		t->tree = t[-1].tree + j;
542 		t->prev = t[-1].prev + j;
543 	}
544 
545 	while (t < b->sets + MAX_BSETS)
546 		t++->size = 0;
547 }
548 
549 static void bset_build_unwritten_tree(struct btree *b)
550 {
551 	struct bset_tree *t = b->sets + b->nsets;
552 
553 	bset_alloc_tree(b, t);
554 
555 	if (t->tree != b->sets->tree + bset_tree_space(b)) {
556 		t->prev[0] = bkey_to_cacheline_offset(t->data->start);
557 		t->size = 1;
558 	}
559 }
560 
561 static void bset_build_written_tree(struct btree *b)
562 {
563 	struct bset_tree *t = b->sets + b->nsets;
564 	struct bkey *k = t->data->start;
565 	unsigned j, cacheline = 1;
566 
567 	bset_alloc_tree(b, t);
568 
569 	t->size = min_t(unsigned,
570 			bkey_to_cacheline(t, end(t->data)),
571 			b->sets->tree + bset_tree_space(b) - t->tree);
572 
573 	if (t->size < 2) {
574 		t->size = 0;
575 		return;
576 	}
577 
578 	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
579 
580 	/* First we figure out where the first key in each cacheline is */
581 	for (j = inorder_next(0, t->size);
582 	     j;
583 	     j = inorder_next(j, t->size)) {
584 		while (bkey_to_cacheline(t, k) != cacheline)
585 			k = bkey_next(k);
586 
587 		t->prev[j] = bkey_u64s(k);
588 		k = bkey_next(k);
589 		cacheline++;
590 		t->tree[j].m = bkey_to_cacheline_offset(k);
591 	}
592 
593 	while (bkey_next(k) != end(t->data))
594 		k = bkey_next(k);
595 
596 	t->end = *k;
597 
598 	/* Then we build the tree */
599 	for (j = inorder_next(0, t->size);
600 	     j;
601 	     j = inorder_next(j, t->size))
602 		make_bfloat(t, j);
603 }
604 
605 void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
606 {
607 	struct bset_tree *t;
608 	unsigned inorder, j = 1;
609 
610 	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
611 		if (k < end(t->data))
612 			goto found_set;
613 
614 	BUG();
615 found_set:
616 	if (!t->size || !bset_written(b, t))
617 		return;
618 
619 	inorder = bkey_to_cacheline(t, k);
620 
621 	if (k == t->data->start)
622 		goto fix_left;
623 
624 	if (bkey_next(k) == end(t->data)) {
625 		t->end = *k;
626 		goto fix_right;
627 	}
628 
629 	j = inorder_to_tree(inorder, t);
630 
631 	if (j &&
632 	    j < t->size &&
633 	    k == tree_to_bkey(t, j))
634 fix_left:	do {
635 			make_bfloat(t, j);
636 			j = j * 2;
637 		} while (j < t->size);
638 
639 	j = inorder_to_tree(inorder + 1, t);
640 
641 	if (j &&
642 	    j < t->size &&
643 	    k == tree_to_prev_bkey(t, j))
644 fix_right:	do {
645 			make_bfloat(t, j);
646 			j = j * 2 + 1;
647 		} while (j < t->size);
648 }
649 
650 void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
651 {
652 	struct bset_tree *t = &b->sets[b->nsets];
653 	unsigned shift = bkey_u64s(k);
654 	unsigned j = bkey_to_cacheline(t, k);
655 
656 	/* We're getting called from btree_split() or btree_gc; just bail out */
657 	if (!t->size)
658 		return;
659 
660 	/* k is the key we just inserted; we need to find the entry in the
661 	 * lookup table for the first key that is strictly greater than k:
662 	 * it's either k's cacheline or the next one
663 	 */
664 	if (j < t->size &&
665 	    table_to_bkey(t, j) <= k)
666 		j++;
667 
668 	/* Adjust all the lookup table entries, and find a new key for any that
669 	 * have gotten too big
670 	 */
671 	for (; j < t->size; j++) {
672 		t->prev[j] += shift;
673 
674 		if (t->prev[j] > 7) {
675 			k = table_to_bkey(t, j - 1);
676 
677 			while (k < cacheline_to_bkey(t, j, 0))
678 				k = bkey_next(k);
679 
680 			t->prev[j] = bkey_to_cacheline_offset(k);
681 		}
682 	}
683 
684 	if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
685 		return;
686 
687 	/* Possibly add a new entry to the end of the lookup table */
688 
689 	for (k = table_to_bkey(t, t->size - 1);
690 	     k != end(t->data);
691 	     k = bkey_next(k))
692 		if (t->size == bkey_to_cacheline(t, k)) {
693 			t->prev[t->size] = bkey_to_cacheline_offset(k);
694 			t->size++;
695 		}
696 }
697 
698 void bch_bset_init_next(struct btree *b)
699 {
700 	struct bset *i = write_block(b);
701 
702 	if (i != b->sets[0].data) {
703 		b->sets[++b->nsets].data = i;
704 		i->seq = b->sets[0].data->seq;
705 	} else
706 		get_random_bytes(&i->seq, sizeof(uint64_t));
707 
708 	i->magic	= bset_magic(&b->c->sb);
709 	i->version	= 0;
710 	i->keys		= 0;
711 
712 	bset_build_unwritten_tree(b);
713 }
714 
715 struct bset_search_iter {
716 	struct bkey *l, *r;
717 };
718 
719 static struct bset_search_iter bset_search_write_set(struct btree *b,
720 						     struct bset_tree *t,
721 						     const struct bkey *search)
722 {
723 	unsigned li = 0, ri = t->size;
724 
725 	BUG_ON(!b->nsets &&
726 	       t->size < bkey_to_cacheline(t, end(t->data)));
727 
728 	while (li + 1 != ri) {
729 		unsigned m = (li + ri) >> 1;
730 
731 		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
732 			ri = m;
733 		else
734 			li = m;
735 	}
736 
737 	return (struct bset_search_iter) {
738 		table_to_bkey(t, li),
739 		ri < t->size ? table_to_bkey(t, ri) : end(t->data)
740 	};
741 }
742 
743 static struct bset_search_iter bset_search_tree(struct btree *b,
744 						struct bset_tree *t,
745 						const struct bkey *search)
746 {
747 	struct bkey *l, *r;
748 	struct bkey_float *f;
749 	unsigned inorder, j, n = 1;
750 
751 	do {
752 		unsigned p = n << 4;
753 		p &= ((int) (p - t->size)) >> 31;
754 
755 		prefetch(&t->tree[p]);
756 
757 		j = n;
758 		f = &t->tree[j];
759 
760 		/*
761 		 * n = (f->mantissa > bfloat_mantissa())
762 		 *	? j * 2
763 		 *	: j * 2 + 1;
764 		 *
765 		 * We need to subtract 1 from f->mantissa for the sign bit trick
766 		 * to work - that's done in make_bfloat()
767 		 */
768 		if (likely(f->exponent != 127))
769 			n = j * 2 + (((unsigned)
770 				      (f->mantissa -
771 				       bfloat_mantissa(search, f))) >> 31);
772 		else
773 			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
774 				? j * 2
775 				: j * 2 + 1;
776 	} while (n < t->size);
777 
778 	inorder = to_inorder(j, t);
779 
780 	/*
781 	 * n would have been the node we recursed to - the low bit tells us if
782 	 * we recursed left or recursed right.
783 	 */
784 	if (n & 1) {
785 		l = cacheline_to_bkey(t, inorder, f->m);
786 
787 		if (++inorder != t->size) {
788 			f = &t->tree[inorder_next(j, t->size)];
789 			r = cacheline_to_bkey(t, inorder, f->m);
790 		} else
791 			r = end(t->data);
792 	} else {
793 		r = cacheline_to_bkey(t, inorder, f->m);
794 
795 		if (--inorder) {
796 			f = &t->tree[inorder_prev(j, t->size)];
797 			l = cacheline_to_bkey(t, inorder, f->m);
798 		} else
799 			l = t->data->start;
800 	}
801 
802 	return (struct bset_search_iter) {l, r};
803 }
804 
805 struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
806 			       const struct bkey *search)
807 {
808 	struct bset_search_iter i;
809 
810 	/*
811 	 * First we search for a cacheline, and then we do a linear search
812 	 * within that cacheline.
813 	 *
814 	 * To search for the cacheline, there are three different possibilities:
815 	 *  * The set is too small to have a search tree, so we just do a linear
816 	 *    search over the whole set.
817 	 *  * The set is the one we're currently inserting into; keeping a full
818 	 *    auxiliary search tree up to date would be too expensive, so we
819 	 *    use a much simpler lookup table to do a binary search -
820 	 *    bset_search_write_set().
821 	 *  * Or we use the auxiliary search tree we constructed earlier -
822 	 *    bset_search_tree()
823 	 */
824 
825 	if (unlikely(!t->size)) {
826 		i.l = t->data->start;
827 		i.r = end(t->data);
828 	} else if (bset_written(b, t)) {
829 		/*
830 		 * Each node in the auxiliary search tree covers a certain range
831 		 * of bits, and keys above and below the set it covers might
832 		 * differ outside those bits - so we have to special case the
833 		 * start and end - handle that here:
834 		 */
835 
836 		if (unlikely(bkey_cmp(search, &t->end) >= 0))
837 			return end(t->data);
838 
839 		if (unlikely(bkey_cmp(search, t->data->start) < 0))
840 			return t->data->start;
841 
842 		i = bset_search_tree(b, t, search);
843 	} else
844 		i = bset_search_write_set(b, t, search);
845 
846 	if (expensive_debug_checks(b->c)) {
847 		BUG_ON(bset_written(b, t) &&
848 		       i.l != t->data->start &&
849 		       bkey_cmp(tree_to_prev_bkey(t,
850 			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
851 				search) > 0);
852 
853 		BUG_ON(i.r != end(t->data) &&
854 		       bkey_cmp(i.r, search) <= 0);
855 	}
856 
857 	while (likely(i.l != i.r) &&
858 	       bkey_cmp(i.l, search) <= 0)
859 		i.l = bkey_next(i.l);
860 
861 	return i.l;
862 }
863 
864 /* Btree iterator */
865 
866 /*
867  * Returns true if l > r - unless l == r, in which case returns true if l is
868  * older than r.
869  *
870  * Necessary for btree_sort_fixup() - if there are multiple keys that compare
871  * equal in different sets, we have to process them newest to oldest.
872  */
873 static inline bool btree_iter_cmp(struct btree_iter_set l,
874 				  struct btree_iter_set r)
875 {
876 	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
877 
878 	return c ? c > 0 : l.k < r.k;
879 }
880 
881 static inline bool btree_iter_end(struct btree_iter *iter)
882 {
883 	return !iter->used;
884 }
885 
886 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
887 			 struct bkey *end)
888 {
889 	if (k != end)
890 		BUG_ON(!heap_add(iter,
891 				 ((struct btree_iter_set) { k, end }),
892 				 btree_iter_cmp));
893 }
894 
895 struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
896 				   struct bkey *search, struct bset_tree *start)
897 {
898 	struct bkey *ret = NULL;
899 	iter->size = ARRAY_SIZE(iter->data);
900 	iter->used = 0;
901 
902 #ifdef CONFIG_BCACHE_DEBUG
903 	iter->b = b;
904 #endif
905 
906 	for (; start <= &b->sets[b->nsets]; start++) {
907 		ret = bch_bset_search(b, start, search);
908 		bch_btree_iter_push(iter, ret, end(start->data));
909 	}
910 
911 	return ret;
912 }
913 
914 struct bkey *bch_btree_iter_next(struct btree_iter *iter)
915 {
916 	struct btree_iter_set unused;
917 	struct bkey *ret = NULL;
918 
919 	if (!btree_iter_end(iter)) {
920 		bch_btree_iter_next_check(iter);
921 
922 		ret = iter->data->k;
923 		iter->data->k = bkey_next(iter->data->k);
924 
925 		if (iter->data->k > iter->data->end) {
926 			WARN_ONCE(1, "bset was corrupt!\n");
927 			iter->data->k = iter->data->end;
928 		}
929 
930 		if (iter->data->k == iter->data->end)
931 			heap_pop(iter, unused, btree_iter_cmp);
932 		else
933 			heap_sift(iter, 0, btree_iter_cmp);
934 	}
935 
936 	return ret;
937 }
938 
939 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
940 					struct btree *b, ptr_filter_fn fn)
941 {
942 	struct bkey *ret;
943 
944 	do {
945 		ret = bch_btree_iter_next(iter);
946 	} while (ret && fn(b, ret));
947 
948 	return ret;
949 }
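
/*
 * Rough usage sketch: walking every live key in a btree node merges all of
 * its bsets on the fly. do_something_with() is only a hypothetical consumer.
 */
#if 0
static void example_walk_node(struct btree *b)
{
	struct btree_iter iter;
	struct bkey *k;

	bch_btree_iter_init(b, &iter, NULL);

	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
		do_something_with(k);		/* hypothetical consumer */
}
#endif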
950 
951 /* Mergesort */
952 
953 static void sort_key_next(struct btree_iter *iter,
954 			  struct btree_iter_set *i)
955 {
956 	i->k = bkey_next(i->k);
957 
958 	if (i->k == i->end)
959 		*i = iter->data[--iter->used];
960 }
961 
962 static struct bkey *btree_sort_fixup(struct btree_iter *iter, struct bkey *tmp)
963 {
964 	while (iter->used > 1) {
965 		struct btree_iter_set *top = iter->data, *i = top + 1;
966 
967 		if (iter->used > 2 &&
968 		    btree_iter_cmp(i[0], i[1]))
969 			i++;
970 
971 		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
972 			break;
973 
974 		if (!KEY_SIZE(i->k)) {
975 			sort_key_next(iter, i);
976 			heap_sift(iter, i - top, btree_iter_cmp);
977 			continue;
978 		}
979 
980 		if (top->k > i->k) {
981 			if (bkey_cmp(top->k, i->k) >= 0)
982 				sort_key_next(iter, i);
983 			else
984 				bch_cut_front(top->k, i->k);
985 
986 			heap_sift(iter, i - top, btree_iter_cmp);
987 		} else {
988 			/* can't happen because of comparison func */
989 			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
990 
991 			if (bkey_cmp(i->k, top->k) < 0) {
992 				bkey_copy(tmp, top->k);
993 
994 				bch_cut_back(&START_KEY(i->k), tmp);
995 				bch_cut_front(i->k, top->k);
996 				heap_sift(iter, 0, btree_iter_cmp);
997 
998 				return tmp;
999 			} else {
1000 				bch_cut_back(&START_KEY(i->k), top->k);
1001 			}
1002 		}
1003 	}
1004 
1005 	return NULL;
1006 }
1007 
1008 static void btree_mergesort(struct btree *b, struct bset *out,
1009 			    struct btree_iter *iter,
1010 			    bool fixup, bool remove_stale)
1011 {
1012 	struct bkey *k, *last = NULL;
1013 	BKEY_PADDED(k) tmp;
1014 	bool (*bad)(struct btree *, const struct bkey *) = remove_stale
1015 		? bch_ptr_bad
1016 		: bch_ptr_invalid;
1017 
1018 	while (!btree_iter_end(iter)) {
1019 		if (fixup && !b->level)
1020 			k = btree_sort_fixup(iter, &tmp.k);
1021 		else
1022 			k = NULL;
1023 
1024 		if (!k)
1025 			k = bch_btree_iter_next(iter);
1026 
1027 		if (bad(b, k))
1028 			continue;
1029 
1030 		if (!last) {
1031 			last = out->start;
1032 			bkey_copy(last, k);
1033 		} else if (b->level ||
1034 			   !bch_bkey_try_merge(b, last, k)) {
1035 			last = bkey_next(last);
1036 			bkey_copy(last, k);
1037 		}
1038 	}
1039 
1040 	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
1041 
1042 	pr_debug("sorted %i keys", out->keys);
1043 }
1044 
1045 static void __btree_sort(struct btree *b, struct btree_iter *iter,
1046 			 unsigned start, unsigned order, bool fixup)
1047 {
1048 	uint64_t start_time;
1049 	bool remove_stale = !b->written;
1050 	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
1051 						     order);
1052 	if (!out) {
1053 		mutex_lock(&b->c->sort_lock);
1054 		out = b->c->sort;
1055 		order = ilog2(bucket_pages(b->c));
1056 	}
1057 
1058 	start_time = local_clock();
1059 
1060 	btree_mergesort(b, out, iter, fixup, remove_stale);
1061 	b->nsets = start;
1062 
1063 	if (!fixup && !start && b->written)
1064 		bch_btree_verify(b, out);
1065 
1066 	if (!start && order == b->page_order) {
1067 		/*
1068 		 * Our temporary buffer is the same size as the btree node's
1069 		 * buffer, we can just swap buffers instead of doing a big
1070 		 * memcpy()
1071 		 */
1072 
1073 		out->magic	= bset_magic(&b->c->sb);
1074 		out->seq	= b->sets[0].data->seq;
1075 		out->version	= b->sets[0].data->version;
1076 		swap(out, b->sets[0].data);
1077 
1078 		if (b->c->sort == b->sets[0].data)
1079 			b->c->sort = out;
1080 	} else {
1081 		b->sets[start].data->keys = out->keys;
1082 		memcpy(b->sets[start].data->start, out->start,
1083 		       (void *) end(out) - (void *) out->start);
1084 	}
1085 
1086 	if (out == b->c->sort)
1087 		mutex_unlock(&b->c->sort_lock);
1088 	else
1089 		free_pages((unsigned long) out, order);
1090 
1091 	if (b->written)
1092 		bset_build_written_tree(b);
1093 
1094 	if (!start)
1095 		bch_time_stats_update(&b->c->sort_time, start_time);
1096 }
1097 
1098 void bch_btree_sort_partial(struct btree *b, unsigned start)
1099 {
1100 	size_t order = b->page_order, keys = 0;
1101 	struct btree_iter iter;
1102 	int oldsize = bch_count_data(b);
1103 
1104 	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
1105 
1106 	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
1107 	       (b->sets[b->nsets].size || b->nsets));
1108 
1109 
1110 	if (start) {
1111 		unsigned i;
1112 
1113 		for (i = start; i <= b->nsets; i++)
1114 			keys += b->sets[i].data->keys;
1115 
1116 		order = roundup_pow_of_two(__set_bytes(b->sets->data,
1117 						       keys)) / PAGE_SIZE;
1118 		if (order)
1119 			order = ilog2(order);
1120 	}
1121 
1122 	__btree_sort(b, &iter, start, order, false);
1123 
1124 	EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
1125 }
1126 
1127 void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
1128 {
1129 	BUG_ON(!b->written);
1130 	__btree_sort(b, iter, 0, b->page_order, true);
1131 }
1132 
1133 void bch_btree_sort_into(struct btree *b, struct btree *new)
1134 {
1135 	uint64_t start_time = local_clock();
1136 
1137 	struct btree_iter iter;
1138 	bch_btree_iter_init(b, &iter, NULL);
1139 
1140 	btree_mergesort(b, new->sets->data, &iter, false, true);
1141 
1142 	bch_time_stats_update(&b->c->sort_time, start_time);
1143 
1144 	bkey_copy_key(&new->key, &b->key);
1145 	new->sets->size = 0;
1146 }
1147 
1148 #define SORT_CRIT	(4096 / sizeof(uint64_t))
1149 
1150 void bch_btree_sort_lazy(struct btree *b)
1151 {
1152 	unsigned crit = SORT_CRIT;
1153 	int i;
1154 
1155 	/* Don't sort if nothing to do */
1156 	if (!b->nsets)
1157 		goto out;
1158 
1159 	/* If not a leaf node, always sort */
1160 	if (b->level) {
1161 		bch_btree_sort(b);
1162 		return;
1163 	}
1164 
1165 	for (i = b->nsets - 1; i >= 0; --i) {
1166 		crit *= b->c->sort_crit_factor;
1167 
1168 		if (b->sets[i].data->keys < crit) {
1169 			bch_btree_sort_partial(b, i);
1170 			return;
1171 		}
1172 	}
1173 
1174 	/* Sort if we'd overflow */
1175 	if (b->nsets + 1 == MAX_BSETS) {
1176 		bch_btree_sort(b);
1177 		return;
1178 	}
1179 
1180 out:
1181 	bset_build_written_tree(b);
1182 }
1183 
1184 /* Sysfs stuff */
1185 
1186 struct bset_stats {
1187 	struct btree_op op;
1188 	size_t nodes;
1189 	size_t sets_written, sets_unwritten;
1190 	size_t bytes_written, bytes_unwritten;
1191 	size_t floats, failed;
1192 };
1193 
1194 static int btree_bset_stats(struct btree_op *op, struct btree *b)
1195 {
1196 	struct bset_stats *stats = container_of(op, struct bset_stats, op);
1197 	unsigned i;
1198 
1199 	stats->nodes++;
1200 
1201 	for (i = 0; i <= b->nsets; i++) {
1202 		struct bset_tree *t = &b->sets[i];
1203 		size_t bytes = t->data->keys * sizeof(uint64_t);
1204 		size_t j;
1205 
1206 		if (bset_written(b, t)) {
1207 			stats->sets_written++;
1208 			stats->bytes_written += bytes;
1209 
1210 			stats->floats += t->size - 1;
1211 
1212 			for (j = 1; j < t->size; j++)
1213 				if (t->tree[j].exponent == 127)
1214 					stats->failed++;
1215 		} else {
1216 			stats->sets_unwritten++;
1217 			stats->bytes_unwritten += bytes;
1218 		}
1219 	}
1220 
1221 	return MAP_CONTINUE;
1222 }
1223 
1224 int bch_bset_print_stats(struct cache_set *c, char *buf)
1225 {
1226 	struct bset_stats t;
1227 	int ret;
1228 
1229 	memset(&t, 0, sizeof(struct bset_stats));
1230 	bch_btree_op_init(&t.op, -1);
1231 
1232 	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
1233 	if (ret < 0)
1234 		return ret;
1235 
1236 	return snprintf(buf, PAGE_SIZE,
1237 			"btree nodes:		%zu\n"
1238 			"written sets:		%zu\n"
1239 			"unwritten sets:		%zu\n"
1240 			"written key bytes:	%zu\n"
1241 			"unwritten key bytes:	%zu\n"
1242 			"floats:			%zu\n"
1243 			"failed:			%zu\n",
1244 			t.nodes,
1245 			t.sets_written, t.sets_unwritten,
1246 			t.bytes_written, t.bytes_unwritten,
1247 			t.floats, t.failed);
1248 }
1249