xref: /linux/drivers/md/bcache/bset.c (revision 905e46acd3272d04566fec49afbd7ad9e2ed9ae3)
1 /*
2  * Code for working with individual keys, and sorted sets of keys within a
3  * btree node
4  *
5  * Copyright 2012 Google, Inc.
6  */
7 
8 #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
9 
10 #include "util.h"
11 #include "bset.h"
12 
13 #include <linux/console.h>
14 #include <linux/sched/clock.h>
15 #include <linux/random.h>
16 #include <linux/prefetch.h>
17 
18 #ifdef CONFIG_BCACHE_DEBUG
19 
20 void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
21 {
22 	struct bkey *k, *next;
23 
24 	for (k = i->start; k < bset_bkey_last(i); k = next) {
25 		next = bkey_next(k);
26 
27 		printk(KERN_ERR "block %u key %u/%u: ", set,
28 		       (unsigned) ((u64 *) k - i->d), i->keys);
29 
30 		if (b->ops->key_dump)
31 			b->ops->key_dump(b, k);
32 		else
33 			printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
34 
35 		if (next < bset_bkey_last(i) &&
36 		    bkey_cmp(k, b->ops->is_extents ?
37 			     &START_KEY(next) : next) > 0)
38 			printk(KERN_ERR "Key skipped backwards\n");
39 	}
40 }
41 
42 void bch_dump_bucket(struct btree_keys *b)
43 {
44 	unsigned i;
45 
46 	console_lock();
47 	for (i = 0; i <= b->nsets; i++)
48 		bch_dump_bset(b, b->set[i].data,
49 			      bset_sector_offset(b, b->set[i].data));
50 	console_unlock();
51 }
52 
53 int __bch_count_data(struct btree_keys *b)
54 {
55 	unsigned ret = 0;
56 	struct btree_iter iter;
57 	struct bkey *k;
58 
59 	if (b->ops->is_extents)
60 		for_each_key(b, k, &iter)
61 			ret += KEY_SIZE(k);
62 	return ret;
63 }
64 
65 void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
66 {
67 	va_list args;
68 	struct bkey *k, *p = NULL;
69 	struct btree_iter iter;
70 	const char *err;
71 
72 	for_each_key(b, k, &iter) {
73 		if (b->ops->is_extents) {
74 			err = "Keys out of order";
75 			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
76 				goto bug;
77 
78 			if (bch_ptr_invalid(b, k))
79 				continue;
80 
81 			err =  "Overlapping keys";
82 			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
83 				goto bug;
84 		} else {
85 			if (bch_ptr_bad(b, k))
86 				continue;
87 
88 			err = "Duplicate keys";
89 			if (p && !bkey_cmp(p, k))
90 				goto bug;
91 		}
92 		p = k;
93 	}
94 #if 0
95 	err = "Key larger than btree node key";
96 	if (p && bkey_cmp(p, &b->key) > 0)
97 		goto bug;
98 #endif
99 	return;
100 bug:
101 	bch_dump_bucket(b);
102 
103 	va_start(args, fmt);
104 	vprintk(fmt, args);
105 	va_end(args);
106 
107 	panic("bch_check_keys error:  %s:\n", err);
108 }
109 
110 static void bch_btree_iter_next_check(struct btree_iter *iter)
111 {
112 	struct bkey *k = iter->data->k, *next = bkey_next(k);
113 
114 	if (next < iter->data->end &&
115 	    bkey_cmp(k, iter->b->ops->is_extents ?
116 		     &START_KEY(next) : next) > 0) {
117 		bch_dump_bucket(iter->b);
118 		panic("Key skipped backwards\n");
119 	}
120 }
121 
122 #else
123 
124 static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
125 
126 #endif
127 
128 /* Keylists */
129 
130 int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
131 {
132 	size_t oldsize = bch_keylist_nkeys(l);
133 	size_t newsize = oldsize + u64s;
134 	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
135 	uint64_t *new_keys;
136 
137 	newsize = roundup_pow_of_two(newsize);
138 
139 	if (newsize <= KEYLIST_INLINE ||
140 	    roundup_pow_of_two(oldsize) == newsize)
141 		return 0;
142 
143 	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
144 
145 	if (!new_keys)
146 		return -ENOMEM;
147 
148 	if (!old_keys)
149 		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
150 
151 	l->keys_p = new_keys;
152 	l->top_p = new_keys + oldsize;
153 
154 	return 0;
155 }
156 
157 struct bkey *bch_keylist_pop(struct keylist *l)
158 {
159 	struct bkey *k = l->keys;
160 
161 	if (k == l->top)
162 		return NULL;
163 
164 	while (bkey_next(k) != l->top)
165 		k = bkey_next(k);
166 
167 	return l->top = k;
168 }
169 
170 void bch_keylist_pop_front(struct keylist *l)
171 {
172 	l->top_p -= bkey_u64s(l->keys);
173 
174 	memmove(l->keys,
175 		bkey_next(l->keys),
176 		bch_keylist_bytes(l));
177 }
178 
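/*
 * A minimal usage sketch of the keylist helpers above (illustrative only,
 * compiled out): make room for one more key - which may move the list from
 * the inline buffer to the heap - then append it with bch_keylist_add()
 * from bset.h.
 */
#if 0
static int keylist_add_example(struct keylist *l, struct bkey *k)
{
	if (__bch_keylist_realloc(l, bkey_u64s(k)))
		return -ENOMEM;

	bch_keylist_add(l, k);
	return 0;
}
#endif
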
179 /* Key/pointer manipulation */
180 
181 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
182 			      unsigned i)
183 {
184 	BUG_ON(i > KEY_PTRS(src));
185 
186 	/* Only copy the header, key, and one pointer. */
187 	memcpy(dest, src, 2 * sizeof(uint64_t));
188 	dest->ptr[0] = src->ptr[i];
189 	SET_KEY_PTRS(dest, 1);
190 	/* We didn't copy the checksum so clear that bit. */
191 	SET_KEY_CSUM(dest, 0);
192 }
193 
194 bool __bch_cut_front(const struct bkey *where, struct bkey *k)
195 {
196 	unsigned i, len = 0;
197 
198 	if (bkey_cmp(where, &START_KEY(k)) <= 0)
199 		return false;
200 
201 	if (bkey_cmp(where, k) < 0)
202 		len = KEY_OFFSET(k) - KEY_OFFSET(where);
203 	else
204 		bkey_copy_key(k, where);
205 
206 	for (i = 0; i < KEY_PTRS(k); i++)
207 		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);
208 
209 	BUG_ON(len > KEY_SIZE(k));
210 	SET_KEY_SIZE(k, len);
211 	return true;
212 }
213 
214 bool __bch_cut_back(const struct bkey *where, struct bkey *k)
215 {
216 	unsigned len = 0;
217 
218 	if (bkey_cmp(where, k) >= 0)
219 		return false;
220 
221 	BUG_ON(KEY_INODE(where) != KEY_INODE(k));
222 
223 	if (bkey_cmp(where, &START_KEY(k)) > 0)
224 		len = KEY_OFFSET(where) - KEY_START(k);
225 
226 	bkey_copy_key(k, where);
227 
228 	BUG_ON(len > KEY_SIZE(k));
229 	SET_KEY_SIZE(k, len);
230 	return true;
231 }
232 
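/*
 * A worked example of the cut helpers above (illustrative only, compiled
 * out): an extent bkey is addressed by the *end* of the range it covers, so
 * a key with KEY_OFFSET == 16 and KEY_SIZE == 8 covers sectors [8, 16).
 * Cutting its front at offset 12 should leave [12, 16): KEY_SIZE drops to 4
 * and each PTR_OFFSET is advanced past the sectors that were dropped.
 */
#if 0
static void cut_front_example(void)
{
	struct bkey k     = KEY(1, 16, 8);	/* inode 1, sectors [8, 16) */
	struct bkey where = KEY(1, 12, 0);	/* cut point at sector 12 */

	BUG_ON(!__bch_cut_front(&where, &k));
	BUG_ON(KEY_START(&k) != 12 || KEY_SIZE(&k) != 4);
}
#endif
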
233 /* Auxiliary search trees */
234 
235 /* 32 bits total: */
236 #define BKEY_MID_BITS		3
237 #define BKEY_EXPONENT_BITS	7
238 #define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
239 #define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)
240 
241 struct bkey_float {
242 	unsigned	exponent:BKEY_EXPONENT_BITS;
243 	unsigned	m:BKEY_MID_BITS;
244 	unsigned	mantissa:BKEY_MANTISSA_BITS;
245 } __packed;
246 
247 /*
248  * BSET_CACHELINE was originally intended to match the hardware cacheline size -
249  * it used to be 64, but I realized the lookup code would touch slightly less
250  * memory if it was 128.
251  *
252  * It defines the number of bytes (in struct bset) per struct bkey_float in
253  * the auxiliary search tree - when we're done searching the bkey_float tree we
254  * have this many bytes left that we do a linear search over.
255  *
256  * Since (after level 5) every level of the bset_tree is on a new cacheline,
257  * we're touching one fewer cacheline in the bset tree in exchange for one more
258  * cacheline in the linear search - but the linear search might stop before it
259  * gets to the second cacheline.
260  */
261 
262 #define BSET_CACHELINE		128
263 
264 /* Space required for the btree node keys */
265 static inline size_t btree_keys_bytes(struct btree_keys *b)
266 {
267 	return PAGE_SIZE << b->page_order;
268 }
269 
270 static inline size_t btree_keys_cachelines(struct btree_keys *b)
271 {
272 	return btree_keys_bytes(b) / BSET_CACHELINE;
273 }
274 
275 /* Space required for the auxiliary search trees */
276 static inline size_t bset_tree_bytes(struct btree_keys *b)
277 {
278 	return btree_keys_cachelines(b) * sizeof(struct bkey_float);
279 }
280 
281 /* Space required for the prev pointers */
282 static inline size_t bset_prev_bytes(struct btree_keys *b)
283 {
284 	return btree_keys_cachelines(b) * sizeof(uint8_t);
285 }
286 
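/*
 * Worked example of the sizing above (illustrative, assuming 4k pages): with
 * page_order 0, btree_keys_bytes() is 4096, so btree_keys_cachelines() is
 * 4096 / BSET_CACHELINE = 32.  struct bkey_float is 4 bytes, so
 * bset_tree_bytes() is 128 and bset_prev_bytes() is 32 - both well under
 * PAGE_SIZE, so bch_btree_keys_alloc() below gets them from kmalloc() rather
 * than __get_free_pages().
 */
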
287 /* Memory allocation */
288 
289 void bch_btree_keys_free(struct btree_keys *b)
290 {
291 	struct bset_tree *t = b->set;
292 
293 	if (bset_prev_bytes(b) < PAGE_SIZE)
294 		kfree(t->prev);
295 	else
296 		free_pages((unsigned long) t->prev,
297 			   get_order(bset_prev_bytes(b)));
298 
299 	if (bset_tree_bytes(b) < PAGE_SIZE)
300 		kfree(t->tree);
301 	else
302 		free_pages((unsigned long) t->tree,
303 			   get_order(bset_tree_bytes(b)));
304 
305 	free_pages((unsigned long) t->data, b->page_order);
306 
307 	t->prev = NULL;
308 	t->tree = NULL;
309 	t->data = NULL;
310 }
311 EXPORT_SYMBOL(bch_btree_keys_free);
312 
313 int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
314 {
315 	struct bset_tree *t = b->set;
316 
317 	BUG_ON(t->data);
318 
319 	b->page_order = page_order;
320 
321 	t->data = (void *) __get_free_pages(gfp, b->page_order);
322 	if (!t->data)
323 		goto err;
324 
325 	t->tree = bset_tree_bytes(b) < PAGE_SIZE
326 		? kmalloc(bset_tree_bytes(b), gfp)
327 		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
328 	if (!t->tree)
329 		goto err;
330 
331 	t->prev = bset_prev_bytes(b) < PAGE_SIZE
332 		? kmalloc(bset_prev_bytes(b), gfp)
333 		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
334 	if (!t->prev)
335 		goto err;
336 
337 	return 0;
338 err:
339 	bch_btree_keys_free(b);
340 	return -ENOMEM;
341 }
342 EXPORT_SYMBOL(bch_btree_keys_alloc);
343 
344 void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
345 			 bool *expensive_debug_checks)
346 {
347 	unsigned i;
348 
349 	b->ops = ops;
350 	b->expensive_debug_checks = expensive_debug_checks;
351 	b->nsets = 0;
352 	b->last_set_unwritten = 0;
353 
354 	/* XXX: shouldn't be needed */
355 	for (i = 0; i < MAX_BSETS; i++)
356 		b->set[i].size = 0;
357 	/*
358 	 * Second loop starts at 1 because b->set[0].data is the memory we
359 	 * allocated
360 	 */
361 	for (i = 1; i < MAX_BSETS; i++)
362 		b->set[i].data = NULL;
363 }
364 EXPORT_SYMBOL(bch_btree_keys_init);
365 
366 /* Binary tree stuff for auxiliary search trees */
367 
368 static unsigned inorder_next(unsigned j, unsigned size)
369 {
370 	if (j * 2 + 1 < size) {
371 		j = j * 2 + 1;
372 
373 		while (j * 2 < size)
374 			j *= 2;
375 	} else
376 		j >>= ffz(j) + 1;
377 
378 	return j;
379 }
380 
381 static unsigned inorder_prev(unsigned j, unsigned size)
382 {
383 	if (j * 2 < size) {
384 		j = j * 2;
385 
386 		while (j * 2 + 1 < size)
387 			j = j * 2 + 1;
388 	} else
389 		j >>= ffs(j);
390 
391 	return j;
392 }
393 
394 /* I have no idea why this code works... and I'm the one who wrote it
395  *
396  * However, I do know what it does:
397  * Given a binary tree constructed in an array (i.e. how you normally implement
398  * a heap), it converts a node in the tree - referenced by array index - to the
399  * index it would have if you did an inorder traversal.
400  *
401  * Also tested for every j, for every size up to somewhere around 6 million.
402  *
403  * The binary tree starts at array index 1, not 0
404  * extra is a function of size:
405  *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
406  */
407 static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
408 {
409 	unsigned b = fls(j);
410 	unsigned shift = fls(size - 1) - b;
411 
412 	j  ^= 1U << (b - 1);
413 	j <<= 1;
414 	j  |= 1;
415 	j <<= shift;
416 
417 	if (j > extra)
418 		j -= (j - extra) >> 1;
419 
420 	return j;
421 }
422 
423 static unsigned to_inorder(unsigned j, struct bset_tree *t)
424 {
425 	return __to_inorder(j, t->size, t->extra);
426 }
427 
428 static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
429 {
430 	unsigned shift;
431 
432 	if (j > extra)
433 		j += j - extra;
434 
435 	shift = ffs(j);
436 
437 	j >>= shift;
438 	j  |= roundup_pow_of_two(size) >> shift;
439 
440 	return j;
441 }
442 
443 static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
444 {
445 	return __inorder_to_tree(j, t->size, t->extra);
446 }
447 
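/*
 * A worked example of the mapping (illustrative only): for size == 6 the
 * tree holds nodes 1..5 and extra = (6 - rounddown_pow_of_two(5)) << 1 = 4.
 * An inorder walk of the array-encoded tree visits 4, 2, 5, 1, 3, so
 * __to_inorder() maps 1->4, 2->2, 3->5, 4->1, 5->3 and __inorder_to_tree()
 * is its inverse; inorder_test() below checks this exhaustively.
 */
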
448 #if 0
449 void inorder_test(void)
450 {
451 	unsigned long done = 0;
452 	ktime_t start = ktime_get();
453 
454 	for (unsigned size = 2;
455 	     size < 65536000;
456 	     size++) {
457 		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
458 		unsigned i = 1, j = rounddown_pow_of_two(size - 1);
459 
460 		if (!(size % 4096))
461 			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
462 			       done / ktime_us_delta(ktime_get(), start));
463 
464 		while (1) {
465 			if (__inorder_to_tree(i, size, extra) != j)
466 				panic("size %10u j %10u i %10u", size, j, i);
467 
468 			if (__to_inorder(j, size, extra) != i)
469 				panic("size %10u j %10u i %10u", size, j, i);
470 
471 			if (j == rounddown_pow_of_two(size) - 1)
472 				break;
473 
474 			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);
475 
476 			j = inorder_next(j, size);
477 			i++;
478 		}
479 
480 		done += size - 1;
481 	}
482 }
483 #endif
484 
485 /*
486  * Cacheline/offset <-> bkey pointer arithmetic:
487  *
488  * t->tree is a binary search tree in an array; each node corresponds to a key
489  * in one cacheline of t->data (BSET_CACHELINE bytes).
490  *
491  * This means we don't have to store the full index of the key that a node in
492  * the binary tree points to; to_inorder() gives us the cacheline, and then
493  * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
494  *
495  * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
496  * make this work.
497  *
498  * To construct the bfloat for an arbitrary key we need to know what the key
499  * immediately preceding it is: we have to check if the two keys differ in the
500  * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size,
501  * in u64s, of the previous key so we can walk backwards to it from t->tree[j]'s key.
502  */
503 
504 static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
505 				      unsigned offset)
506 {
507 	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
508 }
509 
510 static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
511 {
512 	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
513 }
514 
515 static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
516 					 unsigned cacheline,
517 					 struct bkey *k)
518 {
519 	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
520 }
521 
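/*
 * Concretely (illustrative note): with BSET_CACHELINE == 128, node j of the
 * auxiliary tree refers to the bkey starting at byte offset
 * to_inorder(j, t) * BSET_CACHELINE + t->tree[j].m * 8 from t->data, which
 * is exactly what cacheline_to_bkey() computes for tree_to_bkey().
 */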
522 static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
523 {
524 	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
525 }
526 
527 static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
528 {
529 	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
530 }
531 
532 /*
533  * For the write set - the one we're currently inserting keys into - we don't
534  * maintain a full search tree; we just keep a simple lookup table in t->prev.
535  */
536 static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
537 {
538 	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
539 }
540 
541 static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
542 {
543 	low >>= shift;
544 	low  |= (high << 1) << (63U - shift);
545 	return low;
546 }
547 
548 static inline unsigned bfloat_mantissa(const struct bkey *k,
549 				       struct bkey_float *f)
550 {
551 	const uint64_t *p = &k->low - (f->exponent >> 6);
552 	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
553 }
554 
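/*
 * How the mantissa window works (explanatory note): treating a bkey's
 * (high, low) words as one 128-bit integer, bfloat_mantissa() pulls out
 * BKEY_MANTISSA_BITS bits starting at bit f->exponent - shrd128() does the
 * shift within a word pair and (exponent >> 6) selects whether the window
 * starts in ->low or ->high.  make_bfloat() positions that window so its top
 * bit is (roughly) the highest bit at which the keys bracketing this node
 * differ, so comparing mantissas usually decides which way to recurse.
 */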
555 static void make_bfloat(struct bset_tree *t, unsigned j)
556 {
557 	struct bkey_float *f = &t->tree[j];
558 	struct bkey *m = tree_to_bkey(t, j);
559 	struct bkey *p = tree_to_prev_bkey(t, j);
560 
561 	struct bkey *l = is_power_of_2(j)
562 		? t->data->start
563 		: tree_to_prev_bkey(t, j >> ffs(j));
564 
565 	struct bkey *r = is_power_of_2(j + 1)
566 		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
567 		: tree_to_bkey(t, j >> (ffz(j) + 1));
568 
569 	BUG_ON(m < l || m > r);
570 	BUG_ON(bkey_next(p) != m);
571 
572 	if (KEY_INODE(l) != KEY_INODE(r))
573 		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
574 	else
575 		f->exponent = fls64(r->low ^ l->low);
576 
577 	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);
578 
579 	/*
580 	 * Setting f->exponent = 127 flags this node as failed, and causes the
581 	 * lookup code to fall back to comparing against the original key.
582 	 */
583 
584 	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
585 		f->mantissa = bfloat_mantissa(m, f) - 1;
586 	else
587 		f->exponent = 127;
588 }
589 
590 static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
591 {
592 	if (t != b->set) {
593 		unsigned j = roundup(t[-1].size,
594 				     64 / sizeof(struct bkey_float));
595 
596 		t->tree = t[-1].tree + j;
597 		t->prev = t[-1].prev + j;
598 	}
599 
600 	while (t < b->set + MAX_BSETS)
601 		t++->size = 0;
602 }
603 
604 static void bch_bset_build_unwritten_tree(struct btree_keys *b)
605 {
606 	struct bset_tree *t = bset_tree_last(b);
607 
608 	BUG_ON(b->last_set_unwritten);
609 	b->last_set_unwritten = 1;
610 
611 	bset_alloc_tree(b, t);
612 
613 	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
614 		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
615 		t->size = 1;
616 	}
617 }
618 
619 void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
620 {
621 	if (i != b->set->data) {
622 		b->set[++b->nsets].data = i;
623 		i->seq = b->set->data->seq;
624 	} else
625 		get_random_bytes(&i->seq, sizeof(uint64_t));
626 
627 	i->magic	= magic;
628 	i->version	= 0;
629 	i->keys		= 0;
630 
631 	bch_bset_build_unwritten_tree(b);
632 }
633 EXPORT_SYMBOL(bch_bset_init_next);
634 
635 void bch_bset_build_written_tree(struct btree_keys *b)
636 {
637 	struct bset_tree *t = bset_tree_last(b);
638 	struct bkey *prev = NULL, *k = t->data->start;
639 	unsigned j, cacheline = 1;
640 
641 	b->last_set_unwritten = 0;
642 
643 	bset_alloc_tree(b, t);
644 
645 	t->size = min_t(unsigned,
646 			bkey_to_cacheline(t, bset_bkey_last(t->data)),
647 			b->set->tree + btree_keys_cachelines(b) - t->tree);
648 
649 	if (t->size < 2) {
650 		t->size = 0;
651 		return;
652 	}
653 
654 	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
655 
656 	/* First we figure out where the first key in each cacheline is */
657 	for (j = inorder_next(0, t->size);
658 	     j;
659 	     j = inorder_next(j, t->size)) {
660 		while (bkey_to_cacheline(t, k) < cacheline)
661 			prev = k, k = bkey_next(k);
662 
663 		t->prev[j] = bkey_u64s(prev);
664 		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
665 	}
666 
667 	while (bkey_next(k) != bset_bkey_last(t->data))
668 		k = bkey_next(k);
669 
670 	t->end = *k;
671 
672 	/* Then we build the tree */
673 	for (j = inorder_next(0, t->size);
674 	     j;
675 	     j = inorder_next(j, t->size))
676 		make_bfloat(t, j);
677 }
678 EXPORT_SYMBOL(bch_bset_build_written_tree);
679 
680 /* Insert */
681 
682 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
683 {
684 	struct bset_tree *t;
685 	unsigned inorder, j = 1;
686 
687 	for (t = b->set; t <= bset_tree_last(b); t++)
688 		if (k < bset_bkey_last(t->data))
689 			goto found_set;
690 
691 	BUG();
692 found_set:
693 	if (!t->size || !bset_written(b, t))
694 		return;
695 
696 	inorder = bkey_to_cacheline(t, k);
697 
698 	if (k == t->data->start)
699 		goto fix_left;
700 
701 	if (bkey_next(k) == bset_bkey_last(t->data)) {
702 		t->end = *k;
703 		goto fix_right;
704 	}
705 
706 	j = inorder_to_tree(inorder, t);
707 
708 	if (j &&
709 	    j < t->size &&
710 	    k == tree_to_bkey(t, j))
711 fix_left:	do {
712 			make_bfloat(t, j);
713 			j = j * 2;
714 		} while (j < t->size);
715 
716 	j = inorder_to_tree(inorder + 1, t);
717 
718 	if (j &&
719 	    j < t->size &&
720 	    k == tree_to_prev_bkey(t, j))
721 fix_right:	do {
722 			make_bfloat(t, j);
723 			j = j * 2 + 1;
724 		} while (j < t->size);
725 }
726 EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
727 
728 static void bch_bset_fix_lookup_table(struct btree_keys *b,
729 				      struct bset_tree *t,
730 				      struct bkey *k)
731 {
732 	unsigned shift = bkey_u64s(k);
733 	unsigned j = bkey_to_cacheline(t, k);
734 
735 	/* We're getting called from btree_split() or btree_gc(); just bail out */
736 	if (!t->size)
737 		return;
738 
739 	/* k is the key we just inserted; we need to find the entry in the
740 	 * lookup table for the first key that is strictly greater than k:
741 	 * it's either k's cacheline or the next one
742 	 */
743 	while (j < t->size &&
744 	       table_to_bkey(t, j) <= k)
745 		j++;
746 
747 	/* Adjust all the lookup table entries, and find a new key for any that
748 	 * have gotten too big
749 	 */
750 	for (; j < t->size; j++) {
751 		t->prev[j] += shift;
752 
753 		if (t->prev[j] > 7) {
754 			k = table_to_bkey(t, j - 1);
755 
756 			while (k < cacheline_to_bkey(t, j, 0))
757 				k = bkey_next(k);
758 
759 			t->prev[j] = bkey_to_cacheline_offset(t, j, k);
760 		}
761 	}
762 
763 	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
764 		return;
765 
766 	/* Possibly add a new entry to the end of the lookup table */
767 
768 	for (k = table_to_bkey(t, t->size - 1);
769 	     k != bset_bkey_last(t->data);
770 	     k = bkey_next(k))
771 		if (t->size == bkey_to_cacheline(t, k)) {
772 			t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
773 			t->size++;
774 		}
775 }
776 
777 /*
778  * Tries to merge l and r: l should be lower than r
779  * Returns true if we were able to merge. If we did merge, l will be the merged
780  * key, r will be untouched.
781  */
782 bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
783 {
784 	if (!b->ops->key_merge)
785 		return false;
786 
787 	/*
788 	 * Generic header checks
789 	 * Assumes left and right are in order
790 	 * Left and right must be exactly adjacent: l must end where r starts
791 	 */
792 	if (!bch_bkey_equal_header(l, r) ||
793 	     bkey_cmp(l, &START_KEY(r)))
794 		return false;
795 
796 	return b->ops->key_merge(b, l, r);
797 }
798 EXPORT_SYMBOL(bch_bkey_try_merge);
799 
800 void bch_bset_insert(struct btree_keys *b, struct bkey *where,
801 		     struct bkey *insert)
802 {
803 	struct bset_tree *t = bset_tree_last(b);
804 
805 	BUG_ON(!b->last_set_unwritten);
806 	BUG_ON(bset_byte_offset(b, t->data) +
807 	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
808 	       PAGE_SIZE << b->page_order);
809 
810 	memmove((uint64_t *) where + bkey_u64s(insert),
811 		where,
812 		(void *) bset_bkey_last(t->data) - (void *) where);
813 
814 	t->data->keys += bkey_u64s(insert);
815 	bkey_copy(where, insert);
816 	bch_bset_fix_lookup_table(b, t, where);
817 }
818 EXPORT_SYMBOL(bch_bset_insert);
819 
820 unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
821 			      struct bkey *replace_key)
822 {
823 	unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
824 	struct bset *i = bset_tree_last(b)->data;
825 	struct bkey *m, *prev = NULL;
826 	struct btree_iter iter;
827 
828 	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
829 
830 	m = bch_btree_iter_init(b, &iter, b->ops->is_extents
831 				? PRECEDING_KEY(&START_KEY(k))
832 				: PRECEDING_KEY(k));
833 
834 	if (b->ops->insert_fixup(b, k, &iter, replace_key))
835 		return status;
836 
837 	status = BTREE_INSERT_STATUS_INSERT;
838 
839 	while (m != bset_bkey_last(i) &&
840 	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
841 		prev = m, m = bkey_next(m);
842 
843 	/* prev is already in the tree; if we can merge into it we're done */
844 	status = BTREE_INSERT_STATUS_BACK_MERGE;
845 	if (prev &&
846 	    bch_bkey_try_merge(b, prev, k))
847 		goto merged;
848 #if 0
849 	status = BTREE_INSERT_STATUS_OVERWROTE;
850 	if (m != bset_bkey_last(i) &&
851 	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
852 		goto copy;
853 #endif
854 	status = BTREE_INSERT_STATUS_FRONT_MERGE;
855 	if (m != bset_bkey_last(i) &&
856 	    bch_bkey_try_merge(b, k, m))
857 		goto copy;
858 
859 	bch_bset_insert(b, m, k);
860 copy:	bkey_copy(m, k);
861 merged:
862 	return status;
863 }
864 EXPORT_SYMBOL(bch_btree_insert_key);
865 
866 /* Lookup */
867 
868 struct bset_search_iter {
869 	struct bkey *l, *r;
870 };
871 
872 static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
873 						     const struct bkey *search)
874 {
875 	unsigned li = 0, ri = t->size;
876 
877 	while (li + 1 != ri) {
878 		unsigned m = (li + ri) >> 1;
879 
880 		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
881 			ri = m;
882 		else
883 			li = m;
884 	}
885 
886 	return (struct bset_search_iter) {
887 		table_to_bkey(t, li),
888 		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
889 	};
890 }
891 
892 static struct bset_search_iter bset_search_tree(struct bset_tree *t,
893 						const struct bkey *search)
894 {
895 	struct bkey *l, *r;
896 	struct bkey_float *f;
897 	unsigned inorder, j, n = 1;
898 
899 	do {
900 		unsigned p = n << 4;
901 		p &= ((int) (p - t->size)) >> 31;
902 
903 		prefetch(&t->tree[p]);
904 
905 		j = n;
906 		f = &t->tree[j];
907 
908 		/*
909 		 * n = (f->mantissa > bfloat_mantissa())
910 		 *	? j * 2
911 		 *	: j * 2 + 1;
912 		 *
913 		 * We need to subtract 1 from f->mantissa for the sign bit trick
914 		 * to work  - that's done in make_bfloat()
915 		 */
916 		if (likely(f->exponent != 127))
917 			n = j * 2 + (((unsigned)
918 				      (f->mantissa -
919 				       bfloat_mantissa(search, f))) >> 31);
920 		else
921 			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
922 				? j * 2
923 				: j * 2 + 1;
924 	} while (n < t->size);
925 
926 	inorder = to_inorder(j, t);
927 
928 	/*
929 	 * n would have been the node we recursed to - the low bit tells us if
930 	 * we recursed left or recursed right.
931 	 */
932 	if (n & 1) {
933 		l = cacheline_to_bkey(t, inorder, f->m);
934 
935 		if (++inorder != t->size) {
936 			f = &t->tree[inorder_next(j, t->size)];
937 			r = cacheline_to_bkey(t, inorder, f->m);
938 		} else
939 			r = bset_bkey_last(t->data);
940 	} else {
941 		r = cacheline_to_bkey(t, inorder, f->m);
942 
943 		if (--inorder) {
944 			f = &t->tree[inorder_prev(j, t->size)];
945 			l = cacheline_to_bkey(t, inorder, f->m);
946 		} else
947 			l = t->data->start;
948 	}
949 
950 	return (struct bset_search_iter) {l, r};
951 }
952 
953 struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
954 			       const struct bkey *search)
955 {
956 	struct bset_search_iter i;
957 
958 	/*
959 	 * First we search for a cacheline, then we do a linear search
960 	 * within that cacheline.
961 	 *
962 	 * To search for the cacheline, there are three different possibilities:
963 	 *  * The set is too small to have a search tree, so we just do a linear
964 	 *    search over the whole set.
965 	 *  * The set is the one we're currently inserting into; keeping a full
966 	 *    auxiliary search tree up to date would be too expensive, so we
967 	 *    use a much simpler lookup table to do a binary search -
968 	 *    bset_search_write_set().
969 	 *  * Or we use the auxiliary search tree we constructed earlier -
970 	 *    bset_search_tree()
971 	 */
972 
973 	if (unlikely(!t->size)) {
974 		i.l = t->data->start;
975 		i.r = bset_bkey_last(t->data);
976 	} else if (bset_written(b, t)) {
977 		/*
978 		 * Each node in the auxiliary search tree covers a certain range
979 		 * of bits, and keys above and below the set it covers might
980 		 * differ outside those bits - so we have to special case the
981 		 * start and end - handle that here:
982 		 */
983 
984 		if (unlikely(bkey_cmp(search, &t->end) >= 0))
985 			return bset_bkey_last(t->data);
986 
987 		if (unlikely(bkey_cmp(search, t->data->start) < 0))
988 			return t->data->start;
989 
990 		i = bset_search_tree(t, search);
991 	} else {
992 		BUG_ON(!b->nsets &&
993 		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
994 
995 		i = bset_search_write_set(t, search);
996 	}
997 
998 	if (btree_keys_expensive_checks(b)) {
999 		BUG_ON(bset_written(b, t) &&
1000 		       i.l != t->data->start &&
1001 		       bkey_cmp(tree_to_prev_bkey(t,
1002 			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
1003 				search) > 0);
1004 
1005 		BUG_ON(i.r != bset_bkey_last(t->data) &&
1006 		       bkey_cmp(i.r, search) <= 0);
1007 	}
1008 
1009 	while (likely(i.l != i.r) &&
1010 	       bkey_cmp(i.l, search) <= 0)
1011 		i.l = bkey_next(i.l);
1012 
1013 	return i.l;
1014 }
1015 EXPORT_SYMBOL(__bch_bset_search);
1016 
1017 /* Btree iterator */
1018 
1019 typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
1020 				 struct btree_iter_set);
1021 
1022 static inline bool btree_iter_cmp(struct btree_iter_set l,
1023 				  struct btree_iter_set r)
1024 {
1025 	return bkey_cmp(l.k, r.k) > 0;
1026 }
1027 
1028 static inline bool btree_iter_end(struct btree_iter *iter)
1029 {
1030 	return !iter->used;
1031 }
1032 
1033 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
1034 			 struct bkey *end)
1035 {
1036 	if (k != end)
1037 		BUG_ON(!heap_add(iter,
1038 				 ((struct btree_iter_set) { k, end }),
1039 				 btree_iter_cmp));
1040 }
1041 
1042 static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
1043 					  struct btree_iter *iter,
1044 					  struct bkey *search,
1045 					  struct bset_tree *start)
1046 {
1047 	struct bkey *ret = NULL;
1048 	iter->size = ARRAY_SIZE(iter->data);
1049 	iter->used = 0;
1050 
1051 #ifdef CONFIG_BCACHE_DEBUG
1052 	iter->b = b;
1053 #endif
1054 
1055 	for (; start <= bset_tree_last(b); start++) {
1056 		ret = bch_bset_search(b, start, search);
1057 		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
1058 	}
1059 
1060 	return ret;
1061 }
1062 
1063 struct bkey *bch_btree_iter_init(struct btree_keys *b,
1064 				 struct btree_iter *iter,
1065 				 struct bkey *search)
1066 {
1067 	return __bch_btree_iter_init(b, iter, search, b->set);
1068 }
1069 EXPORT_SYMBOL(bch_btree_iter_init);
1070 
1071 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
1072 						 btree_iter_cmp_fn *cmp)
1073 {
1074 	struct btree_iter_set unused;
1075 	struct bkey *ret = NULL;
1076 
1077 	if (!btree_iter_end(iter)) {
1078 		bch_btree_iter_next_check(iter);
1079 
1080 		ret = iter->data->k;
1081 		iter->data->k = bkey_next(iter->data->k);
1082 
1083 		if (iter->data->k > iter->data->end) {
1084 			WARN_ONCE(1, "bset was corrupt!\n");
1085 			iter->data->k = iter->data->end;
1086 		}
1087 
1088 		if (iter->data->k == iter->data->end)
1089 			heap_pop(iter, unused, cmp);
1090 		else
1091 			heap_sift(iter, 0, cmp);
1092 	}
1093 
1094 	return ret;
1095 }
1096 
1097 struct bkey *bch_btree_iter_next(struct btree_iter *iter)
1098 {
1099 	return __bch_btree_iter_next(iter, btree_iter_cmp);
1101 }
1102 EXPORT_SYMBOL(bch_btree_iter_next);
1103 
1104 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
1105 					struct btree_keys *b, ptr_filter_fn fn)
1106 {
1107 	struct bkey *ret;
1108 
1109 	do {
1110 		ret = bch_btree_iter_next(iter);
1111 	} while (ret && fn(b, ret));
1112 
1113 	return ret;
1114 }
1115 
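/*
 * Typical shape of an iterator walk (illustrative only, compiled out): visit
 * every good key sorting after @search across all bsets of a node, in order,
 * skipping bad/stale pointers via bch_ptr_bad().
 */
#if 0
static void btree_iter_example(struct btree_keys *b, struct bkey *search)
{
	struct btree_iter iter;
	struct bkey *k;

	bch_btree_iter_init(b, &iter, search);

	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
		pr_debug("%llu:%llu", KEY_INODE(k), KEY_OFFSET(k));
}
#endif
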
1116 /* Mergesort */
1117 
1118 void bch_bset_sort_state_free(struct bset_sort_state *state)
1119 {
1120 	if (state->pool)
1121 		mempool_destroy(state->pool);
1122 }
1123 
1124 int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
1125 {
1126 	spin_lock_init(&state->time.lock);
1127 
1128 	state->page_order = page_order;
1129 	state->crit_factor = int_sqrt(1 << page_order);
1130 
1131 	state->pool = mempool_create_page_pool(1, page_order);
1132 	if (!state->pool)
1133 		return -ENOMEM;
1134 
1135 	return 0;
1136 }
1137 EXPORT_SYMBOL(bch_bset_sort_state_init);
1138 
1139 static void btree_mergesort(struct btree_keys *b, struct bset *out,
1140 			    struct btree_iter *iter,
1141 			    bool fixup, bool remove_stale)
1142 {
1143 	int i;
1144 	struct bkey *k, *last = NULL;
1145 	BKEY_PADDED(k) tmp;
1146 	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
1147 		? bch_ptr_bad
1148 		: bch_ptr_invalid;
1149 
1150 	/* Heapify the iterator, using our comparison function */
1151 	for (i = iter->used / 2 - 1; i >= 0; --i)
1152 		heap_sift(iter, i, b->ops->sort_cmp);
1153 
1154 	while (!btree_iter_end(iter)) {
1155 		if (b->ops->sort_fixup && fixup)
1156 			k = b->ops->sort_fixup(iter, &tmp.k);
1157 		else
1158 			k = NULL;
1159 
1160 		if (!k)
1161 			k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
1162 
1163 		if (bad(b, k))
1164 			continue;
1165 
1166 		if (!last) {
1167 			last = out->start;
1168 			bkey_copy(last, k);
1169 		} else if (!bch_bkey_try_merge(b, last, k)) {
1170 			last = bkey_next(last);
1171 			bkey_copy(last, k);
1172 		}
1173 	}
1174 
1175 	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
1176 
1177 	pr_debug("sorted %i keys", out->keys);
1178 }
1179 
1180 static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
1181 			 unsigned start, unsigned order, bool fixup,
1182 			 struct bset_sort_state *state)
1183 {
1184 	uint64_t start_time;
1185 	bool used_mempool = false;
1186 	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
1187 						     order);
1188 	if (!out) {
1189 		struct page *outp;
1190 
1191 		BUG_ON(order > state->page_order);
1192 
1193 		outp = mempool_alloc(state->pool, GFP_NOIO);
1194 		out = page_address(outp);
1195 		used_mempool = true;
1196 		order = state->page_order;
1197 	}
1198 
1199 	start_time = local_clock();
1200 
1201 	btree_mergesort(b, out, iter, fixup, false);
1202 	b->nsets = start;
1203 
1204 	if (!start && order == b->page_order) {
1205 		/*
1206 		 * Our temporary buffer is the same size as the btree node's
1207 		 * buffer, we can just swap buffers instead of doing a big
1208 		 * memcpy()
1209 		 */
1210 
1211 		out->magic	= b->set->data->magic;
1212 		out->seq	= b->set->data->seq;
1213 		out->version	= b->set->data->version;
1214 		swap(out, b->set->data);
1215 	} else {
1216 		b->set[start].data->keys = out->keys;
1217 		memcpy(b->set[start].data->start, out->start,
1218 		       (void *) bset_bkey_last(out) - (void *) out->start);
1219 	}
1220 
1221 	if (used_mempool)
1222 		mempool_free(virt_to_page(out), state->pool);
1223 	else
1224 		free_pages((unsigned long) out, order);
1225 
1226 	bch_bset_build_written_tree(b);
1227 
1228 	if (!start)
1229 		bch_time_stats_update(&state->time, start_time);
1230 }
1231 
1232 void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
1233 			    struct bset_sort_state *state)
1234 {
1235 	size_t order = b->page_order, keys = 0;
1236 	struct btree_iter iter;
1237 	int oldsize = bch_count_data(b);
1238 
1239 	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
1240 
1241 	if (start) {
1242 		unsigned i;
1243 
1244 		for (i = start; i <= b->nsets; i++)
1245 			keys += b->set[i].data->keys;
1246 
1247 		order = get_order(__set_bytes(b->set->data, keys));
1248 	}
1249 
1250 	__btree_sort(b, &iter, start, order, false, state);
1251 
1252 	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
1253 }
1254 EXPORT_SYMBOL(bch_btree_sort_partial);
1255 
1256 void bch_btree_sort_and_fix_extents(struct btree_keys *b,
1257 				    struct btree_iter *iter,
1258 				    struct bset_sort_state *state)
1259 {
1260 	__btree_sort(b, iter, 0, b->page_order, true, state);
1261 }
1262 
1263 void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
1264 			 struct bset_sort_state *state)
1265 {
1266 	uint64_t start_time = local_clock();
1267 
1268 	struct btree_iter iter;
1269 	bch_btree_iter_init(b, &iter, NULL);
1270 
1271 	btree_mergesort(b, new->set->data, &iter, false, true);
1272 
1273 	bch_time_stats_update(&state->time, start_time);
1274 
1275 	new->set->size = 0; // XXX: why?
1276 }
1277 
1278 #define SORT_CRIT	(4096 / sizeof(uint64_t))
1279 
1280 void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
1281 {
1282 	unsigned crit = SORT_CRIT;
1283 	int i;
1284 
1285 	/* Don't sort if nothing to do */
1286 	if (!b->nsets)
1287 		goto out;
1288 
1289 	for (i = b->nsets - 1; i >= 0; --i) {
1290 		crit *= state->crit_factor;
1291 
1292 		if (b->set[i].data->keys < crit) {
1293 			bch_btree_sort_partial(b, i, state);
1294 			return;
1295 		}
1296 	}
1297 
1298 	/* Sort if we'd overflow */
1299 	if (b->nsets + 1 == MAX_BSETS) {
1300 		bch_btree_sort(b, state);
1301 		return;
1302 	}
1303 
1304 out:
1305 	bch_bset_build_written_tree(b);
1306 }
1307 EXPORT_SYMBOL(bch_btree_sort_lazy);
1308 
1309 void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
1310 {
1311 	unsigned i;
1312 
1313 	for (i = 0; i <= b->nsets; i++) {
1314 		struct bset_tree *t = &b->set[i];
1315 		size_t bytes = t->data->keys * sizeof(uint64_t);
1316 		size_t j;
1317 
1318 		if (bset_written(b, t)) {
1319 			stats->sets_written++;
1320 			stats->bytes_written += bytes;
1321 
1322 			stats->floats += t->size - 1;
1323 
1324 			for (j = 1; j < t->size; j++)
1325 				if (t->tree[j].exponent == 127)
1326 					stats->failed++;
1327 		} else {
1328 			stats->sets_unwritten++;
1329 			stats->bytes_unwritten += bytes;
1330 		}
1331 	}
1332 }
1333