xref: /linux/drivers/md/bcache/bcache.h (revision 31a1b26f16e822577def5402ffc79cfe4aed2db9)
1 #ifndef _BCACHE_H
2 #define _BCACHE_H
3 
4 /*
5  * SOME HIGH LEVEL CODE DOCUMENTATION:
6  *
7  * Bcache mostly works with cache sets, cache devices, and backing devices.
8  *
9  * Support for multiple cache devices hasn't quite been finished off yet, but
10  * it's about 95% plumbed through. A cache set and its cache devices are sort
11  * of like an md raid array and its component devices. Most of the code doesn't
12  * care about individual cache devices; the main abstraction is the cache set.
13  *
14  * Multiple cache devices are intended to give us the ability to mirror dirty
15  * cached data and metadata, without mirroring clean cached data.
16  *
17  * Backing devices are different, in that they have a lifetime independent of a
18  * cache set. When you register a newly formatted backing device it'll come up
19  * in passthrough mode, and then you can attach and detach a backing device from
20  * a cache set at runtime - while it's mounted and in use. Detaching implicitly
21  * invalidates any cached data for that backing device.
22  *
23  * A cache set can have multiple (many) backing devices attached to it.
24  *
25  * There's also flash only volumes - this is the reason for the distinction
26  * between struct cached_dev and struct bcache_device. A flash only volume
27  * works much like a bcache device that has a backing device, except the
28  * "cached" data is always dirty. The end result is that we get thin
29  * provisioning with very little additional code.
30  *
31  * Flash only volumes work but they're not production ready because the moving
32  * garbage collector needs more work. More on that later.
33  *
34  * BUCKETS/ALLOCATION:
35  *
36  * Bcache is primarily designed for caching, which means that in normal
37  * operation all of our available space will be allocated. Thus, we need an
38  * efficient way of deleting things from the cache so we can write new things to
39  * it.
40  *
41  * To do this, we first divide the cache device up into buckets. A bucket is the
42  * unit of allocation; they're typically around 1 MB - anywhere from 128 KB to
43  * 2 MB or more works efficiently.
44  *
45  * Each bucket has a 16 bit priority, and an 8 bit generation associated with
46  * it. The gens and priorities for all the buckets are stored contiguously and
47  * packed on disk (in a linked list of buckets - aside from the superblock, all
48  * of bcache's metadata is stored in buckets).
49  *
50  * The priority is used to implement an LRU. We reset a bucket's priority when
51  * we allocate it or on a cache hit, and every so often we decrement the priority
52  * of each bucket. It could be used to implement something more sophisticated,
53  * if anyone ever gets around to it.
54  *
55  * The generation is used for invalidating buckets. Each pointer also has an 8
56  * bit generation embedded in it; for a pointer to be considered valid, its gen
57  * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
58  * we have to do is increment its gen (and write its new gen to disk; we batch
59  * this up).
60  *
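 * (As a rough sketch, using the PTR_GEN() and PTR_BUCKET() helpers defined
 * later in this header, a pointer only references live data while
 *
 *	PTR_GEN(k, i) == PTR_BUCKET(c, k, i)->gen
 *
 * holds - ignoring the gen wraparound handling the real code needs - so
 * bumping a bucket's gen implicitly invalidates every pointer into it.)
 *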
61  * Bcache is entirely COW - we never write twice to a bucket, even buckets that
62  * contain metadata (including btree nodes).
63  *
64  * THE BTREE:
65  *
66  * Bcache is in large part designed around the btree.
67  *
68  * At a high level, the btree is just an index of key -> ptr tuples.
69  *
70  * Keys represent extents, and thus have a size field. Keys also have a variable
71  * number of pointers attached to them (potentially zero, which is handy for
72  * invalidating the cache).
73  *
74  * The key itself is an inode:offset pair. The inode number corresponds to a
75  * backing device or a flash only volume. The offset is the ending offset of the
76  * extent within the inode - not the starting offset; this makes lookups
77  * slightly more convenient.
78  *
79  * Pointers contain the cache device id, the offset on that device, and an 8 bit
80  * generation number. More on the gen later.
81  *
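 * (For example, using the KEY() and PTR() macros defined later in this header:
 * an 8 sector extent in inode 5 ending at sector 64 is KEY(5, 64, 8) - it
 * covers sectors 56 through 63 - and each of its pointers packs
 * PTR(gen, sector offset on the cache device, cache device id) into a single
 * 64 bit word.)
 *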
82  * Index lookups are not fully abstracted - cache lookups in particular are
83  * still somewhat mixed in with the btree code, but things are headed in that
84  * direction.
85  *
86  * Updates are fairly well abstracted, though. There are two different ways of
87  * updating the btree: insert and replace.
88  *
89  * BTREE_INSERT will just take a list of keys and insert them into the btree -
90  * overwriting (possibly only partially) any extents they overlap with. This is
91  * used to update the index after a write.
92  *
93  * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
94  * overwriting a key that matches another given key. This is used for inserting
95  * data into the cache after a cache miss, and for background writeback, and for
96  * the moving garbage collector.
97  *
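 * (Roughly, BTREE_REPLACE is to the index what cmpxchg() is to a word of
 * memory - in pseudocode:
 *
 *	if (the index still contains old_key for this range)
 *		insert new_key;
 *	else
 *		drop the insert;
 *
 * so a cache miss fill or a writeback can't clobber data that was rewritten
 * while its IO was in flight.)
 *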
98  * There is no "delete" operation; deleting things from the index is
99  * accomplished either by invalidating pointers (by incrementing a bucket's
100  * gen) or by inserting a key with 0 pointers - which will overwrite anything
101  * previously present at that location in the index.
102  *
103  * This means that there are always stale/invalid keys in the btree. They're
104  * filtered out by the code that iterates through a btree node, and removed when
105  * a btree node is rewritten.
106  *
107  * BTREE NODES:
108  *
109  * Our unit of allocation is a bucket, and we can't arbitrarily allocate or
110  * free anything smaller than a bucket - so that's how big our btree nodes are.
111  *
112  * (If buckets are really big we'll only use part of the bucket for a btree node
113  * - no less than 1/4th - but a bucket still contains no more than a single
114  * btree node. I'd actually like to change this, but for now we rely on the
115  * bucket's gen for deleting btree nodes when we rewrite/split a node.)
116  *
117  * Anyway, btree nodes are big - big enough to be inefficient with a textbook
118  * btree implementation.
119  *
120  * The way this is solved is that btree nodes are internally log structured; we
121  * can append new keys to an existing btree node without rewriting it. This
122  * means each set of keys we write is sorted, but the node is not.
123  *
124  * We maintain this log structure in memory - keeping 1 MB of keys sorted would
125  * be expensive, and we have to distinguish between the keys we have written and
126  * the keys we haven't. So to do a lookup in a btree node, we have to search
127  * each sorted set. But we do merge written sets together lazily, so the cost of
128  * these extra searches is quite low (normally most of the keys in a btree node
129  * will be in one big set, and then there'll be one or two sets that are much
130  * smaller).
131  *
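 * (So a btree node is a sequence of independently sorted bsets, roughly:
 *
 *	| big, merged bset | smaller bset | newest bset, not yet written |
 *
 * and a lookup searches each bset in turn instead of one fully sorted array.)
 *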
132  * This log structure makes bcache's btree more of a hybrid between a
133  * conventional btree and a compacting data structure, with some of the
134  * advantages of both.
135  *
136  * GARBAGE COLLECTION:
137  *
138  * We can't just invalidate any bucket - it might contain dirty data or
139  * metadata. If it once contained dirty data, other writes might overwrite it
140  * later, leaving no valid pointers into that bucket in the index.
141  *
142  * Thus, the primary purpose of garbage collection is to find buckets to reuse.
143  * It also counts how much valid data each bucket currently contains, so that
144  * allocation can reuse buckets sooner when they've been mostly overwritten.
145  *
146  * It also does some things that are really internal to the btree
147  * implementation. If a btree node contains pointers that are stale by more than
148  * some threshold, it rewrites the btree node to avoid the bucket's generation
149  * wrapping around. It also merges adjacent btree nodes if they're empty enough.
150  *
151  * THE JOURNAL:
152  *
153  * Bcache's journal is not necessary for consistency; we always strictly
154  * order metadata writes so that the btree and everything else is consistent on
155  * disk in the event of an unclean shutdown, and in fact bcache had writeback
156  * caching (with recovery from unclean shutdown) before journalling was
157  * implemented.
158  *
159  * Rather, the journal is purely a performance optimization; we can't complete a
160  * write until we've updated the index on disk, otherwise the cache would be
161  * inconsistent in the event of an unclean shutdown. This means that without the
162  * journal, on random write workloads we constantly have to update all the leaf
163  * nodes in the btree, and those writes will be mostly empty (appending at most
164  * a few keys each) - highly inefficient in the amount of metadata written,
165  * and it puts more strain on the various btree resorting/compacting code.
166  *
167  * The journal is just a log of keys we've inserted; on startup we reinsert
168  * all the keys from the open journal entries. That means that when we're updating
169  * a node in the btree, we can wait until a 4k block of keys fills up before
170  * writing them out.
171  *
172  * For simplicity, we only journal updates to leaf nodes; updates to parent
173  * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
174  * the complexity to deal with journalling them (in particular, journal replay)
175  * - updates to non leaf nodes just happen synchronously (see btree_split()).
176  */
177 
178 #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
179 
180 #include <linux/bio.h>
181 #include <linux/kobject.h>
182 #include <linux/list.h>
183 #include <linux/mutex.h>
184 #include <linux/rbtree.h>
185 #include <linux/rwsem.h>
186 #include <linux/types.h>
187 #include <linux/workqueue.h>
188 
189 #include "util.h"
190 #include "closure.h"
191 
192 struct bucket {
193 	atomic_t	pin;
194 	uint16_t	prio;
195 	uint8_t		gen;
196 	uint8_t		disk_gen;
197 	uint8_t		last_gc; /* Most out of date gen in the btree */
198 	uint8_t		gc_gen;
199 	uint16_t	gc_mark;
200 };
201 
202 /*
203  * I'd use bitfields for these, but I don't trust the compiler not to screw me
204  * as multiple threads touch struct bucket without locking
205  */
206 
207 BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
208 #define GC_MARK_RECLAIMABLE	0
209 #define GC_MARK_DIRTY		1
210 #define GC_MARK_METADATA	2
211 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
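
/*
 * BITMASK() (from util.h) generates accessor pairs over a bit range of the
 * named field, in the same style as the open coded PTR_FIELD() macro further
 * down - so the lines above give us GC_MARK(b)/SET_GC_MARK(b, v) and
 * GC_SECTORS_USED(b)/SET_GC_SECTORS_USED(b, v), e.g. (a sketch):
 *
 *	SET_GC_MARK(b, GC_MARK_DIRTY);
 *	if (GC_MARK(b) == GC_MARK_DIRTY)
 *		...
 */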
212 
213 struct bkey {
214 	uint64_t	high;
215 	uint64_t	low;
216 	uint64_t	ptr[];
217 };
218 
219 /* Enough for a key with 6 pointers */
220 #define BKEY_PAD		8
221 
222 #define BKEY_PADDED(key)					\
223 	union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
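
/*
 * (struct bkey is two 64 bit words of header plus one word per pointer, so an
 * 8 word pad leaves room for the header and up to 6 pointers - hence the
 * comment above.)
 */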
224 
225 /* Version 0: Cache device
226  * Version 1: Backing device
227  * Version 2: Seed pointer into btree node checksum
228  * Version 3: Cache device with new UUID format
229  * Version 4: Backing device with data offset
230  */
231 #define BCACHE_SB_VERSION_CDEV			0
232 #define BCACHE_SB_VERSION_BDEV			1
233 #define BCACHE_SB_VERSION_CDEV_WITH_UUID	3
234 #define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4
235 #define BCACHE_SB_MAX_VERSION			4
236 
237 #define SB_SECTOR		8
238 #define SB_SIZE			4096
239 #define SB_LABEL_SIZE		32
240 #define SB_JOURNAL_BUCKETS	256U
241 /* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
242 #define MAX_CACHES_PER_SET	8
243 
244 #define BDEV_DATA_START_DEFAULT	16	/* sectors */
245 
246 struct cache_sb {
247 	uint64_t		csum;
248 	uint64_t		offset;	/* sector where this sb was written */
249 	uint64_t		version;
250 
251 	uint8_t			magic[16];
252 
253 	uint8_t			uuid[16];
254 	union {
255 		uint8_t		set_uuid[16];
256 		uint64_t	set_magic;
257 	};
258 	uint8_t			label[SB_LABEL_SIZE];
259 
260 	uint64_t		flags;
261 	uint64_t		seq;
262 	uint64_t		pad[8];
263 
264 	union {
265 	struct {
266 		/* Cache devices */
267 		uint64_t	nbuckets;	/* device size */
268 
269 		uint16_t	block_size;	/* sectors */
270 		uint16_t	bucket_size;	/* sectors */
271 
272 		uint16_t	nr_in_set;
273 		uint16_t	nr_this_dev;
274 	};
275 	struct {
276 		/* Backing devices */
277 		uint64_t	data_offset;
278 
279 		/*
280 		 * block_size from the cache device section is still used by
281 		 * backing devices, so don't add anything here until we fix
282 		 * things to not need it for backing devices anymore
283 		 */
284 	};
285 	};
286 
287 	uint32_t		last_mount;	/* time_t */
288 
289 	uint16_t		first_bucket;
290 	union {
291 		uint16_t	njournal_buckets;
292 		uint16_t	keys;
293 	};
294 	uint64_t		d[SB_JOURNAL_BUCKETS];	/* journal buckets */
295 };
296 
297 BITMASK(CACHE_SYNC,		struct cache_sb, flags, 0, 1);
298 BITMASK(CACHE_DISCARD,		struct cache_sb, flags, 1, 1);
299 BITMASK(CACHE_REPLACEMENT,	struct cache_sb, flags, 2, 3);
300 #define CACHE_REPLACEMENT_LRU	0U
301 #define CACHE_REPLACEMENT_FIFO	1U
302 #define CACHE_REPLACEMENT_RANDOM 2U
303 
304 BITMASK(BDEV_CACHE_MODE,	struct cache_sb, flags, 0, 4);
305 #define CACHE_MODE_WRITETHROUGH	0U
306 #define CACHE_MODE_WRITEBACK	1U
307 #define CACHE_MODE_WRITEAROUND	2U
308 #define CACHE_MODE_NONE		3U
309 BITMASK(BDEV_STATE,		struct cache_sb, flags, 61, 2);
310 #define BDEV_STATE_NONE		0U
311 #define BDEV_STATE_CLEAN	1U
312 #define BDEV_STATE_DIRTY	2U
313 #define BDEV_STATE_STALE	3U
314 
315 /* Version 1: Seed pointer into btree node checksum
316  */
317 #define BCACHE_BSET_VERSION	1
318 
319 /*
320  * This is the on disk format for btree nodes - a btree node on disk is a list
321  * of these; within each set the keys are sorted
322  */
323 struct bset {
324 	uint64_t		csum;
325 	uint64_t		magic;
326 	uint64_t		seq;
327 	uint32_t		version;
328 	uint32_t		keys;
329 
330 	union {
331 		struct bkey	start[0];
332 		uint64_t	d[0];
333 	};
334 };
335 
336 /*
337  * On disk format for priorities and gens - see super.c near prio_write() for
338  * more.
339  */
340 struct prio_set {
341 	uint64_t		csum;
342 	uint64_t		magic;
343 	uint64_t		seq;
344 	uint32_t		version;
345 	uint32_t		pad;
346 
347 	uint64_t		next_bucket;
348 
349 	struct bucket_disk {
350 		uint16_t	prio;
351 		uint8_t		gen;
352 	} __attribute((packed)) data[];
353 };
354 
355 struct uuid_entry {
356 	union {
357 		struct {
358 			uint8_t		uuid[16];
359 			uint8_t		label[32];
360 			uint32_t	first_reg;
361 			uint32_t	last_reg;
362 			uint32_t	invalidated;
363 
364 			uint32_t	flags;
365 			/* Size of flash only volumes */
366 			uint64_t	sectors;
367 		};
368 
369 		uint8_t	pad[128];
370 	};
371 };
372 
373 BITMASK(UUID_FLASH_ONLY,	struct uuid_entry, flags, 0, 1);
374 
375 #include "journal.h"
376 #include "stats.h"
377 struct search;
378 struct btree;
379 struct keybuf;
380 
381 struct keybuf_key {
382 	struct rb_node		node;
383 	BKEY_PADDED(key);
384 	void			*private;
385 };
386 
387 typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
388 
389 struct keybuf {
390 	struct bkey		last_scanned;
391 	spinlock_t		lock;
392 
393 	/*
394 	 * Beginning and end of range in rb tree - so that we can skip taking the
395 	 * lock and checking the rb tree when we need to check for overlapping
396 	 * keys.
397 	 */
398 	struct bkey		start;
399 	struct bkey		end;
400 
401 	struct rb_root		keys;
402 
403 #define KEYBUF_NR		100
404 	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
405 };
406 
407 struct bio_split_pool {
408 	struct bio_set		*bio_split;
409 	mempool_t		*bio_split_hook;
410 };
411 
412 struct bio_split_hook {
413 	struct closure		cl;
414 	struct bio_split_pool	*p;
415 	struct bio		*bio;
416 	bio_end_io_t		*bi_end_io;
417 	void			*bi_private;
418 };
419 
420 struct bcache_device {
421 	struct closure		cl;
422 
423 	struct kobject		kobj;
424 
425 	struct cache_set	*c;
426 	unsigned		id;
427 #define BCACHEDEVNAME_SIZE	12
428 	char			name[BCACHEDEVNAME_SIZE];
429 
430 	struct gendisk		*disk;
431 
432 	/* If nonzero, we're closing */
433 	atomic_t		closing;
434 
435 	/* If nonzero, we're detaching/unregistering from cache set */
436 	atomic_t		detaching;
437 	int			flush_done;
438 
439 	uint64_t		nr_stripes;
440 	unsigned		stripe_size_bits;
441 	atomic_t		*stripe_sectors_dirty;
442 
443 	unsigned long		sectors_dirty_last;
444 	long			sectors_dirty_derivative;
445 
446 	mempool_t		*unaligned_bvec;
447 	struct bio_set		*bio_split;
448 
449 	unsigned		data_csum:1;
450 
451 	int (*cache_miss)(struct btree *, struct search *,
452 			  struct bio *, unsigned);
453 	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
454 
455 	struct bio_split_pool	bio_split_hook;
456 };
457 
458 struct io {
459 	/* Used to track sequential IO so it can be skipped */
460 	struct hlist_node	hash;
461 	struct list_head	lru;
462 
463 	unsigned long		jiffies;
464 	unsigned		sequential;
465 	sector_t		last;
466 };
467 
468 struct cached_dev {
469 	struct list_head	list;
470 	struct bcache_device	disk;
471 	struct block_device	*bdev;
472 
473 	struct cache_sb		sb;
474 	struct bio		sb_bio;
475 	struct bio_vec		sb_bv[1];
476 	struct closure_with_waitlist sb_write;
477 
478 	/* Refcount on the cache set. Always nonzero when we're caching. */
479 	atomic_t		count;
480 	struct work_struct	detach;
481 
482 	/*
483 	 * Device might not be running if it's dirty and the cache set hasn't
484 	 * showed up yet.
485 	 */
486 	atomic_t		running;
487 
488 	/*
489 	 * Writes take a shared lock from start to finish; scanning for dirty
490 	 * data to refill the rb tree requires an exclusive lock.
491 	 */
492 	struct rw_semaphore	writeback_lock;
493 
494 	/*
495 	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
496 	 * data in the cache. Protected by writeback_lock; must have an
497 	 * data in the cache. Protected by writeback_lock; must have a
498 	 */
499 	atomic_t		has_dirty;
500 
501 	struct ratelimit	writeback_rate;
502 	struct delayed_work	writeback_rate_update;
503 
504 	/*
505 	 * Internal to the writeback code, so read_dirty() can keep track of
506 	 * where it's at.
507 	 */
508 	sector_t		last_read;
509 
510 	/* Number of writeback bios in flight */
511 	atomic_t		in_flight;
512 	struct closure_with_timer writeback;
513 	struct closure_waitlist	writeback_wait;
514 
515 	struct keybuf		writeback_keys;
516 
517 	/* For tracking sequential IO */
518 #define RECENT_IO_BITS	7
519 #define RECENT_IO	(1 << RECENT_IO_BITS)
520 	struct io		io[RECENT_IO];
521 	struct hlist_head	io_hash[RECENT_IO + 1];
522 	struct list_head	io_lru;
523 	spinlock_t		io_lock;
524 
525 	struct cache_accounting	accounting;
526 
527 	/* The rest of this all shows up in sysfs */
528 	unsigned		sequential_cutoff;
529 	unsigned		readahead;
530 
531 	unsigned		sequential_merge:1;
532 	unsigned		verify:1;
533 
534 	unsigned		partial_stripes_expensive:1;
535 	unsigned		writeback_metadata:1;
536 	unsigned		writeback_running:1;
537 	unsigned char		writeback_percent;
538 	unsigned		writeback_delay;
539 
540 	int			writeback_rate_change;
541 	int64_t			writeback_rate_derivative;
542 	uint64_t		writeback_rate_target;
543 
544 	unsigned		writeback_rate_update_seconds;
545 	unsigned		writeback_rate_d_term;
546 	unsigned		writeback_rate_p_term_inverse;
547 	unsigned		writeback_rate_d_smooth;
548 };
549 
550 enum alloc_watermarks {
551 	WATERMARK_PRIO,
552 	WATERMARK_METADATA,
553 	WATERMARK_MOVINGGC,
554 	WATERMARK_NONE,
555 	WATERMARK_MAX
556 };
557 
558 struct cache {
559 	struct cache_set	*set;
560 	struct cache_sb		sb;
561 	struct bio		sb_bio;
562 	struct bio_vec		sb_bv[1];
563 
564 	struct kobject		kobj;
565 	struct block_device	*bdev;
566 
567 	unsigned		watermark[WATERMARK_MAX];
568 
569 	struct task_struct	*alloc_thread;
570 
571 	struct closure		prio;
572 	struct prio_set		*disk_buckets;
573 
574 	/*
575 	 * When allocating new buckets, prio_write() gets first dibs - since we
576 	 * may not be able to allocate at all without writing priorities and gens.
577 	 * prio_buckets[] contains the last buckets we wrote priorities to (so
578 	 * gc can mark them as metadata), prio_next[] contains the buckets
579 	 * allocated for the next prio write.
580 	 */
581 	uint64_t		*prio_buckets;
582 	uint64_t		*prio_last_buckets;
583 
584 	/*
585 	 * free: Buckets that are ready to be used
586 	 *
587 	 * free_inc: Incoming buckets - these are buckets that currently have
588 	 * cached data in them, and we can't reuse them until after we write
589 	 * their new gen to disk. After prio_write() finishes writing the new
590 	 * gens/prios, they'll be moved to the free list (and possibly discarded
591 	 * in the process)
592 	 *
593 	 * unused: GC found nothing pointing into these buckets (possibly
594 	 * because all the data they contained was overwritten), so we only
595 	 * need to discard them before they can be moved to the free list.
596 	 */
597 	DECLARE_FIFO(long, free);
598 	DECLARE_FIFO(long, free_inc);
599 	DECLARE_FIFO(long, unused);
600 
601 	size_t			fifo_last_bucket;
602 
603 	/* Allocation stuff: */
604 	struct bucket		*buckets;
605 
606 	DECLARE_HEAP(struct bucket *, heap);
607 
608 	/*
609 	 * max(gen - disk_gen) for all buckets. When it gets too big we have to
610 	 * call prio_write() to keep gens from wrapping.
611 	 */
612 	uint8_t			need_save_prio;
613 	unsigned		gc_move_threshold;
614 
615 	/*
616 	 * If nonzero, we know we aren't going to find any buckets to invalidate
617 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
618 	 * cpu
619 	 */
620 	unsigned		invalidate_needs_gc:1;
621 
622 	bool			discard; /* Get rid of? */
623 
624 	/*
625 	 * We preallocate structs for issuing discards to buckets, and keep them
626 	 * on this list when they're not in use; do_discard() issues discards
627 	 * whenever there's work to do and is called by free_some_buckets() and
628 	 * when a discard finishes.
629 	 */
630 	atomic_t		discards_in_flight;
631 	struct list_head	discards;
632 
633 	struct journal_device	journal;
634 
635 	/* The rest of this all shows up in sysfs */
636 #define IO_ERROR_SHIFT		20
637 	atomic_t		io_errors;
638 	atomic_t		io_count;
639 
640 	atomic_long_t		meta_sectors_written;
641 	atomic_long_t		btree_sectors_written;
642 	atomic_long_t		sectors_written;
643 
644 	struct bio_split_pool	bio_split_hook;
645 };
646 
647 struct gc_stat {
648 	size_t			nodes;
649 	size_t			key_bytes;
650 
651 	size_t			nkeys;
652 	uint64_t		data;	/* sectors */
653 	uint64_t		dirty;	/* sectors */
654 	unsigned		in_use; /* percent */
655 };
656 
657 /*
658  * Flag bits, for how the cache set is shutting down, and what phase it's at:
659  *
660  * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
661  * all the backing devices first (their cached data gets invalidated, and they
662  * won't automatically reattach).
663  *
664  * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
665  * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
666  * flushing dirty data).
667  */
668 #define CACHE_SET_UNREGISTERING		0
669 #define	CACHE_SET_STOPPING		1
670 
671 struct cache_set {
672 	struct closure		cl;
673 
674 	struct list_head	list;
675 	struct kobject		kobj;
676 	struct kobject		internal;
677 	struct dentry		*debug;
678 	struct cache_accounting accounting;
679 
680 	unsigned long		flags;
681 
682 	struct cache_sb		sb;
683 
684 	struct cache		*cache[MAX_CACHES_PER_SET];
685 	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
686 	int			caches_loaded;
687 
688 	struct bcache_device	**devices;
689 	struct list_head	cached_devs;
690 	uint64_t		cached_dev_sectors;
691 	struct closure		caching;
692 
693 	struct closure_with_waitlist sb_write;
694 
695 	mempool_t		*search;
696 	mempool_t		*bio_meta;
697 	struct bio_set		*bio_split;
698 
699 	/* For the btree cache */
700 	struct shrinker		shrink;
701 
702 	/* For the btree cache and anything allocation related */
703 	struct mutex		bucket_lock;
704 
705 	/* log2(bucket_size), in sectors */
706 	unsigned short		bucket_bits;
707 
708 	/* log2(block_size), in sectors */
709 	unsigned short		block_bits;
710 
711 	/*
712 	 * Default number of pages for a new btree node - may be less than a
713 	 * full bucket
714 	 */
715 	unsigned		btree_pages;
716 
717 	/*
718 	 * Lists of struct btrees; lru is the list for structs that have memory
719 	 * allocated for an actual btree node; freed is for structs that do not.
720 	 *
721 	 * We never free a struct btree, except on shutdown - we just put it on
722 	 * the btree_cache_freed list and reuse it later. This simplifies the
723 	 * code, and it doesn't cost us much memory as the memory usage is
724 	 * dominated by buffers that hold the actual btree node data and those
725 	 * can be freed - and the number of struct btrees allocated is
726 	 * effectively bounded.
727 	 *
728 	 * btree_cache_freeable is effectively a small cache - we use it because
729 	 * high order page allocations can be rather expensive, and it's quite
730 	 * common to delete and allocate btree nodes in quick succession. It
731 	 * should never grow past ~2-3 nodes in practice.
732 	 */
733 	struct list_head	btree_cache;
734 	struct list_head	btree_cache_freeable;
735 	struct list_head	btree_cache_freed;
736 
737 	/* Number of elements in btree_cache + btree_cache_freeable lists */
738 	unsigned		bucket_cache_used;
739 
740 	/*
741 	 * If we need to allocate memory for a new btree node and that
742 	 * allocation fails, we can cannibalize another node in the btree cache
743 	 * to satisfy the allocation. However, only one thread can be doing this
744 	 * at a time, for obvious reasons - try_harder and try_wait are
745 	 * basically a lock for this that we can wait on asynchronously. The
746 	 * btree_root() macro releases the lock when it returns.
747 	 */
748 	struct closure		*try_harder;
749 	struct closure_waitlist	try_wait;
750 	uint64_t		try_harder_start;
751 
752 	/*
753 	 * When we free a btree node, we increment the gen of the bucket the
754 	 * node is in - but we can't rewrite the prios and gens until we
755 	 * have finished whatever it is we were doing; otherwise, after a crash, the
756 	 * btree node would be freed, but for, say, a split, we might not have the
757 	 * pointers to the new nodes inserted into the btree yet.
758 	 *
759 	 * This is a refcount that blocks prio_write() until the new keys are
760 	 * written.
761 	 */
762 	atomic_t		prio_blocked;
763 	struct closure_waitlist	bucket_wait;
764 
765 	/*
766 	 * For any bio we don't skip we subtract the number of sectors from
767 	 * rescale; when it hits 0 we rescale all the bucket priorities.
768 	 */
769 	atomic_t		rescale;
770 	/*
771 	 * When we invalidate buckets, we use both the priority and the amount
772 	 * of good data to determine which buckets to reuse first - to weight
773 	 * those together consistently we keep track of the smallest nonzero
774 	 * priority of any bucket.
775 	 */
776 	uint16_t		min_prio;
777 
778 	/*
779 	 * max(gen - gc_gen) for all buckets. When it gets too big we have to gc
780 	 * to keep gens from wrapping around.
781 	 */
782 	uint8_t			need_gc;
783 	struct gc_stat		gc_stats;
784 	size_t			nbuckets;
785 
786 	struct closure_with_waitlist gc;
787 	/* Where in the btree gc currently is */
788 	struct bkey		gc_done;
789 
790 	/*
791 	 * The allocation code needs gc_mark in struct bucket to be correct, but
792 	 * it's not while a gc is in progress. Protected by bucket_lock.
793 	 */
794 	int			gc_mark_valid;
795 
796 	/* Counts how many sectors bio_insert has added to the cache */
797 	atomic_t		sectors_to_gc;
798 
799 	struct closure		moving_gc;
800 	struct closure_waitlist	moving_gc_wait;
801 	struct keybuf		moving_gc_keys;
802 	/* Number of moving GC bios in flight */
803 	atomic_t		in_flight;
804 
805 	struct btree		*root;
806 
807 #ifdef CONFIG_BCACHE_DEBUG
808 	struct btree		*verify_data;
809 	struct mutex		verify_lock;
810 #endif
811 
812 	unsigned		nr_uuids;
813 	struct uuid_entry	*uuids;
814 	BKEY_PADDED(uuid_bucket);
815 	struct closure_with_waitlist uuid_write;
816 
817 	/*
818 	 * A btree node on disk could have too many bsets for an iterator to fit
819 	 * on the stack - have to dynamically allocate them
820 	 */
821 	mempool_t		*fill_iter;
822 
823 	/*
824 	 * btree_sort() is a merge sort and requires temporary space - single
825 	 * element mempool
826 	 */
827 	struct mutex		sort_lock;
828 	struct bset		*sort;
829 	unsigned		sort_crit_factor;
830 
831 	/* List of buckets we're currently writing data to */
832 	struct list_head	data_buckets;
833 	spinlock_t		data_bucket_lock;
834 
835 	struct journal		journal;
836 
837 #define CONGESTED_MAX		1024
838 	unsigned		congested_last_us;
839 	atomic_t		congested;
840 
841 	/* The rest of this all shows up in sysfs */
842 	unsigned		congested_read_threshold_us;
843 	unsigned		congested_write_threshold_us;
844 
845 	spinlock_t		sort_time_lock;
846 	struct time_stats	sort_time;
847 	struct time_stats	btree_gc_time;
848 	struct time_stats	btree_split_time;
849 	spinlock_t		btree_read_time_lock;
850 	struct time_stats	btree_read_time;
851 	struct time_stats	try_harder_time;
852 
853 	atomic_long_t		cache_read_races;
854 	atomic_long_t		writeback_keys_done;
855 	atomic_long_t		writeback_keys_failed;
856 	unsigned		error_limit;
857 	unsigned		error_decay;
858 	unsigned short		journal_delay_ms;
859 	unsigned		verify:1;
860 	unsigned		key_merging_disabled:1;
861 	unsigned		gc_always_rewrite:1;
862 	unsigned		shrinker_disabled:1;
863 	unsigned		copy_gc_enabled:1;
864 
865 #define BUCKET_HASH_BITS	12
866 	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
867 };
868 
869 static inline bool key_merging_disabled(struct cache_set *c)
870 {
871 #ifdef CONFIG_BCACHE_DEBUG
872 	return c->key_merging_disabled;
873 #else
874 	return 0;
875 #endif
876 }
877 
878 static inline bool SB_IS_BDEV(const struct cache_sb *sb)
879 {
880 	return sb->version == BCACHE_SB_VERSION_BDEV
881 		|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
882 }
883 
884 struct bbio {
885 	unsigned		submit_time_us;
886 	union {
887 		struct bkey	key;
888 		uint64_t	_pad[3];
889 		/*
890 		 * We only need pad = 3 here because we only ever carry around a
891 		 * single pointer - i.e. the pointer we're doing io to/from.
892 		 */
893 	};
894 	struct bio		bio;
895 };
896 
897 static inline unsigned local_clock_us(void)
898 {
899 	return local_clock() >> 10;
900 }
901 
902 #define BTREE_PRIO		USHRT_MAX
903 #define INITIAL_PRIO		32768
904 
905 #define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
906 #define btree_blocks(b)							\
907 	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
908 
909 #define btree_default_blocks(c)						\
910 	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
911 
912 #define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
913 #define bucket_bytes(c)		((c)->sb.bucket_size << 9)
914 #define block_bytes(c)		((c)->sb.block_size << 9)
915 
916 #define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
917 #define set_bytes(i)		__set_bytes(i, i->keys)
918 
919 #define __set_blocks(i, k, c)	DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
920 #define set_blocks(i, c)	__set_blocks(i, (i)->keys, c)
921 
922 #define node(i, j)		((struct bkey *) ((i)->d + (j)))
923 #define end(i)			node(i, (i)->keys)
924 
925 #define index(i, b)							\
926 	((size_t) (((void *) i - (void *) (b)->sets[0].data) /		\
927 		   block_bytes(b->c)))
928 
929 #define btree_data_space(b)	(PAGE_SIZE << (b)->page_order)
930 
931 #define prios_per_bucket(c)				\
932 	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
933 	 sizeof(struct bucket_disk))
934 #define prio_buckets(c)					\
935 	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
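
/*
 * Worked example (a sketch, assuming the struct layouts above and no extra
 * padding): with 1024 sector (512 KiB) buckets, bucket_bytes() is 524288,
 * struct prio_set's fixed header is 40 bytes and each packed bucket_disk
 * entry is 3 bytes, so prios_per_bucket() comes to (524288 - 40) / 3 = 174749
 * buckets' worth of prios/gens per prio bucket.
 */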
936 
937 #define JSET_MAGIC		0x245235c1a3625032ULL
938 #define PSET_MAGIC		0x6750e15f87337f91ULL
939 #define BSET_MAGIC		0x90135c78b99e07f5ULL
940 
941 #define jset_magic(c)		((c)->sb.set_magic ^ JSET_MAGIC)
942 #define pset_magic(c)		((c)->sb.set_magic ^ PSET_MAGIC)
943 #define bset_magic(c)		((c)->sb.set_magic ^ BSET_MAGIC)
944 
945 /* Bkey fields: all units are in sectors */
946 
947 #define KEY_FIELD(name, field, offset, size)				\
948 	BITMASK(name, struct bkey, field, offset, size)
949 
950 #define PTR_FIELD(name, offset, size)					\
951 	static inline uint64_t name(const struct bkey *k, unsigned i)	\
952 	{ return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); }	\
953 									\
954 	static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
955 	{								\
956 		k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset);	\
957 		k->ptr[i] |= v << offset;				\
958 	}
959 
960 KEY_FIELD(KEY_PTRS,	high, 60, 3)
961 KEY_FIELD(HEADER_SIZE,	high, 58, 2)
962 KEY_FIELD(KEY_CSUM,	high, 56, 2)
963 KEY_FIELD(KEY_PINNED,	high, 55, 1)
964 KEY_FIELD(KEY_DIRTY,	high, 36, 1)
965 
966 KEY_FIELD(KEY_SIZE,	high, 20, 16)
967 KEY_FIELD(KEY_INODE,	high, 0,  20)
968 
969 /* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
970 
971 static inline uint64_t KEY_OFFSET(const struct bkey *k)
972 {
973 	return k->low;
974 }
975 
976 static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
977 {
978 	k->low = v;
979 }
980 
981 PTR_FIELD(PTR_DEV,		51, 12)
982 PTR_FIELD(PTR_OFFSET,		8,  43)
983 PTR_FIELD(PTR_GEN,		0,  8)
984 
985 #define PTR_CHECK_DEV		((1 << 12) - 1)
986 
987 #define PTR(gen, offset, dev)						\
988 	((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
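
/*
 * Sketch of how the pointer encoding fits together: PTR() packs the three
 * fields into a single 64 bit word and the PTR_* accessors above unpack it,
 * e.g. with k.ptr[0] = PTR(12, 2048, 1), PTR_DEV(&k, 0) is 1,
 * PTR_OFFSET(&k, 0) is 2048 (sectors) and PTR_GEN(&k, 0) is 12.
 */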
989 
990 static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
991 {
992 	return s >> c->bucket_bits;
993 }
994 
995 static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
996 {
997 	return ((sector_t) b) << c->bucket_bits;
998 }
999 
1000 static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
1001 {
1002 	return s & (c->sb.bucket_size - 1);
1003 }
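
/*
 * E.g. with 1024 sector buckets (bucket_bits == 10), sector 5000 lies in
 * bucket 4 (sectors 4096..5119), at offset 904 within that bucket.
 */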
1004 
1005 static inline struct cache *PTR_CACHE(struct cache_set *c,
1006 				      const struct bkey *k,
1007 				      unsigned ptr)
1008 {
1009 	return c->cache[PTR_DEV(k, ptr)];
1010 }
1011 
1012 static inline size_t PTR_BUCKET_NR(struct cache_set *c,
1013 				   const struct bkey *k,
1014 				   unsigned ptr)
1015 {
1016 	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
1017 }
1018 
1019 static inline struct bucket *PTR_BUCKET(struct cache_set *c,
1020 					const struct bkey *k,
1021 					unsigned ptr)
1022 {
1023 	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
1024 }
1025 
1026 /* Btree key macros */
1027 
1028 /*
1029  * The high bit being set is a relic from when we used it to do binary
1030  * searches - it told you where a key started. It's not used anymore,
1031  * and can probably be safely dropped.
1032  */
1033 #define KEY(dev, sector, len)						\
1034 ((struct bkey) {							\
1035 	.high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev),	\
1036 	.low = (sector)							\
1037 })
1038 
1039 static inline void bkey_init(struct bkey *k)
1040 {
1041 	*k = KEY(0, 0, 0);
1042 }
1043 
1044 #define KEY_START(k)		(KEY_OFFSET(k) - KEY_SIZE(k))
1045 #define START_KEY(k)		KEY(KEY_INODE(k), KEY_START(k), 0)
1046 #define MAX_KEY			KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
1047 #define ZERO_KEY		KEY(0, 0, 0)
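
/*
 * Example (a sketch): an extent in inode 5 covering sectors 96..127 is
 * KEY(5, 128, 32); KEY_OFFSET() is the end (128), KEY_SIZE() the length (32),
 * so KEY_START() recovers 96 and START_KEY() is the corresponding zero size
 * key at the start of the extent.
 */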
1048 
1049 /*
1050  * This is used for various on disk data structures - cache_sb, prio_set, bset,
1051  * jset: The checksum is _always_ the first 8 bytes of these structs
1052  */
1053 #define csum_set(i)							\
1054 	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
1055 	      ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
1056 
1057 /* Error handling macros */
1058 
1059 #define btree_bug(b, ...)						\
1060 do {									\
1061 	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
1062 		dump_stack();						\
1063 } while (0)
1064 
1065 #define cache_bug(c, ...)						\
1066 do {									\
1067 	if (bch_cache_set_error(c, __VA_ARGS__))			\
1068 		dump_stack();						\
1069 } while (0)
1070 
1071 #define btree_bug_on(cond, b, ...)					\
1072 do {									\
1073 	if (cond)							\
1074 		btree_bug(b, __VA_ARGS__);				\
1075 } while (0)
1076 
1077 #define cache_bug_on(cond, c, ...)					\
1078 do {									\
1079 	if (cond)							\
1080 		cache_bug(c, __VA_ARGS__);				\
1081 } while (0)
1082 
1083 #define cache_set_err_on(cond, c, ...)					\
1084 do {									\
1085 	if (cond)							\
1086 		bch_cache_set_error(c, __VA_ARGS__);			\
1087 } while (0)
1088 
1089 /* Looping macros */
1090 
1091 #define for_each_cache(ca, cs, iter)					\
1092 	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
1093 
1094 #define for_each_bucket(b, ca)						\
1095 	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
1096 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
1097 
1098 static inline void __bkey_put(struct cache_set *c, struct bkey *k)
1099 {
1100 	unsigned i;
1101 
1102 	for (i = 0; i < KEY_PTRS(k); i++)
1103 		atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
1104 }
1105 
1106 static inline void cached_dev_put(struct cached_dev *dc)
1107 {
1108 	if (atomic_dec_and_test(&dc->count))
1109 		schedule_work(&dc->detach);
1110 }
1111 
1112 static inline bool cached_dev_get(struct cached_dev *dc)
1113 {
1114 	if (!atomic_inc_not_zero(&dc->count))
1115 		return false;
1116 
1117 	/* Paired with the mb in cached_dev_attach */
1118 	smp_mb__after_atomic_inc();
1119 	return true;
1120 }
1121 
1122 /*
1123  * bucket_gc_gen() returns the difference between the bucket's current gen and
1124  * the oldest gen of any pointer into that bucket in the btree (last_gc).
1125  *
1126  * bucket_disk_gen() returns the difference between the current gen and the gen
1127  * on disk; they're both used to make sure gens don't wrap around.
1128  */
1129 
1130 static inline uint8_t bucket_gc_gen(struct bucket *b)
1131 {
1132 	return b->gen - b->last_gc;
1133 }
1134 
1135 static inline uint8_t bucket_disk_gen(struct bucket *b)
1136 {
1137 	return b->gen - b->disk_gen;
1138 }
1139 
1140 #define BUCKET_GC_GEN_MAX	96U
1141 #define BUCKET_DISK_GEN_MAX	64U
1142 
1143 #define kobj_attribute_write(n, fn)					\
1144 	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
1145 
1146 #define kobj_attribute_rw(n, show, store)				\
1147 	static struct kobj_attribute ksysfs_##n =			\
1148 		__ATTR(n, S_IWUSR|S_IRUSR, show, store)
1149 
1150 static inline void wake_up_allocators(struct cache_set *c)
1151 {
1152 	struct cache *ca;
1153 	unsigned i;
1154 
1155 	for_each_cache(ca, c, i)
1156 		wake_up_process(ca->alloc_thread);
1157 }
1158 
1159 /* Forward declarations */
1160 
1161 void bch_count_io_errors(struct cache *, int, const char *);
1162 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
1163 			      int, const char *);
1164 void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
1165 void bch_bbio_free(struct bio *, struct cache_set *);
1166 struct bio *bch_bbio_alloc(struct cache_set *);
1167 
1168 struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
1169 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
1170 void __bch_submit_bbio(struct bio *, struct cache_set *);
1171 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
1172 
1173 uint8_t bch_inc_gen(struct cache *, struct bucket *);
1174 void bch_rescale_priorities(struct cache_set *, int);
1175 bool bch_bucket_add_unused(struct cache *, struct bucket *);
1176 
1177 long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
1178 void bch_bucket_free(struct cache_set *, struct bkey *);
1179 
1180 int __bch_bucket_alloc_set(struct cache_set *, unsigned,
1181 			   struct bkey *, int, struct closure *);
1182 int bch_bucket_alloc_set(struct cache_set *, unsigned,
1183 			 struct bkey *, int, struct closure *);
1184 
1185 __printf(2, 3)
1186 bool bch_cache_set_error(struct cache_set *, const char *, ...);
1187 
1188 void bch_prio_write(struct cache *);
1189 void bch_write_bdev_super(struct cached_dev *, struct closure *);
1190 
1191 extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
1192 extern const char * const bch_cache_modes[];
1193 extern struct mutex bch_register_lock;
1194 extern struct list_head bch_cache_sets;
1195 
1196 extern struct kobj_type bch_cached_dev_ktype;
1197 extern struct kobj_type bch_flash_dev_ktype;
1198 extern struct kobj_type bch_cache_set_ktype;
1199 extern struct kobj_type bch_cache_set_internal_ktype;
1200 extern struct kobj_type bch_cache_ktype;
1201 
1202 void bch_cached_dev_release(struct kobject *);
1203 void bch_flash_dev_release(struct kobject *);
1204 void bch_cache_set_release(struct kobject *);
1205 void bch_cache_release(struct kobject *);
1206 
1207 int bch_uuid_write(struct cache_set *);
1208 void bcache_write_super(struct cache_set *);
1209 
1210 int bch_flash_dev_create(struct cache_set *c, uint64_t size);
1211 
1212 int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
1213 void bch_cached_dev_detach(struct cached_dev *);
1214 void bch_cached_dev_run(struct cached_dev *);
1215 void bcache_device_stop(struct bcache_device *);
1216 
1217 void bch_cache_set_unregister(struct cache_set *);
1218 void bch_cache_set_stop(struct cache_set *);
1219 
1220 struct cache_set *bch_cache_set_alloc(struct cache_sb *);
1221 void bch_btree_cache_free(struct cache_set *);
1222 int bch_btree_cache_alloc(struct cache_set *);
1223 void bch_moving_init_cache_set(struct cache_set *);
1224 
1225 int bch_cache_allocator_start(struct cache *ca);
1226 void bch_cache_allocator_exit(struct cache *ca);
1227 int bch_cache_allocator_init(struct cache *ca);
1228 
1229 void bch_debug_exit(void);
1230 int bch_debug_init(struct kobject *);
1231 void bch_writeback_exit(void);
1232 int bch_writeback_init(void);
1233 void bch_request_exit(void);
1234 int bch_request_init(void);
1235 void bch_btree_exit(void);
1236 int bch_btree_init(void);
1237 
1238 #endif /* _BCACHE_H */
1239