xref: /linux/drivers/md/bcache/request.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
	return dc->verify;
}

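/*
 * Checksum the bio's data and stash the result in the key: the checksum
 * lives in the u64 slot just past the last pointer, with the top bit
 * cleared by the mask below.
 */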
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, we might already be waiting on another journal
	 * write - we can't wait on more than one journal write at a time.
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bch_data_insert_start() and bch_data_invalidate() will insert
	 * the keys created so far and finish the rest when the keylist is
	 * empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

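/*
 * Instead of writing the data, insert zero-pointer keys covering the bypassed
 * range; they overwrite whatever the cache held for that range, so stale
 * cached data can't be read back later.
 */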
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %u sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = bio->bi_error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

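	/*
	 * Write loop: each pass grabs as much contiguous space as the
	 * allocator will give us, builds a key pointing at it, splits that
	 * much off the front of the bio and submits it, until the whole bio
	 * has been issued.
	 */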
	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 2 u64s for the key header, 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if op->writeback is true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset, and
 * op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
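
/*
 * A typical caller (see cached_dev_write() and flash_dev_make_request()
 * below) fills in op->c, op->inode, op->bio and the bypass/writeback flags,
 * then kicks this off with closure_call(&s->iop.cl, bch_data_insert, NULL, cl).
 */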

/* Congested? */

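/*
 * Returns 0 when the cache set isn't congested; otherwise a small positive
 * threshold, decayed over time and dithered with a few random bits, that
 * check_should_bypass() compares against the size of recent sequential IO
 * to decide whether to bypass the cache.
 */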
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

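/*
 * Fold the size of the task's last run of sequential IO into a per-task
 * exponentially weighted moving average, then reset the running counter.
 */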
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

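/*
 * Decide whether this IO should skip the cache entirely: detached or
 * discard requests, writes in the wrong cache mode, unaligned IO, large
 * sequential streams and congested cache devices all go straight to the
 * backing device.
 */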
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    op_is_write(bio->bi_opf) &&
	    op_is_sync(bio->bi_opf))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error without failing the bbio, so
	 * the race doesn't get counted against the cache device, but we'll
	 * still reread the data from the backing device.
	 */

	if (bio->bi_error)
		s->iop.error = bio->bi_error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

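/*
 * Walk the btree over the range covered by this request, calling
 * cache_lookup_fn for each key; -EAGAIN means the btree code wants us to
 * retry from a workqueue.
 */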
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = bio->bi_error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(bio_data_dir(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_error = s->iop.error;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

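/*
 * Set up the search's embedded bio as a clone of the original; the bio's
 * reference count is raised up front because the same bio may be submitted
 * more than once (the cache lookup, then a retry against the backing device
 * in cached_dev_read_error()).
 */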
static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= op_is_write(bio_op(bio));
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
	s->iop.wq		= bcache_wq;

	return s;
}

676 
677 static void cached_dev_bio_complete(struct closure *cl)
678 {
679 	struct search *s = container_of(cl, struct search, cl);
680 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
681 
682 	search_free(cl);
683 	cached_dev_put(dc);
684 }
685 
/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

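/*
 * Handle the part of a read the cache couldn't satisfy: read it from the
 * backing device and, unless we're bypassing the cache, read ahead a little
 * extra and queue the data for insertion into the cache.
 */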
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_bdev		= miss->bi_bdev;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback; force this write to go via writeback.
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok.
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if ((bio_op(bio) != REQ_OP_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

			closure_bio_submit(flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio_op(bio) == REQ_OP_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio);
		else
			generic_make_request(bio);
	}

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

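/*
 * Flash only volumes have no backing device: anything the btree lookup
 * doesn't find simply doesn't exist, so a cache "miss" just zero fills that
 * part of the bio.
 */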
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
					     struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}