Lines matching references to bio in drivers/md/bcache/request.c
40 static void bio_csum(struct bio *bio, struct bkey *k) in bio_csum() argument
46 bio_for_each_segment(bv, bio, iter) { in bio_csum()
111 struct bio *bio = op->bio; in bch_data_invalidate() local
114 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); in bch_data_invalidate()
116 while (bio_sectors(bio)) { in bch_data_invalidate()
117 unsigned int sectors = min(bio_sectors(bio), in bch_data_invalidate()
123 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate()
124 bio->bi_iter.bi_size -= sectors << 9; in bch_data_invalidate()
128 bio->bi_iter.bi_sector, in bch_data_invalidate()
134 bio_put(bio); in bch_data_invalidate()
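
The loop in bch_data_invalidate() turns a discard into deletion keys: each pass peels a bounded chunk off the bio, advances the iterator, and appends a key covering the chunk. A minimal user-space sketch of that logic (MAX_KEY_SECTORS is a made-up stand-in for the kernel's 1U << (KEY_SIZE_BITS - 1) bound, and printf stands in for appending to op->insert_keys):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_KEY_SECTORS 128u    /* placeholder for 1U << (KEY_SIZE_BITS - 1) */

    static void invalidate(uint64_t sector, unsigned int sectors)
    {
        while (sectors) {
            unsigned int n = sectors < MAX_KEY_SECTORS ? sectors
                                                       : MAX_KEY_SECTORS;

            /* Advance first: bcache keys record their END offset, which
             * is why the kernel bumps bi_sector (and drops bi_size by
             * n << 9, a sector being 512 bytes) before building the key. */
            sector  += n;
            sectors -= n;
            printf("KEY(offset=%llu, size=%u) [deleted]\n",
                   (unsigned long long)sector, n);
        }
    }
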
169 static void bch_data_insert_endio(struct bio *bio) in bch_data_insert_endio() argument
171 struct closure *cl = bio->bi_private; in bch_data_insert_endio()
174 if (bio->bi_status) { in bch_data_insert_endio()
177 op->status = bio->bi_status; in bch_data_insert_endio()
184 bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache"); in bch_data_insert_endio()
190 struct bio *bio = op->bio, *n; in CLOSURE_CALLBACK() local
195 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) in CLOSURE_CALLBACK()
202 bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA); in CLOSURE_CALLBACK()
220 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); in CLOSURE_CALLBACK()
222 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), in CLOSURE_CALLBACK()
227 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split); in CLOSURE_CALLBACK()
249 } while (n != bio); in CLOSURE_CALLBACK()
279 bio_put(bio); in CLOSURE_CALLBACK()
312 trace_bcache_write(op->c, op->inode, op->bio, in CLOSURE_CALLBACK()
316 bio_get(op->bio); in CLOSURE_CALLBACK()
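
The loop above is the heart of the insert path: bch_alloc_sectors() decides how many contiguous sectors fit into one extent (KEY_SIZE(k)), bio_next_split() peels exactly that much off the front of the bio, and iteration stops when the split returns the bio itself, the `while (n != bio)` test at line 249. A self-contained sketch with a toy allocator in place of bch_alloc_sectors():

    #include <stdint.h>
    #include <stdio.h>

    /* Toy allocator: grants at most a fixed-size extent per call; the real
     * allocator places the extent in a bucket and may grant fewer sectors
     * than requested. */
    static unsigned int alloc_sectors(unsigned int want)
    {
        return want < 16 ? want : 16;
    }

    static void insert_loop(uint64_t sector, unsigned int sectors)
    {
        while (sectors) {
            unsigned int granted = alloc_sectors(sectors);

            /* bio_next_split(bio, KEY_SIZE(k), ...) in the kernel: the
             * granted span is split off and submitted against the new key. */
            printf("submit %u sectors at %llu\n",
                   granted, (unsigned long long)sector);
            sector  += granted;
            sectors -= granted;
        }
    }
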
363 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) in check_should_bypass() argument
372 (bio_op(bio) == REQ_OP_DISCARD)) in check_should_bypass()
392 op_is_write(bio_op(bio)))) in check_should_bypass()
405 if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) { in check_should_bypass()
406 if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) && in check_should_bypass()
411 if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) || in check_should_bypass()
412 bio_sectors(bio) & (c->cache->sb.block_size - 1)) { in check_should_bypass()
430 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) in check_should_bypass()
431 if (i->last == bio->bi_iter.bi_sector && in check_should_bypass()
440 if (i->sequential + bio->bi_iter.bi_size > i->sequential) in check_should_bypass()
441 i->sequential += bio->bi_iter.bi_size; in check_should_bypass()
443 i->last = bio_end_sector(bio); in check_should_bypass()
458 trace_bcache_bypass_sequential(bio); in check_should_bypass()
463 trace_bcache_bypass_congested(bio); in check_should_bypass()
468 bch_rescale_priorities(c, bio_sectors(bio)); in check_should_bypass()
471 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio)); in check_should_bypass()
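
check_should_bypass() decides whether an I/O skips the cache entirely. The hash lookup above implements sequential-stream detection: an I/O starting exactly where a tracked stream ended (i->last == bi_sector) extends that stream's byte count, and long enough streams bypass. A user-space model with a made-up table size and hash; the real code also recycles entries on an LRU, expires them, and guards the counter with the odd-looking `if (i->sequential + size > i->sequential)` overflow test seen at line 440:

    #include <stdbool.h>
    #include <stdint.h>

    #define NBUCKETS 64u                    /* made-up size; iohash() differs */

    struct io_track { uint64_t last; uint64_t sequential; };
    static struct io_track tracks[NBUCKETS];

    static bool should_bypass(uint64_t sector, uint32_t bytes, uint64_t cutoff)
    {
        struct io_track *i = &tracks[sector % NBUCKETS];

        if (i->last != sector)
            i->sequential = 0;              /* not contiguous: new stream */
        if (i->sequential + bytes > i->sequential)
            i->sequential += bytes;         /* overflow-guarded accumulate */
        i->last = sector + (bytes >> 9);    /* bio_end_sector(): 512B sectors */
        return cutoff && i->sequential >= cutoff;
    }
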
481 struct bbio bio; member
482 struct bio *orig_bio;
483 struct bio *cache_miss;
499 static void bch_cache_read_endio(struct bio *bio) in bch_cache_read_endio() argument
501 struct bbio *b = container_of(bio, struct bbio, bio); in bch_cache_read_endio()
502 struct closure *cl = bio->bi_private; in bch_cache_read_endio()
512 if (bio->bi_status) in bch_cache_read_endio()
513 s->iop.status = bio->bi_status; in bch_cache_read_endio()
520 bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache"); in bch_cache_read_endio()
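
bch_cache_read_endio() only receives the struct bio, yet recovers its surrounding state with container_of(): struct bbio embeds the bio (see the `struct bbio bio;` member above), so the enclosing object is found by subtracting the member's offset. A self-contained illustration with stand-in types:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bio  { int status; };
    struct bbio { long key; struct bio bio; };  /* key the bio was sent for */

    static long key_of(struct bio *b)
    {
        struct bbio *bb = container_of(b, struct bbio, bio);

        return bb->key;
    }
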
530 struct bio *n, *bio = &s->bio.bio; in cache_lookup_fn() local
534 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) in cache_lookup_fn()
538 KEY_START(k) > bio->bi_iter.bi_sector) { in cache_lookup_fn()
539 unsigned int bio_sectors = bio_sectors(bio); in cache_lookup_fn()
542 KEY_START(k) - bio->bi_iter.bi_sector) in cache_lookup_fn()
544 int ret = s->d->cache_miss(b, s, bio, sectors); in cache_lookup_fn()
564 n = bio_next_split(bio, min_t(uint64_t, INT_MAX, in cache_lookup_fn()
565 KEY_OFFSET(k) - bio->bi_iter.bi_sector), in cache_lookup_fn()
568 bio_key = &container_of(n, struct bbio, bio)->key; in cache_lookup_fn()
589 return n == bio ? MAP_DONE : MAP_CONTINUE; in cache_lookup_fn()
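
cache_lookup_fn() is the per-key callback of a btree map walk: gaps before a key are cache misses handed to d->cache_miss(), overlapping ranges are split off with bio_next_split() and read from the cache, and MAP_DONE ends the walk once the bio is consumed. A control-flow skeleton, with the inode checks, the INT_MAX clamps, and the -EINTR retry path omitted:

    #include <stdint.h>

    enum map_ret { MAP_DONE, MAP_CONTINUE };

    /* One btree key, simplified: bcache keys store their END offset;
     * KEY_START() in the listing is that offset minus the key's size. */
    struct extent { uint64_t start, end; };

    struct req { uint64_t sector, end; };   /* remaining span of the bio */

    static enum map_ret lookup_one(struct req *r, const struct extent *k)
    {
        if (k->end <= r->sector)
            return MAP_CONTINUE;            /* key entirely before the bio */

        if (k->start > r->sector) {
            /* Hole before this key: a cache miss, read from the backing
             * device (d->cache_miss() in the kernel). */
            r->sector = k->start < r->end ? k->start : r->end;
            return r->sector < r->end ? MAP_CONTINUE : MAP_DONE;
        }

        /* Overlap: split off the covered span (bio_next_split()) and
         * read it from the cache. */
        r->sector = k->end < r->end ? k->end : r->end;
        return r->sector < r->end ? MAP_CONTINUE : MAP_DONE;
    }
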
595 struct bio *bio = &s->bio.bio; in CLOSURE_CALLBACK() local
602 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), in CLOSURE_CALLBACK()
635 static void request_endio(struct bio *bio) in request_endio() argument
637 struct closure *cl = bio->bi_private; in request_endio()
639 if (bio->bi_status) { in request_endio()
642 s->iop.status = bio->bi_status; in request_endio()
647 bio_put(bio); in request_endio()
651 static void backing_request_endio(struct bio *bio) in backing_request_endio() argument
653 struct closure *cl = bio->bi_private; in backing_request_endio()
655 if (bio->bi_status) { in backing_request_endio()
667 bio->bi_opf & REQ_PREFLUSH)) { in backing_request_endio()
669 dc->bdev, bio->bi_status); in backing_request_endio()
672 s->iop.status = bio->bi_status; in backing_request_endio()
676 bch_count_backing_io_errors(dc, bio); in backing_request_endio()
679 bio_put(bio); in backing_request_endio()
697 struct bio *orig_bio, in do_bio_hook()
700 struct bio *bio = &s->bio.bio; in do_bio_hook() local
702 bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO); in do_bio_hook()
709 bio->bi_end_io = end_io_fn; in do_bio_hook()
710 bio->bi_private = &s->cl; in do_bio_hook()
712 bio_cnt_set(bio, 3); in do_bio_hook()
721 if (s->iop.bio) in CLOSURE_CALLBACK()
722 bio_put(s->iop.bio); in CLOSURE_CALLBACK()
729 static inline struct search *search_alloc(struct bio *bio, in search_alloc() argument
738 do_bio_hook(s, bio, request_endio); in search_alloc()
741 s->orig_bio = bio; in search_alloc()
746 s->write = op_is_write(bio_op(bio)); in search_alloc()
752 s->iop.bio = NULL; in search_alloc()
758 s->iop.flush_journal = op_is_flush(bio->bi_opf); in search_alloc()
784 if (s->iop.bio) in CLOSURE_CALLBACK()
785 bio_free_pages(s->iop.bio); in CLOSURE_CALLBACK()
793 struct bio *bio = &s->bio.bio; in CLOSURE_CALLBACK() local
812 closure_bio_submit(s->iop.c, bio, cl); in CLOSURE_CALLBACK()
826 if (s->iop.bio) in CLOSURE_CALLBACK()
827 bio_free_pages(s->iop.bio); in CLOSURE_CALLBACK()
846 if (s->iop.bio) { in CLOSURE_CALLBACK()
847 bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ); in CLOSURE_CALLBACK()
848 s->iop.bio->bi_iter.bi_sector = in CLOSURE_CALLBACK()
850 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; in CLOSURE_CALLBACK()
851 bio_clone_blkg_association(s->iop.bio, s->cache_miss); in CLOSURE_CALLBACK()
852 bch_bio_map(s->iop.bio, NULL); in CLOSURE_CALLBACK()
854 bio_copy_data(s->cache_miss, s->iop.bio); in CLOSURE_CALLBACK()
866 if (s->iop.bio && in CLOSURE_CALLBACK()
886 else if (s->iop.bio || verify(dc)) in CLOSURE_CALLBACK()
893 struct bio *bio, unsigned int sectors) in cached_dev_cache_miss() argument
897 struct bio *miss, *cache_bio; in cached_dev_cache_miss()
903 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); in cached_dev_cache_miss()
904 ret = miss == bio ? MAP_DONE : MAP_CONTINUE; in cached_dev_cache_miss()
911 s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio)); in cached_dev_cache_miss()
914 bio->bi_iter.bi_sector + s->insert_bio_sectors, in cached_dev_cache_miss()
923 miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO, in cached_dev_cache_miss()
927 ret = miss == bio ? MAP_DONE : -EINTR; in cached_dev_cache_miss()
946 s->iop.bio = cache_bio; in cached_dev_cache_miss()
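
When a read misses, the amount inserted into the cache is clamped three ways, which is what the min3() at line 911 expresses; a separate cache_bio of exactly that size is then allocated and remembered as s->iop.bio so data read from the backing device can also be written into the cache. A trivial model of the clamp (parameter names are descriptive, not the kernel's):

    static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
    {
        unsigned int m = a < b ? a : b;

        return m < c ? m : c;
    }

    static unsigned int insert_sectors(unsigned int size_limit,       /* cap on one insert */
                                       unsigned int gap_sectors,      /* hole before next key */
                                       unsigned int bio_sectors_left) /* rest of the request */
    {
        return min3u(size_limit, gap_sectors, bio_sectors_left);
    }
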
984 struct bio *bio = &s->bio.bio; in cached_dev_write() local
985 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); in cached_dev_write()
986 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); in cached_dev_write()
1007 if (bio_op(bio) == REQ_OP_DISCARD) in cached_dev_write()
1018 s->iop.bio = s->orig_bio; in cached_dev_write()
1019 bio_get(s->iop.bio); in cached_dev_write()
1021 if (bio_op(bio) == REQ_OP_DISCARD && in cached_dev_write()
1026 bio->bi_end_io = backing_request_endio; in cached_dev_write()
1027 closure_bio_submit(s->iop.c, bio, cl); in cached_dev_write()
1031 s->iop.bio = bio; in cached_dev_write()
1033 if (bio->bi_opf & REQ_PREFLUSH) { in cached_dev_write()
1038 struct bio *flush; in cached_dev_write()
1040 flush = bio_alloc_bioset(bio->bi_bdev, 0, in cached_dev_write()
1053 s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, in cached_dev_write()
1056 bio->bi_end_io = backing_request_endio; in cached_dev_write()
1057 closure_bio_submit(s->iop.c, bio, cl); in cached_dev_write()
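
cached_dev_write() reduces to a three-way policy decision: a bypassed write goes only to the backing device, after invalidating any stale cached copy of [start, end); a writeback write goes only to the cache, marked dirty; anything else is writethrough, sending the original bio to the backing device and a clone (the bio_alloc_clone() at line 1053) through the cache insert path. A sketch with stubs for the targets, omitting the REQ_PREFLUSH and REQ_OP_DISCARD special cases:

    #include <stdbool.h>

    static void submit_backing(void) {}
    static void submit_cache_insert(bool dirty) { (void)dirty; }
    static void invalidate_cached_range(void) {}

    static void cached_dev_write_sketch(bool bypass, bool writeback)
    {
        if (bypass) {
            invalidate_cached_range();   /* stale cached data must not survive */
            submit_backing();            /* s->iop.bio = s->orig_bio above */
        } else if (writeback) {
            submit_cache_insert(true);   /* dirty; flushed to backing later */
        } else {
            submit_backing();            /* writethrough: both targets; */
            submit_cache_insert(false);  /* the cache copy is the clone */
        }
    }
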
1068 struct bio *bio = &s->bio.bio; in CLOSURE_CALLBACK() local
1074 bio->bi_end_io = backing_request_endio; in CLOSURE_CALLBACK()
1075 closure_bio_submit(s->iop.c, bio, cl); in CLOSURE_CALLBACK()
1088 static void detached_dev_end_io(struct bio *bio) in detached_dev_end_io() argument
1092 ddip = bio->bi_private; in detached_dev_end_io()
1093 bio->bi_end_io = ddip->bi_end_io; in detached_dev_end_io()
1094 bio->bi_private = ddip->bi_private; in detached_dev_end_io()
1097 bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev); in detached_dev_end_io()
1099 if (bio->bi_status) { in detached_dev_end_io()
1103 bch_count_backing_io_errors(dc, bio); in detached_dev_end_io()
1107 bio_endio(bio); in detached_dev_end_io()
1110 static void detached_dev_do_request(struct bcache_device *d, struct bio *bio, in detached_dev_do_request() argument
1123 bio->bi_status = BLK_STS_RESOURCE; in detached_dev_do_request()
1124 bio_endio(bio); in detached_dev_do_request()
1132 ddip->bi_end_io = bio->bi_end_io; in detached_dev_do_request()
1133 ddip->bi_private = bio->bi_private; in detached_dev_do_request()
1134 bio->bi_end_io = detached_dev_end_io; in detached_dev_do_request()
1135 bio->bi_private = ddip; in detached_dev_do_request()
1137 if ((bio_op(bio) == REQ_OP_DISCARD) && in detached_dev_do_request()
1139 detached_dev_end_io(bio); in detached_dev_do_request()
1141 submit_bio_noacct(bio); in detached_dev_do_request()
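
The detached path intercepts completion the same way do_bio_hook() does: save the caller's bi_end_io/bi_private, substitute our own, and on completion restore both, do the accounting, and hand the bio back to the original callback. The same pattern with stand-in types:

    #include <stdlib.h>

    struct io;
    typedef void (*end_io_fn)(struct io *);

    struct io   { end_io_fn end_io; void *private; int status; };
    struct hook { end_io_fn saved_end_io; void *saved_private; };

    static void hooked_end_io(struct io *io)
    {
        struct hook *h = io->private;

        io->end_io  = h->saved_end_io;   /* restore original completion */
        io->private = h->saved_private;
        /* ... account the I/O; count errors if io->status != 0 ... */
        free(h);
        io->end_io(io);                  /* complete as the caller expected */
    }

    static int hook_io(struct io *io)
    {
        struct hook *h = malloc(sizeof(*h));

        if (!h)
            return -1;                   /* kernel: BLK_STS_RESOURCE, line 1123 */
        h->saved_end_io  = io->end_io;
        h->saved_private = io->private;
        io->end_io  = hooked_end_io;
        io->private = h;
        return 0;
    }
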
1184 void cached_dev_submit_bio(struct bio *bio) in cached_dev_submit_bio() argument
1187 struct block_device *orig_bdev = bio->bi_bdev; in cached_dev_submit_bio()
1191 int rw = bio_data_dir(bio); in cached_dev_submit_bio()
1195 bio->bi_status = BLK_STS_IOERR; in cached_dev_submit_bio()
1196 bio_endio(bio); in cached_dev_submit_bio()
1215 start_time = bio_start_io_acct(bio); in cached_dev_submit_bio()
1217 bio_set_dev(bio, dc->bdev); in cached_dev_submit_bio()
1218 bio->bi_iter.bi_sector += dc->sb.data_offset; in cached_dev_submit_bio()
1221 s = search_alloc(bio, d, orig_bdev, start_time); in cached_dev_submit_bio()
1222 trace_bcache_request_start(s->d, bio); in cached_dev_submit_bio()
1224 if (!bio->bi_iter.bi_size) { in cached_dev_submit_bio()
1233 s->iop.bypass = check_should_bypass(dc, bio); in cached_dev_submit_bio()
1242 detached_dev_do_request(d, bio, orig_bdev, start_time); in cached_dev_submit_bio()
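
The only arithmetic in this hot path is the remap at line 1218: user-visible sector N of the cached device lives at N + data_offset on the backing device, because bcache's superblock occupies the start of the disk. Spelled out:

    #include <stdint.h>

    static uint64_t backing_sector(uint64_t cached_dev_sector,
                                   uint64_t data_offset)
    {
        /* dc->sb.data_offset: sectors reserved for bcache's superblock
         * at the front of the backing disk. */
        return cached_dev_sector + data_offset;
    }
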
1266 struct bio *bio, unsigned int sectors) in flash_dev_cache_miss() argument
1268 unsigned int bytes = min(sectors, bio_sectors(bio)) << 9; in flash_dev_cache_miss()
1270 swap(bio->bi_iter.bi_size, bytes); in flash_dev_cache_miss()
1271 zero_fill_bio(bio); in flash_dev_cache_miss()
1272 swap(bio->bi_iter.bi_size, bytes); in flash_dev_cache_miss()
1274 bio_advance(bio, bytes); in flash_dev_cache_miss()
1276 if (!bio->bi_iter.bi_size) in flash_dev_cache_miss()
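
flash_dev_cache_miss() zero-fills misses with a swap trick: temporarily shrinking bi_iter.bi_size makes zero_fill_bio() touch only the missing prefix, then the size is restored and the iterator advanced past the zeroed range. Flash-only volumes have no backing device, so a miss simply reads as zeroes. A user-space model:

    #include <stdint.h>
    #include <string.h>

    struct iter { uint8_t *data; uint32_t size; };

    static void zero_fill(struct iter *it)   /* zero_fill_bio() stand-in */
    {
        memset(it->data, 0, it->size);
    }

    static void miss_zero(struct iter *it, uint32_t bytes)
    {
        uint32_t saved = it->size;

        if (bytes > saved)
            bytes = saved;          /* min(sectors, bio_sectors(bio)) << 9 */
        it->size = bytes;           /* swap(bi_iter.bi_size, bytes) */
        zero_fill(it);              /* zeroes only the missing prefix */
        it->size = saved;           /* swap back */
        it->data += bytes;          /* bio_advance(bio, bytes) */
        it->size -= bytes;
    }
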
1292 void flash_dev_submit_bio(struct bio *bio) in flash_dev_submit_bio() argument
1296 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; in flash_dev_submit_bio()
1299 bio->bi_status = BLK_STS_IOERR; in flash_dev_submit_bio()
1300 bio_endio(bio); in flash_dev_submit_bio()
1304 s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio)); in flash_dev_submit_bio()
1306 bio = &s->bio.bio; in flash_dev_submit_bio()
1308 trace_bcache_request_start(s->d, bio); in flash_dev_submit_bio()
1310 if (!bio->bi_iter.bi_size) { in flash_dev_submit_bio()
1318 } else if (bio_data_dir(bio)) { in flash_dev_submit_bio()
1320 &KEY(d->id, bio->bi_iter.bi_sector, 0), in flash_dev_submit_bio()
1321 &KEY(d->id, bio_end_sector(bio), 0)); in flash_dev_submit_bio()
1323 s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0; in flash_dev_submit_bio()
1325 s->iop.bio = bio; in flash_dev_submit_bio()
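
The tail of flash_dev_submit_bio() dispatches three ways: an empty bio is a bare flush handed off to the journal (flash_dev_nodata() in the kernel, which does not appear in this listing), a write is a btree key insert (a discard inserts keys but bypasses the data, matching the REQ_OP_DISCARD test at line 1323), and a read runs the cache lookup whose misses zero-fill as above. A sketch with stub targets:

    #include <stdbool.h>

    static void journal_flush(void) {}
    static void insert_keys(bool data_bypassed) { (void)data_bypassed; }
    static void cache_lookup_zero_fill_misses(void) {}

    static void flash_dev_dispatch(bool empty, bool write, bool discard)
    {
        if (empty)
            journal_flush();         /* bare flush: wait on the journal */
        else if (write)
            insert_keys(discard);    /* discard: keys only, no data */
        else
            cache_lookup_zero_fill_misses();
    }
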