Lines matching refs: rbio
152 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio) in promote_start() argument
156 trace_and_count(op->write.op.c, read_promote, &rbio->bio); in promote_start()
159 BUG_ON(!rbio->bounce); in promote_start()
160 BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs); in promote_start()
162 memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec, in promote_start()
163 sizeof(struct bio_vec) * rbio->bio.bi_vcnt); in promote_start()
164 swap(bio->bi_vcnt, rbio->bio.bi_vcnt); in promote_start()
166 bch2_data_update_read_done(&op->write, rbio->pick.crc); in promote_start()
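The promote_start() lines above hand the read's bounce pages to the background write without copying data: only the bio_vec descriptors are copied, and the vec counts are swapped. A minimal userspace sketch of that handoff, with illustrative types that are not the kernel's:

#include <string.h>

/* stand-ins for struct bio / bio_vec */
struct vec  { void *page; unsigned len, off; };
struct xbio { struct vec *vecs; unsigned cnt, max; };

#define SWAP(a, b) do { typeof(a) _t = (a); (a) = (b); (b) = _t; } while (0)

static void handoff(struct xbio *dst, struct xbio *src)
{
	/* same invariant as BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs) */
	if (src->cnt > dst->max)
		return;
	/* copy descriptors, not data, then trade ownership of the pages */
	memcpy(dst->vecs, src->vecs, sizeof(struct vec) * src->cnt);
	SWAP(dst->cnt, src->cnt);
}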
176 struct bch_read_bio **rbio, in __promote_alloc() argument
201 *rbio = kzalloc(sizeof(struct bch_read_bio) + in __promote_alloc()
204 if (!*rbio) { in __promote_alloc()
209 rbio_init(&(*rbio)->bio, opts); in __promote_alloc()
210 bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0); in __promote_alloc()
212 if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) { in __promote_alloc()
217 (*rbio)->bounce = true; in __promote_alloc()
218 (*rbio)->split = true; in __promote_alloc()
219 (*rbio)->kmalloc = true; in __promote_alloc()
267 if (*rbio) in __promote_alloc()
268 bio_free_pages(&(*rbio)->bio); in __promote_alloc()
269 kfree(*rbio); in __promote_alloc()
270 *rbio = NULL; in __promote_alloc()
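__promote_alloc() uses the common single-allocation idiom: one kzalloc covers the struct plus its inline bio_vecs, pages are then attached to the embedded bio, and the error path (lines 267-270 above) unwinds in reverse order and NULLs the out-pointer so the caller sees the failure. A hedged reconstruction of the idiom in plain C, hypothetical names throughout:

#include <stdlib.h>

struct vec { void *page; };

struct rbio_like {
	unsigned nr_vecs;
	struct vec inline_vecs[];	/* flexible array, like bi_inline_vecs */
};

static int alloc_like(struct rbio_like **out, unsigned pages)
{
	struct rbio_like *r = calloc(1, sizeof(*r) + pages * sizeof(struct vec));

	if (!r)
		return -1;

	for (unsigned i = 0; i < pages; i++) {
		r->inline_vecs[i].page = malloc(4096);
		if (!r->inline_vecs[i].page)
			goto err;
		r->nr_vecs++;
	}
	*out = r;
	return 0;
err:
	while (r->nr_vecs)			/* free pages, then the struct */
		free(r->inline_vecs[--r->nr_vecs].page);
	free(r);
	*out = NULL;				/* caller tests *rbio, as above */
	return -1;
}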
284 struct bch_read_bio **rbio, in promote_alloc() argument
315 k, pos, pick, opts, sectors, rbio, failed); in promote_alloc()
331 struct bch_read_bio *rbio, struct bpos read_pos) in bch2_read_err_msg_trans() argument
334 (subvol_inum) { rbio->subvol, read_pos.inode }, in bch2_read_err_msg_trans()
339 struct bch_read_bio *rbio, struct bpos read_pos) in bch2_read_err_msg() argument
341 bch2_trans_run(c, bch2_read_err_msg_trans(trans, out, rbio, read_pos)); in bch2_read_err_msg()
355 bch2_rbio_parent(struct bch_read_bio *rbio) in bch2_rbio_parent() argument
357 return rbio->split ? rbio->parent : rbio; in bch2_rbio_parent()
361 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn, in bch2_rbio_punt() argument
365 if (context <= rbio->context) { in bch2_rbio_punt()
366 fn(&rbio->work); in bch2_rbio_punt()
368 rbio->work.func = fn; in bch2_rbio_punt()
369 rbio->context = context; in bch2_rbio_punt()
370 queue_work(wq, &rbio->work); in bch2_rbio_punt()
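bch2_rbio_punt() is a context-escalation helper: if execution is already in a context at least as capable as the one the function needs, call it directly; otherwise record the target and defer to a workqueue. A userspace model of the same control flow, with stand-in names (enqueue() plays the role of queue_work()):

enum ctx { CTX_NULL, CTX_HIGHPRI, CTX_UNBOUND };	/* ordered by capability */

struct work { void (*fn)(struct work *); enum ctx context; };

static void enqueue(struct work *w) { (void)w; }	/* queue_work() stand-in */

static void punt(struct work *w, void (*fn)(struct work *), enum ctx context)
{
	if (context <= w->context) {
		fn(w);			/* already heavy enough: run inline */
	} else {
		w->fn = fn;		/* escalate: record target, defer */
		w->context = context;
		enqueue(w);
	}
}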
374 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio) in bch2_rbio_free() argument
376 BUG_ON(rbio->bounce && !rbio->split); in bch2_rbio_free()
378 if (rbio->promote) in bch2_rbio_free()
379 promote_free(rbio->c, rbio->promote); in bch2_rbio_free()
380 rbio->promote = NULL; in bch2_rbio_free()
382 if (rbio->bounce) in bch2_rbio_free()
383 bch2_bio_free_pages_pool(rbio->c, &rbio->bio); in bch2_rbio_free()
385 if (rbio->split) { in bch2_rbio_free()
386 struct bch_read_bio *parent = rbio->parent; in bch2_rbio_free()
388 if (rbio->kmalloc) in bch2_rbio_free()
389 kfree(rbio); in bch2_rbio_free()
391 bio_put(&rbio->bio); in bch2_rbio_free()
393 rbio = parent; in bch2_rbio_free()
396 return rbio; in bch2_rbio_free()
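bch2_rbio_free() collapses a split rbio onto its parent and returns the parent, so callers can keep operating on the surviving bio (as bch2_rbio_retry() and bch2_rbio_error() do below). The ownership shape as a tiny model with hypothetical types:

#include <stdlib.h>

struct node { struct node *parent; int split; };

static struct node *free_to_parent(struct node *n)
{
	if (n->split) {
		struct node *parent = n->parent;

		free(n);		/* child gone ...          */
		n = parent;		/* ... continue on parent  */
	}
	return n;
}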
403 static void bch2_rbio_done(struct bch_read_bio *rbio) in bch2_rbio_done() argument
405 if (rbio->start_time) in bch2_rbio_done()
406 bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read], in bch2_rbio_done()
407 rbio->start_time); in bch2_rbio_done()
408 bio_endio(&rbio->bio); in bch2_rbio_done()
411 static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio, in bch2_read_retry_nodecode() argument
427 bch2_trans_iter_init(trans, &iter, rbio->data_btree, in bch2_read_retry_nodecode()
428 rbio->read_pos, BTREE_ITER_slots); in bch2_read_retry_nodecode()
431 rbio->bio.bi_status = 0; in bch2_read_retry_nodecode()
441 rbio->pick.ptr, in bch2_read_retry_nodecode()
442 rbio->data_pos.offset - in bch2_read_retry_nodecode()
443 rbio->pick.crc.offset)) { in bch2_read_retry_nodecode()
445 rbio->hole = true; in bch2_read_retry_nodecode()
449 ret = __bch2_read_extent(trans, rbio, bvec_iter, in bch2_read_retry_nodecode()
450 rbio->read_pos, in bch2_read_retry_nodecode()
451 rbio->data_btree, in bch2_read_retry_nodecode()
458 bch2_rbio_done(rbio); in bch2_read_retry_nodecode()
464 rbio->bio.bi_status = BLK_STS_IOERR; in bch2_read_retry_nodecode()
470 struct bch_read_bio *rbio = in bch2_rbio_retry() local
472 struct bch_fs *c = rbio->c; in bch2_rbio_retry()
473 struct bvec_iter iter = rbio->bvec_iter; in bch2_rbio_retry()
474 unsigned flags = rbio->flags; in bch2_rbio_retry()
476 .subvol = rbio->subvol, in bch2_rbio_retry()
477 .inum = rbio->read_pos.inode, in bch2_rbio_retry()
481 trace_and_count(c, read_retry, &rbio->bio); in bch2_rbio_retry()
483 if (rbio->retry == READ_RETRY_AVOID) in bch2_rbio_retry()
484 bch2_mark_io_failure(&failed, &rbio->pick); in bch2_rbio_retry()
486 rbio->bio.bi_status = 0; in bch2_rbio_retry()
488 rbio = bch2_rbio_free(rbio); in bch2_rbio_retry()
494 bch2_read_retry_nodecode(c, rbio, iter, &failed, flags); in bch2_rbio_retry()
499 __bch2_read(c, rbio, iter, inum, &failed, flags); in bch2_rbio_retry()
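Note the ordering in bch2_rbio_retry(): the bvec_iter, flags, subvol, and inode are snapshotted out of the rbio before bch2_rbio_free() potentially collapses it to its parent, and the resubmission then uses the snapshots rather than fields of the (possibly different) surviving rbio. Compressed into a model with stand-in types:

struct state { int iter, flags; };
struct req   { struct state s; int status; };

static struct req *collapse(struct req *r) { return r; }	/* may return parent */
static void resubmit(struct req *r, struct state s) { (void)r; (void)s; }

static void retry(struct req *r)
{
	struct state snap = r->s;	/* snapshot before teardown */

	r->status = 0;
	r = collapse(r);		/* r may now be the parent */
	resubmit(r, snap);		/* use snap, not r->s */
}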
503 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry, in bch2_rbio_error() argument
506 rbio->retry = retry; in bch2_rbio_error()
508 if (rbio->flags & BCH_READ_IN_RETRY) in bch2_rbio_error()
512 rbio = bch2_rbio_free(rbio); in bch2_rbio_error()
514 rbio->bio.bi_status = error; in bch2_rbio_error()
515 bch2_rbio_done(rbio); in bch2_rbio_error()
517 bch2_rbio_punt(rbio, bch2_rbio_retry, in bch2_rbio_error()
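bch2_rbio_error() dispatches three ways: a caller already inside a synchronous retry just reads the verdict back out of rbio->retry; a hard error frees the rbio and completes with the status; a retryable error punts to bch2_rbio_retry(). The same shape in a standalone model (constants and helpers are stand-ins):

enum verdict { V_HARD, V_RETRY };

struct io { int in_retry, retry, status; };

static void complete(struct io *io)   { (void)io; }	/* bio_endio() stand-in */
static void punt_retry(struct io *io) { (void)io; }	/* queue retry work */

static void io_error(struct io *io, enum verdict v, int status)
{
	io->retry = v;

	if (io->in_retry)
		return;			/* sync caller inspects io->retry */

	if (v == V_HARD) {
		io->status = status;
		complete(io);		/* finish with the error */
	} else {
		punt_retry(io);		/* try another replica / path */
	}
}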
524 struct bch_read_bio *rbio = in bch2_read_io_err() local
526 struct bio *bio = &rbio->bio; in bch2_read_io_err()
527 struct bch_fs *c = rbio->c; in bch2_read_io_err()
528 struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; in bch2_read_io_err()
531 bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); in bch2_read_io_err()
542 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status); in bch2_read_io_err()
546 struct bch_read_bio *rbio) in __bch2_rbio_narrow_crcs() argument
548 struct bch_fs *c = rbio->c; in __bch2_rbio_narrow_crcs()
549 u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset; in __bch2_rbio_narrow_crcs()
556 if (crc_is_compressed(rbio->pick.crc)) in __bch2_rbio_narrow_crcs()
559 k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos, in __bch2_rbio_narrow_crcs()
564 if (bversion_cmp(k.k->bversion, rbio->version) || in __bch2_rbio_narrow_crcs()
565 !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset)) in __bch2_rbio_narrow_crcs()
570 k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size) in __bch2_rbio_narrow_crcs()
573 if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version, in __bch2_rbio_narrow_crcs()
574 rbio->pick.crc, NULL, &new_crc, in __bch2_rbio_narrow_crcs()
576 rbio->pick.crc.csum_type)) { in __bch2_rbio_narrow_crcs()
602 static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio) in bch2_rbio_narrow_crcs() argument
604 bch2_trans_commit_do(rbio->c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, in bch2_rbio_narrow_crcs()
605 __bch2_rbio_narrow_crcs(trans, rbio)); in bch2_rbio_narrow_crcs()
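Context for the narrow-crcs path: a checksum stored for a whole extent says nothing about a sub-range, so after the full extent has been read and verified, bch2_rechecksum_bio() derives a checksum covering only the live part, letting the key carry a narrower crc. A toy additive checksum standing in for the real csum types:

#include <stddef.h>
#include <stdint.h>

static uint32_t csum(const uint8_t *p, size_t len)
{
	uint32_t s = 0;

	while (len--)
		s = s * 31 + *p++;
	return s;
}

/* Once csum(buf, whole_len) has been verified against the stored value,
 * the narrowed checksum for [off, off + live_len) is recomputable: */
static uint32_t narrow(const uint8_t *buf, size_t off, size_t live_len)
{
	return csum(buf + off, live_len);
}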
610 struct bch_read_bio *rbio = in bch2_read_csum_err() local
612 struct bch_fs *c = rbio->c; in bch2_read_csum_err()
613 struct bio *src = &rbio->bio; in bch2_read_csum_err()
614 struct bch_extent_crc_unpacked crc = rbio->pick.crc; in bch2_read_csum_err()
615 struct nonce nonce = extent_nonce(rbio->version, crc); in bch2_read_csum_err()
619 bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); in bch2_read_csum_err()
621 bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum); in bch2_read_csum_err()
623 struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; in bch2_read_csum_err()
631 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); in bch2_read_csum_err()
637 struct bch_read_bio *rbio = in bch2_read_decompress_err() local
639 struct bch_fs *c = rbio->c; in bch2_read_decompress_err()
642 bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); in bch2_read_decompress_err()
645 struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; in bch2_read_decompress_err()
651 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); in bch2_read_decompress_err()
657 struct bch_read_bio *rbio = in bch2_read_decrypt_err() local
659 struct bch_fs *c = rbio->c; in bch2_read_decrypt_err()
662 bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); in bch2_read_decrypt_err()
665 struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; in bch2_read_decrypt_err()
671 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); in bch2_read_decrypt_err()
678 struct bch_read_bio *rbio = in __bch2_read_endio() local
680 struct bch_fs *c = rbio->c; in __bch2_read_endio()
681 struct bio *src = &rbio->bio; in __bch2_read_endio()
682 struct bio *dst = &bch2_rbio_parent(rbio)->bio; in __bch2_read_endio()
683 struct bvec_iter dst_iter = rbio->bvec_iter; in __bch2_read_endio()
684 struct bch_extent_crc_unpacked crc = rbio->pick.crc; in __bch2_read_endio()
685 struct nonce nonce = extent_nonce(rbio->version, crc); in __bch2_read_endio()
693 if (rbio->bounce) { in __bch2_read_endio()
698 src->bi_iter = rbio->bvec_iter; in __bch2_read_endio()
702 if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io) in __bch2_read_endio()
712 if (unlikely(rbio->narrow_crcs)) in __bch2_read_endio()
713 bch2_rbio_narrow_crcs(rbio); in __bch2_read_endio()
715 if (rbio->flags & BCH_READ_NODECODE) in __bch2_read_endio()
719 crc.offset += rbio->offset_into_extent; in __bch2_read_endio()
720 crc.live_size = bvec_iter_sectors(rbio->bvec_iter); in __bch2_read_endio()
742 if (rbio->bounce) { in __bch2_read_endio()
749 if (rbio->promote) { in __bch2_read_endio()
758 promote_start(rbio->promote, rbio); in __bch2_read_endio()
759 rbio->promote = NULL; in __bch2_read_endio()
762 if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) { in __bch2_read_endio()
763 rbio = bch2_rbio_free(rbio); in __bch2_read_endio()
764 bch2_rbio_done(rbio); in __bch2_read_endio()
775 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) { in __bch2_read_endio()
776 rbio->flags |= BCH_READ_MUST_BOUNCE; in __bch2_read_endio()
777 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR); in __bch2_read_endio()
781 bch2_rbio_punt(rbio, bch2_read_csum_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); in __bch2_read_endio()
784 bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); in __bch2_read_endio()
787 bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); in __bch2_read_endio()
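The ordering in __bch2_read_endio() matters: the checksum is verified over the data exactly as it came off the device (still encrypted and/or compressed), then decryption runs, then decompression or a copy out of the bounce buffer, and each failure punts to its own handler on the unbound workqueue. The pipeline as a stub model:

struct buf { unsigned char *p; unsigned len; };

static int verify(struct buf *b)     { (void)b; return 0; }	/* csum check */
static int decrypt(struct buf *b)    { (void)b; return 0; }
static int decompress(struct buf *b) { (void)b; return 0; }

static int endio_pipeline(struct buf *b, int compressed)
{
	if (verify(b))
		return -1;	/* -> bch2_read_csum_err path */
	if (decrypt(b))
		return -2;	/* -> bch2_read_decrypt_err path */
	if (compressed && decompress(b))
		return -3;	/* -> bch2_read_decompress_err path */
	return 0;
}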
793 struct bch_read_bio *rbio = in bch2_read_endio() local
795 struct bch_fs *c = rbio->c; in bch2_read_endio()
796 struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; in bch2_read_endio()
800 if (rbio->have_ioref) { in bch2_read_endio()
801 bch2_latency_acct(ca, rbio->submit_time, READ); in bch2_read_endio()
805 if (!rbio->split) in bch2_read_endio()
806 rbio->bio.bi_end_io = rbio->end_io; in bch2_read_endio()
809 bch2_rbio_punt(rbio, bch2_read_io_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); in bch2_read_endio()
813 if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) || in bch2_read_endio()
814 (ca && dev_ptr_stale(ca, &rbio->pick.ptr))) { in bch2_read_endio()
815 trace_and_count(c, read_reuse_race, &rbio->bio); in bch2_read_endio()
817 if (rbio->flags & BCH_READ_RETRY_IF_STALE) in bch2_read_endio()
818 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN); in bch2_read_endio()
820 bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN); in bch2_read_endio()
824 if (rbio->narrow_crcs || in bch2_read_endio()
825 rbio->promote || in bch2_read_endio()
826 crc_is_compressed(rbio->pick.crc) || in bch2_read_endio()
827 bch2_csum_type_is_encryption(rbio->pick.crc.csum_type)) in bch2_read_endio()
829 else if (rbio->pick.crc.csum_type) in bch2_read_endio()
832 bch2_rbio_punt(rbio, __bch2_read_endio, context, wq); in bch2_read_endio()
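The context selection just above encodes a simple policy: completions that may block or burn CPU (crc narrowing, promotion, decompression, decryption) go to the unbound workqueue; a bare checksum verification can run at high priority; a raw read with nothing to do completes inline. As a pure function with stand-in values:

enum rctx { R_INLINE, R_HIGHPRI, R_UNBOUND };

static enum rctx pick_context(int narrow, int promote, int compressed,
			      int encrypted, int has_csum)
{
	if (narrow || promote || compressed || encrypted)
		return R_UNBOUND;	/* heavyweight work, may block */
	if (has_csum)
		return R_HIGHPRI;	/* just verify the checksum */
	return R_INLINE;		/* nothing to transform */
}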
890 struct bch_read_bio *rbio = NULL; in __bch2_read_extent() local
993 &rbio, &bounce, &read_full, failed); in __bch2_read_extent()
1013 if (rbio) { in __bch2_read_extent()
1020 EBUG_ON(rbio->bio.bi_iter.bi_size < in __bch2_read_extent()
1022 rbio->bio.bi_iter.bi_size = in __bch2_read_extent()
1027 rbio = rbio_init(bio_alloc_bioset(NULL, in __bch2_read_extent()
1034 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9); in __bch2_read_extent()
1035 rbio->bounce = true; in __bch2_read_extent()
1036 rbio->split = true; in __bch2_read_extent()
1046 rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS, in __bch2_read_extent()
1049 rbio->bio.bi_iter = iter; in __bch2_read_extent()
1050 rbio->split = true; in __bch2_read_extent()
1052 rbio = orig; in __bch2_read_extent()
1053 rbio->bio.bi_iter = iter; in __bch2_read_extent()
1054 EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN)); in __bch2_read_extent()
1057 EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size); in __bch2_read_extent()
1059 rbio->c = c; in __bch2_read_extent()
1060 rbio->submit_time = local_clock(); in __bch2_read_extent()
1061 if (rbio->split) in __bch2_read_extent()
1062 rbio->parent = orig; in __bch2_read_extent()
1064 rbio->end_io = orig->bio.bi_end_io; in __bch2_read_extent()
1065 rbio->bvec_iter = iter; in __bch2_read_extent()
1066 rbio->offset_into_extent = offset_into_extent; in __bch2_read_extent()
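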
1067 rbio->flags = flags; in __bch2_read_extent()
1068 rbio->have_ioref = ca != NULL; in __bch2_read_extent()
1069 rbio->narrow_crcs = narrow_crcs; in __bch2_read_extent()
1070 rbio->hole = 0; in __bch2_read_extent()
1071 rbio->retry = 0; in __bch2_read_extent()
1072 rbio->context = 0; in __bch2_read_extent()
1074 rbio->devs_have = bch2_bkey_devs(k); in __bch2_read_extent()
1075 rbio->pick = pick; in __bch2_read_extent()
1076 rbio->subvol = orig->subvol; in __bch2_read_extent()
1077 rbio->read_pos = read_pos; in __bch2_read_extent()
1078 rbio->data_btree = data_btree; in __bch2_read_extent()
1079 rbio->data_pos = data_pos; in __bch2_read_extent()
1080 rbio->version = k.k->bversion; in __bch2_read_extent()
1081 rbio->promote = promote; in __bch2_read_extent()
1082 INIT_WORK(&rbio->work, NULL); in __bch2_read_extent()
1087 rbio->bio.bi_opf = orig->bio.bi_opf; in __bch2_read_extent()
1088 rbio->bio.bi_iter.bi_sector = pick.ptr.offset; in __bch2_read_extent()
1089 rbio->bio.bi_end_io = bch2_read_endio; in __bch2_read_extent()
1091 if (rbio->bounce) in __bch2_read_extent()
1092 trace_and_count(c, read_bounce, &rbio->bio); in __bch2_read_extent()
1094 this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio)); in __bch2_read_extent()
1095 bch2_increment_clock(c, bio_sectors(&rbio->bio), READ); in __bch2_read_extent()
1119 if (!rbio->pick.idx) { in __bch2_read_extent()
1120 if (unlikely(!rbio->have_ioref)) { in __bch2_read_extent()
1122 bch2_read_err_msg_trans(trans, &buf, rbio, read_pos); in __bch2_read_extent()
1129 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); in __bch2_read_extent()
1134 bio_sectors(&rbio->bio)); in __bch2_read_extent()
1135 bio_set_dev(&rbio->bio, ca->disk_sb.bdev); in __bch2_read_extent()
1139 bio_endio(&rbio->bio); in __bch2_read_extent()
1142 submit_bio(&rbio->bio); in __bch2_read_extent()
1144 submit_bio_wait(&rbio->bio); in __bch2_read_extent()
1154 if (bch2_ec_read_extent(trans, rbio, k)) { in __bch2_read_extent()
1155 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); in __bch2_read_extent()
1160 bio_endio(&rbio->bio); in __bch2_read_extent()
1170 rbio->context = RBIO_CONTEXT_UNBOUND; in __bch2_read_extent()
1171 bch2_read_endio(&rbio->bio); in __bch2_read_extent()
1173 ret = rbio->retry; in __bch2_read_extent()
1174 rbio = bch2_rbio_free(rbio); in __bch2_read_extent()
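__bch2_read_extent() sets the rbio up in one of three shapes: a bounce bio with fresh pool pages (the data must be transformed before landing in the caller's buffer), a split clone sharing the original pages but with a private iterator, or direct reuse of the original bio. A rough decision sketch; the real predicates involve bounce, BCH_READ_MUST_CLONE, and how much of the extent is being read, so treat this as an approximation:

enum shape { REUSE, CLONE, BOUNCE };

static enum shape pick_shape(int need_transform, int need_private_iter)
{
	if (need_transform)		/* decrypt/decompress/verify first */
		return BOUNCE;
	if (need_private_iter)		/* parent keeps its own bi_iter */
		return CLONE;
	return REUSE;			/* cheapest: submit the orig bio */
}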
1210 void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, in __bch2_read() argument
1272 ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos, in __bch2_read()
1282 bio_advance_iter(&rbio->bio, &bvec_iter, bytes); in __bch2_read()
1300 rbio->bio.bi_status = BLK_STS_IOERR; in __bch2_read()
1301 bch2_rbio_done(rbio); in __bch2_read()
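__bch2_read() walks the requested range extent by extent, advancing the caller's bvec_iter by however many bytes each __bch2_read_extent() call covered (line 1282 above), with the error path stamping BLK_STS_IOERR on the original bio. The loop skeleton, byte-granular and with hypothetical types:

struct range_iter { unsigned done, total; };

static void read_all(struct range_iter *it,
		     unsigned (*read_extent)(unsigned off, unsigned max))
{
	while (it->done < it->total) {
		unsigned bytes = read_extent(it->done, it->total - it->done);

		if (!bytes)
			break;		/* error: caller sets bio status */
		it->done += bytes;	/* bio_advance_iter() equivalent */
	}
}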