// SPDX-License-Identifier: GPL-2.0
/*
 * fs/bcachefs/io_read.c (revision c1e822754cc7f28b98c6897d62e8b47b4001e422)
 *
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "data_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "io_misc.h"
#include "io_write.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/sched/mm.h>

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

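/*
 * Decide whether reads targeting @target currently look congested. Each
 * device carries a congestion score that decays over time, roughly one unit
 * per ~4us of elapsed time via the >> 12 shift on a local_clock() (ns) delta
 * below. We then admit the read probabilistically: e.g. with two devices and
 * a summed score of CONGESTED_MAX, a random value in [0, 2 * CONGESTED_MAX)
 * falls below the total about half the time.
 */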
static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
	const struct bch_devs_mask *devs;
	unsigned d, nr = 0, total = 0;
	u64 now = local_clock(), last;
	s64 congested;
	struct bch_dev *ca;

	if (!target)
		return false;

	rcu_read_lock();
	devs = bch2_target_to_mask(c, target) ?:
		&c->rw_devs[BCH_DATA_user];

	for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
		ca = rcu_dereference(c->devs[d]);
		if (!ca)
			continue;

		congested = atomic_read(&ca->congested);
		last = READ_ONCE(ca->congested_last);
		if (time_after64(now, last))
			congested -= (now - last) >> 12;

		total += max(congested, 0LL);
		nr++;
	}
	rcu_read_unlock();

	return bch2_rand_range(nr * CONGESTED_MAX) < total;
}

#else

static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
	return false;
}

#endif

/* Cache promotion on read */

struct promote_op {
	struct rcu_head		rcu;
	u64			start_time;

	struct rhash_head	hash;
	struct bpos		pos;

	struct data_update	write;
	struct bio_vec		bi_inline_vecs[]; /* must be last */
};

static const struct rhashtable_params bch_promote_params = {
	.head_offset		= offsetof(struct promote_op, hash),
	.key_offset		= offsetof(struct promote_op, pos),
	.key_len		= sizeof(struct bpos),
	.automatic_shrinking	= true,
};

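/*
 * In-flight promotes are tracked in promote_table, keyed by extent position,
 * so the same extent is never promoted twice concurrently. Note that this is
 * also used by the read-retry path: when @failed is non-NULL we're rewriting
 * replicas that returned errors, not promoting, so the promote-target checks
 * are skipped.
 */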
static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
				  struct bpos pos,
				  struct bch_io_opts opts,
				  unsigned flags,
				  struct bch_io_failures *failed)
{
	if (!failed) {
		BUG_ON(!opts.promote_target);

		if (!(flags & BCH_READ_MAY_PROMOTE))
			return -BCH_ERR_nopromote_may_not;

		if (bch2_bkey_has_target(c, k, opts.promote_target))
			return -BCH_ERR_nopromote_already_promoted;

		if (bkey_extent_is_unwritten(k))
			return -BCH_ERR_nopromote_unwritten;

		if (bch2_target_congested(c, opts.promote_target))
			return -BCH_ERR_nopromote_congested;
	}

	if (rhashtable_lookup_fast(&c->promote_table, &pos,
				   bch_promote_params))
		return -BCH_ERR_nopromote_in_flight;

	return 0;
}

static void promote_free(struct bch_fs *c, struct promote_op *op)
{
	int ret;

	bch2_data_update_exit(&op->write);

	ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
				     bch_promote_params);
	BUG_ON(ret);
	bch2_write_ref_put(c, BCH_WRITE_REF_promote);
	kfree_rcu(op, rcu);
}

static void promote_done(struct bch_write_op *wop)
{
	struct promote_op *op =
		container_of(wop, struct promote_op, write.op);
	struct bch_fs *c = op->write.op.c;

	bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
			       op->start_time);
	promote_free(c, op);
}

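/*
 * Hand the data we just read off to the promote's write op. Rather than
 * copying, we steal the bounce buffer's pages; this presumably is why the
 * read is required to have been bounced (asserted below).
 */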
static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{
	struct bio *bio = &op->write.op.wbio.bio;

	trace_and_count(op->write.op.c, read_promote, &rbio->bio);

	/* we now own pages: */
	BUG_ON(!rbio->bounce);
	BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);

	memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
	       sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
	swap(bio->bi_vcnt, rbio->bio.bi_vcnt);

	bch2_data_update_read_done(&op->write, rbio->pick.crc);
}

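/*
 * Allocate everything a promote needs up front: the promote_op itself, a
 * bounce rbio large enough for the (possibly compressed) extent, the hash
 * table entry, and the data_update that will do the write. Any failure
 * unwinds fully and returns an error pointer; the read itself proceeds
 * either way.
 */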
static struct promote_op *__promote_alloc(struct btree_trans *trans,
					  enum btree_id btree_id,
					  struct bkey_s_c k,
					  struct bpos pos,
					  struct extent_ptr_decoded *pick,
					  struct bch_io_opts opts,
					  unsigned sectors,
					  struct bch_read_bio **rbio,
					  struct bch_io_failures *failed)
{
	struct bch_fs *c = trans->c;
	struct promote_op *op = NULL;
	struct bio *bio;
	unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	int ret;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
		return ERR_PTR(-BCH_ERR_nopromote_no_writes);

	op = kzalloc(struct_size(op, bi_inline_vecs, pages), GFP_KERNEL);
	if (!op) {
		ret = -BCH_ERR_nopromote_enomem;
		goto err;
	}

	op->start_time = local_clock();
	op->pos = pos;

	/*
	 * We don't use the mempool here because extents that aren't
	 * checksummed or compressed can be too big for the mempool:
	 */
	*rbio = kzalloc(sizeof(struct bch_read_bio) +
			sizeof(struct bio_vec) * pages,
			GFP_KERNEL);
	if (!*rbio) {
		ret = -BCH_ERR_nopromote_enomem;
		goto err;
	}

	rbio_init(&(*rbio)->bio, opts);
	bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);

	if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
		ret = -BCH_ERR_nopromote_enomem;
		goto err;
	}

	(*rbio)->bounce		= true;
	(*rbio)->split		= true;
	(*rbio)->kmalloc	= true;

	if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
					  bch_promote_params)) {
		ret = -BCH_ERR_nopromote_in_flight;
		goto err;
	}

	bio = &op->write.op.wbio.bio;
	bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);

	struct data_update_opts update_opts = {};

	if (!failed) {
		update_opts.target = opts.promote_target;
		update_opts.extra_replicas = 1;
		update_opts.write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED;
	} else {
		update_opts.target = opts.foreground_target;

		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		unsigned i = 0;
		bkey_for_each_ptr(ptrs, ptr) {
			if (bch2_dev_io_failures(failed, ptr->dev))
				update_opts.rewrite_ptrs |= BIT(i);
			i++;
		}
	}

	ret = bch2_data_update_init(trans, NULL, NULL, &op->write,
			writepoint_hashed((unsigned long) current),
			opts,
			update_opts,
			btree_id, k);
	/*
	 * possible errors: -BCH_ERR_nocow_lock_blocked,
	 * -BCH_ERR_ENOSPC_disk_reservation:
	 */
	if (ret) {
		BUG_ON(rhashtable_remove_fast(&c->promote_table, &op->hash,
					      bch_promote_params));
		goto err;
	}

	op->write.op.end_io = promote_done;

	return op;
err:
	if (*rbio)
		bio_free_pages(&(*rbio)->bio);
	kfree(*rbio);
	*rbio = NULL;
	kfree(op);
	bch2_write_ref_put(c, BCH_WRITE_REF_promote);
	return ERR_PTR(ret);
}

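/*
 * Size the promote: when promoting (or rewriting) the whole extent we need
 * room for the full compressed payload, since it may be decompressed in the
 * write path; otherwise just the part of the extent this read covers.
 */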
noinline
static struct promote_op *promote_alloc(struct btree_trans *trans,
					struct bvec_iter iter,
					struct bkey_s_c k,
					struct extent_ptr_decoded *pick,
					struct bch_io_opts opts,
					unsigned flags,
					struct bch_read_bio **rbio,
					bool *bounce,
					bool *read_full,
					struct bch_io_failures *failed)
{
	struct bch_fs *c = trans->c;
	/*
	 * if failed != NULL we're not actually doing a promote, we're
	 * recovering from an io/checksum error
	 */
	bool promote_full = (failed ||
			     *read_full ||
			     READ_ONCE(c->opts.promote_whole_extents));
	/* data might have to be decompressed in the write path: */
	unsigned sectors = promote_full
		? max(pick->crc.compressed_size, pick->crc.live_size)
		: bvec_iter_sectors(iter);
	struct bpos pos = promote_full
		? bkey_start_pos(k.k)
		: POS(k.k->p.inode, iter.bi_sector);
	struct promote_op *promote;
	int ret;

	ret = should_promote(c, k, pos, opts, flags, failed);
	if (ret)
		goto nopromote;

	promote = __promote_alloc(trans,
				  k.k->type == KEY_TYPE_reflink_v
				  ? BTREE_ID_reflink
				  : BTREE_ID_extents,
				  k, pos, pick, opts, sectors, rbio, failed);
	ret = PTR_ERR_OR_ZERO(promote);
	if (ret)
		goto nopromote;

	*bounce		= true;
	*read_full	= promote_full;
	return promote;
nopromote:
	trace_read_nopromote(c, ret);
	return NULL;
}

/* Read */

#define READ_RETRY_AVOID	1
#define READ_RETRY		2
#define READ_ERR		3

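/*
 * Contexts are ordered: bch2_rbio_punt() only punts to a workqueue when the
 * requested context is heavier than the one we're already running in, so
 * e.g. work already executing unbound is never bounced through another
 * queue.
 */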
enum rbio_context {
	RBIO_CONTEXT_NULL,
	RBIO_CONTEXT_HIGHPRI,
	RBIO_CONTEXT_UNBOUND,
};

static inline struct bch_read_bio *
bch2_rbio_parent(struct bch_read_bio *rbio)
{
	return rbio->split ? rbio->parent : rbio;
}

__always_inline
static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
			   enum rbio_context context,
			   struct workqueue_struct *wq)
{
	if (context <= rbio->context) {
		fn(&rbio->work);
	} else {
		rbio->work.func		= fn;
		rbio->context		= context;
		queue_work(wq, &rbio->work);
	}
}

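/*
 * Tear down an rbio and return the parent it should report completion to.
 * If a promote is still attached we're on an error path (the success path
 * hands it off in __bch2_read_endio() first), so it's freed here too.
 */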
static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
{
	BUG_ON(rbio->bounce && !rbio->split);

	if (rbio->promote)
		promote_free(rbio->c, rbio->promote);
	rbio->promote = NULL;

	if (rbio->bounce)
		bch2_bio_free_pages_pool(rbio->c, &rbio->bio);

	if (rbio->split) {
		struct bch_read_bio *parent = rbio->parent;

		if (rbio->kmalloc)
			kfree(rbio);
		else
			bio_put(&rbio->bio);

		rbio = parent;
	}

	return rbio;
}

/*
 * Only called on a top level bch_read_bio to complete an entire read request,
 * not a split:
 */
static void bch2_rbio_done(struct bch_read_bio *rbio)
{
	if (rbio->start_time)
		bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
				       rbio->start_time);
	bio_endio(&rbio->bio);
}

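/*
 * Retry a BCH_READ_NODECODE read (the data move path): re-look up the
 * extent, and only reissue the IO if it still contains the pointer we
 * originally picked; if not, the data has moved and we signal a hole.
 */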
static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
				     struct bvec_iter bvec_iter,
				     struct bch_io_failures *failed,
				     unsigned flags)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_buf sk;
	struct bkey_s_c k;
	int ret;

	flags &= ~BCH_READ_LAST_FRAGMENT;
	flags |= BCH_READ_MUST_CLONE;

	bch2_bkey_buf_init(&sk);

	bch2_trans_iter_init(trans, &iter, rbio->data_btree,
			     rbio->read_pos, BTREE_ITER_slots);
retry:
	bch2_trans_begin(trans);
	rbio->bio.bi_status = 0;

	ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
	if (ret)
		goto err;

	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);

	if (!bch2_bkey_matches_ptr(c, k,
				   rbio->pick.ptr,
				   rbio->data_pos.offset -
				   rbio->pick.crc.offset)) {
		/* extent we wanted to read no longer exists: */
		rbio->hole = true;
		goto out;
	}

	ret = __bch2_read_extent(trans, rbio, bvec_iter,
				 rbio->read_pos,
				 rbio->data_btree,
				 k, 0, failed, flags);
	if (ret == READ_RETRY)
		goto retry;
	if (ret)
		goto err;
out:
	bch2_rbio_done(rbio);
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);
	return;
err:
	rbio->bio.bi_status = BLK_STS_IOERR;
	goto out;
}

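/*
 * Top of the retry path, punted to from the endio path. READ_RETRY_AVOID
 * marks the replica that failed so the next pick skips it; promotes aren't
 * attempted on retries (BCH_READ_MAY_PROMOTE is cleared), though the same
 * machinery may be reused to rewrite a bad replica.
 */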
static void bch2_rbio_retry(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c	= rbio->c;
	struct bvec_iter iter	= rbio->bvec_iter;
	unsigned flags		= rbio->flags;
	subvol_inum inum = {
		.subvol = rbio->subvol,
		.inum	= rbio->read_pos.inode,
	};
	struct bch_io_failures failed = { .nr = 0 };

	trace_and_count(c, read_retry, &rbio->bio);

	if (rbio->retry == READ_RETRY_AVOID)
		bch2_mark_io_failure(&failed, &rbio->pick);

	rbio->bio.bi_status = 0;

	rbio = bch2_rbio_free(rbio);

	flags |= BCH_READ_IN_RETRY;
	flags &= ~BCH_READ_MAY_PROMOTE;

	if (flags & BCH_READ_NODECODE) {
		bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
	} else {
		flags &= ~BCH_READ_LAST_FRAGMENT;
		flags |= BCH_READ_MUST_CLONE;

		__bch2_read(c, rbio, iter, inum, &failed, flags);
	}
}

static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
			    blk_status_t error)
{
	rbio->retry = retry;

	if (rbio->flags & BCH_READ_IN_RETRY)
		return;

	if (retry == READ_ERR) {
		rbio = bch2_rbio_free(rbio);

		rbio->bio.bi_status = error;
		bch2_rbio_done(rbio);
	} else {
		bch2_rbio_punt(rbio, bch2_rbio_retry,
			       RBIO_CONTEXT_UNBOUND, system_unbound_wq);
	}
}
static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
				   struct bch_read_bio *rbio)
{
	struct bch_fs *c = rbio->c;
	u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
	struct bch_extent_crc_unpacked new_crc;
	struct btree_iter iter;
	struct bkey_i *new;
	struct bkey_s_c k;
	int ret = 0;

	if (crc_is_compressed(rbio->pick.crc))
		return 0;

	k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
			       BTREE_ITER_slots|BTREE_ITER_intent);
	if ((ret = bkey_err(k)))
		goto out;

	if (bversion_cmp(k.k->bversion, rbio->version) ||
	    !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
		goto out;

	/* Extent was merged? */
	if (bkey_start_offset(k.k) < data_offset ||
	    k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
		goto out;

	if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
			rbio->pick.crc, NULL, &new_crc,
			bkey_start_offset(k.k) - data_offset, k.k->size,
			rbio->pick.crc.csum_type)) {
		bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
		ret = 0;
		goto out;
	}

	/*
	 * going to be temporarily appending another checksum entry:
	 */
	new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
				 sizeof(struct bch_extent_crc128));
	if ((ret = PTR_ERR_OR_ZERO(new)))
		goto out;

	bkey_reassemble(new, k);

	if (!bch2_bkey_narrow_crcs(new, new_crc))
		goto out;

	ret = bch2_trans_update(trans, &iter, new,
				BTREE_UPDATE_internal_snapshot_node);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
{
	bch2_trans_commit_do(rbio->c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			     __bch2_rbio_narrow_crcs(trans, rbio));
}

/* Inner part that may run in process context */
static void __bch2_read_endio(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c	= rbio->c;
	struct bio *src		= &rbio->bio;
	struct bio *dst		= &bch2_rbio_parent(rbio)->bio;
	struct bvec_iter dst_iter = rbio->bvec_iter;
	struct bch_extent_crc_unpacked crc = rbio->pick.crc;
	struct nonce nonce = extent_nonce(rbio->version, crc);
	unsigned nofs_flags;
	struct bch_csum csum;
	int ret;

	nofs_flags = memalloc_nofs_save();

	/* Reset iterator for checksumming and copying bounced data: */
	if (rbio->bounce) {
		src->bi_iter.bi_size		= crc.compressed_size << 9;
		src->bi_iter.bi_idx		= 0;
		src->bi_iter.bi_bvec_done	= 0;
	} else {
		src->bi_iter			= rbio->bvec_iter;
	}

	csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
	if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io)
		goto csum_err;

	/*
	 * XXX
	 * We need to rework the narrow_crcs path to deliver the read completion
	 * first, and then punt to a different workqueue, otherwise we're
	 * holding up reads while doing btree updates which is bad for memory
	 * reclaim.
	 */
	if (unlikely(rbio->narrow_crcs))
		bch2_rbio_narrow_crcs(rbio);

	if (rbio->flags & BCH_READ_NODECODE)
		goto nodecode;

	/* Adjust crc to point to subset of data we want: */
	crc.offset     += rbio->offset_into_extent;
	crc.live_size	= bvec_iter_sectors(rbio->bvec_iter);

	if (crc_is_compressed(crc)) {
		ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		if (ret)
			goto decrypt_err;

		if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) &&
		    !c->opts.no_data_io)
			goto decompression_err;
	} else {
		/* don't need to decrypt the entire bio: */
		nonce = nonce_add(nonce, crc.offset << 9);
		bio_advance(src, crc.offset << 9);

		BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
		src->bi_iter.bi_size = dst_iter.bi_size;

		ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		if (ret)
			goto decrypt_err;

		if (rbio->bounce) {
			struct bvec_iter src_iter = src->bi_iter;

			bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
		}
	}

	if (rbio->promote) {
		/*
		 * Re encrypt data we decrypted, so it's consistent with
		 * rbio->crc:
		 */
		ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		if (ret)
			goto decrypt_err;

		promote_start(rbio->promote, rbio);
		rbio->promote = NULL;
	}
nodecode:
	if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
		rbio = bch2_rbio_free(rbio);
		bch2_rbio_done(rbio);
	}
out:
	memalloc_nofs_restore(nofs_flags);
	return;
csum_err:
	/*
	 * Checksum error: if the bio wasn't bounced, we may have been
	 * reading into buffers owned by userspace (that userspace can
	 * scribble over) - retry the read, bouncing it this time:
	 */
	if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
		rbio->flags |= BCH_READ_MUST_BOUNCE;
		bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
		goto out;
	}

	struct printbuf buf = PRINTBUF;
	buf.atomic++;
	prt_str(&buf, "data ");
	bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum);

	struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
	if (ca) {
		/* buf already begins with "data ", don't prepend it twice: */
		bch_err_inum_offset_ratelimited(ca,
			rbio->read_pos.inode,
			rbio->read_pos.offset << 9,
			"%s", buf.buf);
		bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
	}
	printbuf_exit(&buf);
	bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
	goto out;
decompression_err:
	bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
					rbio->read_pos.offset << 9,
					"decompression error");
	bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
	goto out;
decrypt_err:
	bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
					rbio->read_pos.offset << 9,
					"decrypt error");
	bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
	goto out;
}

static void bch2_read_endio(struct bio *bio)
{
	struct bch_read_bio *rbio =
		container_of(bio, struct bch_read_bio, bio);
	struct bch_fs *c	= rbio->c;
	struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
	struct workqueue_struct *wq = NULL;
	enum rbio_context context = RBIO_CONTEXT_NULL;

	if (rbio->have_ioref) {
		bch2_latency_acct(ca, rbio->submit_time, READ);
		percpu_ref_put(&ca->io_ref);
	}

	if (!rbio->split)
		rbio->bio.bi_end_io = rbio->end_io;

	if (bio->bi_status) {
		if (ca) {
			bch_err_inum_offset_ratelimited(ca,
				rbio->read_pos.inode,
				rbio->read_pos.offset,
				"data read error: %s",
				bch2_blk_status_to_str(bio->bi_status));
			bch2_io_error(ca, BCH_MEMBER_ERROR_read);
		}
		bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
		return;
	}

	if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
	    (ca && dev_ptr_stale(ca, &rbio->pick.ptr))) {
		trace_and_count(c, read_reuse_race, &rbio->bio);

		if (rbio->flags & BCH_READ_RETRY_IF_STALE)
			bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
		else
			bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
		return;
	}

	if (rbio->narrow_crcs ||
	    rbio->promote ||
	    crc_is_compressed(rbio->pick.crc) ||
	    bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
		context = RBIO_CONTEXT_UNBOUND,	wq = system_unbound_wq;
	else if (rbio->pick.crc.csum_type)
		context = RBIO_CONTEXT_HIGHPRI,	wq = system_highpri_wq;

	bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
}

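/*
 * Resolve a reflink pointer to the indirect extent it points at. The offset
 * arithmetic: a reflink_p with index @idx, read @offset_into_extent sectors
 * in, lands at position idx + offset_into_extent in the reflink btree; on
 * success, *offset_into_extent is rebased onto the indirect extent found
 * there.
 */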
int __bch2_read_indirect_extent(struct btree_trans *trans,
				unsigned *offset_into_extent,
				struct bkey_buf *orig_k)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 reflink_offset;
	int ret;

	reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
		*offset_into_extent;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_reflink,
			       POS(0, reflink_offset), 0);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_reflink_v &&
	    k.k->type != KEY_TYPE_indirect_inline_data) {
		bch_err_inum_offset_ratelimited(trans->c,
			orig_k->k->k.p.inode,
			orig_k->k->k.p.offset << 9,
			"%llu len %u points to nonexistent indirect extent %llu",
			orig_k->k->k.p.offset,
			orig_k->k->k.size,
			reflink_offset);
		bch2_inconsistent_error(trans->c);
		ret = -BCH_ERR_missing_indirect_extent;
		goto err;
	}

	*offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
	bch2_bkey_buf_reassemble(orig_k, trans->c, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
						   struct bch_dev *ca,
						   struct bkey_s_c k,
						   struct bch_extent_ptr ptr)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct printbuf buf = PRINTBUF;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     PTR_BUCKET_POS(ca, &ptr),
			     BTREE_ITER_cached);

	u8 *gen = bucket_gen(ca, iter.pos.offset);
	if (gen) {
		prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
		printbuf_indent_add(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);

		prt_printf(&buf, "memory gen: %u", *gen);

		ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
		if (!ret) {
			prt_newline(&buf);
			bch2_bkey_val_to_text(&buf, c, k);
		}
	} else {
		prt_printf(&buf, "Attempting to read from invalid bucket %llu:%llu:\n",
			   iter.pos.inode, iter.pos.offset);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "first bucket %u nbuckets %llu\n",
			   ca->mi.first_bucket, ca->mi.nbuckets);

		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);
	}

	bch2_fs_inconsistent(c, "%s", buf.buf);

	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
}

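/*
 * Read a single (possibly partial) extent. In outline: pick a replica,
 * decide whether we must read the full checksummed/compressed extent into a
 * bounce buffer or can read directly into the caller's bio, optionally set
 * up a promote, then submit. Returns 0, or a READ_* code when called
 * synchronously with BCH_READ_IN_RETRY.
 */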
int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
		       struct bvec_iter iter, struct bpos read_pos,
		       enum btree_id data_btree, struct bkey_s_c k,
		       unsigned offset_into_extent,
		       struct bch_io_failures *failed, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct bch_read_bio *rbio = NULL;
	struct promote_op *promote = NULL;
	bool bounce = false, read_full = false, narrow_crcs = false;
	struct bpos data_pos = bkey_start_pos(k.k);
	int pick_ret;

	if (bkey_extent_is_inline_data(k.k)) {
		unsigned bytes = min_t(unsigned, iter.bi_size,
				       bkey_inline_data_bytes(k.k));

		swap(iter.bi_size, bytes);
		memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
		swap(iter.bi_size, bytes);
		bio_advance_iter(&orig->bio, &iter, bytes);
		zero_fill_bio_iter(&orig->bio, iter);
		goto out_read_done;
	}
retry_pick:
	pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);

	/* hole or reservation - just zero fill: */
	if (!pick_ret)
		goto hole;

	if (pick_ret < 0) {
		struct printbuf buf = PRINTBUF;
		bch2_bkey_val_to_text(&buf, c, k);

		bch_err_inum_offset_ratelimited(c,
				read_pos.inode, read_pos.offset << 9,
				"no device to read from: %s\n  %s",
				bch2_err_str(pick_ret),
				buf.buf);
		printbuf_exit(&buf);
		goto err;
	}

	struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);

	/*
	 * Stale dirty pointers are treated as IO errors, but @failed isn't
	 * allocated unless we're in the retry path - so if we're not in the
	 * retry path, don't check here, it'll be caught in bch2_read_endio()
	 * and we'll end up in the retry path:
	 */
	if ((flags & BCH_READ_IN_RETRY) &&
	    !pick.ptr.cached &&
	    ca &&
	    unlikely(dev_ptr_stale(ca, &pick.ptr))) {
		read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
		bch2_mark_io_failure(failed, &pick);
		percpu_ref_put(&ca->io_ref);
		goto retry_pick;
	}

	/*
	 * Unlock the iterator while the btree node's lock is still in
	 * cache, before doing the IO:
	 */
	bch2_trans_unlock(trans);

	if (flags & BCH_READ_NODECODE) {
		/*
		 * can happen if we retry, and the extent we were going to read
		 * has been merged in the meantime:
		 */
		if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS) {
			if (ca)
				percpu_ref_put(&ca->io_ref);
			goto hole;
		}

		iter.bi_size	= pick.crc.compressed_size << 9;
		goto get_bio;
	}

	if (!(flags & BCH_READ_LAST_FRAGMENT) ||
	    bio_flagged(&orig->bio, BIO_CHAIN))
		flags |= BCH_READ_MUST_CLONE;

	narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
		bch2_can_narrow_extent_crcs(k, pick.crc);

	if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
		flags |= BCH_READ_MUST_BOUNCE;

	EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);

	if (crc_is_compressed(pick.crc) ||
	    (pick.crc.csum_type != BCH_CSUM_none &&
	     (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
	      (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
	       (flags & BCH_READ_USER_MAPPED)) ||
	      (flags & BCH_READ_MUST_BOUNCE)))) {
		read_full = true;
		bounce = true;
	}

	if (orig->opts.promote_target /* || failed */)
		promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
					&rbio, &bounce, &read_full, failed);

	if (!read_full) {
		EBUG_ON(crc_is_compressed(pick.crc));
		EBUG_ON(pick.crc.csum_type &&
			(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
			 bvec_iter_sectors(iter) != pick.crc.live_size ||
			 pick.crc.offset ||
			 offset_into_extent));

		data_pos.offset += offset_into_extent;
		pick.ptr.offset += pick.crc.offset +
			offset_into_extent;
		offset_into_extent		= 0;
		pick.crc.compressed_size	= bvec_iter_sectors(iter);
		pick.crc.uncompressed_size	= bvec_iter_sectors(iter);
		pick.crc.offset			= 0;
		pick.crc.live_size		= bvec_iter_sectors(iter);
	}
get_bio:
	if (rbio) {
		/*
		 * promote already allocated bounce rbio:
		 * promote needs to allocate a bio big enough for uncompressing
		 * data in the write path, but we're not going to use it all
		 * here:
		 */
		EBUG_ON(rbio->bio.bi_iter.bi_size <
		       pick.crc.compressed_size << 9);
		rbio->bio.bi_iter.bi_size =
			pick.crc.compressed_size << 9;
	} else if (bounce) {
		unsigned sectors = pick.crc.compressed_size;

		rbio = rbio_init(bio_alloc_bioset(NULL,
						  DIV_ROUND_UP(sectors, PAGE_SECTORS),
						  0,
						  GFP_NOFS,
						  &c->bio_read_split),
				 orig->opts);

		bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
		rbio->bounce	= true;
		rbio->split	= true;
	} else if (flags & BCH_READ_MUST_CLONE) {
		/*
		 * Have to clone if there were any splits, due to error
		 * reporting issues (if a split errored, and retrying didn't
		 * work, when it reports the error to its parent (us) we don't
		 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
		rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
						 &c->bio_read_split),
				 orig->opts);
		rbio->bio.bi_iter = iter;
		rbio->split	= true;
	} else {
		rbio = orig;
		rbio->bio.bi_iter = iter;
		EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
	}

	EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);

	rbio->c			= c;
	rbio->submit_time	= local_clock();
	if (rbio->split)
		rbio->parent	= orig;
	else
		rbio->end_io	= orig->bio.bi_end_io;
	rbio->bvec_iter		= iter;
	rbio->offset_into_extent= offset_into_extent;
	rbio->flags		= flags;
	rbio->have_ioref	= ca != NULL;
	rbio->narrow_crcs	= narrow_crcs;
	rbio->hole		= 0;
	rbio->retry		= 0;
	rbio->context		= 0;
	/* XXX: only initialize this if needed */
	rbio->devs_have		= bch2_bkey_devs(k);
	rbio->pick		= pick;
	rbio->subvol		= orig->subvol;
	rbio->read_pos		= read_pos;
	rbio->data_btree	= data_btree;
	rbio->data_pos		= data_pos;
	rbio->version		= k.k->bversion;
	rbio->promote		= promote;
	INIT_WORK(&rbio->work, NULL);

	if (flags & BCH_READ_NODECODE)
		orig->pick = pick;

	rbio->bio.bi_opf	= orig->bio.bi_opf;
	rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
	rbio->bio.bi_end_io	= bch2_read_endio;

	if (rbio->bounce)
		trace_and_count(c, read_bounce, &rbio->bio);

	this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);

	/*
	 * If it's being moved internally, we don't want to flag it as a cache
	 * hit:
	 */
	if (ca && pick.ptr.cached && !(flags & BCH_READ_NODECODE))
		bch2_bucket_io_time_reset(trans, pick.ptr.dev,
			PTR_BUCKET_NR(ca, &pick.ptr), READ);

	if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
		bio_inc_remaining(&orig->bio);
		trace_and_count(c, read_split, &orig->bio);
	}

	if (!rbio->pick.idx) {
		if (!rbio->have_ioref) {
			bch_err_inum_offset_ratelimited(c,
					read_pos.inode,
					read_pos.offset << 9,
					"no device to read from");
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}

		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
			     bio_sectors(&rbio->bio));
		bio_set_dev(&rbio->bio, ca->disk_sb.bdev);

		if (unlikely(c->opts.no_data_io)) {
			if (likely(!(flags & BCH_READ_IN_RETRY)))
				bio_endio(&rbio->bio);
		} else {
			if (likely(!(flags & BCH_READ_IN_RETRY)))
				submit_bio(&rbio->bio);
			else
				submit_bio_wait(&rbio->bio);
		}

		/*
		 * We just submitted IO which may block, we expect relock fail
		 * events and shouldn't count them:
		 */
		trans->notrace_relock_fail = true;
	} else {
		/* Attempting reconstruct read: */
		if (bch2_ec_read_extent(trans, rbio, k)) {
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}

		if (likely(!(flags & BCH_READ_IN_RETRY)))
			bio_endio(&rbio->bio);
	}
out:
	if (likely(!(flags & BCH_READ_IN_RETRY))) {
		return 0;
	} else {
		int ret;

		rbio->context = RBIO_CONTEXT_UNBOUND;
		bch2_read_endio(&rbio->bio);

		ret = rbio->retry;
		rbio = bch2_rbio_free(rbio);

		if (ret == READ_RETRY_AVOID) {
			bch2_mark_io_failure(failed, &pick);
			ret = READ_RETRY;
		}

		if (!ret)
			goto out_read_done;

		return ret;
	}

err:
	if (flags & BCH_READ_IN_RETRY)
		return READ_ERR;

	orig->bio.bi_status = BLK_STS_IOERR;
	goto out_read_done;

hole:
	/*
	 * won't normally happen in the BCH_READ_NODECODE
	 * (bch2_move_extent()) path, but if we retry and the extent we wanted
	 * to read no longer exists we have to signal that:
	 */
	if (flags & BCH_READ_NODECODE)
		orig->hole = true;

	zero_fill_bio_iter(&orig->bio, iter);
out_read_done:
	if (flags & BCH_READ_LAST_FRAGMENT)
		bch2_rbio_done(orig);
	return 0;
}

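/*
 * Outer read loop: walk the extents btree, resolving reflink pointers as we
 * go, and issue a read for each fragment of the request. Each iteration
 * temporarily shrinks bvec_iter.bi_size to the current fragment, issues it,
 * then advances; the final fragment carries BCH_READ_LAST_FRAGMENT so that
 * completion of the whole request is signalled exactly once.
 */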
void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
		 struct bvec_iter bvec_iter, subvol_inum inum,
		 struct bch_io_failures *failed, unsigned flags)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_buf sk;
	struct bkey_s_c k;
	int ret;

	BUG_ON(flags & BCH_READ_NODECODE);

	bch2_bkey_buf_init(&sk);
	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum.inum, bvec_iter.bi_sector),
			     BTREE_ITER_slots);

	while (1) {
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		bch2_trans_begin(trans);

		u32 snapshot;
		ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
		if (ret)
			goto err;

		bch2_btree_iter_set_snapshot(&iter, snapshot);

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, bvec_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			goto err;

		k = bkey_i_to_s_c(sk.k);

		/*
		 * With indirect extents, the amount of data to read is the min
		 * of the original extent and the indirect extent:
		 */
		sectors = min(sectors, k.k->size - offset_into_extent);

		bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
		swap(bvec_iter.bi_size, bytes);

		if (bvec_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos,
					 data_btree, k,
					 offset_into_extent, failed, flags);
		if (ret)
			goto err;

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(bvec_iter.bi_size, bytes);
		bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
err:
		if (ret &&
		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
		    ret != READ_RETRY &&
		    ret != READ_RETRY_AVOID)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);

	if (ret) {
		bch_err_inum_offset_ratelimited(c, inum.inum,
						bvec_iter.bi_sector << 9,
						"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bch2_rbio_done(rbio);
	}
}

void bch2_fs_io_read_exit(struct bch_fs *c)
{
	if (c->promote_table.tbl)
		rhashtable_destroy(&c->promote_table);
	bioset_exit(&c->bio_read_split);
	bioset_exit(&c->bio_read);
}

int bch2_fs_io_read_init(struct bch_fs *c)
{
	if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_bio_read_init;

	if (bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_bio_read_split_init;

	if (rhashtable_init(&c->promote_table, &bch_promote_params))
		return -BCH_ERR_ENOMEM_promote_table_init;

	return 0;
}