// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "compress.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"
#include "util.h"

static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);

struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *f,
						 unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}

static inline u64 dev_latency(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;
}

/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		u64 l1 = dev_latency(c, p1.ptr.dev);
		u64 l2 = dev_latency(c, p2.ptr.dev);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}
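
/*
 * Worked example of the biased pick above (illustrative numbers): with
 * measured latencies l1 = 1ms and l2 = 3ms, bch2_rand_range(l1 + l2) is
 * uniform in [0, 4ms), so p1 wins with probability 3/4 - reads are spread
 * across replicas in inverse proportion to latency instead of always
 * hammering the single fastest device.
 */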

/*
 * This picks a non-stale pointer, preferably from a device that hasn't seen
 * failures (per @failed, which may be NULL, meaning pick any). If there are
 * no non-stale pointers on other devices, it will still pick a pointer from
 * a device that has failed.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -BCH_ERR_key_type_error;

	rcu_read_lock();
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		/*
		 * Unwritten extent: no need to actually read, treat it as a
		 * hole and return 0s:
		 */
		if (p.ptr.unwritten) {
			ret = 0;
			break;
		}

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -BCH_ERR_no_device_to_read_from;

		struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);

		if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))
			continue;

		f = failed ? bch2_dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx && (!ca || !bch2_dev_is_readable(ca)))
			p.idx++;

		if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
			p.idx++;

		if (p.idx > (unsigned) p.has_ec)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}
	rcu_read_unlock();

	return ret;
}
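
/*
 * Sketch of a hypothetical caller (simplified; the real read path lives in
 * io_read.c, and error handling is elided here):
 *
 *	struct extent_ptr_decoded pick;
 *	int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick);
 *
 *	if (ret > 0) {
 *		// read from pick.ptr.dev at pick.ptr.offset
 *	} else if (!ret) {
 *		// unwritten extent (or nothing to read): treat as a hole
 *	} else {
 *		// error, e.g. -BCH_ERR_no_device_to_read_from
 *	}
 */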

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k,
			    enum bch_validate_flags flags)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX,
			 c, btree_ptr_val_too_big,
			 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);

	ret = bch2_bkey_ptrs_validate(c, k, flags);
fsck_err:
	return ret;
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k,
			       enum bch_validate_flags flags)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
	int ret = 0;

	bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
			 c, btree_ptr_v2_val_too_big,
			 "value too big (%zu > %zu)",
			 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);

	bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
			 c, btree_ptr_v2_min_key_bad,
			 "min_key > key");

	if (flags & BCH_VALIDATE_write)
		bkey_fsck_err_on(!bp.v->sectors_written,
				 c, btree_ptr_v2_written_0,
				 "sectors_written == 0");

	ret = bch2_bkey_ptrs_validate(c, k, flags);
fsck_err:
	return ret;
}

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	prt_printf(out, "seq %llx written %u min_key %s",
	       le64_to_cpu(bp.v->seq),
	       le16_to_cpu(bp.v->sectors_written),
	       BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

	bch2_bpos_to_text(out, bp.v->min_key);
	prt_printf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id_is_extents(btree_id) &&
	    !bkey_eq(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);
}

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	struct bkey_ptrs   l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;
	bool use_right_ptr;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return false;

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)
		return false;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev			!= rp.ptr.dev ||
		    lp.ptr.gen			!= rp.ptr.gen ||
		    lp.ptr.unwritten		!= rp.ptr.unwritten ||
		    lp.has_ec			!= rp.has_ec)
			return false;

		/* Extents may not straddle buckets: */
		rcu_read_lock();
		struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev);
		bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr);
		rcu_read_unlock();

		if (!same_bucket)
			return false;

		if (lp.has_ec			!= rp.has_ec ||
		    (lp.has_ec &&
		     (lp.ec.block		!= rp.ec.block ||
		      lp.ec.redundancy		!= rp.ec.redundancy ||
		      lp.ec.idx			!= rp.ec.idx)))
			return false;

		if (lp.crc.compression_type	!= rp.crc.compression_type ||
		    lp.crc.nonce		!= rp.crc.nonce)
			return false;

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
		} else {
			/* check if checksums can be merged: */
			if (lp.crc.csum_type		!= rp.crc.csum_type ||
			    lp.crc.nonce		!= rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))
				return false;

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
			    rp.crc.offset)
				return false;

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	use_right_ptr = false;
	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
		    use_right_ptr)
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
			} else {
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_l.csum,
								 crc_r.csum,
								 crc_r.uncompressed_size << 9);

				crc_l.uncompressed_size	+= crc_r.uncompressed_size;
				crc_l.compressed_size	+= crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));
			}
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}
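
/*
 * Illustrative example (keys assumed, uncompressed data with a mergeable
 * checksum such as crc32c): merging extents covering [0,8) and [8,16),
 * where the right extent's pointer continues exactly where the left one
 * ends within the same bucket. The checksums are combined with
 * bch2_checksum_merge(), the crc sizes are summed, and the merged key
 * covers [0,16). If the pointers landed in different buckets, or the
 * combined size overflowed the crc entry's field size, this returns false
 * and the keys stay separate.
 */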

/* KEY_TYPE_reservation: */

int bch2_reservation_validate(struct bch_fs *c, struct bkey_s_c k,
			      enum bch_validate_flags flags)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
	int ret = 0;

	bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX,
			 c, reservation_key_nr_replicas_invalid,
			 "invalid nr_replicas (%u)", r.v->nr_replicas);
fsck_err:
	return ret;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	prt_printf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}

/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}
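
/*
 * Worked example (sizes assumed): after most of a 128 sector checksummed
 * write is overwritten, an extent might carry crc.uncompressed_size = 128,
 * crc.offset = 24, crc.live_size = 8. Given a new checksum @n covering just
 * the 8 live sectors, narrowing advances ptr.offset by the old crc.offset
 * and installs @n (offset = 0, live_size = uncompressed_size = 8), so
 * future reads bounce only the live region instead of all 128 sectors.
 */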

static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum		= (u32 __force) *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= (u64 __force) src.csum.lo;
		dst->crc64.csum_hi	= (u64 __force) *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}

void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type]	<= 10 &&
		   new.uncompressed_size	<= CRC64_SIZE_MAX &&
		   new.nonce			<= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type]	<= 16 &&
		   new.uncompressed_size	<= CRC128_SIZE_MAX &&
		   new.nonce			<= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
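
/*
 * Example of how the entry type is chosen (values assumed): a crc32c
 * checksum (4 bytes) over a 64 sector unencrypted extent packs into the
 * compact crc32 entry; an encrypted extent carries a nonce, which the
 * crc32 entry has no room for, so it spills to crc64, as do 8-10 byte
 * checksums such as xxhash; only very large encoded extents or 16 byte
 * checksums need the full crc128 entry.
 */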

/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;
	return false;
}

unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}

static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
{
	if (p->ptr.cached)
		return 0;

	return p->has_ec
		? p->ec.redundancy + 1
		: ca->mi.durability;
}

unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);

	return ca ? __extent_ptr_durability(ca, p) : 0;
}

unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);

	if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed)
		return 0;

	return __extent_ptr_durability(ca, p);
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	rcu_read_lock();
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, &p);
	rcu_read_unlock();

	return durability;
}
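
/*
 * Worked example: an extent with two non-cached pointers to durability 1
 * devices has durability 2. If one of those pointers is instead part of a
 * stripe with ec.redundancy = 1, that pointer alone contributes
 * redundancy + 1 = 2, for a total of 3. Cached pointers and pointers to
 * failed devices contribute nothing.
 */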

static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	rcu_read_lock();
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
			durability += bch2_extent_ptr_durability(c, &p);
	rcu_read_unlock();

	return durability;
}

void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}

void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
					  union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}

779  * Returns pointer to the next entry after the one being dropped:
780  */
781 void bch2_bkey_drop_ptr_noerror(struct bkey_s k, struct bch_extent_ptr *ptr)
782 {
783 	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
784 	union bch_extent_entry *entry = to_entry(ptr), *next;
785 	bool drop_crc = true;
786 
787 	if (k.k->type == KEY_TYPE_stripe) {
788 		ptr->dev = BCH_SB_MEMBER_INVALID;
789 		return;
790 	}
791 
792 	EBUG_ON(ptr < &ptrs.start->ptr ||
793 		ptr >= &ptrs.end->ptr);
794 	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
795 
796 	for (next = extent_entry_next(entry);
797 	     next != ptrs.end;
798 	     next = extent_entry_next(next)) {
799 		if (extent_entry_is_crc(next)) {
800 			break;
801 		} else if (extent_entry_is_ptr(next)) {
802 			drop_crc = false;
803 			break;
804 		}
805 	}
806 
807 	extent_entry_drop(k, entry);
808 
809 	while ((entry = extent_entry_prev(ptrs, entry))) {
810 		if (extent_entry_is_ptr(entry))
811 			break;
812 
813 		if ((extent_entry_is_crc(entry) && drop_crc) ||
814 		    extent_entry_is_stripe_ptr(entry))
815 			extent_entry_drop(k, entry);
816 	}
817 }
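
/*
 * Example of the crc bookkeeping above (layout assumed): in a value laid
 * out as [crc A][ptr 1][ptr 2][crc B][ptr 3], dropping ptr 3 also drops
 * crc B, since no following pointer uses it; dropping ptr 1 keeps crc A,
 * because ptr 2 still refers to it.
 */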

void bch2_bkey_drop_ptr(struct bkey_s k, struct bch_extent_ptr *ptr)
{
	if (k.k->type != KEY_TYPE_stripe) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k.s_c);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (p.ptr.dev == ptr->dev && p.has_ec) {
				ptr->dev = BCH_SB_MEMBER_INVALID;
				return;
			}
	}

	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;

	bch2_bkey_drop_ptr_noerror(k, ptr);

	/*
	 * If we deleted all the dirty pointers and there's still cached
	 * pointers, we could set the cached pointers to dirty if they're not
	 * stale - but to do that correctly we'd need to grab an open_bucket
	 * reference so that we don't race with bucket reuse:
	 */
	if (have_dirty &&
	    !bch2_bkey_dirty_devs(k.s_c).nr) {
		k.k->type = KEY_TYPE_error;
		set_bkey_val_u64s(k.k, 0);
	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
		k.k->type = KEY_TYPE_deleted;
		set_bkey_val_u64s(k.k, 0);
	}
}

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
{
	bch2_bkey_drop_ptrs_noerror(k, ptr, ptr->dev == dev);
}

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_dev *ca;
	bool ret = false;

	rcu_read_lock();
	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (ca = bch2_dev_rcu(c, ptr->dev)) &&
		    (!ptr->cached ||
		     !dev_ptr_stale_rcu(ca, ptr))) {
			ret = true;
			break;
		}
	rcu_read_unlock();

	return ret;
}

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev	== m.dev &&
		    p.ptr.gen	== m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset  - offset)
			return true;

	return false;
}

/*
 * Returns true if two extents refer to the same data:
 */
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
{
	if (k1.k->type != k2.k->type)
		return false;

	if (bkey_extent_is_direct_data(k1.k)) {
		struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
		struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
		const union bch_extent_entry *entry1, *entry2;
		struct extent_ptr_decoded p1, p2;

		if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
			return false;

		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
				if (p1.ptr.dev		== p2.ptr.dev &&
				    p1.ptr.gen		== p2.ptr.gen &&

				    /*
				     * This checks that the two pointers point
				     * to the same region on disk - adjusting
				     * for the difference in where the extents
				     * start, since one may have been trimmed:
				     */
				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k) &&

				    /*
				     * This additionally checks that the
				     * extents overlap on disk, since the
				     * previous check may trigger spuriously
				     * when one extent is immediately partially
				     * overwritten with another extent (so that
				     * on disk they are adjacent) and
				     * compression is in use:
				     */
				    ((p1.ptr.offset >= p2.ptr.offset &&
				      p1.ptr.offset  < p2.ptr.offset + p2.crc.compressed_size) ||
				     (p2.ptr.offset >= p1.ptr.offset &&
				      p2.ptr.offset  < p1.ptr.offset + p1.crc.compressed_size)))
					return true;

		return false;
	} else {
		/* KEY_TYPE_deleted, etc. */
		return true;
	}
}
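
/*
 * Worked example of the offset arithmetic above (numbers assumed): k1
 * covers sectors [20,28) with ptr.offset 100 and crc.offset 0; k2 is k1
 * trimmed to [24,28), leaving ptr.offset 100 and crc.offset 4 (or
 * equivalently ptr.offset 104 and crc.offset 0). In every case
 * ptr.offset + crc.offset - bkey_start_offset() comes out to 80, so the
 * two keys are recognized as referring to the same data.
 */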

struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
{
	struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
	union bch_extent_entry *entry2;
	struct extent_ptr_decoded p2;

	bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
		if (p1.ptr.dev		== p2.ptr.dev &&
		    p1.ptr.gen		== p2.ptr.gen &&
		    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
		    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
			return &entry2->ptr;

	return NULL;
}

void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	union bch_extent_entry *ec = NULL;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (&entry->ptr == ptr) {
			ptr->cached = true;
			if (ec)
				extent_entry_drop(k, ec);
			return;
		}

		if (extent_entry_is_stripe_ptr(entry))
			ec = entry;
		else if (extent_entry_is_ptr(entry))
			ec = NULL;
	}

	BUG();
}

/*
 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_dev *ca;

	rcu_read_lock();
	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		(ca = bch2_dev_rcu(c, ptr->dev)) &&
		dev_ptr_stale_rcu(ca, ptr) > 0);
	rcu_read_unlock();

	return bkey_deleted(k.k);
}

void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
{
	out->atomic++;
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
	if (!ca) {
		prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
			   (u64) ptr->offset, ptr->gen,
			   ptr->cached ? " cached" : "");
	} else {
		u32 offset;
		u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

		prt_printf(out, "ptr: %u:%llu:%u gen %u",
			   ptr->dev, b, offset, ptr->gen);
		if (ca->mi.durability != 1)
			prt_printf(out, " d=%u", ca->mi.durability);
		if (ptr->cached)
			prt_str(out, " cached");
		if (ptr->unwritten)
			prt_str(out, " unwritten");
		int stale = dev_ptr_stale_rcu(ca, ptr);
		if (stale > 0)
			prt_printf(out, " stale");
		else if (stale)
			prt_printf(out, " invalid");
	}
	rcu_read_unlock();
	--out->atomic;
}

void bch2_extent_crc_unpacked_to_text(struct printbuf *out, struct bch_extent_crc_unpacked *crc)
{
	prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ",
		   crc->compressed_size,
		   crc->uncompressed_size,
		   crc->offset, crc->nonce);
	bch2_prt_csum_type(out, crc->csum_type);
	prt_printf(out, " %0llx:%0llx ", crc->csum.hi, crc->csum.lo);
	prt_str(out, " compress ");
	bch2_prt_compression_type(out, crc->compression_type);
}

void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	bool first = true;

	if (c)
		prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			prt_printf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry));
			break;

		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128: {
			struct bch_extent_crc_unpacked crc =
				bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			bch2_extent_crc_unpacked_to_text(out, &crc);
			break;
		}
		case BCH_EXTENT_ENTRY_stripe_ptr: {
			const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;

			prt_printf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		}
		case BCH_EXTENT_ENTRY_rebalance: {
			const struct bch_extent_rebalance *r = &entry->rebalance;

			prt_str(out, "rebalance: target ");
			if (c)
				bch2_target_to_text(out, c, r->target);
			else
				prt_printf(out, "%u", r->target);
			prt_str(out, " compression ");
			bch2_compression_opt_to_text(out, r->compression);
			break;
		}
		default:
			prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}

static int extent_ptr_validate(struct bch_fs *c,
			       struct bkey_s_c k,
			       enum bch_validate_flags flags,
			       const struct bch_extent_ptr *ptr,
			       unsigned size_ondisk,
			       bool metadata)
{
	int ret = 0;

	/* bad pointers are repaired by check_fix_ptrs(): */
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
	if (!ca) {
		rcu_read_unlock();
		return 0;
	}
	u32 bucket_offset;
	u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
	unsigned first_bucket	= ca->mi.first_bucket;
	u64 nbuckets		= ca->mi.nbuckets;
	unsigned bucket_size	= ca->mi.bucket_size;
	rcu_read_unlock();

	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr(ptrs, ptr2)
		bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev,
				 c, ptr_to_duplicate_device,
				 "multiple pointers to same device (%u)", ptr->dev);

	bkey_fsck_err_on(bucket >= nbuckets,
			 c, ptr_after_last_bucket,
			 "pointer past last bucket (%llu > %llu)", bucket, nbuckets);
	bkey_fsck_err_on(bucket < first_bucket,
			 c, ptr_before_first_bucket,
			 "pointer before first bucket (%llu < %u)", bucket, first_bucket);
	bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size,
			 c, ptr_spans_multiple_buckets,
			 "pointer spans multiple buckets (%u + %u > %u)",
		       bucket_offset, size_ondisk, bucket_size);
fsck_err:
	return ret;
}

int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k,
			    enum bch_validate_flags flags)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	unsigned nonce = UINT_MAX;
	unsigned nr_ptrs = 0;
	bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
	int ret = 0;

	if (bkey_is_btree_ptr(k.k))
		size_ondisk = btree_sectors(c);

	bkey_extent_entry_for_each(ptrs, entry) {
		bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX,
				 c, extent_ptrs_invalid_entry,
				 "invalid extent entry type (got %u, max %u)",
				 __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);

		bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
				 !extent_entry_is_ptr(entry),
				 c, btree_ptr_has_non_ptr,
				 "has non ptr field");

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ret = extent_ptr_validate(c, k, flags, &entry->ptr, size_ondisk, false);
			if (ret)
				return ret;

			bkey_fsck_err_on(entry->ptr.cached && have_ec,
					 c, ptr_cached_and_erasure_coded,
					 "cached, erasure coded ptr");

			if (!entry->ptr.unwritten)
				have_written = true;
			else
				have_unwritten = true;

			have_ec = false;
			crc_since_last_ptr = false;
			nr_ptrs++;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size,
					 c, ptr_crc_uncompressed_size_too_small,
					 "checksum offset + key size > uncompressed size");
			bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type),
					 c, ptr_crc_csum_type_unknown,
					 "invalid checksum type");
			bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR,
					 c, ptr_crc_compression_type_unknown,
					 "invalid compression type");

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					bkey_fsck_err(c, ptr_crc_nonce_mismatch,
						      "incorrect nonce");
			}

			bkey_fsck_err_on(crc_since_last_ptr,
					 c, ptr_crc_redundant,
					 "redundant crc entry");
			crc_since_last_ptr = true;

			bkey_fsck_err_on(crc_is_encoded(crc) &&
					 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
					 (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)),
					 c, ptr_crc_uncompressed_size_too_big,
					 "too large encoded extent");

			size_ondisk = crc.compressed_size;
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			bkey_fsck_err_on(have_ec,
					 c, ptr_stripe_redundant,
					 "redundant stripe entry");
			have_ec = true;
			break;
		case BCH_EXTENT_ENTRY_rebalance: {
			/*
			 * this shouldn't be a fsck error, for forward
			 * compatibility; the rebalance code should just refetch
			 * the compression opt if it's unknown
			 */
#if 0
			const struct bch_extent_rebalance *r = &entry->rebalance;

			if (!bch2_compression_opt_valid(r->compression)) {
				struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
				prt_printf(err, "invalid compression opt %u:%u",
					   opt.type, opt.level);
				return -BCH_ERR_invalid_bkey;
			}
#endif
			break;
		}
		}
	}

	bkey_fsck_err_on(!nr_ptrs,
			 c, extent_ptrs_no_ptrs,
			 "no ptrs");
	bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX,
			 c, extent_ptrs_too_many_ptrs,
			 "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
	bkey_fsck_err_on(have_written && have_unwritten,
			 c, extent_ptrs_written_and_unwritten,
			 "extent with unwritten and written ptrs");
	bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten,
			 c, extent_ptrs_unwritten,
			 "has unwritten ptrs");
	bkey_fsck_err_on(crc_since_last_ptr,
			 c, extent_ptrs_redundant_crc,
			 "redundant crc entry");
	bkey_fsck_err_on(have_ec,
			 c, extent_ptrs_redundant_stripe,
			 "redundant stripe entry");
fsck_err:
	return ret;
}

void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d =  (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		case BCH_EXTENT_ENTRY_rebalance:
			break;
		}
	}
}

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

	bkey_extent_entry_for_each(ptrs, entry)
		if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
			return &entry->rebalance;

	return NULL;
}

unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
				       unsigned target, unsigned compression)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned rewrite_ptrs = 0;

	if (compression) {
		unsigned compression_type = bch2_compression_opt_to_type(compression);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		unsigned i = 0;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
			    p.ptr.unwritten) {
				rewrite_ptrs = 0;
				goto incompressible;
			}

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				rewrite_ptrs |= 1U << i;
			i++;
		}
	}
incompressible:
	if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
		unsigned i = 0;

		bkey_for_each_ptr(ptrs, ptr) {
			if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
				rewrite_ptrs |= 1U << i;
			i++;
		}
	}

	return rewrite_ptrs;
}
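
/*
 * Example (extent assumed): for a key with two dirty pointers [dev 0, lz4
 * compressed] and [dev 1, uncompressed], a background compression option
 * of zstd and a background target containing only dev 0, the compression
 * pass sets bits 0 and 1 (both pointers have the wrong compression type)
 * and the target pass sets bit 1 (dev 1 is outside the target) - the
 * returned bitmask names exactly which pointers rebalance must rewrite.
 */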

bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

	/*
	 * If it's an indirect extent, we don't delete the rebalance entry when
	 * done so that we know what options were applied - check if it still
	 * needs work done:
	 */
	if (r &&
	    k.k->type == KEY_TYPE_reflink_v &&
	    !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
		r = NULL;

	return r != NULL;
}

static u64 __bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
				       unsigned target, unsigned compression)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	u64 sectors = 0;

	if (compression) {
		unsigned compression_type = bch2_compression_opt_to_type(compression);

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
			    p.ptr.unwritten) {
				sectors = 0;
				goto incompressible;
			}

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				sectors += p.crc.compressed_size;
		}
	}
incompressible:
	if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, target))
				sectors += p.crc.compressed_size;
	}

	return sectors;
}

u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

	return r ? __bch2_bkey_sectors_need_rebalance(c, k, r->target, r->compression) : 0;
}

int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
				  struct bch_io_opts *opts)
{
	struct bkey_s k = bkey_i_to_s(_k);
	struct bch_extent_rebalance *r;
	unsigned target = opts->background_target;
	unsigned compression = background_compression(*opts);
	bool needs_rebalance;

	if (!bkey_extent_is_direct_data(k.k))
		return 0;

	/* get existing rebalance entry: */
	r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
	if (r) {
		if (k.k->type == KEY_TYPE_reflink_v) {
			/*
			 * indirect extents: existing options take precedence,
			 * so that we don't move extents back and forth if
			 * they're referenced by different inodes with different
			 * options:
			 */
			if (r->target)
				target = r->target;
			if (r->compression)
				compression = r->compression;
		}

		r->target	= target;
		r->compression	= compression;
	}

	needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);

	if (needs_rebalance && !r) {
		union bch_extent_entry *new = bkey_val_end(k);

		new->rebalance.type		= 1U << BCH_EXTENT_ENTRY_rebalance;
		new->rebalance.compression	= compression;
		new->rebalance.target		= target;
		new->rebalance.unused		= 0;
		k.k->u64s += extent_entry_u64s(new);
	} else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
		/*
		 * For indirect extents, don't delete the rebalance entry when
		 * we're finished so that we know we specifically moved it or
		 * compressed it to its current location/compression type
		 */
		extent_entry_drop(k, (union bch_extent_entry *) r);
	}

	return 0;
}

/* Generic extent code: */

int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_le(where, bkey_start_pos(k.k)))
		return 0;

	EBUG_ON(bkey_gt(where, k.k->p));

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			case BCH_EXTENT_ENTRY_rebalance:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}
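
/*
 * Worked example (extent assumed): cutting [8,16) down to [12,16) with
 * @where.offset = 12 gives sub = 4; the size shrinks by 4 and the pointer
 * offsets (or crc offsets, for checksummed extents) grow by 4, so the
 * surviving sectors still map to the same data on disk. The return value
 * is minus the change in value u64s - nonzero only for inline data keys,
 * which actually shrink.
 */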

int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_ge(where, k.k->p))
		return 0;

	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}