1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4 *
5 * Code for managing the extent btree and dynamically updating the writeback
6 * dirty sector count.
7 */
8
9 #include "bcachefs.h"
10 #include "bkey_methods.h"
11 #include "btree_cache.h"
12 #include "btree_gc.h"
13 #include "btree_io.h"
14 #include "btree_iter.h"
15 #include "buckets.h"
16 #include "checksum.h"
17 #include "compress.h"
18 #include "debug.h"
19 #include "disk_groups.h"
20 #include "error.h"
21 #include "extents.h"
22 #include "inode.h"
23 #include "journal.h"
24 #include "replicas.h"
25 #include "super.h"
26 #include "super-io.h"
27 #include "trace.h"
28 #include "util.h"
29
30 static unsigned bch2_crc_field_size_max[] = {
31 [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
32 [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
33 [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
34 };
35
36 static void bch2_extent_crc_pack(union bch_extent_crc *,
37 struct bch_extent_crc_unpacked,
38 enum bch_extent_entry_type);
39
40 struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *f,
41 unsigned dev)
42 {
43 struct bch_dev_io_failures *i;
44
45 for (i = f->devs; i < f->devs + f->nr; i++)
46 if (i->dev == dev)
47 return i;
48
49 return NULL;
50 }
51
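/*
 * Record an I/O failure against one replica of an extent, so that subsequent
 * retries prefer a different copy (or, failing that, erasure coded
 * reconstruction):
 */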
52 void bch2_mark_io_failure(struct bch_io_failures *failed,
53 struct extent_ptr_decoded *p)
54 {
55 struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, p->ptr.dev);
56
57 if (!f) {
58 BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
59
60 f = &failed->devs[failed->nr++];
61 f->dev = p->ptr.dev;
62 f->idx = p->idx;
63 f->nr_failed = 1;
64 f->nr_retries = 0;
65 } else if (p->idx != f->idx) {
66 f->idx = p->idx;
67 f->nr_failed = 1;
68 f->nr_retries = 0;
69 } else {
70 f->nr_failed++;
71 }
72 }
73
74 static inline u64 dev_latency(struct bch_fs *c, unsigned dev)
75 {
76 struct bch_dev *ca = bch2_dev_rcu(c, dev);
77 return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;
78 }
79
80 /*
81 * returns true if p1 is better than p2:
82 */
83 static inline bool ptr_better(struct bch_fs *c,
84 const struct extent_ptr_decoded p1,
85 const struct extent_ptr_decoded p2)
86 {
87 if (likely(!p1.idx && !p2.idx)) {
88 u64 l1 = dev_latency(c, p1.ptr.dev);
89 u64 l2 = dev_latency(c, p2.ptr.dev);
90
91 /* Pick at random, biased in favor of the faster device: */
92
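/*
 * e.g. with l1 == 1ms and l2 == 3ms of recent read latency, p1 (the faster
 * device) wins roughly 75% of the time, assuming bch2_rand_range() is
 * approximately uniform over [0, l1 + l2):
 */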
93 return bch2_rand_range(l1 + l2) > l1;
94 }
95
96 if (bch2_force_reconstruct_read)
97 return p1.idx > p2.idx;
98
99 return p1.idx < p2.idx;
100 }
101
102 /*
103 * This picks a non-stale pointer, preferably from a device that hasn't seen
104 * recent read failures (per @failed, which may be NULL). Returns 1 if a
105 * pointer was picked, 0 if the extent reads as a hole, or -EIO on error.
106 */
107 int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
108 struct bch_io_failures *failed,
109 struct extent_ptr_decoded *pick)
110 {
111 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
112 const union bch_extent_entry *entry;
113 struct extent_ptr_decoded p;
114 struct bch_dev_io_failures *f;
115 int ret = 0;
116
117 if (k.k->type == KEY_TYPE_error)
118 return -EIO;
119
120 rcu_read_lock();
121 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
122 /*
123 * Unwritten extent: no need to actually read, treat it as a
124 * hole and return 0s:
125 */
126 if (p.ptr.unwritten) {
127 ret = 0;
128 break;
129 }
130
131 /*
132 * If there are any dirty pointers it's an error if we can't
133 * read:
134 */
135 if (!ret && !p.ptr.cached)
136 ret = -EIO;
137
138 struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
139
140 if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))
141 continue;
142
143 f = failed ? bch2_dev_io_failures(failed, p.ptr.dev) : NULL;
144 if (f)
145 p.idx = f->nr_failed < f->nr_retries
146 ? f->idx
147 : f->idx + 1;
148
149 if (!p.idx && !ca)
150 p.idx++;
151
152 if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
153 p.idx++;
154
155 if (!p.idx && !bch2_dev_is_readable(ca))
156 p.idx++;
157
158 if (p.idx >= (unsigned) p.has_ec + 1)
159 continue;
160
161 if (ret > 0 && !ptr_better(c, p, *pick))
162 continue;
163
164 *pick = p;
165 ret = 1;
166 }
167 rcu_read_unlock();
168
169 return ret;
170 }
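/*
 * Sketch of typical use in a read path (illustrative only - the surrounding
 * helpers named here are not taken from the actual read code):
 *
 *	struct extent_ptr_decoded pick;
 *	int ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
 *
 *	if (ret < 0)		- nothing readable, return the error
 *	else if (ret == 0)	- hole/unwritten extent, return zeroes
 *	else			- issue the read against pick.ptr
 */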
171
172 /* KEY_TYPE_btree_ptr: */
173
174 int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k,
175 enum bch_validate_flags flags)
176 {
177 int ret = 0;
178
179 bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX,
180 c, btree_ptr_val_too_big,
181 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
182
183 ret = bch2_bkey_ptrs_validate(c, k, flags);
184 fsck_err:
185 return ret;
186 }
187
188 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
189 struct bkey_s_c k)
190 {
191 bch2_bkey_ptrs_to_text(out, c, k);
192 }
193
194 int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k,
195 enum bch_validate_flags flags)
196 {
197 struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
198 int ret = 0;
199
200 bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
201 c, btree_ptr_v2_val_too_big,
202 "value too big (%zu > %zu)",
203 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
204
205 bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
206 c, btree_ptr_v2_min_key_bad,
207 "min_key > key");
208
209 if (flags & BCH_VALIDATE_write)
210 bkey_fsck_err_on(!bp.v->sectors_written,
211 c, btree_ptr_v2_written_0,
212 "sectors_written == 0");
213
214 ret = bch2_bkey_ptrs_validate(c, k, flags);
215 fsck_err:
216 return ret;
217 }
218
219 void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
220 struct bkey_s_c k)
221 {
222 struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
223
224 prt_printf(out, "seq %llx written %u min_key %s",
225 le64_to_cpu(bp.v->seq),
226 le16_to_cpu(bp.v->sectors_written),
227 BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");
228
229 bch2_bpos_to_text(out, bp.v->min_key);
230 prt_printf(out, " ");
231 bch2_bkey_ptrs_to_text(out, c, k);
232 }
233
234 void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
235 unsigned big_endian, int write,
236 struct bkey_s k)
237 {
238 struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);
239
240 compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
241
242 if (version < bcachefs_metadata_version_inode_btree_change &&
243 btree_id_is_extents(btree_id) &&
244 !bkey_eq(bp.v->min_key, POS_MIN))
245 bp.v->min_key = write
246 ? bpos_nosnap_predecessor(bp.v->min_key)
247 : bpos_nosnap_successor(bp.v->min_key);
248 }
249
250 /* KEY_TYPE_extent: */
251
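/*
 * Try to merge two adjacent extents: only possible when they carry the same
 * sequence of entry types, their pointers are physically contiguous within
 * the same buckets, and their checksum entries can be reused or combined:
 */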
252 bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
253 {
254 struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
255 struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
256 union bch_extent_entry *en_l;
257 const union bch_extent_entry *en_r;
258 struct extent_ptr_decoded lp, rp;
259 bool use_right_ptr;
260
261 en_l = l_ptrs.start;
262 en_r = r_ptrs.start;
263 while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
264 if (extent_entry_type(en_l) != extent_entry_type(en_r))
265 return false;
266
267 en_l = extent_entry_next(en_l);
268 en_r = extent_entry_next(en_r);
269 }
270
271 if (en_l < l_ptrs.end || en_r < r_ptrs.end)
272 return false;
273
274 en_l = l_ptrs.start;
275 en_r = r_ptrs.start;
276 lp.crc = bch2_extent_crc_unpack(l.k, NULL);
277 rp.crc = bch2_extent_crc_unpack(r.k, NULL);
278
279 while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
280 __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
281 if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
282 rp.ptr.offset + rp.crc.offset ||
283 lp.ptr.dev != rp.ptr.dev ||
284 lp.ptr.gen != rp.ptr.gen ||
285 lp.ptr.unwritten != rp.ptr.unwritten ||
286 lp.has_ec != rp.has_ec)
287 return false;
288
289 /* Extents may not straddle buckets: */
290 rcu_read_lock();
291 struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev);
292 bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr);
293 rcu_read_unlock();
294
295 if (!same_bucket)
296 return false;
297
298 if (lp.has_ec != rp.has_ec ||
299 (lp.has_ec &&
300 (lp.ec.block != rp.ec.block ||
301 lp.ec.redundancy != rp.ec.redundancy ||
302 lp.ec.idx != rp.ec.idx)))
303 return false;
304
305 if (lp.crc.compression_type != rp.crc.compression_type ||
306 lp.crc.nonce != rp.crc.nonce)
307 return false;
308
309 if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
310 lp.crc.uncompressed_size) {
311 /* can use left extent's crc entry */
312 } else if (lp.crc.live_size <= rp.crc.offset) {
313 /* can use right extent's crc entry */
314 } else {
315 /* check if checksums can be merged: */
316 if (lp.crc.csum_type != rp.crc.csum_type ||
317 lp.crc.nonce != rp.crc.nonce ||
318 crc_is_compressed(lp.crc) ||
319 !bch2_checksum_mergeable(lp.crc.csum_type))
320 return false;
321
322 if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
323 rp.crc.offset)
324 return false;
325
326 if (lp.crc.csum_type &&
327 lp.crc.uncompressed_size +
328 rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
329 return false;
330 }
331
332 en_l = extent_entry_next(en_l);
333 en_r = extent_entry_next(en_r);
334 }
335
336 en_l = l_ptrs.start;
337 en_r = r_ptrs.start;
338 while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
339 if (extent_entry_is_crc(en_l)) {
340 struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
341 struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
342
343 if (crc_l.uncompressed_size + crc_r.uncompressed_size >
344 bch2_crc_field_size_max[extent_entry_type(en_l)])
345 return false;
346 }
347
348 en_l = extent_entry_next(en_l);
349 en_r = extent_entry_next(en_r);
350 }
351
352 use_right_ptr = false;
353 en_l = l_ptrs.start;
354 en_r = r_ptrs.start;
355 while (en_l < l_ptrs.end) {
356 if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
357 use_right_ptr)
358 en_l->ptr = en_r->ptr;
359
360 if (extent_entry_is_crc(en_l)) {
361 struct bch_extent_crc_unpacked crc_l =
362 bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
363 struct bch_extent_crc_unpacked crc_r =
364 bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
365
366 use_right_ptr = false;
367
368 if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
369 crc_l.uncompressed_size) {
370 /* can use left extent's crc entry */
371 } else if (crc_l.live_size <= crc_r.offset) {
372 /* can use right extent's crc entry */
373 crc_r.offset -= crc_l.live_size;
374 bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
375 extent_entry_type(en_l));
376 use_right_ptr = true;
377 } else {
378 crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
379 crc_l.csum,
380 crc_r.csum,
381 crc_r.uncompressed_size << 9);
382
383 crc_l.uncompressed_size += crc_r.uncompressed_size;
384 crc_l.compressed_size += crc_r.compressed_size;
385 bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
386 extent_entry_type(en_l));
387 }
388 }
389
390 en_l = extent_entry_next(en_l);
391 en_r = extent_entry_next(en_r);
392 }
393
394 bch2_key_resize(l.k, l.k->size + r.k->size);
395 return true;
396 }
397
398 /* KEY_TYPE_reservation: */
399
400 int bch2_reservation_validate(struct bch_fs *c, struct bkey_s_c k,
401 enum bch_validate_flags flags)
402 {
403 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
404 int ret = 0;
405
406 bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX,
407 c, reservation_key_nr_replicas_invalid,
408 "invalid nr_replicas (%u)", r.v->nr_replicas);
409 fsck_err:
410 return ret;
411 }
412
413 void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
414 struct bkey_s_c k)
415 {
416 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
417
418 prt_printf(out, "generation %u replicas %u",
419 le32_to_cpu(r.v->generation),
420 r.v->nr_replicas);
421 }
422
423 bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
424 {
425 struct bkey_s_reservation l = bkey_s_to_reservation(_l);
426 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);
427
428 if (l.v->generation != r.v->generation ||
429 l.v->nr_replicas != r.v->nr_replicas)
430 return false;
431
432 bch2_key_resize(l.k, l.k->size + r.k->size);
433 return true;
434 }
435
436 /* Extent checksum entries: */
437
438 /* returns true if not equal */
439 static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
440 struct bch_extent_crc_unpacked r)
441 {
442 return (l.csum_type != r.csum_type ||
443 l.compression_type != r.compression_type ||
444 l.compressed_size != r.compressed_size ||
445 l.uncompressed_size != r.uncompressed_size ||
446 l.offset != r.offset ||
447 l.live_size != r.live_size ||
448 l.nonce != r.nonce ||
449 bch2_crc_cmp(l.csum, r.csum));
450 }
451
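/*
 * Can the existing checksum entry @u be replaced by the narrower entry @n,
 * which covers only the live data? Requires that @u is uncompressed,
 * checksummed, covers more data than is live, and that @u and @n agree on
 * whether the checksum type is an encryption type:
 */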
452 static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
453 struct bch_extent_crc_unpacked n)
454 {
455 return !crc_is_compressed(u) &&
456 u.csum_type &&
457 u.uncompressed_size > u.live_size &&
458 bch2_csum_type_is_encryption(u.csum_type) ==
459 bch2_csum_type_is_encryption(n.csum_type);
460 }
461
462 bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
463 struct bch_extent_crc_unpacked n)
464 {
465 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
466 struct bch_extent_crc_unpacked crc;
467 const union bch_extent_entry *i;
468
469 if (!n.csum_type)
470 return false;
471
472 bkey_for_each_crc(k.k, ptrs, crc, i)
473 if (can_narrow_crc(crc, n))
474 return true;
475
476 return false;
477 }
478
479 /*
480 * We're writing another replica for this extent, so while we've got the data in
481 * memory we'll be computing a new checksum for the currently live data.
482 *
483 * If there are other replicas we aren't moving, and they are checksummed but
484 * not compressed, we can modify them to point to only the data that is
485 * currently live (so that readers won't have to bounce) while we've got the
486 * checksum we need:
487 */
488 bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
489 {
490 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
491 struct bch_extent_crc_unpacked u;
492 struct extent_ptr_decoded p;
493 union bch_extent_entry *i;
494 bool ret = false;
495
496 /* Find a checksum entry that covers only live data: */
497 if (!n.csum_type) {
498 bkey_for_each_crc(&k->k, ptrs, u, i)
499 if (!crc_is_compressed(u) &&
500 u.csum_type &&
501 u.live_size == u.uncompressed_size) {
502 n = u;
503 goto found;
504 }
505 return false;
506 }
507 found:
508 BUG_ON(crc_is_compressed(n));
509 BUG_ON(n.offset);
510 BUG_ON(n.live_size != k->k.size);
511
512 restart_narrow_pointers:
513 ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
514
515 bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
516 if (can_narrow_crc(p.crc, n)) {
517 bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
518 p.ptr.offset += p.crc.offset;
519 p.crc = n;
520 bch2_extent_ptr_decoded_append(k, &p);
521 ret = true;
522 goto restart_narrow_pointers;
523 }
524
525 return ret;
526 }
527
528 static void bch2_extent_crc_pack(union bch_extent_crc *dst,
529 struct bch_extent_crc_unpacked src,
530 enum bch_extent_entry_type type)
531 {
532 #define set_common_fields(_dst, _src) \
533 _dst.type = 1 << type; \
534 _dst.csum_type = _src.csum_type, \
535 _dst.compression_type = _src.compression_type, \
536 _dst._compressed_size = _src.compressed_size - 1, \
537 _dst._uncompressed_size = _src.uncompressed_size - 1, \
538 _dst.offset = _src.offset
539
540 switch (type) {
541 case BCH_EXTENT_ENTRY_crc32:
542 set_common_fields(dst->crc32, src);
543 dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
544 break;
545 case BCH_EXTENT_ENTRY_crc64:
546 set_common_fields(dst->crc64, src);
547 dst->crc64.nonce = src.nonce;
548 dst->crc64.csum_lo = (u64 __force) src.csum.lo;
549 dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
550 break;
551 case BCH_EXTENT_ENTRY_crc128:
552 set_common_fields(dst->crc128, src);
553 dst->crc128.nonce = src.nonce;
554 dst->crc128.csum = src.csum;
555 break;
556 default:
557 BUG();
558 }
559 #undef set_common_fields
560 }
561
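/*
 * Append a checksum entry for @new, using the smallest on-disk encoding
 * (crc32/crc64/crc128) that can represent the checksum, sizes and nonce:
 */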
562 void bch2_extent_crc_append(struct bkey_i *k,
563 struct bch_extent_crc_unpacked new)
564 {
565 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
566 union bch_extent_crc *crc = (void *) ptrs.end;
567 enum bch_extent_entry_type type;
568
569 if (bch_crc_bytes[new.csum_type] <= 4 &&
570 new.uncompressed_size <= CRC32_SIZE_MAX &&
571 new.nonce <= CRC32_NONCE_MAX)
572 type = BCH_EXTENT_ENTRY_crc32;
573 else if (bch_crc_bytes[new.csum_type] <= 10 &&
574 new.uncompressed_size <= CRC64_SIZE_MAX &&
575 new.nonce <= CRC64_NONCE_MAX)
576 type = BCH_EXTENT_ENTRY_crc64;
577 else if (bch_crc_bytes[new.csum_type] <= 16 &&
578 new.uncompressed_size <= CRC128_SIZE_MAX &&
579 new.nonce <= CRC128_NONCE_MAX)
580 type = BCH_EXTENT_ENTRY_crc128;
581 else
582 BUG();
583
584 bch2_extent_crc_pack(crc, new, type);
585
586 k->k.u64s += extent_entry_u64s(ptrs.end);
587
588 EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
589 }
590
591 /* Generic code for keys with pointers: */
592
593 unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
594 {
595 return bch2_bkey_devs(k).nr;
596 }
597
598 unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
599 {
600 return k.k->type == KEY_TYPE_reservation
601 ? bkey_s_c_to_reservation(k).v->nr_replicas
602 : bch2_bkey_dirty_devs(k).nr;
603 }
604
605 unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
606 {
607 unsigned ret = 0;
608
609 if (k.k->type == KEY_TYPE_reservation) {
610 ret = bkey_s_c_to_reservation(k).v->nr_replicas;
611 } else {
612 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
613 const union bch_extent_entry *entry;
614 struct extent_ptr_decoded p;
615
616 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
617 ret += !p.ptr.cached && !crc_is_compressed(p.crc);
618 }
619
620 return ret;
621 }
622
623 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
624 {
625 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
626 const union bch_extent_entry *entry;
627 struct extent_ptr_decoded p;
628 unsigned ret = 0;
629
630 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
631 if (!p.ptr.cached && crc_is_compressed(p.crc))
632 ret += p.crc.compressed_size;
633
634 return ret;
635 }
636
637 bool bch2_bkey_is_incompressible(struct bkey_s_c k)
638 {
639 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
640 const union bch_extent_entry *entry;
641 struct bch_extent_crc_unpacked crc;
642
643 bkey_for_each_crc(k.k, ptrs, crc, entry)
644 if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
645 return true;
646 return false;
647 }
648
649 unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
650 {
651 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
652 const union bch_extent_entry *entry;
653 struct extent_ptr_decoded p = { 0 };
654 unsigned replicas = 0;
655
656 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
657 if (p.ptr.cached)
658 continue;
659
660 if (p.has_ec)
661 replicas += p.ec.redundancy;
662
663 replicas++;
664
665 }
666
667 return replicas;
668 }
669
670 static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
671 {
672 if (p->ptr.cached)
673 return 0;
674
675 return p->has_ec
676 ? p->ec.redundancy + 1
677 : ca->mi.durability;
678 }
679
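/*
 * "Desired" durability still counts pointers on failed devices;
 * bch2_extent_ptr_durability() below does not:
 */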
680 unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
681 {
682 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
683
684 return ca ? __extent_ptr_durability(ca, p) : 0;
685 }
686
687 unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
688 {
689 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
690
691 if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed)
692 return 0;
693
694 return __extent_ptr_durability(ca, p);
695 }
696
697 unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
698 {
699 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
700 const union bch_extent_entry *entry;
701 struct extent_ptr_decoded p;
702 unsigned durability = 0;
703
704 rcu_read_lock();
705 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
706 durability += bch2_extent_ptr_durability(c, &p);
707 rcu_read_unlock();
708
709 return durability;
710 }
711
712 static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
713 {
714 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
715 const union bch_extent_entry *entry;
716 struct extent_ptr_decoded p;
717 unsigned durability = 0;
718
719 rcu_read_lock();
720 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
721 if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
722 durability += bch2_extent_ptr_durability(c, &p);
723 rcu_read_unlock();
724
725 return durability;
726 }
727
728 void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
729 {
730 union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
731 union bch_extent_entry *next = extent_entry_next(entry);
732
733 memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
734 k->k.u64s -= extent_entry_u64s(entry);
735 }
736
737 void bch2_extent_ptr_decoded_append(struct bkey_i *k,
738 struct extent_ptr_decoded *p)
739 {
740 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
741 struct bch_extent_crc_unpacked crc =
742 bch2_extent_crc_unpack(&k->k, NULL);
743 union bch_extent_entry *pos;
744
745 if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
746 pos = ptrs.start;
747 goto found;
748 }
749
750 bkey_for_each_crc(&k->k, ptrs, crc, pos)
751 if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
752 pos = extent_entry_next(pos);
753 goto found;
754 }
755
756 bch2_extent_crc_append(k, p->crc);
757 pos = bkey_val_end(bkey_i_to_s(k));
758 found:
759 p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
760 __extent_entry_insert(k, pos, to_entry(&p->ptr));
761
762 if (p->has_ec) {
763 p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
764 __extent_entry_insert(k, pos, to_entry(&p->ec));
765 }
766 }
767
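/*
 * Extent entries are variable length and only iterable forwards, so finding
 * the previous entry means rescanning from the start:
 */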
768 static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
769 union bch_extent_entry *entry)
770 {
771 union bch_extent_entry *i = ptrs.start;
772
773 if (i == entry)
774 return NULL;
775
776 while (extent_entry_next(i) != entry)
777 i = extent_entry_next(i);
778 return i;
779 }
780
781 /*
782 * Drop @ptr from @k; crc/stripe entries left covering no pointers are dropped too:
783 */
784 void bch2_bkey_drop_ptr_noerror(struct bkey_s k, struct bch_extent_ptr *ptr)
785 {
786 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
787 union bch_extent_entry *entry = to_entry(ptr), *next;
788 bool drop_crc = true;
789
790 if (k.k->type == KEY_TYPE_stripe) {
791 ptr->dev = BCH_SB_MEMBER_INVALID;
792 return;
793 }
794
795 EBUG_ON(ptr < &ptrs.start->ptr ||
796 ptr >= &ptrs.end->ptr);
797 EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
798
799 for (next = extent_entry_next(entry);
800 next != ptrs.end;
801 next = extent_entry_next(next)) {
802 if (extent_entry_is_crc(next)) {
803 break;
804 } else if (extent_entry_is_ptr(next)) {
805 drop_crc = false;
806 break;
807 }
808 }
809
810 extent_entry_drop(k, entry);
811
812 while ((entry = extent_entry_prev(ptrs, entry))) {
813 if (extent_entry_is_ptr(entry))
814 break;
815
816 if ((extent_entry_is_crc(entry) && drop_crc) ||
817 extent_entry_is_stripe_ptr(entry))
818 extent_entry_drop(k, entry);
819 }
820 }
821
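/*
 * Like bch2_bkey_drop_ptr_noerror(), but if the last dirty pointer was
 * dropped the key becomes KEY_TYPE_error (the data is gone), and a key left
 * with no pointers at all is deleted:
 */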
822 void bch2_bkey_drop_ptr(struct bkey_s k, struct bch_extent_ptr *ptr)
823 {
824 bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
825
826 bch2_bkey_drop_ptr_noerror(k, ptr);
827
828 /*
829 * If we deleted all the dirty pointers and there's still cached
830 * pointers, we could set the cached pointers to dirty if they're not
831 * stale - but to do that correctly we'd need to grab an open_bucket
832 * reference so that we don't race with bucket reuse:
833 */
834 if (have_dirty &&
835 !bch2_bkey_dirty_devs(k.s_c).nr) {
836 k.k->type = KEY_TYPE_error;
837 set_bkey_val_u64s(k.k, 0);
838 } else if (!bch2_bkey_nr_ptrs(k.s_c)) {
839 k.k->type = KEY_TYPE_deleted;
840 set_bkey_val_u64s(k.k, 0);
841 }
842 }
843
844 void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
845 {
846 bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
847 }
848
849 void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
850 {
851 struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);
852
853 if (ptr)
854 bch2_bkey_drop_ptr_noerror(k, ptr);
855 }
856
857 const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
858 {
859 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
860
861 bkey_for_each_ptr(ptrs, ptr)
862 if (ptr->dev == dev)
863 return ptr;
864
865 return NULL;
866 }
867
868 bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
869 {
870 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
871 struct bch_dev *ca;
872 bool ret = false;
873
874 rcu_read_lock();
875 bkey_for_each_ptr(ptrs, ptr)
876 if (bch2_dev_in_target(c, ptr->dev, target) &&
877 (ca = bch2_dev_rcu(c, ptr->dev)) &&
878 (!ptr->cached ||
879 !dev_ptr_stale_rcu(ca, ptr))) {
880 ret = true;
881 break;
882 }
883 rcu_read_unlock();
884
885 return ret;
886 }
887
888 bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
889 struct bch_extent_ptr m, u64 offset)
890 {
891 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
892 const union bch_extent_entry *entry;
893 struct extent_ptr_decoded p;
894
895 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
896 if (p.ptr.dev == m.dev &&
897 p.ptr.gen == m.gen &&
898 (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
899 (s64) m.offset - offset)
900 return true;
901
902 return false;
903 }
904
905 /*
906 * Returns true if two extents refer to the same data:
907 */
908 bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
909 {
910 if (k1.k->type != k2.k->type)
911 return false;
912
913 if (bkey_extent_is_direct_data(k1.k)) {
914 struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
915 struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
916 const union bch_extent_entry *entry1, *entry2;
917 struct extent_ptr_decoded p1, p2;
918
919 if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
920 return false;
921
922 bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
923 bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
924 if (p1.ptr.dev == p2.ptr.dev &&
925 p1.ptr.gen == p2.ptr.gen &&
926
927 /*
928 * This checks that the two pointers point
929 * to the same region on disk - adjusting
930 * for the difference in where the extents
931 * start, since one may have been trimmed:
932 */
933 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
934 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k) &&
935
936 /*
937 * This additionally checks that the
938 * extents overlap on disk, since the
939 * previous check may trigger spuriously
940 * when one extent is immediately partially
941 * overwritten with another extent (so that
942 * on disk they are adjacent) and
943 * compression is in use:
944 */
945 ((p1.ptr.offset >= p2.ptr.offset &&
946 p1.ptr.offset < p2.ptr.offset + p2.crc.compressed_size) ||
947 (p2.ptr.offset >= p1.ptr.offset &&
948 p2.ptr.offset < p1.ptr.offset + p1.crc.compressed_size)))
949 return true;
950
951 return false;
952 } else {
953 /* KEY_TYPE_deleted, etc. */
954 return true;
955 }
956 }
957
958 struct bch_extent_ptr *
959 bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
960 {
961 struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
962 union bch_extent_entry *entry2;
963 struct extent_ptr_decoded p2;
964
965 bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
966 if (p1.ptr.dev == p2.ptr.dev &&
967 p1.ptr.gen == p2.ptr.gen &&
968 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
969 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
970 return &entry2->ptr;
971
972 return NULL;
973 }
974
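/*
 * Mark @ptr cached; cached pointers aren't erasure coded, so any stripe
 * pointer entry covering it is dropped as well:
 */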
975 void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
976 {
977 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
978 union bch_extent_entry *entry;
979 union bch_extent_entry *ec = NULL;
980
981 bkey_extent_entry_for_each(ptrs, entry) {
982 if (&entry->ptr == ptr) {
983 ptr->cached = true;
984 if (ec)
985 extent_entry_drop(k, ec);
986 return;
987 }
988
989 if (extent_entry_is_stripe_ptr(entry))
990 ec = entry;
991 else if (extent_entry_is_ptr(entry))
992 ec = NULL;
993 }
994
995 BUG();
996 }
997
998 /*
999 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
1000 *
1001 * Returns true if @k should be dropped entirely
1002 *
1003 * For existing keys, only called when btree nodes are being rewritten, not when
1004 * they're merely being compacted/resorted in memory.
1005 */
1006 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
1007 {
1008 struct bch_dev *ca;
1009
1010 rcu_read_lock();
1011 bch2_bkey_drop_ptrs(k, ptr,
1012 ptr->cached &&
1013 (ca = bch2_dev_rcu(c, ptr->dev)) &&
1014 dev_ptr_stale_rcu(ca, ptr) > 0);
1015 rcu_read_unlock();
1016
1017 return bkey_deleted(k.k);
1018 }
1019
1020 void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
1021 {
1022 out->atomic++;
1023 rcu_read_lock();
1024 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
1025 if (!ca) {
1026 prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
1027 (u64) ptr->offset, ptr->gen,
1028 ptr->cached ? " cached" : "");
1029 } else {
1030 u32 offset;
1031 u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
1032
1033 prt_printf(out, "ptr: %u:%llu:%u gen %u",
1034 ptr->dev, b, offset, ptr->gen);
1035 if (ca->mi.durability != 1)
1036 prt_printf(out, " d=%u", ca->mi.durability);
1037 if (ptr->cached)
1038 prt_str(out, " cached");
1039 if (ptr->unwritten)
1040 prt_str(out, " unwritten");
1041 int stale = dev_ptr_stale_rcu(ca, ptr);
1042 if (stale > 0)
1043 prt_printf(out, " stale");
1044 else if (stale)
1045 prt_printf(out, " invalid");
1046 }
1047 rcu_read_unlock();
1048 --out->atomic;
1049 }
1050
1051 void bch2_extent_crc_unpacked_to_text(struct printbuf *out, struct bch_extent_crc_unpacked *crc)
1052 {
1053 prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ",
1054 crc->compressed_size,
1055 crc->uncompressed_size,
1056 crc->offset, crc->nonce);
1057 bch2_prt_csum_type(out, crc->csum_type);
1058 prt_printf(out, " %0llx:%0llx ", crc->csum.hi, crc->csum.lo);
1059 prt_str(out, " compress ");
1060 bch2_prt_compression_type(out, crc->compression_type);
1061 }
1062
1063 void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
1064 struct bkey_s_c k)
1065 {
1066 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1067 const union bch_extent_entry *entry;
1068 bool first = true;
1069
1070 if (c)
1071 prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));
1072
1073 bkey_extent_entry_for_each(ptrs, entry) {
1074 if (!first)
1075 prt_printf(out, " ");
1076
1077 switch (__extent_entry_type(entry)) {
1078 case BCH_EXTENT_ENTRY_ptr:
1079 bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry));
1080 break;
1081
1082 case BCH_EXTENT_ENTRY_crc32:
1083 case BCH_EXTENT_ENTRY_crc64:
1084 case BCH_EXTENT_ENTRY_crc128: {
1085 struct bch_extent_crc_unpacked crc =
1086 bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
1087
1088 bch2_extent_crc_unpacked_to_text(out, &crc);
1089 break;
1090 }
1091 case BCH_EXTENT_ENTRY_stripe_ptr: {
1092 const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;
1093
1094 prt_printf(out, "ec: idx %llu block %u",
1095 (u64) ec->idx, ec->block);
1096 break;
1097 }
1098 case BCH_EXTENT_ENTRY_rebalance: {
1099 const struct bch_extent_rebalance *r = &entry->rebalance;
1100
1101 prt_str(out, "rebalance: target ");
1102 if (c)
1103 bch2_target_to_text(out, c, r->target);
1104 else
1105 prt_printf(out, "%u", r->target);
1106 prt_str(out, " compression ");
1107 bch2_compression_opt_to_text(out, r->compression);
1108 break;
1109 }
1110 default:
1111 prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
1112 return;
1113 }
1114
1115 first = false;
1116 }
1117 }
1118
1119 static int extent_ptr_validate(struct bch_fs *c,
1120 struct bkey_s_c k,
1121 enum bch_validate_flags flags,
1122 const struct bch_extent_ptr *ptr,
1123 unsigned size_ondisk,
1124 bool metadata)
1125 {
1126 int ret = 0;
1127
1128 rcu_read_lock();
1129 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
1130 if (!ca) {
1131 rcu_read_unlock();
1132 return 0;
1133 }
1134 u32 bucket_offset;
1135 u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
1136 unsigned first_bucket = ca->mi.first_bucket;
1137 u64 nbuckets = ca->mi.nbuckets;
1138 unsigned bucket_size = ca->mi.bucket_size;
1139 rcu_read_unlock();
1140
1141 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1142 bkey_for_each_ptr(ptrs, ptr2)
1143 bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev,
1144 c, ptr_to_duplicate_device,
1145 "multiple pointers to same device (%u)", ptr->dev);
1146
1147
1148 bkey_fsck_err_on(bucket >= nbuckets,
1149 c, ptr_after_last_bucket,
1150 "pointer past last bucket (%llu > %llu)", bucket, nbuckets);
1151 bkey_fsck_err_on(bucket < first_bucket,
1152 c, ptr_before_first_bucket,
1153 "pointer before first bucket (%llu < %u)", bucket, first_bucket);
1154 bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size,
1155 c, ptr_spans_multiple_buckets,
1156 "pointer spans multiple buckets (%u + %u > %u)",
1157 bucket_offset, size_ondisk, bucket_size);
1158 fsck_err:
1159 return ret;
1160 }
1161
1162 int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k,
1163 enum bch_validate_flags flags)
1164 {
1165 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1166 const union bch_extent_entry *entry;
1167 struct bch_extent_crc_unpacked crc;
1168 unsigned size_ondisk = k.k->size;
1169 unsigned nonce = UINT_MAX;
1170 unsigned nr_ptrs = 0;
1171 bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
1172 int ret = 0;
1173
1174 if (bkey_is_btree_ptr(k.k))
1175 size_ondisk = btree_sectors(c);
1176
1177 bkey_extent_entry_for_each(ptrs, entry) {
1178 bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX,
1179 c, extent_ptrs_invalid_entry,
1180 "invalid extent entry type (got %u, max %u)",
1181 __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
1182
1183 bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
1184 !extent_entry_is_ptr(entry),
1185 c, btree_ptr_has_non_ptr,
1186 "has non ptr field");
1187
1188 switch (extent_entry_type(entry)) {
1189 case BCH_EXTENT_ENTRY_ptr:
1190 ret = extent_ptr_validate(c, k, flags, &entry->ptr, size_ondisk, false);
1191 if (ret)
1192 return ret;
1193
1194 bkey_fsck_err_on(entry->ptr.cached && have_ec,
1195 c, ptr_cached_and_erasure_coded,
1196 "cached, erasure coded ptr");
1197
1198 if (!entry->ptr.unwritten)
1199 have_written = true;
1200 else
1201 have_unwritten = true;
1202
1203 have_ec = false;
1204 crc_since_last_ptr = false;
1205 nr_ptrs++;
1206 break;
1207 case BCH_EXTENT_ENTRY_crc32:
1208 case BCH_EXTENT_ENTRY_crc64:
1209 case BCH_EXTENT_ENTRY_crc128:
1210 crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
1211
1212 bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size,
1213 c, ptr_crc_uncompressed_size_too_small,
1214 "checksum offset + key size > uncompressed size");
1215 bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type),
1216 c, ptr_crc_csum_type_unknown,
1217 "invalid checksum type");
1218 bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR,
1219 c, ptr_crc_compression_type_unknown,
1220 "invalid compression type");
1221
1222 if (bch2_csum_type_is_encryption(crc.csum_type)) {
1223 if (nonce == UINT_MAX)
1224 nonce = crc.offset + crc.nonce;
1225 else if (nonce != crc.offset + crc.nonce)
1226 bkey_fsck_err(c, ptr_crc_nonce_mismatch,
1227 "incorrect nonce");
1228 }
1229
1230 bkey_fsck_err_on(crc_since_last_ptr,
1231 c, ptr_crc_redundant,
1232 "redundant crc entry");
1233 crc_since_last_ptr = true;
1234
1235 bkey_fsck_err_on(crc_is_encoded(crc) &&
1236 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
1237 (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)),
1238 c, ptr_crc_uncompressed_size_too_big,
1239 "too large encoded extent");
1240
1241 size_ondisk = crc.compressed_size;
1242 break;
1243 case BCH_EXTENT_ENTRY_stripe_ptr:
1244 bkey_fsck_err_on(have_ec,
1245 c, ptr_stripe_redundant,
1246 "redundant stripe entry");
1247 have_ec = true;
1248 break;
1249 case BCH_EXTENT_ENTRY_rebalance: {
1250 /*
1251 * this shouldn't be a fsck error, for forward
1252 * compatibility; the rebalance code should just refetch
1253 * the compression opt if it's unknown
1254 */
1255 #if 0
1256 const struct bch_extent_rebalance *r = &entry->rebalance;
1257
1258 if (!bch2_compression_opt_valid(r->compression)) {
1259 struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
1260 prt_printf(err, "invalid compression opt %u:%u",
1261 opt.type, opt.level);
1262 return -BCH_ERR_invalid_bkey;
1263 }
1264 #endif
1265 break;
1266 }
1267 }
1268 }
1269
1270 bkey_fsck_err_on(!nr_ptrs,
1271 c, extent_ptrs_no_ptrs,
1272 "no ptrs");
1273 bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX,
1274 c, extent_ptrs_too_many_ptrs,
1275 "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
1276 bkey_fsck_err_on(have_written && have_unwritten,
1277 c, extent_ptrs_written_and_unwritten,
1278 "extent with unwritten and written ptrs");
1279 bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten,
1280 c, extent_ptrs_unwritten,
1281 "has unwritten ptrs");
1282 bkey_fsck_err_on(crc_since_last_ptr,
1283 c, extent_ptrs_redundant_crc,
1284 "redundant crc entry");
1285 bkey_fsck_err_on(have_ec,
1286 c, extent_ptrs_redundant_stripe,
1287 "redundant stripe entry");
1288 fsck_err:
1289 return ret;
1290 }
1291
1292 void bch2_ptr_swab(struct bkey_s k)
1293 {
1294 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
1295 union bch_extent_entry *entry;
1296 u64 *d;
1297
1298 for (d = (u64 *) ptrs.start;
1299 d != (u64 *) ptrs.end;
1300 d++)
1301 *d = swab64(*d);
1302
1303 for (entry = ptrs.start;
1304 entry < ptrs.end;
1305 entry = extent_entry_next(entry)) {
1306 switch (extent_entry_type(entry)) {
1307 case BCH_EXTENT_ENTRY_ptr:
1308 break;
1309 case BCH_EXTENT_ENTRY_crc32:
1310 entry->crc32.csum = swab32(entry->crc32.csum);
1311 break;
1312 case BCH_EXTENT_ENTRY_crc64:
1313 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
1314 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
1315 break;
1316 case BCH_EXTENT_ENTRY_crc128:
1317 entry->crc128.csum.hi = (__force __le64)
1318 swab64((__force u64) entry->crc128.csum.hi);
1319 entry->crc128.csum.lo = (__force __le64)
1320 swab64((__force u64) entry->crc128.csum.lo);
1321 break;
1322 case BCH_EXTENT_ENTRY_stripe_ptr:
1323 break;
1324 case BCH_EXTENT_ENTRY_rebalance:
1325 break;
1326 }
1327 }
1328 }
1329
1330 const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
1331 {
1332 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1333 const union bch_extent_entry *entry;
1334
1335 bkey_extent_entry_for_each(ptrs, entry)
1336 if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
1337 return &entry->rebalance;
1338
1339 return NULL;
1340 }
1341
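/*
 * Returns a bitmask, indexed by position within the key, of the pointers
 * that would have to be rewritten to satisfy @target/@compression:
 */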
1342 unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
1343 unsigned target, unsigned compression)
1344 {
1345 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1346 unsigned rewrite_ptrs = 0;
1347
1348 if (compression) {
1349 unsigned compression_type = bch2_compression_opt_to_type(compression);
1350 const union bch_extent_entry *entry;
1351 struct extent_ptr_decoded p;
1352 unsigned i = 0;
1353
1354 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1355 if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
1356 p.ptr.unwritten) {
1357 rewrite_ptrs = 0;
1358 goto incompressible;
1359 }
1360
1361 if (!p.ptr.cached && p.crc.compression_type != compression_type)
1362 rewrite_ptrs |= 1U << i;
1363 i++;
1364 }
1365 }
1366 incompressible:
1367 if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
1368 unsigned i = 0;
1369
1370 bkey_for_each_ptr(ptrs, ptr) {
1371 if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
1372 rewrite_ptrs |= 1U << i;
1373 i++;
1374 }
1375 }
1376
1377 return rewrite_ptrs;
1378 }
1379
1380 bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
1381 {
1382 const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
1383
1384 /*
1385 * If it's an indirect extent, we don't delete the rebalance entry when
1386 * done so that we know what options were applied - check if it still
1387 * needs work done:
1388 */
1389 if (r &&
1390 k.k->type == KEY_TYPE_reflink_v &&
1391 !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
1392 r = NULL;
1393
1394 return r != NULL;
1395 }
1396
1397 static u64 __bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
1398 unsigned target, unsigned compression)
1399 {
1400 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1401 const union bch_extent_entry *entry;
1402 struct extent_ptr_decoded p;
1403 u64 sectors = 0;
1404
1405 if (compression) {
1406 unsigned compression_type = bch2_compression_opt_to_type(compression);
1407
1408 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1409 if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
1410 p.ptr.unwritten) {
1411 sectors = 0;
1412 goto incompressible;
1413 }
1414
1415 if (!p.ptr.cached && p.crc.compression_type != compression_type)
1416 sectors += p.crc.compressed_size;
1417 }
1418 }
1419 incompressible:
1420 if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
1421 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
1422 if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, target))
1423 sectors += p.crc.compressed_size;
1424 }
1425
1426 return sectors;
1427 }
1428
1429 u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
1430 {
1431 const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
1432
1433 return r ? __bch2_bkey_sectors_need_rebalance(c, k, r->target, r->compression) : 0;
1434 }
1435
1436 int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
1437 struct bch_io_opts *opts)
1438 {
1439 struct bkey_s k = bkey_i_to_s(_k);
1440 struct bch_extent_rebalance *r;
1441 unsigned target = opts->background_target;
1442 unsigned compression = background_compression(*opts);
1443 bool needs_rebalance;
1444
1445 if (!bkey_extent_is_direct_data(k.k))
1446 return 0;
1447
1448 /* get existing rebalance entry: */
1449 r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
1450 if (r) {
1451 if (k.k->type == KEY_TYPE_reflink_v) {
1452 /*
1453 * indirect extents: existing options take precedence,
1454 * so that we don't move extents back and forth if
1455 * they're referenced by different inodes with different
1456 * options:
1457 */
1458 if (r->target)
1459 target = r->target;
1460 if (r->compression)
1461 compression = r->compression;
1462 }
1463
1464 r->target = target;
1465 r->compression = compression;
1466 }
1467
1468 needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);
1469
1470 if (needs_rebalance && !r) {
1471 union bch_extent_entry *new = bkey_val_end(k);
1472
1473 new->rebalance.type = 1U << BCH_EXTENT_ENTRY_rebalance;
1474 new->rebalance.compression = compression;
1475 new->rebalance.target = target;
1476 new->rebalance.unused = 0;
1477 k.k->u64s += extent_entry_u64s(new);
1478 } else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
1479 /*
1480 * For indirect extents, don't delete the rebalance entry when
1481 * we're finished so that we know we specifically moved it or
1482 * compressed it to its current location/compression type
1483 */
1484 extent_entry_drop(k, (union bch_extent_entry *) r);
1485 }
1486
1487 return 0;
1488 }
1489
1490 /* Generic extent code: */
1491
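/*
 * Trim the front of an extent to @where, adjusting pointer/crc offsets (or
 * shifting inline data); returns the change in value size in u64s, as a
 * number <= 0:
 */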
1492 int bch2_cut_front_s(struct bpos where, struct bkey_s k)
1493 {
1494 unsigned new_val_u64s = bkey_val_u64s(k.k);
1495 int val_u64s_delta;
1496 u64 sub;
1497
1498 if (bkey_le(where, bkey_start_pos(k.k)))
1499 return 0;
1500
1501 EBUG_ON(bkey_gt(where, k.k->p));
1502
1503 sub = where.offset - bkey_start_offset(k.k);
1504
1505 k.k->size -= sub;
1506
1507 if (!k.k->size) {
1508 k.k->type = KEY_TYPE_deleted;
1509 new_val_u64s = 0;
1510 }
1511
1512 switch (k.k->type) {
1513 case KEY_TYPE_extent:
1514 case KEY_TYPE_reflink_v: {
1515 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
1516 union bch_extent_entry *entry;
1517 bool seen_crc = false;
1518
1519 bkey_extent_entry_for_each(ptrs, entry) {
1520 switch (extent_entry_type(entry)) {
1521 case BCH_EXTENT_ENTRY_ptr:
1522 if (!seen_crc)
1523 entry->ptr.offset += sub;
1524 break;
1525 case BCH_EXTENT_ENTRY_crc32:
1526 entry->crc32.offset += sub;
1527 break;
1528 case BCH_EXTENT_ENTRY_crc64:
1529 entry->crc64.offset += sub;
1530 break;
1531 case BCH_EXTENT_ENTRY_crc128:
1532 entry->crc128.offset += sub;
1533 break;
1534 case BCH_EXTENT_ENTRY_stripe_ptr:
1535 break;
1536 case BCH_EXTENT_ENTRY_rebalance:
1537 break;
1538 }
1539
1540 if (extent_entry_is_crc(entry))
1541 seen_crc = true;
1542 }
1543
1544 break;
1545 }
1546 case KEY_TYPE_reflink_p: {
1547 struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
1548
1549 le64_add_cpu(&p.v->idx, sub);
1550 break;
1551 }
1552 case KEY_TYPE_inline_data:
1553 case KEY_TYPE_indirect_inline_data: {
1554 void *p = bkey_inline_data_p(k);
1555 unsigned bytes = bkey_inline_data_bytes(k.k);
1556
1557 sub = min_t(u64, sub << 9, bytes);
1558
1559 memmove(p, p + sub, bytes - sub);
1560
1561 new_val_u64s -= sub >> 3;
1562 break;
1563 }
1564 }
1565
1566 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
1567 BUG_ON(val_u64s_delta < 0);
1568
1569 set_bkey_val_u64s(k.k, new_val_u64s);
1570 memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
1571 return -val_u64s_delta;
1572 }
1573
1574 int bch2_cut_back_s(struct bpos where, struct bkey_s k)
1575 {
1576 unsigned new_val_u64s = bkey_val_u64s(k.k);
1577 int val_u64s_delta;
1578 u64 len = 0;
1579
1580 if (bkey_ge(where, k.k->p))
1581 return 0;
1582
1583 EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
1584
1585 len = where.offset - bkey_start_offset(k.k);
1586
1587 k.k->p.offset = where.offset;
1588 k.k->size = len;
1589
1590 if (!len) {
1591 k.k->type = KEY_TYPE_deleted;
1592 new_val_u64s = 0;
1593 }
1594
1595 switch (k.k->type) {
1596 case KEY_TYPE_inline_data:
1597 case KEY_TYPE_indirect_inline_data:
1598 new_val_u64s = (bkey_inline_data_offset(k.k) +
1599 min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
1600 break;
1601 }
1602
1603 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
1604 BUG_ON(val_u64s_delta < 0);
1605
1606 set_bkey_val_u64s(k.k, new_val_u64s);
1607 memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
1608 return -val_u64s_delta;
1609 }
1610