1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com> 4 * 5 * Code for managing the extent btree and dynamically updating the writeback 6 * dirty sector count. 7 */ 8 9 #include "bcachefs.h" 10 #include "bkey_methods.h" 11 #include "btree_cache.h" 12 #include "btree_gc.h" 13 #include "btree_io.h" 14 #include "btree_iter.h" 15 #include "buckets.h" 16 #include "checksum.h" 17 #include "compress.h" 18 #include "debug.h" 19 #include "disk_groups.h" 20 #include "error.h" 21 #include "extents.h" 22 #include "inode.h" 23 #include "journal.h" 24 #include "replicas.h" 25 #include "super.h" 26 #include "super-io.h" 27 #include "trace.h" 28 #include "util.h" 29 30 static unsigned bch2_crc_field_size_max[] = { 31 [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX, 32 [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX, 33 [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX, 34 }; 35 36 static void bch2_extent_crc_pack(union bch_extent_crc *, 37 struct bch_extent_crc_unpacked, 38 enum bch_extent_entry_type); 39 40 struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *f, 41 unsigned dev) 42 { 43 struct bch_dev_io_failures *i; 44 45 for (i = f->devs; i < f->devs + f->nr; i++) 46 if (i->dev == dev) 47 return i; 48 49 return NULL; 50 } 51 52 void bch2_mark_io_failure(struct bch_io_failures *failed, 53 struct extent_ptr_decoded *p) 54 { 55 struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, p->ptr.dev); 56 57 if (!f) { 58 BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs)); 59 60 f = &failed->devs[failed->nr++]; 61 f->dev = p->ptr.dev; 62 f->idx = p->idx; 63 f->nr_failed = 1; 64 f->nr_retries = 0; 65 } else if (p->idx != f->idx) { 66 f->idx = p->idx; 67 f->nr_failed = 1; 68 f->nr_retries = 0; 69 } else { 70 f->nr_failed++; 71 } 72 } 73 74 static inline u64 dev_latency(struct bch_fs *c, unsigned dev) 75 { 76 struct bch_dev *ca = bch2_dev_rcu(c, dev); 77 return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX; 78 } 79 80 /* 81 * returns true if p1 is better than p2: 82 */ 83 static inline bool ptr_better(struct bch_fs *c, 84 const struct extent_ptr_decoded p1, 85 const struct extent_ptr_decoded p2) 86 { 87 if (likely(!p1.idx && !p2.idx)) { 88 u64 l1 = dev_latency(c, p1.ptr.dev); 89 u64 l2 = dev_latency(c, p2.ptr.dev); 90 91 /* Pick at random, biased in favor of the faster device: */ 92 93 return bch2_rand_range(l1 + l2) > l1; 94 } 95 96 if (bch2_force_reconstruct_read) 97 return p1.idx > p2.idx; 98 99 return p1.idx < p2.idx; 100 } 101 102 /* 103 * This picks a non-stale pointer, preferably from a device other than @avoid. 104 * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to 105 * other devices, it will still pick a pointer from avoid. 106 */ 107 int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, 108 struct bch_io_failures *failed, 109 struct extent_ptr_decoded *pick) 110 { 111 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 112 const union bch_extent_entry *entry; 113 struct extent_ptr_decoded p; 114 struct bch_dev_io_failures *f; 115 int ret = 0; 116 117 if (k.k->type == KEY_TYPE_error) 118 return -EIO; 119 120 rcu_read_lock(); 121 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { 122 /* 123 * Unwritten extent: no need to actually read, treat it as a 124 * hole and return 0s: 125 */ 126 if (p.ptr.unwritten) { 127 ret = 0; 128 break; 129 } 130 131 /* 132 * If there are any dirty pointers it's an error if we can't 133 * read: 134 */ 135 if (!ret && !p.ptr.cached) 136 ret = -EIO; 137 138 struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev); 139 140 if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr))) 141 continue; 142 143 f = failed ? bch2_dev_io_failures(failed, p.ptr.dev) : NULL; 144 if (f) 145 p.idx = f->nr_failed < f->nr_retries 146 ? f->idx 147 : f->idx + 1; 148 149 if (!p.idx && !ca) 150 p.idx++; 151 152 if (!p.idx && p.has_ec && bch2_force_reconstruct_read) 153 p.idx++; 154 155 if (!p.idx && !bch2_dev_is_readable(ca)) 156 p.idx++; 157 158 if (p.idx >= (unsigned) p.has_ec + 1) 159 continue; 160 161 if (ret > 0 && !ptr_better(c, p, *pick)) 162 continue; 163 164 *pick = p; 165 ret = 1; 166 } 167 rcu_read_unlock(); 168 169 return ret; 170 } 171 172 /* KEY_TYPE_btree_ptr: */ 173 174 int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k, 175 enum bch_validate_flags flags, 176 struct printbuf *err) 177 { 178 int ret = 0; 179 180 bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err, 181 btree_ptr_val_too_big, 182 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX); 183 184 ret = bch2_bkey_ptrs_invalid(c, k, flags, err); 185 fsck_err: 186 return ret; 187 } 188 189 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c, 190 struct bkey_s_c k) 191 { 192 bch2_bkey_ptrs_to_text(out, c, k); 193 } 194 195 int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k, 196 enum bch_validate_flags flags, 197 struct printbuf *err) 198 { 199 struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k); 200 int ret = 0; 201 202 bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, 203 c, err, btree_ptr_v2_val_too_big, 204 "value too big (%zu > %zu)", 205 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX); 206 207 bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p), 208 c, err, btree_ptr_v2_min_key_bad, 209 "min_key > key"); 210 211 if (flags & BCH_VALIDATE_write) 212 bkey_fsck_err_on(!bp.v->sectors_written, 213 c, err, btree_ptr_v2_written_0, 214 "sectors_written == 0"); 215 216 ret = bch2_bkey_ptrs_invalid(c, k, flags, err); 217 fsck_err: 218 return ret; 219 } 220 221 void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c, 222 struct bkey_s_c k) 223 { 224 struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k); 225 226 prt_printf(out, "seq %llx written %u min_key %s", 227 le64_to_cpu(bp.v->seq), 228 le16_to_cpu(bp.v->sectors_written), 229 BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : ""); 230 231 bch2_bpos_to_text(out, bp.v->min_key); 232 prt_printf(out, " "); 233 bch2_bkey_ptrs_to_text(out, c, k); 234 } 235 236 void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version, 237 unsigned big_endian, int write, 238 struct bkey_s k) 239 { 240 struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k); 241 242 compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key); 243 244 if (version < bcachefs_metadata_version_inode_btree_change && 245 btree_id_is_extents(btree_id) && 246 !bkey_eq(bp.v->min_key, POS_MIN)) 247 bp.v->min_key = write 248 ? bpos_nosnap_predecessor(bp.v->min_key) 249 : bpos_nosnap_successor(bp.v->min_key); 250 } 251 252 /* KEY_TYPE_extent: */ 253 254 bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r) 255 { 256 struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l); 257 struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r); 258 union bch_extent_entry *en_l; 259 const union bch_extent_entry *en_r; 260 struct extent_ptr_decoded lp, rp; 261 bool use_right_ptr; 262 263 en_l = l_ptrs.start; 264 en_r = r_ptrs.start; 265 while (en_l < l_ptrs.end && en_r < r_ptrs.end) { 266 if (extent_entry_type(en_l) != extent_entry_type(en_r)) 267 return false; 268 269 en_l = extent_entry_next(en_l); 270 en_r = extent_entry_next(en_r); 271 } 272 273 if (en_l < l_ptrs.end || en_r < r_ptrs.end) 274 return false; 275 276 en_l = l_ptrs.start; 277 en_r = r_ptrs.start; 278 lp.crc = bch2_extent_crc_unpack(l.k, NULL); 279 rp.crc = bch2_extent_crc_unpack(r.k, NULL); 280 281 while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) && 282 __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) { 283 if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size != 284 rp.ptr.offset + rp.crc.offset || 285 lp.ptr.dev != rp.ptr.dev || 286 lp.ptr.gen != rp.ptr.gen || 287 lp.ptr.unwritten != rp.ptr.unwritten || 288 lp.has_ec != rp.has_ec) 289 return false; 290 291 /* Extents may not straddle buckets: */ 292 rcu_read_lock(); 293 struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev); 294 bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr); 295 rcu_read_unlock(); 296 297 if (!same_bucket) 298 return false; 299 300 if (lp.has_ec != rp.has_ec || 301 (lp.has_ec && 302 (lp.ec.block != rp.ec.block || 303 lp.ec.redundancy != rp.ec.redundancy || 304 lp.ec.idx != rp.ec.idx))) 305 return false; 306 307 if (lp.crc.compression_type != rp.crc.compression_type || 308 lp.crc.nonce != rp.crc.nonce) 309 return false; 310 311 if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <= 312 lp.crc.uncompressed_size) { 313 /* can use left extent's crc entry */ 314 } else if (lp.crc.live_size <= rp.crc.offset) { 315 /* can use right extent's crc entry */ 316 } else { 317 /* check if checksums can be merged: */ 318 if (lp.crc.csum_type != rp.crc.csum_type || 319 lp.crc.nonce != rp.crc.nonce || 320 crc_is_compressed(lp.crc) || 321 !bch2_checksum_mergeable(lp.crc.csum_type)) 322 return false; 323 324 if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size || 325 rp.crc.offset) 326 return false; 327 328 if (lp.crc.csum_type && 329 lp.crc.uncompressed_size + 330 rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9)) 331 return false; 332 } 333 334 en_l = extent_entry_next(en_l); 335 en_r = extent_entry_next(en_r); 336 } 337 338 en_l = l_ptrs.start; 339 en_r = r_ptrs.start; 340 while (en_l < l_ptrs.end && en_r < r_ptrs.end) { 341 if (extent_entry_is_crc(en_l)) { 342 struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l)); 343 struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r)); 344 345 if (crc_l.uncompressed_size + crc_r.uncompressed_size > 346 bch2_crc_field_size_max[extent_entry_type(en_l)]) 347 return false; 348 } 349 350 en_l = extent_entry_next(en_l); 351 en_r = extent_entry_next(en_r); 352 } 353 354 use_right_ptr = false; 355 en_l = l_ptrs.start; 356 en_r = r_ptrs.start; 357 while (en_l < l_ptrs.end) { 358 if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr && 359 use_right_ptr) 360 en_l->ptr = en_r->ptr; 361 362 if (extent_entry_is_crc(en_l)) { 363 struct bch_extent_crc_unpacked crc_l = 364 bch2_extent_crc_unpack(l.k, entry_to_crc(en_l)); 365 struct bch_extent_crc_unpacked crc_r = 366 bch2_extent_crc_unpack(r.k, entry_to_crc(en_r)); 367 368 use_right_ptr = false; 369 370 if (crc_l.offset + crc_l.live_size + crc_r.live_size <= 371 crc_l.uncompressed_size) { 372 /* can use left extent's crc entry */ 373 } else if (crc_l.live_size <= crc_r.offset) { 374 /* can use right extent's crc entry */ 375 crc_r.offset -= crc_l.live_size; 376 bch2_extent_crc_pack(entry_to_crc(en_l), crc_r, 377 extent_entry_type(en_l)); 378 use_right_ptr = true; 379 } else { 380 crc_l.csum = bch2_checksum_merge(crc_l.csum_type, 381 crc_l.csum, 382 crc_r.csum, 383 crc_r.uncompressed_size << 9); 384 385 crc_l.uncompressed_size += crc_r.uncompressed_size; 386 crc_l.compressed_size += crc_r.compressed_size; 387 bch2_extent_crc_pack(entry_to_crc(en_l), crc_l, 388 extent_entry_type(en_l)); 389 } 390 } 391 392 en_l = extent_entry_next(en_l); 393 en_r = extent_entry_next(en_r); 394 } 395 396 bch2_key_resize(l.k, l.k->size + r.k->size); 397 return true; 398 } 399 400 /* KEY_TYPE_reservation: */ 401 402 int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k, 403 enum bch_validate_flags flags, 404 struct printbuf *err) 405 { 406 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k); 407 int ret = 0; 408 409 bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err, 410 reservation_key_nr_replicas_invalid, 411 "invalid nr_replicas (%u)", r.v->nr_replicas); 412 fsck_err: 413 return ret; 414 } 415 416 void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c, 417 struct bkey_s_c k) 418 { 419 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k); 420 421 prt_printf(out, "generation %u replicas %u", 422 le32_to_cpu(r.v->generation), 423 r.v->nr_replicas); 424 } 425 426 bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r) 427 { 428 struct bkey_s_reservation l = bkey_s_to_reservation(_l); 429 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r); 430 431 if (l.v->generation != r.v->generation || 432 l.v->nr_replicas != r.v->nr_replicas) 433 return false; 434 435 bch2_key_resize(l.k, l.k->size + r.k->size); 436 return true; 437 } 438 439 /* Extent checksum entries: */ 440 441 /* returns true if not equal */ 442 static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l, 443 struct bch_extent_crc_unpacked r) 444 { 445 return (l.csum_type != r.csum_type || 446 l.compression_type != r.compression_type || 447 l.compressed_size != r.compressed_size || 448 l.uncompressed_size != r.uncompressed_size || 449 l.offset != r.offset || 450 l.live_size != r.live_size || 451 l.nonce != r.nonce || 452 bch2_crc_cmp(l.csum, r.csum)); 453 } 454 455 static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u, 456 struct bch_extent_crc_unpacked n) 457 { 458 return !crc_is_compressed(u) && 459 u.csum_type && 460 u.uncompressed_size > u.live_size && 461 bch2_csum_type_is_encryption(u.csum_type) == 462 bch2_csum_type_is_encryption(n.csum_type); 463 } 464 465 bool bch2_can_narrow_extent_crcs(struct bkey_s_c k, 466 struct bch_extent_crc_unpacked n) 467 { 468 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 469 struct bch_extent_crc_unpacked crc; 470 const union bch_extent_entry *i; 471 472 if (!n.csum_type) 473 return false; 474 475 bkey_for_each_crc(k.k, ptrs, crc, i) 476 if (can_narrow_crc(crc, n)) 477 return true; 478 479 return false; 480 } 481 482 /* 483 * We're writing another replica for this extent, so while we've got the data in 484 * memory we'll be computing a new checksum for the currently live data. 485 * 486 * If there are other replicas we aren't moving, and they are checksummed but 487 * not compressed, we can modify them to point to only the data that is 488 * currently live (so that readers won't have to bounce) while we've got the 489 * checksum we need: 490 */ 491 bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n) 492 { 493 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k)); 494 struct bch_extent_crc_unpacked u; 495 struct extent_ptr_decoded p; 496 union bch_extent_entry *i; 497 bool ret = false; 498 499 /* Find a checksum entry that covers only live data: */ 500 if (!n.csum_type) { 501 bkey_for_each_crc(&k->k, ptrs, u, i) 502 if (!crc_is_compressed(u) && 503 u.csum_type && 504 u.live_size == u.uncompressed_size) { 505 n = u; 506 goto found; 507 } 508 return false; 509 } 510 found: 511 BUG_ON(crc_is_compressed(n)); 512 BUG_ON(n.offset); 513 BUG_ON(n.live_size != k->k.size); 514 515 restart_narrow_pointers: 516 ptrs = bch2_bkey_ptrs(bkey_i_to_s(k)); 517 518 bkey_for_each_ptr_decode(&k->k, ptrs, p, i) 519 if (can_narrow_crc(p.crc, n)) { 520 bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr); 521 p.ptr.offset += p.crc.offset; 522 p.crc = n; 523 bch2_extent_ptr_decoded_append(k, &p); 524 ret = true; 525 goto restart_narrow_pointers; 526 } 527 528 return ret; 529 } 530 531 static void bch2_extent_crc_pack(union bch_extent_crc *dst, 532 struct bch_extent_crc_unpacked src, 533 enum bch_extent_entry_type type) 534 { 535 #define set_common_fields(_dst, _src) \ 536 _dst.type = 1 << type; \ 537 _dst.csum_type = _src.csum_type, \ 538 _dst.compression_type = _src.compression_type, \ 539 _dst._compressed_size = _src.compressed_size - 1, \ 540 _dst._uncompressed_size = _src.uncompressed_size - 1, \ 541 _dst.offset = _src.offset 542 543 switch (type) { 544 case BCH_EXTENT_ENTRY_crc32: 545 set_common_fields(dst->crc32, src); 546 dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo); 547 break; 548 case BCH_EXTENT_ENTRY_crc64: 549 set_common_fields(dst->crc64, src); 550 dst->crc64.nonce = src.nonce; 551 dst->crc64.csum_lo = (u64 __force) src.csum.lo; 552 dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi); 553 break; 554 case BCH_EXTENT_ENTRY_crc128: 555 set_common_fields(dst->crc128, src); 556 dst->crc128.nonce = src.nonce; 557 dst->crc128.csum = src.csum; 558 break; 559 default: 560 BUG(); 561 } 562 #undef set_common_fields 563 } 564 565 void bch2_extent_crc_append(struct bkey_i *k, 566 struct bch_extent_crc_unpacked new) 567 { 568 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k)); 569 union bch_extent_crc *crc = (void *) ptrs.end; 570 enum bch_extent_entry_type type; 571 572 if (bch_crc_bytes[new.csum_type] <= 4 && 573 new.uncompressed_size <= CRC32_SIZE_MAX && 574 new.nonce <= CRC32_NONCE_MAX) 575 type = BCH_EXTENT_ENTRY_crc32; 576 else if (bch_crc_bytes[new.csum_type] <= 10 && 577 new.uncompressed_size <= CRC64_SIZE_MAX && 578 new.nonce <= CRC64_NONCE_MAX) 579 type = BCH_EXTENT_ENTRY_crc64; 580 else if (bch_crc_bytes[new.csum_type] <= 16 && 581 new.uncompressed_size <= CRC128_SIZE_MAX && 582 new.nonce <= CRC128_NONCE_MAX) 583 type = BCH_EXTENT_ENTRY_crc128; 584 else 585 BUG(); 586 587 bch2_extent_crc_pack(crc, new, type); 588 589 k->k.u64s += extent_entry_u64s(ptrs.end); 590 591 EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX); 592 } 593 594 /* Generic code for keys with pointers: */ 595 596 unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k) 597 { 598 return bch2_bkey_devs(k).nr; 599 } 600 601 unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k) 602 { 603 return k.k->type == KEY_TYPE_reservation 604 ? bkey_s_c_to_reservation(k).v->nr_replicas 605 : bch2_bkey_dirty_devs(k).nr; 606 } 607 608 unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k) 609 { 610 unsigned ret = 0; 611 612 if (k.k->type == KEY_TYPE_reservation) { 613 ret = bkey_s_c_to_reservation(k).v->nr_replicas; 614 } else { 615 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 616 const union bch_extent_entry *entry; 617 struct extent_ptr_decoded p; 618 619 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) 620 ret += !p.ptr.cached && !crc_is_compressed(p.crc); 621 } 622 623 return ret; 624 } 625 626 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k) 627 { 628 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 629 const union bch_extent_entry *entry; 630 struct extent_ptr_decoded p; 631 unsigned ret = 0; 632 633 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) 634 if (!p.ptr.cached && crc_is_compressed(p.crc)) 635 ret += p.crc.compressed_size; 636 637 return ret; 638 } 639 640 bool bch2_bkey_is_incompressible(struct bkey_s_c k) 641 { 642 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 643 const union bch_extent_entry *entry; 644 struct bch_extent_crc_unpacked crc; 645 646 bkey_for_each_crc(k.k, ptrs, crc, entry) 647 if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible) 648 return true; 649 return false; 650 } 651 652 unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k) 653 { 654 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 655 const union bch_extent_entry *entry; 656 struct extent_ptr_decoded p = { 0 }; 657 unsigned replicas = 0; 658 659 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { 660 if (p.ptr.cached) 661 continue; 662 663 if (p.has_ec) 664 replicas += p.ec.redundancy; 665 666 replicas++; 667 668 } 669 670 return replicas; 671 } 672 673 static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p) 674 { 675 if (p->ptr.cached) 676 return 0; 677 678 return p->has_ec 679 ? p->ec.redundancy + 1 680 : ca->mi.durability; 681 } 682 683 unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p) 684 { 685 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev); 686 687 return ca ? __extent_ptr_durability(ca, p) : 0; 688 } 689 690 unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p) 691 { 692 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev); 693 694 if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed) 695 return 0; 696 697 return __extent_ptr_durability(ca, p); 698 } 699 700 unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k) 701 { 702 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 703 const union bch_extent_entry *entry; 704 struct extent_ptr_decoded p; 705 unsigned durability = 0; 706 707 rcu_read_lock(); 708 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) 709 durability += bch2_extent_ptr_durability(c, &p); 710 rcu_read_unlock(); 711 712 return durability; 713 } 714 715 static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k) 716 { 717 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 718 const union bch_extent_entry *entry; 719 struct extent_ptr_decoded p; 720 unsigned durability = 0; 721 722 rcu_read_lock(); 723 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) 724 if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev]) 725 durability += bch2_extent_ptr_durability(c, &p); 726 rcu_read_unlock(); 727 728 return durability; 729 } 730 731 void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry) 732 { 733 union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k)); 734 union bch_extent_entry *next = extent_entry_next(entry); 735 736 memmove_u64s(entry, next, (u64 *) end - (u64 *) next); 737 k->k.u64s -= extent_entry_u64s(entry); 738 } 739 740 void bch2_extent_ptr_decoded_append(struct bkey_i *k, 741 struct extent_ptr_decoded *p) 742 { 743 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k)); 744 struct bch_extent_crc_unpacked crc = 745 bch2_extent_crc_unpack(&k->k, NULL); 746 union bch_extent_entry *pos; 747 748 if (!bch2_crc_unpacked_cmp(crc, p->crc)) { 749 pos = ptrs.start; 750 goto found; 751 } 752 753 bkey_for_each_crc(&k->k, ptrs, crc, pos) 754 if (!bch2_crc_unpacked_cmp(crc, p->crc)) { 755 pos = extent_entry_next(pos); 756 goto found; 757 } 758 759 bch2_extent_crc_append(k, p->crc); 760 pos = bkey_val_end(bkey_i_to_s(k)); 761 found: 762 p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr; 763 __extent_entry_insert(k, pos, to_entry(&p->ptr)); 764 765 if (p->has_ec) { 766 p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr; 767 __extent_entry_insert(k, pos, to_entry(&p->ec)); 768 } 769 } 770 771 static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs, 772 union bch_extent_entry *entry) 773 { 774 union bch_extent_entry *i = ptrs.start; 775 776 if (i == entry) 777 return NULL; 778 779 while (extent_entry_next(i) != entry) 780 i = extent_entry_next(i); 781 return i; 782 } 783 784 /* 785 * Returns pointer to the next entry after the one being dropped: 786 */ 787 union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k, 788 struct bch_extent_ptr *ptr) 789 { 790 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); 791 union bch_extent_entry *entry = to_entry(ptr), *next; 792 union bch_extent_entry *ret = entry; 793 bool drop_crc = true; 794 795 EBUG_ON(ptr < &ptrs.start->ptr || 796 ptr >= &ptrs.end->ptr); 797 EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr); 798 799 for (next = extent_entry_next(entry); 800 next != ptrs.end; 801 next = extent_entry_next(next)) { 802 if (extent_entry_is_crc(next)) { 803 break; 804 } else if (extent_entry_is_ptr(next)) { 805 drop_crc = false; 806 break; 807 } 808 } 809 810 extent_entry_drop(k, entry); 811 812 while ((entry = extent_entry_prev(ptrs, entry))) { 813 if (extent_entry_is_ptr(entry)) 814 break; 815 816 if ((extent_entry_is_crc(entry) && drop_crc) || 817 extent_entry_is_stripe_ptr(entry)) { 818 ret = (void *) ret - extent_entry_bytes(entry); 819 extent_entry_drop(k, entry); 820 } 821 } 822 823 return ret; 824 } 825 826 union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k, 827 struct bch_extent_ptr *ptr) 828 { 829 bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr; 830 union bch_extent_entry *ret = 831 bch2_bkey_drop_ptr_noerror(k, ptr); 832 833 /* 834 * If we deleted all the dirty pointers and there's still cached 835 * pointers, we could set the cached pointers to dirty if they're not 836 * stale - but to do that correctly we'd need to grab an open_bucket 837 * reference so that we don't race with bucket reuse: 838 */ 839 if (have_dirty && 840 !bch2_bkey_dirty_devs(k.s_c).nr) { 841 k.k->type = KEY_TYPE_error; 842 set_bkey_val_u64s(k.k, 0); 843 ret = NULL; 844 } else if (!bch2_bkey_nr_ptrs(k.s_c)) { 845 k.k->type = KEY_TYPE_deleted; 846 set_bkey_val_u64s(k.k, 0); 847 ret = NULL; 848 } 849 850 return ret; 851 } 852 853 void bch2_bkey_drop_device(struct bkey_s k, unsigned dev) 854 { 855 bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev); 856 } 857 858 void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev) 859 { 860 struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev); 861 862 if (ptr) 863 bch2_bkey_drop_ptr_noerror(k, ptr); 864 } 865 866 const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev) 867 { 868 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 869 870 bkey_for_each_ptr(ptrs, ptr) 871 if (ptr->dev == dev) 872 return ptr; 873 874 return NULL; 875 } 876 877 bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target) 878 { 879 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 880 struct bch_dev *ca; 881 bool ret = false; 882 883 rcu_read_lock(); 884 bkey_for_each_ptr(ptrs, ptr) 885 if (bch2_dev_in_target(c, ptr->dev, target) && 886 (ca = bch2_dev_rcu(c, ptr->dev)) && 887 (!ptr->cached || 888 !dev_ptr_stale_rcu(ca, ptr))) { 889 ret = true; 890 break; 891 } 892 rcu_read_unlock(); 893 894 return ret; 895 } 896 897 bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k, 898 struct bch_extent_ptr m, u64 offset) 899 { 900 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 901 const union bch_extent_entry *entry; 902 struct extent_ptr_decoded p; 903 904 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) 905 if (p.ptr.dev == m.dev && 906 p.ptr.gen == m.gen && 907 (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) == 908 (s64) m.offset - offset) 909 return true; 910 911 return false; 912 } 913 914 /* 915 * Returns true if two extents refer to the same data: 916 */ 917 bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2) 918 { 919 if (k1.k->type != k2.k->type) 920 return false; 921 922 if (bkey_extent_is_direct_data(k1.k)) { 923 struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1); 924 struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2); 925 const union bch_extent_entry *entry1, *entry2; 926 struct extent_ptr_decoded p1, p2; 927 928 if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2)) 929 return false; 930 931 bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1) 932 bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2) 933 if (p1.ptr.dev == p2.ptr.dev && 934 p1.ptr.gen == p2.ptr.gen && 935 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == 936 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k)) 937 return true; 938 939 return false; 940 } else { 941 /* KEY_TYPE_deleted, etc. */ 942 return true; 943 } 944 } 945 946 struct bch_extent_ptr * 947 bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2) 948 { 949 struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2); 950 union bch_extent_entry *entry2; 951 struct extent_ptr_decoded p2; 952 953 bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2) 954 if (p1.ptr.dev == p2.ptr.dev && 955 p1.ptr.gen == p2.ptr.gen && 956 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == 957 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k)) 958 return &entry2->ptr; 959 960 return NULL; 961 } 962 963 void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr) 964 { 965 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); 966 union bch_extent_entry *entry; 967 union bch_extent_entry *ec = NULL; 968 969 bkey_extent_entry_for_each(ptrs, entry) { 970 if (&entry->ptr == ptr) { 971 ptr->cached = true; 972 if (ec) 973 extent_entry_drop(k, ec); 974 return; 975 } 976 977 if (extent_entry_is_stripe_ptr(entry)) 978 ec = entry; 979 else if (extent_entry_is_ptr(entry)) 980 ec = NULL; 981 } 982 983 BUG(); 984 } 985 986 /* 987 * bch_extent_normalize - clean up an extent, dropping stale pointers etc. 988 * 989 * Returns true if @k should be dropped entirely 990 * 991 * For existing keys, only called when btree nodes are being rewritten, not when 992 * they're merely being compacted/resorted in memory. 993 */ 994 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k) 995 { 996 struct bch_dev *ca; 997 998 rcu_read_lock(); 999 bch2_bkey_drop_ptrs(k, ptr, 1000 ptr->cached && 1001 (ca = bch2_dev_rcu(c, ptr->dev)) && 1002 dev_ptr_stale_rcu(ca, ptr) > 0); 1003 rcu_read_unlock(); 1004 1005 return bkey_deleted(k.k); 1006 } 1007 1008 void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr) 1009 { 1010 out->atomic++; 1011 rcu_read_lock(); 1012 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev); 1013 if (!ca) { 1014 prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev, 1015 (u64) ptr->offset, ptr->gen, 1016 ptr->cached ? " cached" : ""); 1017 } else { 1018 u32 offset; 1019 u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset); 1020 1021 prt_printf(out, "ptr: %u:%llu:%u gen %u", 1022 ptr->dev, b, offset, ptr->gen); 1023 if (ptr->cached) 1024 prt_str(out, " cached"); 1025 if (ptr->unwritten) 1026 prt_str(out, " unwritten"); 1027 int stale = dev_ptr_stale_rcu(ca, ptr); 1028 if (stale > 0) 1029 prt_printf(out, " stale"); 1030 else if (stale) 1031 prt_printf(out, " invalid"); 1032 } 1033 rcu_read_unlock(); 1034 --out->atomic; 1035 } 1036 1037 void bch2_extent_crc_unpacked_to_text(struct printbuf *out, struct bch_extent_crc_unpacked *crc) 1038 { 1039 prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ", 1040 crc->compressed_size, 1041 crc->uncompressed_size, 1042 crc->offset, crc->nonce); 1043 bch2_prt_csum_type(out, crc->csum_type); 1044 prt_printf(out, " %0llx:%0llx ", crc->csum.hi, crc->csum.lo); 1045 prt_str(out, " compress "); 1046 bch2_prt_compression_type(out, crc->compression_type); 1047 } 1048 1049 void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c, 1050 struct bkey_s_c k) 1051 { 1052 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1053 const union bch_extent_entry *entry; 1054 bool first = true; 1055 1056 if (c) 1057 prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k)); 1058 1059 bkey_extent_entry_for_each(ptrs, entry) { 1060 if (!first) 1061 prt_printf(out, " "); 1062 1063 switch (__extent_entry_type(entry)) { 1064 case BCH_EXTENT_ENTRY_ptr: 1065 bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry)); 1066 break; 1067 1068 case BCH_EXTENT_ENTRY_crc32: 1069 case BCH_EXTENT_ENTRY_crc64: 1070 case BCH_EXTENT_ENTRY_crc128: { 1071 struct bch_extent_crc_unpacked crc = 1072 bch2_extent_crc_unpack(k.k, entry_to_crc(entry)); 1073 1074 bch2_extent_crc_unpacked_to_text(out, &crc); 1075 break; 1076 } 1077 case BCH_EXTENT_ENTRY_stripe_ptr: { 1078 const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr; 1079 1080 prt_printf(out, "ec: idx %llu block %u", 1081 (u64) ec->idx, ec->block); 1082 break; 1083 } 1084 case BCH_EXTENT_ENTRY_rebalance: { 1085 const struct bch_extent_rebalance *r = &entry->rebalance; 1086 1087 prt_str(out, "rebalance: target "); 1088 if (c) 1089 bch2_target_to_text(out, c, r->target); 1090 else 1091 prt_printf(out, "%u", r->target); 1092 prt_str(out, " compression "); 1093 bch2_compression_opt_to_text(out, r->compression); 1094 break; 1095 } 1096 default: 1097 prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry)); 1098 return; 1099 } 1100 1101 first = false; 1102 } 1103 } 1104 1105 1106 static int extent_ptr_invalid(struct bch_fs *c, 1107 struct bkey_s_c k, 1108 enum bch_validate_flags flags, 1109 const struct bch_extent_ptr *ptr, 1110 unsigned size_ondisk, 1111 bool metadata, 1112 struct printbuf *err) 1113 { 1114 int ret = 0; 1115 1116 rcu_read_lock(); 1117 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev); 1118 if (!ca) { 1119 rcu_read_unlock(); 1120 return 0; 1121 } 1122 u32 bucket_offset; 1123 u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset); 1124 unsigned first_bucket = ca->mi.first_bucket; 1125 u64 nbuckets = ca->mi.nbuckets; 1126 unsigned bucket_size = ca->mi.bucket_size; 1127 rcu_read_unlock(); 1128 1129 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1130 bkey_for_each_ptr(ptrs, ptr2) 1131 bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err, 1132 ptr_to_duplicate_device, 1133 "multiple pointers to same device (%u)", ptr->dev); 1134 1135 1136 bkey_fsck_err_on(bucket >= nbuckets, c, err, 1137 ptr_after_last_bucket, 1138 "pointer past last bucket (%llu > %llu)", bucket, nbuckets); 1139 bkey_fsck_err_on(bucket < first_bucket, c, err, 1140 ptr_before_first_bucket, 1141 "pointer before first bucket (%llu < %u)", bucket, first_bucket); 1142 bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size, c, err, 1143 ptr_spans_multiple_buckets, 1144 "pointer spans multiple buckets (%u + %u > %u)", 1145 bucket_offset, size_ondisk, bucket_size); 1146 fsck_err: 1147 return ret; 1148 } 1149 1150 int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k, 1151 enum bch_validate_flags flags, 1152 struct printbuf *err) 1153 { 1154 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1155 const union bch_extent_entry *entry; 1156 struct bch_extent_crc_unpacked crc; 1157 unsigned size_ondisk = k.k->size; 1158 unsigned nonce = UINT_MAX; 1159 unsigned nr_ptrs = 0; 1160 bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false; 1161 int ret = 0; 1162 1163 if (bkey_is_btree_ptr(k.k)) 1164 size_ondisk = btree_sectors(c); 1165 1166 bkey_extent_entry_for_each(ptrs, entry) { 1167 bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err, 1168 extent_ptrs_invalid_entry, 1169 "invalid extent entry type (got %u, max %u)", 1170 __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX); 1171 1172 bkey_fsck_err_on(bkey_is_btree_ptr(k.k) && 1173 !extent_entry_is_ptr(entry), c, err, 1174 btree_ptr_has_non_ptr, 1175 "has non ptr field"); 1176 1177 switch (extent_entry_type(entry)) { 1178 case BCH_EXTENT_ENTRY_ptr: 1179 ret = extent_ptr_invalid(c, k, flags, &entry->ptr, 1180 size_ondisk, false, err); 1181 if (ret) 1182 return ret; 1183 1184 bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err, 1185 ptr_cached_and_erasure_coded, 1186 "cached, erasure coded ptr"); 1187 1188 if (!entry->ptr.unwritten) 1189 have_written = true; 1190 else 1191 have_unwritten = true; 1192 1193 have_ec = false; 1194 crc_since_last_ptr = false; 1195 nr_ptrs++; 1196 break; 1197 case BCH_EXTENT_ENTRY_crc32: 1198 case BCH_EXTENT_ENTRY_crc64: 1199 case BCH_EXTENT_ENTRY_crc128: 1200 crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry)); 1201 1202 bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err, 1203 ptr_crc_uncompressed_size_too_small, 1204 "checksum offset + key size > uncompressed size"); 1205 bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err, 1206 ptr_crc_csum_type_unknown, 1207 "invalid checksum type"); 1208 bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err, 1209 ptr_crc_compression_type_unknown, 1210 "invalid compression type"); 1211 1212 if (bch2_csum_type_is_encryption(crc.csum_type)) { 1213 if (nonce == UINT_MAX) 1214 nonce = crc.offset + crc.nonce; 1215 else if (nonce != crc.offset + crc.nonce) 1216 bkey_fsck_err(c, err, ptr_crc_nonce_mismatch, 1217 "incorrect nonce"); 1218 } 1219 1220 bkey_fsck_err_on(crc_since_last_ptr, c, err, 1221 ptr_crc_redundant, 1222 "redundant crc entry"); 1223 crc_since_last_ptr = true; 1224 1225 bkey_fsck_err_on(crc_is_encoded(crc) && 1226 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) && 1227 (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)), c, err, 1228 ptr_crc_uncompressed_size_too_big, 1229 "too large encoded extent"); 1230 1231 size_ondisk = crc.compressed_size; 1232 break; 1233 case BCH_EXTENT_ENTRY_stripe_ptr: 1234 bkey_fsck_err_on(have_ec, c, err, 1235 ptr_stripe_redundant, 1236 "redundant stripe entry"); 1237 have_ec = true; 1238 break; 1239 case BCH_EXTENT_ENTRY_rebalance: { 1240 const struct bch_extent_rebalance *r = &entry->rebalance; 1241 1242 if (!bch2_compression_opt_valid(r->compression)) { 1243 struct bch_compression_opt opt = __bch2_compression_decode(r->compression); 1244 prt_printf(err, "invalid compression opt %u:%u", 1245 opt.type, opt.level); 1246 return -BCH_ERR_invalid_bkey; 1247 } 1248 break; 1249 } 1250 } 1251 } 1252 1253 bkey_fsck_err_on(!nr_ptrs, c, err, 1254 extent_ptrs_no_ptrs, 1255 "no ptrs"); 1256 bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err, 1257 extent_ptrs_too_many_ptrs, 1258 "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX); 1259 bkey_fsck_err_on(have_written && have_unwritten, c, err, 1260 extent_ptrs_written_and_unwritten, 1261 "extent with unwritten and written ptrs"); 1262 bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err, 1263 extent_ptrs_unwritten, 1264 "has unwritten ptrs"); 1265 bkey_fsck_err_on(crc_since_last_ptr, c, err, 1266 extent_ptrs_redundant_crc, 1267 "redundant crc entry"); 1268 bkey_fsck_err_on(have_ec, c, err, 1269 extent_ptrs_redundant_stripe, 1270 "redundant stripe entry"); 1271 fsck_err: 1272 return ret; 1273 } 1274 1275 void bch2_ptr_swab(struct bkey_s k) 1276 { 1277 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); 1278 union bch_extent_entry *entry; 1279 u64 *d; 1280 1281 for (d = (u64 *) ptrs.start; 1282 d != (u64 *) ptrs.end; 1283 d++) 1284 *d = swab64(*d); 1285 1286 for (entry = ptrs.start; 1287 entry < ptrs.end; 1288 entry = extent_entry_next(entry)) { 1289 switch (extent_entry_type(entry)) { 1290 case BCH_EXTENT_ENTRY_ptr: 1291 break; 1292 case BCH_EXTENT_ENTRY_crc32: 1293 entry->crc32.csum = swab32(entry->crc32.csum); 1294 break; 1295 case BCH_EXTENT_ENTRY_crc64: 1296 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi); 1297 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo); 1298 break; 1299 case BCH_EXTENT_ENTRY_crc128: 1300 entry->crc128.csum.hi = (__force __le64) 1301 swab64((__force u64) entry->crc128.csum.hi); 1302 entry->crc128.csum.lo = (__force __le64) 1303 swab64((__force u64) entry->crc128.csum.lo); 1304 break; 1305 case BCH_EXTENT_ENTRY_stripe_ptr: 1306 break; 1307 case BCH_EXTENT_ENTRY_rebalance: 1308 break; 1309 } 1310 } 1311 } 1312 1313 const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k) 1314 { 1315 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1316 const union bch_extent_entry *entry; 1317 1318 bkey_extent_entry_for_each(ptrs, entry) 1319 if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance) 1320 return &entry->rebalance; 1321 1322 return NULL; 1323 } 1324 1325 unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k, 1326 unsigned target, unsigned compression) 1327 { 1328 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1329 unsigned rewrite_ptrs = 0; 1330 1331 if (compression) { 1332 unsigned compression_type = bch2_compression_opt_to_type(compression); 1333 const union bch_extent_entry *entry; 1334 struct extent_ptr_decoded p; 1335 unsigned i = 0; 1336 1337 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { 1338 if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || 1339 p.ptr.unwritten) { 1340 rewrite_ptrs = 0; 1341 goto incompressible; 1342 } 1343 1344 if (!p.ptr.cached && p.crc.compression_type != compression_type) 1345 rewrite_ptrs |= 1U << i; 1346 i++; 1347 } 1348 } 1349 incompressible: 1350 if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) { 1351 unsigned i = 0; 1352 1353 bkey_for_each_ptr(ptrs, ptr) { 1354 if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target)) 1355 rewrite_ptrs |= 1U << i; 1356 i++; 1357 } 1358 } 1359 1360 return rewrite_ptrs; 1361 } 1362 1363 bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k) 1364 { 1365 const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k); 1366 1367 /* 1368 * If it's an indirect extent, we don't delete the rebalance entry when 1369 * done so that we know what options were applied - check if it still 1370 * needs work done: 1371 */ 1372 if (r && 1373 k.k->type == KEY_TYPE_reflink_v && 1374 !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression)) 1375 r = NULL; 1376 1377 return r != NULL; 1378 } 1379 1380 int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k, 1381 struct bch_io_opts *opts) 1382 { 1383 struct bkey_s k = bkey_i_to_s(_k); 1384 struct bch_extent_rebalance *r; 1385 unsigned target = opts->background_target; 1386 unsigned compression = background_compression(*opts); 1387 bool needs_rebalance; 1388 1389 if (!bkey_extent_is_direct_data(k.k)) 1390 return 0; 1391 1392 /* get existing rebalance entry: */ 1393 r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c); 1394 if (r) { 1395 if (k.k->type == KEY_TYPE_reflink_v) { 1396 /* 1397 * indirect extents: existing options take precedence, 1398 * so that we don't move extents back and forth if 1399 * they're referenced by different inodes with different 1400 * options: 1401 */ 1402 if (r->target) 1403 target = r->target; 1404 if (r->compression) 1405 compression = r->compression; 1406 } 1407 1408 r->target = target; 1409 r->compression = compression; 1410 } 1411 1412 needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression); 1413 1414 if (needs_rebalance && !r) { 1415 union bch_extent_entry *new = bkey_val_end(k); 1416 1417 new->rebalance.type = 1U << BCH_EXTENT_ENTRY_rebalance; 1418 new->rebalance.compression = compression; 1419 new->rebalance.target = target; 1420 new->rebalance.unused = 0; 1421 k.k->u64s += extent_entry_u64s(new); 1422 } else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) { 1423 /* 1424 * For indirect extents, don't delete the rebalance entry when 1425 * we're finished so that we know we specifically moved it or 1426 * compressed it to its current location/compression type 1427 */ 1428 extent_entry_drop(k, (union bch_extent_entry *) r); 1429 } 1430 1431 return 0; 1432 } 1433 1434 /* Generic extent code: */ 1435 1436 int bch2_cut_front_s(struct bpos where, struct bkey_s k) 1437 { 1438 unsigned new_val_u64s = bkey_val_u64s(k.k); 1439 int val_u64s_delta; 1440 u64 sub; 1441 1442 if (bkey_le(where, bkey_start_pos(k.k))) 1443 return 0; 1444 1445 EBUG_ON(bkey_gt(where, k.k->p)); 1446 1447 sub = where.offset - bkey_start_offset(k.k); 1448 1449 k.k->size -= sub; 1450 1451 if (!k.k->size) { 1452 k.k->type = KEY_TYPE_deleted; 1453 new_val_u64s = 0; 1454 } 1455 1456 switch (k.k->type) { 1457 case KEY_TYPE_extent: 1458 case KEY_TYPE_reflink_v: { 1459 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); 1460 union bch_extent_entry *entry; 1461 bool seen_crc = false; 1462 1463 bkey_extent_entry_for_each(ptrs, entry) { 1464 switch (extent_entry_type(entry)) { 1465 case BCH_EXTENT_ENTRY_ptr: 1466 if (!seen_crc) 1467 entry->ptr.offset += sub; 1468 break; 1469 case BCH_EXTENT_ENTRY_crc32: 1470 entry->crc32.offset += sub; 1471 break; 1472 case BCH_EXTENT_ENTRY_crc64: 1473 entry->crc64.offset += sub; 1474 break; 1475 case BCH_EXTENT_ENTRY_crc128: 1476 entry->crc128.offset += sub; 1477 break; 1478 case BCH_EXTENT_ENTRY_stripe_ptr: 1479 break; 1480 case BCH_EXTENT_ENTRY_rebalance: 1481 break; 1482 } 1483 1484 if (extent_entry_is_crc(entry)) 1485 seen_crc = true; 1486 } 1487 1488 break; 1489 } 1490 case KEY_TYPE_reflink_p: { 1491 struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k); 1492 1493 le64_add_cpu(&p.v->idx, sub); 1494 break; 1495 } 1496 case KEY_TYPE_inline_data: 1497 case KEY_TYPE_indirect_inline_data: { 1498 void *p = bkey_inline_data_p(k); 1499 unsigned bytes = bkey_inline_data_bytes(k.k); 1500 1501 sub = min_t(u64, sub << 9, bytes); 1502 1503 memmove(p, p + sub, bytes - sub); 1504 1505 new_val_u64s -= sub >> 3; 1506 break; 1507 } 1508 } 1509 1510 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s; 1511 BUG_ON(val_u64s_delta < 0); 1512 1513 set_bkey_val_u64s(k.k, new_val_u64s); 1514 memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64)); 1515 return -val_u64s_delta; 1516 } 1517 1518 int bch2_cut_back_s(struct bpos where, struct bkey_s k) 1519 { 1520 unsigned new_val_u64s = bkey_val_u64s(k.k); 1521 int val_u64s_delta; 1522 u64 len = 0; 1523 1524 if (bkey_ge(where, k.k->p)) 1525 return 0; 1526 1527 EBUG_ON(bkey_lt(where, bkey_start_pos(k.k))); 1528 1529 len = where.offset - bkey_start_offset(k.k); 1530 1531 k.k->p.offset = where.offset; 1532 k.k->size = len; 1533 1534 if (!len) { 1535 k.k->type = KEY_TYPE_deleted; 1536 new_val_u64s = 0; 1537 } 1538 1539 switch (k.k->type) { 1540 case KEY_TYPE_inline_data: 1541 case KEY_TYPE_indirect_inline_data: 1542 new_val_u64s = (bkey_inline_data_offset(k.k) + 1543 min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3; 1544 break; 1545 } 1546 1547 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s; 1548 BUG_ON(val_u64s_delta < 0); 1549 1550 set_bkey_val_u64s(k.k, new_val_u64s); 1551 memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64)); 1552 return -val_u64s_delta; 1553 } 1554