// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_accounting.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "io_write.h"
#include "keylist.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
#include "util.h"

#include <linux/sort.h>
#include <linux/string_choices.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>

static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}

static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}

static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}

#else

#include <raid/raid.h>

#endif
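
/*
 * A sketch of how the recovery cases above map onto a concrete layout,
 * assuming nd = 4 data blocks and np = 2 parity blocks (blocks 0-3 data,
 * block 4 = p, block 5 = q):
 *
 *   one failure,  ir = {2}:	block 2 < nd + 1, xor-recover from the
 *				remaining data blocks plus p (raid5_recov)
 *   one failure,  ir = {5}:	q itself failed; just regenerate the syndrome
 *   two failures, ir = {1,3}:	both data, raid6_2data_recov using p and q
 *   two failures, ir = {1,4}:	data + p, raid6_datap_recov using q
 *   two failures, ir = {1,5}:	data + q: recover block 1 via p (raid5),
 *				then regenerate q
 *   two failures, ir = {4,5}:	only parity lost, recompute it (raid_gen)
 *
 * ir[] must be sorted, which is why the case analysis can test ir[1] first.
 */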

struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};

/* Stripes btree keys: */

int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k,
			 struct bkey_validate_context from)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	int ret = 0;

	bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
			 bpos_gt(k.k->p, POS(0, U32_MAX)),
			 c, stripe_pos_bad,
			 "stripe at bad pos");

	bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s),
			 c, stripe_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(k.k), stripe_val_u64s(s));

	bkey_fsck_err_on(s->csum_granularity_bits >= 64,
			 c, stripe_csum_granularity_bad,
			 "invalid csum granularity (%u >= 64)",
			 s->csum_granularity_bits);

	ret = bch2_bkey_ptrs_validate(c, k, from);
fsck_err:
	return ret;
}

void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
	struct bch_stripe s = {};

	memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));

	unsigned nr_data = s.nr_blocks - s.nr_redundant;

	prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
		   s.algorithm,
		   le16_to_cpu(s.sectors),
		   nr_data,
		   s.nr_redundant);
	bch2_prt_csum_type(out, s.csum_type);
	prt_str(out, " gran ");
	if (s.csum_granularity_bits < 64)
		prt_printf(out, "%llu", 1ULL << s.csum_granularity_bits);
	else
		prt_printf(out, "(invalid shift %u)", s.csum_granularity_bits);

	if (s.disk_label) {
		prt_str(out, " label");
		bch2_disk_path_to_text(out, c, s.disk_label - 1);
	}

	for (unsigned i = 0; i < s.nr_blocks; i++) {
		const struct bch_extent_ptr *ptr = sp->ptrs + i;

		if ((void *) ptr >= bkey_val_end(k))
			break;

		prt_char(out, ' ');
		bch2_extent_ptr_to_text(out, c, ptr);

		if (s.csum_type < BCH_CSUM_NR &&
		    i < nr_data &&
		    stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
			prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
	}
}

/* Triggers: */

static int __mark_stripe_bucket(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c_stripe s,
				unsigned ptr_idx, bool deleting,
				struct bpos bucket,
				struct bch_alloc_v4 *a,
				enum btree_iter_update_trigger_flags flags)
{
	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
	unsigned nr_data = s.v->nr_blocks - s.v->nr_redundant;
	bool parity = ptr_idx >= nr_data;
	enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
	s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_fs *c = trans->c;
	if (deleting)
		sectors = -sectors;

	if (!deleting) {
		if (bch2_trans_inconsistent_on(a->stripe ||
					       a->stripe_redundancy, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				a->dirty_sectors,
				a->stripe, s.k->p.offset,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				a->dirty_sectors,
				a->cached_sectors,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}
	} else {
		if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset ||
					       a->stripe_redundancy != s.v->nr_redundant, trans,
				"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s",
				bucket.inode, bucket.offset, a->gen,
				a->stripe,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		if (bch2_trans_inconsistent_on(a->data_type != data_type, trans,
				"bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				bch2_data_type_str(data_type),
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		if (bch2_trans_inconsistent_on(parity &&
					       (a->dirty_sectors != -sectors ||
						a->cached_sectors), trans,
				"bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s",
				bucket.inode, bucket.offset, a->gen,
				a->dirty_sectors,
				a->cached_sectors,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}
	}

	if (sectors) {
		ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type,
					     a->gen, a->data_type, &a->dirty_sectors);
		if (ret)
			goto err;
	}

	if (!deleting) {
		a->stripe		= s.k->p.offset;
		a->stripe_redundancy	= s.v->nr_redundant;
		alloc_data_type_set(a, data_type);
	} else {
		a->stripe		= 0;
		a->stripe_redundancy	= 0;
		alloc_data_type_set(a, BCH_DATA_user);
	}
err:
	printbuf_exit(&buf);
	return ret;
}

static int mark_stripe_bucket(struct btree_trans *trans,
			      struct bkey_s_c_stripe s,
			      unsigned ptr_idx, bool deleting,
			      enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
	if (unlikely(!ca)) {
		if (ptr->dev != BCH_SB_MEMBER_INVALID && !(flags & BTREE_TRIGGER_overwrite))
			ret = -BCH_ERR_mark_stripe;
		goto err;
	}

	struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

	if (flags & BTREE_TRIGGER_transactional) {
		struct bkey_i_alloc_v4 *a =
			bch2_trans_start_alloc_update(trans, bucket, 0);
		ret = PTR_ERR_OR_ZERO(a) ?:
			__mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags);
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n  %s",
					    ptr->dev,
					    (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
		alloc_to_bucket(g, new);
		bucket_unlock(g);

		if (!ret)
			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}
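
/*
 * Roughly speaking, mark_stripe_bucket() runs in two modes: with
 * BTREE_TRIGGER_transactional set it queues an update to the alloc btree
 * through the transaction, and with BTREE_TRIGGER_gc set it updates the
 * in-memory gc bucket copy directly under the bucket lock. Only parity
 * blocks carry sectors here - data blocks are accounted when the extents
 * pointing into them are marked, so their contribution at stripe-mark
 * time is 0.
 */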

static int mark_stripe_buckets(struct btree_trans *trans,
			       struct bkey_s_c old, struct bkey_s_c new,
			       enum btree_iter_update_trigger_flags flags)
{
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	BUG_ON(old_s && new_s && old_s->nr_blocks != new_s->nr_blocks);

	unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;

	for (unsigned i = 0; i < nr_blocks; i++) {
		if (new_s && old_s &&
		    !memcmp(&new_s->ptrs[i],
			    &old_s->ptrs[i],
			    sizeof(new_s->ptrs[i])))
			continue;

		if (new_s) {
			int ret = mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(new), i, false, flags);
			if (ret)
				return ret;
		}

		if (old_s) {
			int ret = mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(old), i, true, flags);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static inline void stripe_to_mem(struct stripe *m, const struct bch_stripe *s)
{
	m->sectors	= le16_to_cpu(s->sectors);
	m->algorithm	= s->algorithm;
	m->nr_blocks	= s->nr_blocks;
	m->nr_redundant	= s->nr_redundant;
	m->disk_label	= s->disk_label;
	m->blocks_nonempty = 0;

	for (unsigned i = 0; i < s->nr_blocks; i++)
		m->blocks_nonempty += !!stripe_blockcount_get(s, i);
}

int bch2_trigger_stripe(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s _new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bkey_s_c new = _new.s_c;
	struct bch_fs *c = trans->c;
	u64 idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, _new.s_c, flags);

	BUG_ON(new_s && old_s &&
	       (new_s->nr_blocks	!= old_s->nr_blocks ||
		new_s->nr_redundant	!= old_s->nr_redundant));

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		/*
		 * If the pointers aren't changing, we don't need to do anything:
		 */
		if (new_s && old_s &&
		    new_s->nr_blocks	== old_s->nr_blocks &&
		    new_s->nr_redundant	== old_s->nr_redundant &&
		    !memcmp(old_s->ptrs, new_s->ptrs,
			    new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
			return 0;

		struct gc_stripe *gc = NULL;
		if (flags & BTREE_TRIGGER_gc) {
			gc = genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
			if (!gc) {
				bch_err(c, "error allocating memory for gc_stripes, idx %llu", idx);
				return -BCH_ERR_ENOMEM_mark_stripe;
			}

			/*
			 * This will be wrong when we bring back runtime gc: we should
			 * be unmarking the old key and then marking the new key
			 *
			 * Also: when we bring back runtime gc, locking
			 */
			gc->alive	= true;
			gc->sectors	= le16_to_cpu(new_s->sectors);
			gc->nr_blocks	= new_s->nr_blocks;
			gc->nr_redundant = new_s->nr_redundant;

			for (unsigned i = 0; i < new_s->nr_blocks; i++)
				gc->ptrs[i] = new_s->ptrs[i];

			/*
			 * gc recalculates this field from stripe ptr
			 * references:
			 */
			memset(gc->block_sectors, 0, sizeof(gc->block_sectors));
		}

		if (new_s) {
			s64 sectors = (u64) le16_to_cpu(new_s->sectors) * new_s->nr_redundant;

			struct disk_accounting_pos acc = {
				.type = BCH_DISK_ACCOUNTING_replicas,
			};
			bch2_bkey_to_replicas(&acc.replicas, new);
			int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
			if (ret)
				return ret;

			if (gc)
				memcpy(&gc->r.e, &acc.replicas, replicas_entry_bytes(&acc.replicas));
		}

		if (old_s) {
			s64 sectors = -((s64) le16_to_cpu(old_s->sectors)) * old_s->nr_redundant;

			struct disk_accounting_pos acc = {
				.type = BCH_DISK_ACCOUNTING_replicas,
			};
			bch2_bkey_to_replicas(&acc.replicas, old);
			int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
			if (ret)
				return ret;
		}

		int ret = mark_stripe_buckets(trans, old, new, flags);
		if (ret)
			return ret;
	}

	if (flags & BTREE_TRIGGER_atomic) {
		struct stripe *m = genradix_ptr(&c->stripes, idx);

		if (!m) {
			struct printbuf buf1 = PRINTBUF;
			struct printbuf buf2 = PRINTBUF;

			bch2_bkey_val_to_text(&buf1, c, old);
			bch2_bkey_val_to_text(&buf2, c, new);
			bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
					    "old %s\n"
					    "new %s", idx, buf1.buf, buf2.buf);
			printbuf_exit(&buf2);
			printbuf_exit(&buf1);
			bch2_inconsistent_error(c);
			return -1;
		}

		if (!new_s) {
			bch2_stripes_heap_del(c, m, idx);

			memset(m, 0, sizeof(*m));
		} else {
			stripe_to_mem(m, new_s);

			if (!old_s)
				bch2_stripes_heap_insert(c, m, idx);
			else
				bch2_stripes_heap_update(c, m, idx);
		}
	}

	return 0;
}
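
/*
 * Worked example of the replicas accounting above, assuming a 4+2 stripe
 * with sectors = 1024: creating the stripe key adds
 * 1024 * nr_redundant = 2048 sectors against the stripe's replicas entry
 * (the parity space is owned by the stripe key itself), and deleting it
 * adds the same amount negated. Data block usage is not accounted here -
 * it is accounted to the extents that point into the stripe.
 */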

/* returns blocknr in stripe that we matched: */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
						struct bkey_s_c k, unsigned *block)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
						      le16_to_cpu(s->sectors))) {
				*block = i;
				return ptr;
			}

	return NULL;
}

static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;

		break;
	}
	}

	return false;
}

/* Stripe bufs: */

static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	if (buf->key.k.type == KEY_TYPE_stripe) {
		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
		unsigned i;

		for (i = 0; i < s->v.nr_blocks; i++) {
			kvfree(buf->data[i]);
			buf->data[i] = NULL;
		}
	}
}

/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
			      unsigned offset, unsigned size)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1U << v->csum_granularity_bits;
	unsigned end = offset + size;
	unsigned i;

	BUG_ON(end > le16_to_cpu(v->sectors));

	offset	= round_down(offset, csum_granularity);
	end	= min_t(unsigned, le16_to_cpu(v->sectors),
			round_up(end, csum_granularity));

	buf->offset	= offset;
	buf->size	= end - offset;

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < v->nr_blocks; i++) {
		buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
		if (!buf->data[i])
			goto err;
	}

	return 0;
err:
	ec_stripe_buf_exit(buf);
	return -BCH_ERR_ENOMEM_stripe_buf;
}
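
/*
 * Example of the rounding above, assuming csum_granularity_bits = 3
 * (checksum blocks of 8 sectors) and a 128 sector stripe: a request for
 * offset 13, size 20 (sectors 13..32) becomes offset 8, end 40, so
 * buf->size = 32 sectors and only whole checksum blocks are read and
 * verified. The buffer allocated is then buf->size << 9 bytes per block.
 */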

/* Checksumming: */

static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
					 unsigned block, unsigned offset)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned end = buf->offset + buf->size;
	unsigned len = min(csum_granularity, end - offset);

	BUG_ON(offset >= end);
	BUG_ON(offset <  buf->offset);
	BUG_ON(offset & (csum_granularity - 1));
	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
	       (len & (csum_granularity - 1)));

	return bch2_checksum(NULL, v->csum_type,
			     null_nonce(),
			     buf->data[block] + ((offset - buf->offset) << 9),
			     len << 9);
}

static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, j, csums_per_device = stripe_csums_per_device(v);

	if (!v->csum_type)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++)
		for (j = 0; j < csums_per_device; j++)
			stripe_csum_set(v, i, j,
				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}

static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned i;

	if (!v->csum_type)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum want = stripe_csum_get(v, i, j);
			struct bch_csum got = ec_block_checksum(buf, i, offset);

			if (bch2_crc_cmp(want, got)) {
				struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev);
				if (ca) {
					struct printbuf err = PRINTBUF;

					prt_str(&err, "stripe ");
					bch2_csum_err_msg(&err, v->csum_type, want, got);
					prt_printf(&err, " for %ps at %u of\n  ", (void *) _RET_IP_, i);
					bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
					bch_err_ratelimited(ca, "%s", err.buf);
					printbuf_exit(&err);

					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
				}

				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}

/* Erasure coding: */

static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;

	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}

static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
	return 0;
}
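
/*
 * Note on ec_do_recov(): only failed *data* blocks are collected into
 * failed[] (the loop stops at nr_data), so failed[] is naturally sorted
 * and raid_rec()'s case analysis on ir[0]/ir[1] works as expected. A
 * failed parity block with all data intact means nothing needs
 * reconstructing for the read path.
 */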

/* IO: */

static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;

	if (bch2_dev_io_err_on(bio->bi_status, ca,
			       bio_data_dir(bio)
			       ? BCH_MEMBER_ERROR_write
			       : BCH_MEMBER_ERROR_read,
			       "erasure coding %s error: %s",
			       str_write_read(bio_data_dir(bio)),
			       bch2_blk_status_to_str(bio->bi_status)))
		clear_bit(ec_bio->idx, ec_bio->buf->valid);

	int stale = dev_ptr_stale(ca, ptr);
	if (stale) {
		bch_err_ratelimited(ca->fs,
				    "error %s stripe: stale/invalid pointer (%i) after io",
				    bio_data_dir(bio) == READ ? "reading from" : "writing to",
				    stale);
		clear_bit(ec_bio->idx, ec_bio->buf->valid);
	}

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref);
	closure_put(cl);
}

static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			blk_opf_t opf, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;
	int rw = op_is_write(opf);

	struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw);
	if (!ca) {
		clear_bit(idx, buf->valid);
		return;
	}

	int stale = dev_ptr_stale(ca, ptr);
	if (stale) {
		bch_err_ratelimited(c,
				    "error %s stripe: stale pointer (%i)",
				    rw == READ ? "reading from" : "writing to",
				    stale);
		clear_bit(idx, buf->valid);
		return;
	}

	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
						       nr_iovecs,
						       opf,
						       GFP_KERNEL,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca			= ca;
		ec_bio->buf			= buf;
		ec_bio->idx			= idx;

		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io		= ec_block_endio;
		ec_bio->bio.bi_private		= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref);
}
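
/*
 * The loop above splits one logical block read/write into multiple bios
 * because a single bio is limited to BIO_MAX_VECS pages; e.g. with 4k
 * pages and BIO_MAX_VECS = 256, a block larger than 1MiB goes out as
 * several bios, each taking its own closure and io_ref reference and
 * completing through ec_block_endio().
 */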

static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
				struct ec_stripe_buf *stripe)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
			       POS(0, idx), BTREE_ITER_slots);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_stripe) {
		ret = -ENOENT;
		goto err;
	}
	bkey_reassemble(&stripe->key, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* recovery read path: */
int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
			struct bkey_s_c orig_k)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_buf *buf = NULL;
	struct closure cl;
	struct bch_stripe *v;
	unsigned i, offset;
	const char *msg = NULL;
	struct printbuf msgbuf = PRINTBUF;
	int ret = 0;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	buf = kzalloc(sizeof(*buf), GFP_NOFS);
	if (!buf)
		return -BCH_ERR_ENOMEM_ec_read_extent;

	ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
	if (ret) {
		msg = "stripe not found";
		goto err;
	}

	v = &bkey_i_to_stripe(&buf->key)->v;

	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
		msg = "pointer doesn't match stripe";
		goto err;
	}

	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
		msg = "read is bigger than stripe";
		goto err;
	}

	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
	if (ret) {
		msg = "-ENOMEM";
		goto err;
	}

	for (i = 0; i < v->nr_blocks; i++)
		ec_block_io(c, buf, REQ_OP_READ, i, &cl);

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		msg = "unable to read enough blocks";
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
out:
	ec_stripe_buf_exit(buf);
	kfree(buf);
	return ret;
err:
	bch2_bkey_val_to_text(&msgbuf, c, orig_k);
	bch_err_ratelimited(c,
			    "error doing reconstruct read: %s\n  %s", msg, msgbuf.buf);
	printbuf_exit(&msgbuf);
	ret = -BCH_ERR_stripe_reconstruct;
	goto out;
}
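
/*
 * Summary of the reconstruct read above: look up the stripe key, read
 * every block of the stripe for the affected checksum-granularity range,
 * drop blocks that fail IO or checksum verification, rebuild the missing
 * data with raid_rec(), then copy the requested range into the original
 * read bio. It only runs after a normal replica read has already failed,
 * so it costs a read from all nr_blocks devices for one extent.
 */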

/* stripe bucket accounting: */

static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

		mutex_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->nr * sizeof(h->data[0]));
			n.nr = h->nr;
			swap(*h, n);
		}
		mutex_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	if (c->gc_pos.phase != GC_PHASE_not_running &&
	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	return 0;
}

static int ec_stripe_mem_alloc(struct btree_trans *trans,
			       struct btree_iter *iter)
{
	return allocate_dropping_locks_errcode(trans,
			__ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
}

/*
 * Hash table of open stripes:
 * Stripes that are being created or modified are kept in a hash table, so that
 * stripe deletion can skip them.
 */

static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
	struct ec_stripe_new *s;

	hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
		if (s->idx == idx)
			return true;
	return false;
}

static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	bool ret = false;

	spin_lock(&c->ec_stripes_new_lock);
	ret = __bch2_stripe_is_open(c, idx);
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static bool bch2_try_open_stripe(struct bch_fs *c,
				 struct ec_stripe_new *s,
				 u64 idx)
{
	bool ret;

	spin_lock(&c->ec_stripes_new_lock);
	ret = !__bch2_stripe_is_open(c, idx);
	if (ret) {
		unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));

		s->idx = idx;
		hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
	}
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(!s->idx);

	spin_lock(&c->ec_stripes_new_lock);
	hlist_del_init(&s->hash);
	spin_unlock(&c->ec_stripes_new_lock);

	s->idx = 0;
}

/* Heap of all existing stripes, ordered by blocks_nonempty */

static u64 stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	lockdep_assert_held(&c->ec_stripes_heap_lock);

	if (h->nr &&
	    h->data[0].blocks_nonempty == 0 &&
	    !bch2_stripe_is_open(c, h->data[0].idx))
		return h->data[0].idx;

	return 0;
}

static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}

static inline bool ec_stripes_heap_cmp(const void *l, const void *r, void __always_unused *args)
{
	struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
	struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;

	return ((_l->blocks_nonempty > _r->blocks_nonempty) <
		(_l->blocks_nonempty < _r->blocks_nonempty));
}

static inline void ec_stripes_heap_swap(void *l, void *r, void *h)
{
	struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
	struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
	ec_stripes_heap *_h = (ec_stripes_heap *)h;
	size_t i = _l - _h->data;
	size_t j = _r - _h->data;

	swap(*_l, *_r);

	ec_stripes_heap_set_backpointer(_h, i);
	ec_stripes_heap_set_backpointer(_h, j);
}

static const struct min_heap_callbacks callbacks = {
	.less = ec_stripes_heap_cmp,
	.swp = ec_stripes_heap_swap,
};
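
/*
 * The comparator above is a branchless strict less-than: for unsigned
 * a and b, (a > b) < (a < b) evaluates to 1 only when a < b (0 < 1),
 * and to 0 when a == b or a > b. The min-heap therefore keeps the stripe
 * with the fewest nonempty blocks at data[0] - exactly the best
 * candidate for deletion or reuse - and every swap also fixes up the
 * heap_idx backpointers so a stripe can be resifted in place on update.
 */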

static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes, idx);

	BUG_ON(m->heap_idx >= h->nr);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}

void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	min_heap_del(&c->ec_stripes_heap, m->heap_idx, &callbacks, &c->ec_stripes_heap);
	mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	mutex_lock(&c->ec_stripes_heap_lock);
	BUG_ON(min_heap_full(&c->ec_stripes_heap));

	genradix_ptr(&c->stripes, idx)->heap_idx = c->ec_stripes_heap.nr;
	min_heap_push(&c->ec_stripes_heap, &((struct ec_stripe_heap_entry) {
				.idx = idx,
				.blocks_nonempty = m->blocks_nonempty,
			}),
			&callbacks,
			&c->ec_stripes_heap);

	heap_verify_backpointer(c, idx);
	mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	bool do_deletes;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

	i = m->heap_idx;
	min_heap_sift_up(h, i, &callbacks, &c->ec_stripes_heap);
	min_heap_sift_down(h, i, &callbacks, &c->ec_stripes_heap);

	heap_verify_backpointer(c, idx);

	do_deletes = stripe_idx_to_delete(c) != 0;
	mutex_unlock(&c->ec_stripes_heap_lock);

	if (do_deletes)
		bch2_do_stripe_deletes(c);
}

/* stripe deletion */

static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_stripe s;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
			       BTREE_ITER_intent);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
		ret = -EINVAL;
		goto err;
	}

	s = bkey_s_c_to_stripe(k);
	for (unsigned i = 0; i < s.v->nr_blocks; i++)
		if (stripe_blockcount_get(s.v, i)) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, k);
			bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
			printbuf_exit(&buf);
			ret = -EINVAL;
			goto err;
		}

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);

	while (1) {
		mutex_lock(&c->ec_stripes_heap_lock);
		u64 idx = stripe_idx_to_delete(c);
		mutex_unlock(&c->ec_stripes_heap_lock);

		if (!idx)
			break;

		int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					       ec_stripe_delete(trans, idx));
		bch_err_fn(c, ret);
		if (ret)
			break;
	}

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}

void bch2_do_stripe_deletes(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
	    !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}

/* stripe creation: */

static int ec_stripe_key_update(struct btree_trans *trans,
				struct bkey_i_stripe *old,
				struct bkey_i_stripe *new)
{
	struct bch_fs *c = trans->c;
	bool create = !old;

	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
					       new->k.p, BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	if (bch2_fs_inconsistent_on(k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe),
				    c, "error %s stripe: got existing key type %s",
				    create ? "creating" : "updating",
				    bch2_bkey_types[k.k->type])) {
		ret = -EINVAL;
		goto err;
	}

	if (k.k->type == KEY_TYPE_stripe) {
		const struct bch_stripe *v = bkey_s_c_to_stripe(k).v;

		BUG_ON(old->v.nr_blocks != new->v.nr_blocks);
		BUG_ON(old->v.nr_blocks != v->nr_blocks);

		for (unsigned i = 0; i < new->v.nr_blocks; i++) {
			unsigned sectors = stripe_blockcount_get(v, i);

			if (!bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]) && sectors) {
				struct printbuf buf = PRINTBUF;

				prt_printf(&buf, "stripe changed nonempty block %u", i);
				prt_str(&buf, "\nold: ");
				bch2_bkey_val_to_text(&buf, c, k);
				prt_str(&buf, "\nnew: ");
				bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new->k_i));
				bch2_fs_inconsistent(c, "%s", buf.buf);
				printbuf_exit(&buf);
				ret = -EINVAL;
				goto err;
			}

			/*
			 * If the stripe ptr changed underneath us, it must have
			 * been dev_remove_stripes() -> invalidate_stripe_to_dev()
			 */
			if (!bch2_extent_ptr_eq(old->v.ptrs[i], v->ptrs[i])) {
				BUG_ON(v->ptrs[i].dev != BCH_SB_MEMBER_INVALID);

				if (bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]))
					new->v.ptrs[i].dev = BCH_SB_MEMBER_INVALID;
			}

			stripe_blockcount_set(&new->v, i, sectors);
		}
	}

	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
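
/*
 * In short: when updating an existing stripe key, block counts are
 * copied forward from the key currently on disk, pointers may only
 * change for blocks that are empty, and a pointer invalidated underneath
 * us (device removal) stays invalidated in the new key. A freshly
 * created stripe must land on a deleted slot, which was claimed earlier
 * via bch2_try_open_stripe().
 */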

static int ec_stripe_update_extent(struct btree_trans *trans,
				   struct bch_dev *ca,
				   struct bpos bucket, u8 gen,
				   struct ec_stripe_buf *s,
				   struct bkey_s_c_backpointer bp,
				   struct bkey_buf *last_flushed)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_extent_ptr *ptr_c;
	struct bch_extent_ptr *ec_ptr = NULL;
	struct bch_extent_stripe_ptr stripe_ptr;
	struct bkey_i *n;
	int ret, dev, block;

	if (bp.v->level) {
		struct printbuf buf = PRINTBUF;
		struct btree_iter node_iter;
		struct btree *b;

		b = bch2_backpointer_get_node(trans, bp, &node_iter, last_flushed);
		bch2_trans_iter_exit(trans, &node_iter);

		if (!b)
			return 0;

		prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
		bch2_bkey_val_to_text(&buf, c, bp.s_c);

		bch2_fs_inconsistent(c, "%s", buf.buf);
		printbuf_exit(&buf);
		return -EIO;
	}

	k = bch2_backpointer_get_key(trans, bp, &iter, BTREE_ITER_intent, last_flushed);
	ret = bkey_err(k);
	if (ret)
		return ret;
	if (!k.k) {
		/*
		 * extent no longer exists - we could flush the btree
		 * write buffer and retry to verify, but no need:
		 */
		return 0;
	}

	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
		goto out;

	ptr_c = bkey_matches_stripe(v, k, &block);
	/*
	 * It doesn't generally make sense to erasure code cached ptrs:
	 * XXX: should we be incrementing a counter?
	 */
	if (!ptr_c || ptr_c->cached)
		goto out;

	dev = v->ptrs[block].dev;

	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto out;

	bkey_reassemble(n, k);

	bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, ptr->dev != dev);
	ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
	BUG_ON(!ec_ptr);

	stripe_ptr = (struct bch_extent_stripe_ptr) {
		.type		= 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block		= block,
		.redundancy	= v->nr_redundant,
		.idx		= s->key.k.p.offset,
	};

	__extent_entry_insert(n,
			(union bch_extent_entry *) ec_ptr,
			(union bch_extent_entry *) &stripe_ptr);

	ret = bch2_trans_update(trans, &iter, n, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
				   unsigned block)
{
	struct bch_fs *c = trans->c;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_extent_ptr ptr = v->ptrs[block];
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
	if (!ca)
		return -EIO;

	struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);

	struct bkey_buf last_flushed;
	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	ret = for_each_btree_key_max_commit(trans, bp_iter, BTREE_ID_backpointers,
			bucket_pos_to_bp_start(ca, bucket_pos),
			bucket_pos_to_bp_end(ca, bucket_pos), 0, bp_k,
			NULL, NULL,
			BCH_TRANS_COMMIT_no_check_rw|
			BCH_TRANS_COMMIT_no_enospc, ({
		if (bkey_ge(bp_k.k->p, bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket_pos), 0)))
			break;

		if (bp_k.k->type != KEY_TYPE_backpointer)
			continue;

		ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s,
					bkey_s_c_to_backpointer(bp_k), &last_flushed);
	}));

	bch2_bkey_buf_exit(&last_flushed, c);
	bch2_dev_put(ca);
	return ret;
}

static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret = 0;

	ret = bch2_btree_write_buffer_flush_sync(trans);
	if (ret)
		goto err;

	for (i = 0; i < nr_data; i++) {
		ret = ec_stripe_update_bucket(trans, s, i);
		if (ret)
			break;
	}
err:
	bch2_trans_put(trans);

	return ret;
}
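
/*
 * After a stripe is created, the extents already written to its data
 * buckets don't yet reference it: the helpers above walk the
 * backpointers for each data bucket (after flushing the btree write
 * buffer so the backpointers are current) and rewrite each extent to
 * carry a bch_extent_stripe_ptr naming the stripe index, block number
 * and redundancy. Pointers to other devices are dropped from the extent
 * at the same time, since the stripe now provides the redundancy.
 */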

static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
				       struct ec_stripe_new *s,
				       unsigned block,
				       struct open_bucket *ob)
{
	struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
	if (!ca) {
		s->err = -BCH_ERR_erofs_no_writes;
		return;
	}

	unsigned offset = ca->mi.bucket_size - ob->sectors_free;
	memset(s->new_stripe.data[block] + (offset << 9),
	       0,
	       ob->sectors_free << 9);

	int ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
			ob->bucket * ca->mi.bucket_size + offset,
			ob->sectors_free,
			GFP_KERNEL, 0);

	percpu_ref_put(&ca->io_ref);

	if (ret)
		s->err = ret;
}

void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
{
	if (s->idx)
		bch2_stripe_close(c, s);
	kfree(s);
}

/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret;

	BUG_ON(s->h->s == s);

	closure_sync(&s->iodone);

	if (!s->err) {
		for (i = 0; i < nr_data; i++)
			if (s->blocks[i]) {
				ob = c->open_buckets + s->blocks[i];

				if (ob->sectors_free)
					zero_out_rest_of_ec_bucket(c, s, i, ob);
			}
	}

	if (s->err) {
		if (!bch2_err_matches(s->err, EROFS))
			bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	if (s->have_existing_stripe) {
		ec_validate_checksums(c, &s->existing_stripe);

		if (ec_do_recov(c, &s->existing_stripe)) {
			bch_err(c, "error creating stripe: error reading existing stripe");
			goto err;
		}

		for (i = 0; i < nr_data; i++)
			if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
				swap(s->new_stripe.data[i],
				     s->existing_stripe.data[i]);

		ec_stripe_buf_exit(&s->existing_stripe);
	}

	BUG_ON(!s->allocated);
	BUG_ON(!s->idx);

	ec_generate_ec(&s->new_stripe);

	ec_generate_checksums(&s->new_stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
	closure_sync(&s->iodone);

	if (ec_nr_failed(&s->new_stripe)) {
		bch_err(c, "error creating stripe: error writing redundancy buckets");
		goto err;
	}

	ret = bch2_trans_commit_do(c, &s->res, NULL,
		BCH_TRANS_COMMIT_no_check_rw|
		BCH_TRANS_COMMIT_no_enospc,
		ec_stripe_key_update(trans,
				     s->have_existing_stripe
				     ? bkey_i_to_stripe(&s->existing_stripe.key)
				     : NULL,
				     bkey_i_to_stripe(&s->new_stripe.key)));
	bch_err_msg(c, ret, "creating stripe key");
	if (ret)
		goto err;

	ret = ec_stripe_update_extents(c, &s->new_stripe);
	bch_err_msg(c, ret, "error updating extents");
	if (ret)
		goto err;
err:
	bch2_disk_reservation_put(c, &s->res);

	for (i = 0; i < v->nr_blocks; i++)
		if (s->blocks[i]) {
			ob = c->open_buckets + s->blocks[i];

			if (i < nr_data) {
				ob->ec = NULL;
				__bch2_open_bucket_put(c, ob);
			} else {
				bch2_open_bucket_put(c, ob);
			}
		}

	mutex_lock(&c->ec_stripe_new_lock);
	list_del(&s->list);
	mutex_unlock(&c->ec_stripe_new_lock);
	wake_up(&c->ec_stripe_new_wait);

	ec_stripe_buf_exit(&s->existing_stripe);
	ec_stripe_buf_exit(&s->new_stripe);
	closure_debug_destroy(&s->iodone);

	ec_stripe_new_put(c, s, STRIPE_REF_stripe);
}
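
/*
 * Creation sequence, in brief: wait for the data bucket writes to
 * complete, zero the unwritten tails of the data buckets so parity
 * covers the whole bucket, pull in surviving blocks when reusing an
 * existing stripe, generate parity and checksums, write p/q, then commit
 * the stripe key and rewrite extents to point at it. All error paths
 * fall through to the same cleanup, which releases the disk reservation
 * and the open buckets.
 */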

static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
{
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->ref[STRIPE_REF_io]))
			goto out;
	s = NULL;
out:
	mutex_unlock(&c->ec_stripe_new_lock);

	return s;
}

static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
		struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s;

	while ((s = get_pending_stripe(c)))
		ec_stripe_create(s);

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}

void bch2_ec_do_stripe_creates(struct bch_fs *c)
{
	bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);

	if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}

static void ec_stripe_new_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	lockdep_assert_held(&h->lock);

	BUG_ON(!s->allocated && !s->err);

	h->s		= NULL;
	s->pending	= true;

	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s, STRIPE_REF_io);
}

static void ec_stripe_new_cancel(struct bch_fs *c, struct ec_stripe_head *h, int err)
{
	h->s->err = err;
	ec_stripe_new_set_pending(c, h);
}

void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}

void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	if (!ob)
		return NULL;

	BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);

	struct bch_dev *ca	= ob_dev(c, ob);
	unsigned offset		= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}

static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}

/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(c, ca, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (unsigned i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}
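
/*
 * Example: with member devices whose bucket sizes are {512, 512, 1024,
 * 512, 1024} sectors, the sorted array is {512, 512, 512, 1024, 1024},
 * the run of 512s is longest, and pick_blocksize() returns 512. Devices
 * with a different bucket size are then excluded from the stripe head in
 * ec_stripe_head_devs_update() below, since all blocks of a stripe must
 * be the same size.
 */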

static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}

static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i *k,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size,
			       unsigned disk_label)
{
	struct bkey_i_stripe *s = bkey_stripe_init(k);
	unsigned u64s;

	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->opts.encoded_extent_max >> 9);
	s->v.csum_type			= BCH_CSUM_crc32c;
	s->v.disk_label			= disk_label;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}
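
/*
 * The loop above trades checksum granularity for key size: each extra
 * granularity bit halves the number of checksums stored per block, so if
 * the stripe value would overflow BKEY_VAL_U64s_MAX (many blocks and/or
 * fine granularity), coarser checksum blocks are used until the value
 * fits. The initial granularity is derived from encoded_extent_max.
 */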

static struct ec_stripe_new *ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return NULL;

	mutex_init(&s->lock);
	closure_init(&s->iodone, NULL);
	atomic_set(&s->ref[STRIPE_REF_stripe], 1);
	atomic_set(&s->ref[STRIPE_REF_io], 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				BCH_BKEY_PTRS_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	ec_stripe_key_init(c, &s->new_stripe.key,
			   s->nr_data, s->nr_parity,
			   h->blocksize, h->disk_label);
	return s;
}

static void ec_stripe_head_devs_update(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct bch_devs_mask devs = h->devs;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, h->disk_label
				 ? group_to_target(h->disk_label - 1)
				 : 0);
	unsigned nr_devs = dev_mask_nr(&h->devs);

	for_each_member_device_rcu(c, ca, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(ca->dev_idx, h->devs.d);
	unsigned nr_devs_with_durability = dev_mask_nr(&h->devs);

	h->blocksize = pick_blocksize(c, &h->devs);

	h->nr_active_devs = 0;
	for_each_member_device_rcu(c, ca, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();

	/*
	 * If we only have redundancy + 1 devices, we're better off with just
	 * replication:
	 */
	h->insufficient_devs = h->nr_active_devs < h->redundancy + 2;

	if (h->insufficient_devs) {
		const char *err;

		if (nr_devs < h->redundancy + 2)
			err = NULL;
		else if (nr_devs_with_durability < h->redundancy + 2)
			err = "cannot use durability=0 devices";
		else
			err = "mismatched bucket sizes";

		if (err)
			bch_err(c, "insufficient devices available to create stripe (have %u, need %u): %s",
				h->nr_active_devs, h->redundancy + 2, err);
	}

	struct bch_devs_mask devs_leaving;
	bitmap_andnot(devs_leaving.d, devs.d, h->devs.d, BCH_SB_MEMBERS_MAX);

	if (h->s && !h->s->allocated && dev_mask_nr(&devs_leaving))
		ec_stripe_new_cancel(c, h, -EINTR);

	h->rw_devs_change_count = c->rw_devs_change_count;
}

static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
			 unsigned algo, unsigned redundancy,
			 enum bch_watermark watermark)
{
	struct ec_stripe_head *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	BUG_ON(!mutex_trylock(&h->lock));

	h->disk_label	= disk_label;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->watermark	= watermark;

	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}

void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->nr_data) == h->s->nr_data)
		ec_stripe_new_set_pending(c, h);

	mutex_unlock(&h->lock);
}

static struct ec_stripe_head *
__bch2_ec_stripe_head_get(struct btree_trans *trans,
			  unsigned disk_label,
			  unsigned algo,
			  unsigned redundancy,
			  enum bch_watermark watermark)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	int ret;

	if (!redundancy)
		return NULL;

	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
	if (ret)
		return ERR_PTR(ret);

	if (test_bit(BCH_FS_going_ro, &c->flags)) {
		h = ERR_PTR(-BCH_ERR_erofs_no_writes);
		goto err;
	}

	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->disk_label	== disk_label &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
		    h->watermark	== watermark) {
			ret = bch2_trans_mutex_lock(trans, &h->lock);
			if (ret) {
				h = ERR_PTR(ret);
				goto err;
			}
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, disk_label, algo, redundancy, watermark);
	if (!h) {
		h = ERR_PTR(-BCH_ERR_ENOMEM_stripe_head_alloc);
		goto err;
	}
found:
	if (h->rw_devs_change_count != c->rw_devs_change_count)
		ec_stripe_head_devs_update(c, h);

	if (h->insufficient_devs) {
		mutex_unlock(&h->lock);
		h = NULL;
	}
err:
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}

static int new_stripe_alloc_buckets(struct btree_trans *trans,
				    struct ec_stripe_head *h, struct ec_stripe_new *s,
				    enum bch_watermark watermark, struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs = h->devs;
	struct open_bucket *ob;
	struct open_buckets buckets;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
	bool have_cache = true;
	int ret = 0;

	BUG_ON(v->nr_blocks	!= s->nr_data + s->nr_parity);
	BUG_ON(v->nr_redundant	!= s->nr_parity);

	/* We bypass the sector allocator which normally does this: */
	bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX);

	for_each_set_bit(i, s->blocks_gotten, v->nr_blocks) {
		/*
		 * Note: we don't yet repair invalid blocks (failed/removed
		 * devices) when reusing stripes - we still need a codepath to
		 * walk backpointers and update all extents that point to that
		 * block when updating the stripe
		 */
		if (v->ptrs[i].dev != BCH_SB_MEMBER_INVALID)
			__clear_bit(v->ptrs[i].dev, devs.d);

		if (i < s->nr_data)
			nr_have_data++;
		else
			nr_have_parity++;
	}

	BUG_ON(nr_have_data	> s->nr_data);
	BUG_ON(nr_have_parity	> s->nr_parity);

	buckets.nr = 0;
	if (nr_have_parity < s->nr_parity) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->parity_stripe,
					    &devs,
					    s->nr_parity,
					    &nr_have_parity,
					    &have_cache, 0,
					    BCH_DATA_parity,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(s->blocks_gotten,
					       s->nr_data + s->nr_parity,
					       s->nr_data);
			BUG_ON(j >= s->nr_data + s->nr_parity);

			s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	buckets.nr = 0;
	if (nr_have_data < s->nr_data) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->block_stripe,
					    &devs,
					    s->nr_data,
					    &nr_have_data,
					    &have_cache, 0,
					    BCH_DATA_user,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(s->blocks_gotten,
					       s->nr_data, 0);
			BUG_ON(j >= s->nr_data);

			s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	return 0;
}
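
/*
 * Allocation order above is parity first, then data: parity buckets fill
 * block indices from nr_data up, data buckets fill indices
 * 0..nr_data - 1, and blocks already present (from a reused stripe) keep
 * their slots, with their devices masked out of the candidate set so no
 * two blocks of a stripe land on the same device. Open buckets are
 * recorded even when the allocation as a whole returns an error, so
 * partial progress is kept for the retry.
 */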

static s64 get_existing_stripe(struct bch_fs *c,
			       struct ec_stripe_head *head)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t heap_idx;
	u64 stripe_idx;
	s64 ret = -1;

	if (may_create_new_stripe(c))
		return -1;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (heap_idx = 0; heap_idx < h->nr; heap_idx++) {
		/* No blocks worth reusing, stripe will just be deleted: */
		if (!h->data[heap_idx].blocks_nonempty)
			continue;

		stripe_idx = h->data[heap_idx].idx;

		m = genradix_ptr(&c->stripes, stripe_idx);

		if (m->disk_label	== head->disk_label &&
		    m->algorithm	== head->algo &&
		    m->nr_redundant	== head->redundancy &&
		    m->sectors		== head->blocksize &&
		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant &&
		    bch2_try_open_stripe(c, head->s, stripe_idx)) {
			ret = stripe_idx;
			break;
		}
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
	return ret;
}

static int init_new_stripe_from_existing(struct bch_fs *c, struct ec_stripe_new *s)
{
	struct bch_stripe *new_v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	struct bch_stripe *existing_v = &bkey_i_to_stripe(&s->existing_stripe.key)->v;
	unsigned i;

	BUG_ON(existing_v->nr_redundant != s->nr_parity);
	s->nr_data = existing_v->nr_blocks -
		existing_v->nr_redundant;

	int ret = ec_stripe_buf_init(&s->existing_stripe, 0, le16_to_cpu(existing_v->sectors));
	if (ret) {
		bch2_stripe_close(c, s);
		return ret;
	}

	BUG_ON(s->existing_stripe.size != le16_to_cpu(existing_v->sectors));

	/*
	 * Free buckets we initially allocated - they might conflict with
	 * blocks from the stripe we're reusing:
	 */
	for_each_set_bit(i, s->blocks_gotten, new_v->nr_blocks) {
		bch2_open_bucket_put(c, c->open_buckets + s->blocks[i]);
		s->blocks[i] = 0;
	}
	memset(s->blocks_gotten, 0, sizeof(s->blocks_gotten));
	memset(s->blocks_allocated, 0, sizeof(s->blocks_allocated));

	for (unsigned i = 0; i < existing_v->nr_blocks; i++) {
		if (stripe_blockcount_get(existing_v, i)) {
			__set_bit(i, s->blocks_gotten);
			__set_bit(i, s->blocks_allocated);
		}

		ec_block_io(c, &s->existing_stripe, READ, i, &s->iodone);
	}

	bkey_copy(&s->new_stripe.key, &s->existing_stripe.key);
	s->have_existing_stripe = true;

	return 0;
}

static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h,
				       struct ec_stripe_new *s)
{
	struct bch_fs *c = trans->c;
	s64 idx;
	int ret;

	/*
	 * If we can't allocate a new stripe, and there's no stripes with empty
	 * blocks for us to reuse, that means we have to wait on copygc:
	 */
	idx = get_existing_stripe(c, h);
	if (idx < 0)
		return -BCH_ERR_stripe_alloc_blocked;

	ret = get_stripe_key_trans(trans, idx, &s->existing_stripe);
	bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c,
			     "reading stripe key: %s", bch2_err_str(ret));
	if (ret) {
		bch2_stripe_close(c, s);
		return ret;
	}

	return init_new_stripe_from_existing(c, s);
}
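
/*
 * Reuse, in brief: scan the stripes heap for a compatible stripe (same
 * label, algorithm, redundancy and size) that still has empty blocks,
 * open it so deletion can't race with us, read all of its surviving
 * blocks into memory, and keep its nonempty blocks in place - only the
 * empty blocks get fresh buckets. Parity is then recomputed over the mix
 * of old and new data when the stripe is created.
 */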
static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h,
					 struct ec_stripe_new *s)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos min_pos = POS(0, 1);
	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
	int ret;

	if (!s->res.sectors) {
		ret = bch2_disk_reservation_get(c, &s->res,
					h->blocksize,
					s->nr_parity,
					BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			return ret;
	}

	/*
	 * Allocate stripe slot
	 * XXX: we're going to need a bitrange btree of free stripes
	 */
	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
			   BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
			if (start_pos.offset) {
				start_pos = min_pos;
				bch2_btree_iter_set_pos(&iter, start_pos);
				continue;
			}

			ret = -BCH_ERR_ENOSPC_stripe_create;
			break;
		}

		if (bkey_deleted(k.k) &&
		    bch2_try_open_stripe(c, s, k.k->p.offset))
			break;
	}

	c->ec_stripe_hint = iter.pos.offset;

	if (ret)
		goto err;

	ret = ec_stripe_mem_alloc(trans, &iter);
	if (ret) {
		bch2_stripe_close(c, s);
		goto err;
	}

	s->new_stripe.key.k.p = iter.pos;
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
err:
	bch2_disk_reservation_put(c, &s->res);
	goto out;
}

struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
					       enum bch_watermark watermark,
					       struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	bool waiting = false;
	unsigned disk_label = 0;
	struct target t = target_decode(target);
	int ret;

	if (t.type == TARGET_GROUP) {
		if (t.group > U8_MAX) {
			bch_err(c, "cannot create a stripe when disk_label > U8_MAX");
			return NULL;
		}
		disk_label = t.group + 1; /* 0 == no label */
	}

	h = __bch2_ec_stripe_head_get(trans, disk_label, algo, redundancy, watermark);
	if (IS_ERR_OR_NULL(h))
		return h;

	if (!h->s) {
		h->s = ec_new_stripe_alloc(c, h);
		if (!h->s) {
			ret = -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
			bch_err(c, "failed to allocate new stripe");
			goto err;
		}

		h->nr_created++;
	}

	struct ec_stripe_new *s = h->s;

	if (s->allocated)
		goto allocated;

	if (s->have_existing_stripe)
		goto alloc_existing;

	/* First, try to allocate a full stripe: */
	ret =   new_stripe_alloc_buckets(trans, h, s, BCH_WATERMARK_stripe, NULL) ?:
		__bch2_ec_stripe_head_reserve(trans, h, s);
	if (!ret)
		goto allocate_buf;
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
	    bch2_err_matches(ret, ENOMEM))
		goto err;

	/*
	 * Not enough buckets available for a full stripe: we must reuse an
	 * existing stripe:
	 */
	while (1) {
		ret = __bch2_ec_stripe_head_reuse(trans, h, s);
		if (!ret)
			break;
		if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
			goto err;

		if (watermark == BCH_WATERMARK_copygc) {
			ret =   new_stripe_alloc_buckets(trans, h, s, watermark, NULL) ?:
				__bch2_ec_stripe_head_reserve(trans, h, s);
			if (ret)
				goto err;
			goto allocate_buf;
		}

		/* XXX freelist_wait? */
		closure_wait(&c->freelist_wait, cl);
		waiting = true;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc_existing:
	/*
	 * Retry allocating buckets, with the watermark for this
	 * particular write:
	 */
	ret = new_stripe_alloc_buckets(trans, h, s, watermark, cl);
	if (ret)
		goto err;

allocate_buf:
	ret = ec_stripe_buf_init(&s->new_stripe, 0, h->blocksize);
	if (ret)
		goto err;

	s->allocated = true;
allocated:
	BUG_ON(!s->idx);
	BUG_ON(!s->new_stripe.data[0]);
	BUG_ON(trans->restarted);
	return h;
err:
	bch2_ec_stripe_head_put(c, h);
	return ERR_PTR(ret);
}
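/*
 * Allocation strategy of bch2_ec_stripe_head_get(), roughly (this is a
 * reading of the code above, not documented policy):
 *
 *  1. Try to allocate buckets for a brand new stripe at BCH_WATERMARK_stripe
 *     and reserve a slot in the stripes btree.
 *  2. Failing that, loop trying to reuse an existing stripe with empty
 *     blocks; copygc must not block here, so it retries a fresh stripe at
 *     its own watermark instead of waiting on the freelist.
 *  3. Once a stripe (new or reused) is in hand, retry the bucket allocation
 *     with the caller's watermark and allocate the stripe buffers.
 */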
/* device removal */

static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_s_c k_a)
{
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k_a, &a_convert);

	if (!a->stripe)
		return 0;

	if (a->stripe_sectors) {
		bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data");
		return -BCH_ERR_invalidate_stripe_to_dev;
	}

	struct btree_iter iter;
	struct bkey_i_stripe *s =
		bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe),
					BTREE_ITER_slots, stripe);
	int ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		return ret;

	struct disk_accounting_pos acc = {
		.type = BCH_DISK_ACCOUNTING_replicas,
	};

	s64 sectors = 0;
	for (unsigned i = 0; i < s->v.nr_blocks; i++)
		sectors -= stripe_blockcount_get(&s->v, i);

	/* Subtract the stripe's sectors under the old replicas entry... */
	bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
	acc.replicas.data_type = BCH_DATA_user;
	ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
	if (ret)
		goto err;

	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(&s->k_i));
	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == k_a.k->p.inode)
			ptr->dev = BCH_SB_MEMBER_INVALID;

	sectors = -sectors;

	/* ...then re-add them under the entry with the pointer invalidated: */
	bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
	acc.replicas.data_type = BCH_DATA_user;
	ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx)
{
	return bch2_trans_run(c,
		for_each_btree_key_max_commit(trans, iter,
				  BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX),
				  BTREE_ITER_intent, k,
				  NULL, NULL, 0, ({
			bch2_invalidate_stripe_to_dev(trans, k);
		})));
}

/* startup/shutdown */

static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		if (!ca)
			goto found;

		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
			if (!h->s->blocks[i])
				continue;

			ob = c->open_buckets + h->s->blocks[i];
			if (ob->dev == ca->dev_idx)
				goto found;
		}
		goto unlock;
found:
		ec_stripe_new_cancel(c, h, -BCH_ERR_erofs_no_writes);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}
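/*
 * __bch2_ec_stop() with ca == NULL cancels every pending stripe; with a
 * specific device it only cancels stripes holding one of that device's open
 * buckets.  Note that the wrappers below only cancel - neither waits for
 * in-flight stripe creates to finish; that's what bch2_fs_ec_flush() is for.
 */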
void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	__bch2_ec_stop(c, ca);
}

void bch2_fs_ec_stop(struct bch_fs *c)
{
	__bch2_ec_stop(c, NULL);
}

static bool bch2_fs_ec_flush_done(struct bch_fs *c)
{
	bool ret;

	mutex_lock(&c->ec_stripe_new_lock);
	ret = list_empty(&c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	return ret;
}

void bch2_fs_ec_flush(struct bch_fs *c)
{
	wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
}

int bch2_stripes_read(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
				   BTREE_ITER_prefetch, k, ({
			if (k.k->type != KEY_TYPE_stripe)
				continue;

			ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
			if (ret)
				break;

			struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);

			stripe_to_mem(m, bkey_s_c_to_stripe(k).v);

			bch2_stripes_heap_insert(c, m, k.k->p.offset);
			0;
		})));
	bch_err_fn(c, ret);
	return ret;
}

void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (i = 0; i < min_t(size_t, h->nr, 50); i++) {
		m = genradix_ptr(&c->stripes, h->data[i].idx);

		prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
			   h->data[i].blocks_nonempty,
			   m->nr_blocks - m->nr_redundant,
			   m->nr_redundant);
		if (bch2_stripe_is_open(c, h->data[i].idx))
			prt_str(out, " open");
		prt_newline(out);
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
}

static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c,
				    struct ec_stripe_new *s)
{
	prt_printf(out, "\tidx %llu blocks %u+%u allocated %u ref %u %u %s obs",
		   s->idx, s->nr_data, s->nr_parity,
		   bitmap_weight(s->blocks_allocated, s->nr_data),
		   atomic_read(&s->ref[STRIPE_REF_io]),
		   atomic_read(&s->ref[STRIPE_REF_stripe]),
		   bch2_watermarks[s->h->watermark]);

	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i;
	for_each_set_bit(i, s->blocks_gotten, v->nr_blocks)
		prt_printf(out, " %u", s->blocks[i]);
	prt_newline(out);
	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&s->new_stripe.key));
	prt_newline(out);
}

void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		prt_printf(out, "disk label %u algo %u redundancy %u %s nr created %llu:\n",
			   h->disk_label, h->algo, h->redundancy,
			   bch2_watermarks[h->watermark],
			   h->nr_created);

		if (h->s)
			bch2_new_stripe_to_text(out, c, h->s);
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	prt_printf(out, "in flight:\n");

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list)
		bch2_new_stripe_to_text(out, c, s);
	mutex_unlock(&c->ec_stripe_new_lock);
}
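/*
 * Example of one stripes-heap line emitted by bch2_stripes_heap_to_text(),
 * following its format string (hypothetical values):
 *
 *	42 1/4+2 open
 *
 * i.e. stripe index 42, 1 nonempty block out of 4 data blocks plus 2
 * redundant blocks, currently open.
 */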
void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;
	unsigned i;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_pop_entry(&c->ec_stripe_head_list, struct ec_stripe_head, list);
		mutex_unlock(&c->ec_stripe_head_lock);

		if (!h)
			break;

		if (h->s) {
			for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
				BUG_ON(h->s->blocks[i]);

			kfree(h->s);
		}
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes);
	bioset_exit(&c->ec_bioset);
}

void bch2_fs_ec_init_early(struct bch_fs *c)
{
	spin_lock_init(&c->ec_stripes_new_lock);
	mutex_init(&c->ec_stripes_heap_lock);

	INIT_LIST_HEAD(&c->ec_stripe_head_list);
	mutex_init(&c->ec_stripe_head_lock);

	INIT_LIST_HEAD(&c->ec_stripe_new_list);
	mutex_init(&c->ec_stripe_new_lock);
	init_waitqueue_head(&c->ec_stripe_new_wait);

	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
}

int bch2_fs_ec_init(struct bch_fs *c)
{
	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}
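/*
 * Setup ordering, as implied by the two init hooks above (not a documented
 * contract): bch2_fs_ec_init_early() only initializes locks, lists and work
 * items and cannot fail, so it can run very early in filesystem bring-up;
 * bch2_fs_ec_init() allocates the bioset and can fail, so it belongs with
 * the other allocating init functions and is undone by bch2_fs_ec_exit().
 */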