1 // SPDX-License-Identifier: GPL-2.0 2 3 /* erasure coding */ 4 5 #include "bcachefs.h" 6 #include "alloc_background.h" 7 #include "alloc_foreground.h" 8 #include "backpointers.h" 9 #include "bkey_buf.h" 10 #include "bset.h" 11 #include "btree_gc.h" 12 #include "btree_update.h" 13 #include "btree_write_buffer.h" 14 #include "buckets.h" 15 #include "checksum.h" 16 #include "disk_accounting.h" 17 #include "disk_groups.h" 18 #include "ec.h" 19 #include "error.h" 20 #include "io_read.h" 21 #include "keylist.h" 22 #include "recovery.h" 23 #include "replicas.h" 24 #include "super-io.h" 25 #include "util.h" 26 27 #include <linux/sort.h> 28 29 #ifdef __KERNEL__ 30 31 #include <linux/raid/pq.h> 32 #include <linux/raid/xor.h> 33 34 static void raid5_recov(unsigned disks, unsigned failed_idx, 35 size_t size, void **data) 36 { 37 unsigned i = 2, nr; 38 39 BUG_ON(failed_idx >= disks); 40 41 swap(data[0], data[failed_idx]); 42 memcpy(data[0], data[1], size); 43 44 while (i < disks) { 45 nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS); 46 xor_blocks(nr, size, data[0], data + i); 47 i += nr; 48 } 49 50 swap(data[0], data[failed_idx]); 51 } 52 53 static void raid_gen(int nd, int np, size_t size, void **v) 54 { 55 if (np >= 1) 56 raid5_recov(nd + np, nd, size, v); 57 if (np >= 2) 58 raid6_call.gen_syndrome(nd + np, size, v); 59 BUG_ON(np > 2); 60 } 61 62 static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v) 63 { 64 switch (nr) { 65 case 0: 66 break; 67 case 1: 68 if (ir[0] < nd + 1) 69 raid5_recov(nd + 1, ir[0], size, v); 70 else 71 raid6_call.gen_syndrome(nd + np, size, v); 72 break; 73 case 2: 74 if (ir[1] < nd) { 75 /* data+data failure. */ 76 raid6_2data_recov(nd + np, size, ir[0], ir[1], v); 77 } else if (ir[0] < nd) { 78 /* data + p/q failure */ 79 80 if (ir[1] == nd) /* data + p failure */ 81 raid6_datap_recov(nd + np, size, ir[0], v); 82 else { /* data + q failure */ 83 raid5_recov(nd + 1, ir[0], size, v); 84 raid6_call.gen_syndrome(nd + np, size, v); 85 } 86 } else { 87 raid_gen(nd, np, size, v); 88 } 89 break; 90 default: 91 BUG(); 92 } 93 } 94 95 #else 96 97 #include <raid/raid.h> 98 99 #endif 100 101 struct ec_bio { 102 struct bch_dev *ca; 103 struct ec_stripe_buf *buf; 104 size_t idx; 105 struct bio bio; 106 }; 107 108 /* Stripes btree keys: */ 109 110 int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k, 111 enum bch_validate_flags flags, 112 struct printbuf *err) 113 { 114 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v; 115 int ret = 0; 116 117 bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) || 118 bpos_gt(k.k->p, POS(0, U32_MAX)), c, err, 119 stripe_pos_bad, 120 "stripe at bad pos"); 121 122 bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err, 123 stripe_val_size_bad, 124 "incorrect value size (%zu < %u)", 125 bkey_val_u64s(k.k), stripe_val_u64s(s)); 126 127 ret = bch2_bkey_ptrs_invalid(c, k, flags, err); 128 fsck_err: 129 return ret; 130 } 131 132 void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c, 133 struct bkey_s_c k) 134 { 135 const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v; 136 struct bch_stripe s = {}; 137 138 memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k))); 139 140 unsigned nr_data = s.nr_blocks - s.nr_redundant; 141 142 prt_printf(out, "algo %u sectors %u blocks %u:%u csum ", 143 s.algorithm, 144 le16_to_cpu(s.sectors), 145 nr_data, 146 s.nr_redundant); 147 bch2_prt_csum_type(out, s.csum_type); 148 prt_printf(out, " gran %u", 1U << s.csum_granularity_bits); 149 150 for (unsigned i = 0; i < s.nr_blocks; i++) { 151 const struct bch_extent_ptr *ptr = sp->ptrs + i; 152 153 if ((void *) ptr >= bkey_val_end(k)) 154 break; 155 156 bch2_extent_ptr_to_text(out, c, ptr); 157 158 if (s.csum_type < BCH_CSUM_NR && 159 i < nr_data && 160 stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k)) 161 prt_printf(out, "#%u", stripe_blockcount_get(sp, i)); 162 } 163 } 164 165 /* Triggers: */ 166 167 static int __mark_stripe_bucket(struct btree_trans *trans, 168 struct bch_dev *ca, 169 struct bkey_s_c_stripe s, 170 unsigned ptr_idx, bool deleting, 171 struct bpos bucket, 172 struct bch_alloc_v4 *a, 173 enum btree_iter_update_trigger_flags flags) 174 { 175 const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx; 176 unsigned nr_data = s.v->nr_blocks - s.v->nr_redundant; 177 bool parity = ptr_idx >= nr_data; 178 enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe; 179 s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0; 180 struct printbuf buf = PRINTBUF; 181 int ret = 0; 182 183 struct bch_fs *c = trans->c; 184 if (deleting) 185 sectors = -sectors; 186 187 if (!deleting) { 188 if (bch2_trans_inconsistent_on(a->stripe || 189 a->stripe_redundancy, trans, 190 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s", 191 bucket.inode, bucket.offset, a->gen, 192 bch2_data_type_str(a->data_type), 193 a->dirty_sectors, 194 a->stripe, s.k->p.offset, 195 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { 196 ret = -EIO; 197 goto err; 198 } 199 200 if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans, 201 "bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s", 202 bucket.inode, bucket.offset, a->gen, 203 bch2_data_type_str(a->data_type), 204 a->dirty_sectors, 205 a->cached_sectors, 206 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { 207 ret = -EIO; 208 goto err; 209 } 210 } else { 211 if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset || 212 a->stripe_redundancy != s.v->nr_redundant, trans, 213 "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s", 214 bucket.inode, bucket.offset, a->gen, 215 a->stripe, 216 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { 217 ret = -EIO; 218 goto err; 219 } 220 221 if (bch2_trans_inconsistent_on(a->data_type != data_type, trans, 222 "bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s", 223 bucket.inode, bucket.offset, a->gen, 224 bch2_data_type_str(a->data_type), 225 bch2_data_type_str(data_type), 226 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { 227 ret = -EIO; 228 goto err; 229 } 230 231 if (bch2_trans_inconsistent_on(parity && 232 (a->dirty_sectors != -sectors || 233 a->cached_sectors), trans, 234 "bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s", 235 bucket.inode, bucket.offset, a->gen, 236 a->dirty_sectors, 237 a->cached_sectors, 238 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { 239 ret = -EIO; 240 goto err; 241 } 242 } 243 244 if (sectors) { 245 ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type, 246 a->gen, a->data_type, &a->dirty_sectors); 247 if (ret) 248 goto err; 249 } 250 251 if (!deleting) { 252 a->stripe = s.k->p.offset; 253 a->stripe_redundancy = s.v->nr_redundant; 254 } else { 255 a->stripe = 0; 256 a->stripe_redundancy = 0; 257 } 258 259 alloc_data_type_set(a, data_type); 260 err: 261 printbuf_exit(&buf); 262 return ret; 263 } 264 265 static int mark_stripe_bucket(struct btree_trans *trans, 266 struct bkey_s_c_stripe s, 267 unsigned ptr_idx, bool deleting, 268 enum btree_iter_update_trigger_flags flags) 269 { 270 struct bch_fs *c = trans->c; 271 const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx; 272 struct printbuf buf = PRINTBUF; 273 int ret = 0; 274 275 struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev); 276 if (unlikely(!ca)) { 277 if (!(flags & BTREE_TRIGGER_overwrite)) 278 ret = -EIO; 279 goto err; 280 } 281 282 struct bpos bucket = PTR_BUCKET_POS(ca, ptr); 283 284 if (flags & BTREE_TRIGGER_transactional) { 285 struct bkey_i_alloc_v4 *a = 286 bch2_trans_start_alloc_update(trans, bucket, 0); 287 ret = PTR_ERR_OR_ZERO(a) ?: 288 __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags); 289 } 290 291 if (flags & BTREE_TRIGGER_gc) { 292 percpu_down_read(&c->mark_lock); 293 struct bucket *g = gc_bucket(ca, bucket.offset); 294 if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s", 295 ptr->dev, 296 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { 297 ret = -EIO; 298 goto err_unlock; 299 } 300 301 bucket_lock(g); 302 struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; 303 ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags); 304 alloc_to_bucket(g, new); 305 bucket_unlock(g); 306 err_unlock: 307 percpu_up_read(&c->mark_lock); 308 if (!ret) 309 ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags); 310 } 311 err: 312 bch2_dev_put(ca); 313 printbuf_exit(&buf); 314 return ret; 315 } 316 317 static int mark_stripe_buckets(struct btree_trans *trans, 318 struct bkey_s_c old, struct bkey_s_c new, 319 enum btree_iter_update_trigger_flags flags) 320 { 321 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe 322 ? bkey_s_c_to_stripe(old).v : NULL; 323 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe 324 ? bkey_s_c_to_stripe(new).v : NULL; 325 326 BUG_ON(old_s && new_s && old_s->nr_blocks != new_s->nr_blocks); 327 328 unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks; 329 330 for (unsigned i = 0; i < nr_blocks; i++) { 331 if (new_s && old_s && 332 !memcmp(&new_s->ptrs[i], 333 &old_s->ptrs[i], 334 sizeof(new_s->ptrs[i]))) 335 continue; 336 337 if (new_s) { 338 int ret = mark_stripe_bucket(trans, 339 bkey_s_c_to_stripe(new), i, false, flags); 340 if (ret) 341 return ret; 342 } 343 344 if (old_s) { 345 int ret = mark_stripe_bucket(trans, 346 bkey_s_c_to_stripe(old), i, true, flags); 347 if (ret) 348 return ret; 349 } 350 } 351 352 return 0; 353 } 354 355 int bch2_trigger_stripe(struct btree_trans *trans, 356 enum btree_id btree, unsigned level, 357 struct bkey_s_c old, struct bkey_s _new, 358 enum btree_iter_update_trigger_flags flags) 359 { 360 struct bkey_s_c new = _new.s_c; 361 struct bch_fs *c = trans->c; 362 u64 idx = new.k->p.offset; 363 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe 364 ? bkey_s_c_to_stripe(old).v : NULL; 365 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe 366 ? bkey_s_c_to_stripe(new).v : NULL; 367 368 if (unlikely(flags & BTREE_TRIGGER_check_repair)) 369 return bch2_check_fix_ptrs(trans, btree, level, _new.s_c, flags); 370 371 BUG_ON(new_s && old_s && 372 (new_s->nr_blocks != old_s->nr_blocks || 373 new_s->nr_redundant != old_s->nr_redundant)); 374 375 376 if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) { 377 /* 378 * If the pointers aren't changing, we don't need to do anything: 379 */ 380 if (new_s && old_s && 381 new_s->nr_blocks == old_s->nr_blocks && 382 new_s->nr_redundant == old_s->nr_redundant && 383 !memcmp(old_s->ptrs, new_s->ptrs, 384 new_s->nr_blocks * sizeof(struct bch_extent_ptr))) 385 return 0; 386 387 struct gc_stripe *gc = NULL; 388 if (flags & BTREE_TRIGGER_gc) { 389 gc = genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL); 390 if (!gc) { 391 bch_err(c, "error allocating memory for gc_stripes, idx %llu", idx); 392 return -BCH_ERR_ENOMEM_mark_stripe; 393 } 394 395 /* 396 * This will be wrong when we bring back runtime gc: we should 397 * be unmarking the old key and then marking the new key 398 * 399 * Also: when we bring back runtime gc, locking 400 */ 401 gc->alive = true; 402 gc->sectors = le16_to_cpu(new_s->sectors); 403 gc->nr_blocks = new_s->nr_blocks; 404 gc->nr_redundant = new_s->nr_redundant; 405 406 for (unsigned i = 0; i < new_s->nr_blocks; i++) 407 gc->ptrs[i] = new_s->ptrs[i]; 408 409 /* 410 * gc recalculates this field from stripe ptr 411 * references: 412 */ 413 memset(gc->block_sectors, 0, sizeof(gc->block_sectors)); 414 } 415 416 if (new_s) { 417 s64 sectors = (u64) le16_to_cpu(new_s->sectors) * new_s->nr_redundant; 418 419 struct disk_accounting_pos acc = { 420 .type = BCH_DISK_ACCOUNTING_replicas, 421 }; 422 bch2_bkey_to_replicas(&acc.replicas, new); 423 int ret = bch2_disk_accounting_mod(trans, &acc, §ors, 1, gc); 424 if (ret) 425 return ret; 426 427 if (gc) 428 memcpy(&gc->r.e, &acc.replicas, replicas_entry_bytes(&acc.replicas)); 429 } 430 431 if (old_s) { 432 s64 sectors = -((s64) le16_to_cpu(old_s->sectors)) * old_s->nr_redundant; 433 434 struct disk_accounting_pos acc = { 435 .type = BCH_DISK_ACCOUNTING_replicas, 436 }; 437 bch2_bkey_to_replicas(&acc.replicas, old); 438 int ret = bch2_disk_accounting_mod(trans, &acc, §ors, 1, gc); 439 if (ret) 440 return ret; 441 } 442 443 int ret = mark_stripe_buckets(trans, old, new, flags); 444 if (ret) 445 return ret; 446 } 447 448 if (flags & BTREE_TRIGGER_atomic) { 449 struct stripe *m = genradix_ptr(&c->stripes, idx); 450 451 if (!m) { 452 struct printbuf buf1 = PRINTBUF; 453 struct printbuf buf2 = PRINTBUF; 454 455 bch2_bkey_val_to_text(&buf1, c, old); 456 bch2_bkey_val_to_text(&buf2, c, new); 457 bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n" 458 "old %s\n" 459 "new %s", idx, buf1.buf, buf2.buf); 460 printbuf_exit(&buf2); 461 printbuf_exit(&buf1); 462 bch2_inconsistent_error(c); 463 return -1; 464 } 465 466 if (!new_s) { 467 bch2_stripes_heap_del(c, m, idx); 468 469 memset(m, 0, sizeof(*m)); 470 } else { 471 m->sectors = le16_to_cpu(new_s->sectors); 472 m->algorithm = new_s->algorithm; 473 m->nr_blocks = new_s->nr_blocks; 474 m->nr_redundant = new_s->nr_redundant; 475 m->blocks_nonempty = 0; 476 477 for (unsigned i = 0; i < new_s->nr_blocks; i++) 478 m->blocks_nonempty += !!stripe_blockcount_get(new_s, i); 479 480 if (!old_s) 481 bch2_stripes_heap_insert(c, m, idx); 482 else 483 bch2_stripes_heap_update(c, m, idx); 484 } 485 } 486 487 return 0; 488 } 489 490 /* returns blocknr in stripe that we matched: */ 491 static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s, 492 struct bkey_s_c k, unsigned *block) 493 { 494 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 495 unsigned i, nr_data = s->nr_blocks - s->nr_redundant; 496 497 bkey_for_each_ptr(ptrs, ptr) 498 for (i = 0; i < nr_data; i++) 499 if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr, 500 le16_to_cpu(s->sectors))) { 501 *block = i; 502 return ptr; 503 } 504 505 return NULL; 506 } 507 508 static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx) 509 { 510 switch (k.k->type) { 511 case KEY_TYPE_extent: { 512 struct bkey_s_c_extent e = bkey_s_c_to_extent(k); 513 const union bch_extent_entry *entry; 514 515 extent_for_each_entry(e, entry) 516 if (extent_entry_type(entry) == 517 BCH_EXTENT_ENTRY_stripe_ptr && 518 entry->stripe_ptr.idx == idx) 519 return true; 520 521 break; 522 } 523 } 524 525 return false; 526 } 527 528 /* Stripe bufs: */ 529 530 static void ec_stripe_buf_exit(struct ec_stripe_buf *buf) 531 { 532 if (buf->key.k.type == KEY_TYPE_stripe) { 533 struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key); 534 unsigned i; 535 536 for (i = 0; i < s->v.nr_blocks; i++) { 537 kvfree(buf->data[i]); 538 buf->data[i] = NULL; 539 } 540 } 541 } 542 543 /* XXX: this is a non-mempoolified memory allocation: */ 544 static int ec_stripe_buf_init(struct ec_stripe_buf *buf, 545 unsigned offset, unsigned size) 546 { 547 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; 548 unsigned csum_granularity = 1U << v->csum_granularity_bits; 549 unsigned end = offset + size; 550 unsigned i; 551 552 BUG_ON(end > le16_to_cpu(v->sectors)); 553 554 offset = round_down(offset, csum_granularity); 555 end = min_t(unsigned, le16_to_cpu(v->sectors), 556 round_up(end, csum_granularity)); 557 558 buf->offset = offset; 559 buf->size = end - offset; 560 561 memset(buf->valid, 0xFF, sizeof(buf->valid)); 562 563 for (i = 0; i < v->nr_blocks; i++) { 564 buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL); 565 if (!buf->data[i]) 566 goto err; 567 } 568 569 return 0; 570 err: 571 ec_stripe_buf_exit(buf); 572 return -BCH_ERR_ENOMEM_stripe_buf; 573 } 574 575 /* Checksumming: */ 576 577 static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf, 578 unsigned block, unsigned offset) 579 { 580 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; 581 unsigned csum_granularity = 1 << v->csum_granularity_bits; 582 unsigned end = buf->offset + buf->size; 583 unsigned len = min(csum_granularity, end - offset); 584 585 BUG_ON(offset >= end); 586 BUG_ON(offset < buf->offset); 587 BUG_ON(offset & (csum_granularity - 1)); 588 BUG_ON(offset + len != le16_to_cpu(v->sectors) && 589 (len & (csum_granularity - 1))); 590 591 return bch2_checksum(NULL, v->csum_type, 592 null_nonce(), 593 buf->data[block] + ((offset - buf->offset) << 9), 594 len << 9); 595 } 596 597 static void ec_generate_checksums(struct ec_stripe_buf *buf) 598 { 599 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; 600 unsigned i, j, csums_per_device = stripe_csums_per_device(v); 601 602 if (!v->csum_type) 603 return; 604 605 BUG_ON(buf->offset); 606 BUG_ON(buf->size != le16_to_cpu(v->sectors)); 607 608 for (i = 0; i < v->nr_blocks; i++) 609 for (j = 0; j < csums_per_device; j++) 610 stripe_csum_set(v, i, j, 611 ec_block_checksum(buf, i, j << v->csum_granularity_bits)); 612 } 613 614 static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf) 615 { 616 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; 617 unsigned csum_granularity = 1 << v->csum_granularity_bits; 618 unsigned i; 619 620 if (!v->csum_type) 621 return; 622 623 for (i = 0; i < v->nr_blocks; i++) { 624 unsigned offset = buf->offset; 625 unsigned end = buf->offset + buf->size; 626 627 if (!test_bit(i, buf->valid)) 628 continue; 629 630 while (offset < end) { 631 unsigned j = offset >> v->csum_granularity_bits; 632 unsigned len = min(csum_granularity, end - offset); 633 struct bch_csum want = stripe_csum_get(v, i, j); 634 struct bch_csum got = ec_block_checksum(buf, i, offset); 635 636 if (bch2_crc_cmp(want, got)) { 637 struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev); 638 if (ca) { 639 struct printbuf err = PRINTBUF; 640 641 prt_str(&err, "stripe "); 642 bch2_csum_err_msg(&err, v->csum_type, want, got); 643 prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i); 644 bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key)); 645 bch_err_ratelimited(ca, "%s", err.buf); 646 printbuf_exit(&err); 647 648 bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); 649 } 650 651 clear_bit(i, buf->valid); 652 break; 653 } 654 655 offset += len; 656 } 657 } 658 } 659 660 /* Erasure coding: */ 661 662 static void ec_generate_ec(struct ec_stripe_buf *buf) 663 { 664 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; 665 unsigned nr_data = v->nr_blocks - v->nr_redundant; 666 unsigned bytes = le16_to_cpu(v->sectors) << 9; 667 668 raid_gen(nr_data, v->nr_redundant, bytes, buf->data); 669 } 670 671 static unsigned ec_nr_failed(struct ec_stripe_buf *buf) 672 { 673 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; 674 675 return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks); 676 } 677 678 static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf) 679 { 680 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; 681 unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0; 682 unsigned nr_data = v->nr_blocks - v->nr_redundant; 683 unsigned bytes = buf->size << 9; 684 685 if (ec_nr_failed(buf) > v->nr_redundant) { 686 bch_err_ratelimited(c, 687 "error doing reconstruct read: unable to read enough blocks"); 688 return -1; 689 } 690 691 for (i = 0; i < nr_data; i++) 692 if (!test_bit(i, buf->valid)) 693 failed[nr_failed++] = i; 694 695 raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data); 696 return 0; 697 } 698 699 /* IO: */ 700 701 static void ec_block_endio(struct bio *bio) 702 { 703 struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio); 704 struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v; 705 struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx]; 706 struct bch_dev *ca = ec_bio->ca; 707 struct closure *cl = bio->bi_private; 708 709 if (bch2_dev_io_err_on(bio->bi_status, ca, 710 bio_data_dir(bio) 711 ? BCH_MEMBER_ERROR_write 712 : BCH_MEMBER_ERROR_read, 713 "erasure coding %s error: %s", 714 bio_data_dir(bio) ? "write" : "read", 715 bch2_blk_status_to_str(bio->bi_status))) 716 clear_bit(ec_bio->idx, ec_bio->buf->valid); 717 718 int stale = dev_ptr_stale(ca, ptr); 719 if (stale) { 720 bch_err_ratelimited(ca->fs, 721 "error %s stripe: stale/invalid pointer (%i) after io", 722 bio_data_dir(bio) == READ ? "reading from" : "writing to", 723 stale); 724 clear_bit(ec_bio->idx, ec_bio->buf->valid); 725 } 726 727 bio_put(&ec_bio->bio); 728 percpu_ref_put(&ca->io_ref); 729 closure_put(cl); 730 } 731 732 static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, 733 blk_opf_t opf, unsigned idx, struct closure *cl) 734 { 735 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; 736 unsigned offset = 0, bytes = buf->size << 9; 737 struct bch_extent_ptr *ptr = &v->ptrs[idx]; 738 enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant 739 ? BCH_DATA_user 740 : BCH_DATA_parity; 741 int rw = op_is_write(opf); 742 743 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw); 744 if (!ca) { 745 clear_bit(idx, buf->valid); 746 return; 747 } 748 749 int stale = dev_ptr_stale(ca, ptr); 750 if (stale) { 751 bch_err_ratelimited(c, 752 "error %s stripe: stale pointer (%i)", 753 rw == READ ? "reading from" : "writing to", 754 stale); 755 clear_bit(idx, buf->valid); 756 return; 757 } 758 759 760 this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size); 761 762 while (offset < bytes) { 763 unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS, 764 DIV_ROUND_UP(bytes, PAGE_SIZE)); 765 unsigned b = min_t(size_t, bytes - offset, 766 nr_iovecs << PAGE_SHIFT); 767 struct ec_bio *ec_bio; 768 769 ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 770 nr_iovecs, 771 opf, 772 GFP_KERNEL, 773 &c->ec_bioset), 774 struct ec_bio, bio); 775 776 ec_bio->ca = ca; 777 ec_bio->buf = buf; 778 ec_bio->idx = idx; 779 780 ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9); 781 ec_bio->bio.bi_end_io = ec_block_endio; 782 ec_bio->bio.bi_private = cl; 783 784 bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b); 785 786 closure_get(cl); 787 percpu_ref_get(&ca->io_ref); 788 789 submit_bio(&ec_bio->bio); 790 791 offset += b; 792 } 793 794 percpu_ref_put(&ca->io_ref); 795 } 796 797 static int get_stripe_key_trans(struct btree_trans *trans, u64 idx, 798 struct ec_stripe_buf *stripe) 799 { 800 struct btree_iter iter; 801 struct bkey_s_c k; 802 int ret; 803 804 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, 805 POS(0, idx), BTREE_ITER_slots); 806 ret = bkey_err(k); 807 if (ret) 808 goto err; 809 if (k.k->type != KEY_TYPE_stripe) { 810 ret = -ENOENT; 811 goto err; 812 } 813 bkey_reassemble(&stripe->key, k); 814 err: 815 bch2_trans_iter_exit(trans, &iter); 816 return ret; 817 } 818 819 /* recovery read path: */ 820 int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio) 821 { 822 struct bch_fs *c = trans->c; 823 struct ec_stripe_buf *buf; 824 struct closure cl; 825 struct bch_stripe *v; 826 unsigned i, offset; 827 int ret = 0; 828 829 closure_init_stack(&cl); 830 831 BUG_ON(!rbio->pick.has_ec); 832 833 buf = kzalloc(sizeof(*buf), GFP_NOFS); 834 if (!buf) 835 return -BCH_ERR_ENOMEM_ec_read_extent; 836 837 ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf)); 838 if (ret) { 839 bch_err_ratelimited(c, 840 "error doing reconstruct read: error %i looking up stripe", ret); 841 kfree(buf); 842 return -EIO; 843 } 844 845 v = &bkey_i_to_stripe(&buf->key)->v; 846 847 if (!bch2_ptr_matches_stripe(v, rbio->pick)) { 848 bch_err_ratelimited(c, 849 "error doing reconstruct read: pointer doesn't match stripe"); 850 ret = -EIO; 851 goto err; 852 } 853 854 offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset; 855 if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) { 856 bch_err_ratelimited(c, 857 "error doing reconstruct read: read is bigger than stripe"); 858 ret = -EIO; 859 goto err; 860 } 861 862 ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio)); 863 if (ret) 864 goto err; 865 866 for (i = 0; i < v->nr_blocks; i++) 867 ec_block_io(c, buf, REQ_OP_READ, i, &cl); 868 869 closure_sync(&cl); 870 871 if (ec_nr_failed(buf) > v->nr_redundant) { 872 bch_err_ratelimited(c, 873 "error doing reconstruct read: unable to read enough blocks"); 874 ret = -EIO; 875 goto err; 876 } 877 878 ec_validate_checksums(c, buf); 879 880 ret = ec_do_recov(c, buf); 881 if (ret) 882 goto err; 883 884 memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter, 885 buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9)); 886 err: 887 ec_stripe_buf_exit(buf); 888 kfree(buf); 889 return ret; 890 } 891 892 /* stripe bucket accounting: */ 893 894 static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp) 895 { 896 ec_stripes_heap n, *h = &c->ec_stripes_heap; 897 898 if (idx >= h->size) { 899 if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp)) 900 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc; 901 902 mutex_lock(&c->ec_stripes_heap_lock); 903 if (n.size > h->size) { 904 memcpy(n.data, h->data, h->nr * sizeof(h->data[0])); 905 n.nr = h->nr; 906 swap(*h, n); 907 } 908 mutex_unlock(&c->ec_stripes_heap_lock); 909 910 free_heap(&n); 911 } 912 913 if (!genradix_ptr_alloc(&c->stripes, idx, gfp)) 914 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc; 915 916 if (c->gc_pos.phase != GC_PHASE_not_running && 917 !genradix_ptr_alloc(&c->gc_stripes, idx, gfp)) 918 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc; 919 920 return 0; 921 } 922 923 static int ec_stripe_mem_alloc(struct btree_trans *trans, 924 struct btree_iter *iter) 925 { 926 return allocate_dropping_locks_errcode(trans, 927 __ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp)); 928 } 929 930 /* 931 * Hash table of open stripes: 932 * Stripes that are being created or modified are kept in a hash table, so that 933 * stripe deletion can skip them. 934 */ 935 936 static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx) 937 { 938 unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new))); 939 struct ec_stripe_new *s; 940 941 hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash) 942 if (s->idx == idx) 943 return true; 944 return false; 945 } 946 947 static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx) 948 { 949 bool ret = false; 950 951 spin_lock(&c->ec_stripes_new_lock); 952 ret = __bch2_stripe_is_open(c, idx); 953 spin_unlock(&c->ec_stripes_new_lock); 954 955 return ret; 956 } 957 958 static bool bch2_try_open_stripe(struct bch_fs *c, 959 struct ec_stripe_new *s, 960 u64 idx) 961 { 962 bool ret; 963 964 spin_lock(&c->ec_stripes_new_lock); 965 ret = !__bch2_stripe_is_open(c, idx); 966 if (ret) { 967 unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new))); 968 969 s->idx = idx; 970 hlist_add_head(&s->hash, &c->ec_stripes_new[hash]); 971 } 972 spin_unlock(&c->ec_stripes_new_lock); 973 974 return ret; 975 } 976 977 static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s) 978 { 979 BUG_ON(!s->idx); 980 981 spin_lock(&c->ec_stripes_new_lock); 982 hlist_del_init(&s->hash); 983 spin_unlock(&c->ec_stripes_new_lock); 984 985 s->idx = 0; 986 } 987 988 /* Heap of all existing stripes, ordered by blocks_nonempty */ 989 990 static u64 stripe_idx_to_delete(struct bch_fs *c) 991 { 992 ec_stripes_heap *h = &c->ec_stripes_heap; 993 994 lockdep_assert_held(&c->ec_stripes_heap_lock); 995 996 if (h->nr && 997 h->data[0].blocks_nonempty == 0 && 998 !bch2_stripe_is_open(c, h->data[0].idx)) 999 return h->data[0].idx; 1000 1001 return 0; 1002 } 1003 1004 static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h, 1005 size_t i) 1006 { 1007 struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap); 1008 1009 genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i; 1010 } 1011 1012 static inline bool ec_stripes_heap_cmp(const void *l, const void *r, void __always_unused *args) 1013 { 1014 struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l; 1015 struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r; 1016 1017 return ((_l->blocks_nonempty > _r->blocks_nonempty) < 1018 (_l->blocks_nonempty < _r->blocks_nonempty)); 1019 } 1020 1021 static inline void ec_stripes_heap_swap(void *l, void *r, void *h) 1022 { 1023 struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l; 1024 struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r; 1025 ec_stripes_heap *_h = (ec_stripes_heap *)h; 1026 size_t i = _l - _h->data; 1027 size_t j = _r - _h->data; 1028 1029 swap(*_l, *_r); 1030 1031 ec_stripes_heap_set_backpointer(_h, i); 1032 ec_stripes_heap_set_backpointer(_h, j); 1033 } 1034 1035 static void heap_verify_backpointer(struct bch_fs *c, size_t idx) 1036 { 1037 ec_stripes_heap *h = &c->ec_stripes_heap; 1038 struct stripe *m = genradix_ptr(&c->stripes, idx); 1039 1040 BUG_ON(m->heap_idx >= h->nr); 1041 BUG_ON(h->data[m->heap_idx].idx != idx); 1042 } 1043 1044 void bch2_stripes_heap_del(struct bch_fs *c, 1045 struct stripe *m, size_t idx) 1046 { 1047 const struct min_heap_callbacks callbacks = { 1048 .less = ec_stripes_heap_cmp, 1049 .swp = ec_stripes_heap_swap, 1050 }; 1051 1052 mutex_lock(&c->ec_stripes_heap_lock); 1053 heap_verify_backpointer(c, idx); 1054 1055 min_heap_del(&c->ec_stripes_heap, m->heap_idx, &callbacks, &c->ec_stripes_heap); 1056 mutex_unlock(&c->ec_stripes_heap_lock); 1057 } 1058 1059 void bch2_stripes_heap_insert(struct bch_fs *c, 1060 struct stripe *m, size_t idx) 1061 { 1062 const struct min_heap_callbacks callbacks = { 1063 .less = ec_stripes_heap_cmp, 1064 .swp = ec_stripes_heap_swap, 1065 }; 1066 1067 mutex_lock(&c->ec_stripes_heap_lock); 1068 BUG_ON(min_heap_full(&c->ec_stripes_heap)); 1069 1070 genradix_ptr(&c->stripes, idx)->heap_idx = c->ec_stripes_heap.nr; 1071 min_heap_push(&c->ec_stripes_heap, &((struct ec_stripe_heap_entry) { 1072 .idx = idx, 1073 .blocks_nonempty = m->blocks_nonempty, 1074 }), 1075 &callbacks, 1076 &c->ec_stripes_heap); 1077 1078 heap_verify_backpointer(c, idx); 1079 mutex_unlock(&c->ec_stripes_heap_lock); 1080 } 1081 1082 void bch2_stripes_heap_update(struct bch_fs *c, 1083 struct stripe *m, size_t idx) 1084 { 1085 const struct min_heap_callbacks callbacks = { 1086 .less = ec_stripes_heap_cmp, 1087 .swp = ec_stripes_heap_swap, 1088 }; 1089 ec_stripes_heap *h = &c->ec_stripes_heap; 1090 bool do_deletes; 1091 size_t i; 1092 1093 mutex_lock(&c->ec_stripes_heap_lock); 1094 heap_verify_backpointer(c, idx); 1095 1096 h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty; 1097 1098 i = m->heap_idx; 1099 min_heap_sift_up(h, i, &callbacks, &c->ec_stripes_heap); 1100 min_heap_sift_down(h, i, &callbacks, &c->ec_stripes_heap); 1101 1102 heap_verify_backpointer(c, idx); 1103 1104 do_deletes = stripe_idx_to_delete(c) != 0; 1105 mutex_unlock(&c->ec_stripes_heap_lock); 1106 1107 if (do_deletes) 1108 bch2_do_stripe_deletes(c); 1109 } 1110 1111 /* stripe deletion */ 1112 1113 static int ec_stripe_delete(struct btree_trans *trans, u64 idx) 1114 { 1115 struct bch_fs *c = trans->c; 1116 struct btree_iter iter; 1117 struct bkey_s_c k; 1118 struct bkey_s_c_stripe s; 1119 int ret; 1120 1121 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx), 1122 BTREE_ITER_intent); 1123 ret = bkey_err(k); 1124 if (ret) 1125 goto err; 1126 1127 if (k.k->type != KEY_TYPE_stripe) { 1128 bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx); 1129 ret = -EINVAL; 1130 goto err; 1131 } 1132 1133 s = bkey_s_c_to_stripe(k); 1134 for (unsigned i = 0; i < s.v->nr_blocks; i++) 1135 if (stripe_blockcount_get(s.v, i)) { 1136 struct printbuf buf = PRINTBUF; 1137 1138 bch2_bkey_val_to_text(&buf, c, k); 1139 bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf); 1140 printbuf_exit(&buf); 1141 ret = -EINVAL; 1142 goto err; 1143 } 1144 1145 ret = bch2_btree_delete_at(trans, &iter, 0); 1146 err: 1147 bch2_trans_iter_exit(trans, &iter); 1148 return ret; 1149 } 1150 1151 static void ec_stripe_delete_work(struct work_struct *work) 1152 { 1153 struct bch_fs *c = 1154 container_of(work, struct bch_fs, ec_stripe_delete_work); 1155 1156 while (1) { 1157 mutex_lock(&c->ec_stripes_heap_lock); 1158 u64 idx = stripe_idx_to_delete(c); 1159 mutex_unlock(&c->ec_stripes_heap_lock); 1160 1161 if (!idx) 1162 break; 1163 1164 int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, 1165 ec_stripe_delete(trans, idx)); 1166 bch_err_fn(c, ret); 1167 if (ret) 1168 break; 1169 } 1170 1171 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete); 1172 } 1173 1174 void bch2_do_stripe_deletes(struct bch_fs *c) 1175 { 1176 if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) && 1177 !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work)) 1178 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete); 1179 } 1180 1181 /* stripe creation: */ 1182 1183 static int ec_stripe_key_update(struct btree_trans *trans, 1184 struct bkey_i_stripe *new, 1185 bool create) 1186 { 1187 struct bch_fs *c = trans->c; 1188 struct btree_iter iter; 1189 struct bkey_s_c k; 1190 int ret; 1191 1192 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, 1193 new->k.p, BTREE_ITER_intent); 1194 ret = bkey_err(k); 1195 if (ret) 1196 goto err; 1197 1198 if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) { 1199 bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s", 1200 create ? "creating" : "updating", 1201 bch2_bkey_types[k.k->type]); 1202 ret = -EINVAL; 1203 goto err; 1204 } 1205 1206 if (k.k->type == KEY_TYPE_stripe) { 1207 const struct bch_stripe *old = bkey_s_c_to_stripe(k).v; 1208 unsigned i; 1209 1210 if (old->nr_blocks != new->v.nr_blocks) { 1211 bch_err(c, "error updating stripe: nr_blocks does not match"); 1212 ret = -EINVAL; 1213 goto err; 1214 } 1215 1216 for (i = 0; i < new->v.nr_blocks; i++) { 1217 unsigned v = stripe_blockcount_get(old, i); 1218 1219 BUG_ON(v && 1220 (old->ptrs[i].dev != new->v.ptrs[i].dev || 1221 old->ptrs[i].gen != new->v.ptrs[i].gen || 1222 old->ptrs[i].offset != new->v.ptrs[i].offset)); 1223 1224 stripe_blockcount_set(&new->v, i, v); 1225 } 1226 } 1227 1228 ret = bch2_trans_update(trans, &iter, &new->k_i, 0); 1229 err: 1230 bch2_trans_iter_exit(trans, &iter); 1231 return ret; 1232 } 1233 1234 static int ec_stripe_update_extent(struct btree_trans *trans, 1235 struct bch_dev *ca, 1236 struct bpos bucket, u8 gen, 1237 struct ec_stripe_buf *s, 1238 struct bpos *bp_pos) 1239 { 1240 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; 1241 struct bch_fs *c = trans->c; 1242 struct bch_backpointer bp; 1243 struct btree_iter iter; 1244 struct bkey_s_c k; 1245 const struct bch_extent_ptr *ptr_c; 1246 struct bch_extent_ptr *ec_ptr = NULL; 1247 struct bch_extent_stripe_ptr stripe_ptr; 1248 struct bkey_i *n; 1249 int ret, dev, block; 1250 1251 ret = bch2_get_next_backpointer(trans, ca, bucket, gen, 1252 bp_pos, &bp, BTREE_ITER_cached); 1253 if (ret) 1254 return ret; 1255 if (bpos_eq(*bp_pos, SPOS_MAX)) 1256 return 0; 1257 1258 if (bp.level) { 1259 struct printbuf buf = PRINTBUF; 1260 struct btree_iter node_iter; 1261 struct btree *b; 1262 1263 b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp); 1264 bch2_trans_iter_exit(trans, &node_iter); 1265 1266 if (!b) 1267 return 0; 1268 1269 prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b); 1270 bch2_backpointer_to_text(&buf, &bp); 1271 1272 bch2_fs_inconsistent(c, "%s", buf.buf); 1273 printbuf_exit(&buf); 1274 return -EIO; 1275 } 1276 1277 k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent); 1278 ret = bkey_err(k); 1279 if (ret) 1280 return ret; 1281 if (!k.k) { 1282 /* 1283 * extent no longer exists - we could flush the btree 1284 * write buffer and retry to verify, but no need: 1285 */ 1286 return 0; 1287 } 1288 1289 if (extent_has_stripe_ptr(k, s->key.k.p.offset)) 1290 goto out; 1291 1292 ptr_c = bkey_matches_stripe(v, k, &block); 1293 /* 1294 * It doesn't generally make sense to erasure code cached ptrs: 1295 * XXX: should we be incrementing a counter? 1296 */ 1297 if (!ptr_c || ptr_c->cached) 1298 goto out; 1299 1300 dev = v->ptrs[block].dev; 1301 1302 n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr)); 1303 ret = PTR_ERR_OR_ZERO(n); 1304 if (ret) 1305 goto out; 1306 1307 bkey_reassemble(n, k); 1308 1309 bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev); 1310 ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev); 1311 BUG_ON(!ec_ptr); 1312 1313 stripe_ptr = (struct bch_extent_stripe_ptr) { 1314 .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr, 1315 .block = block, 1316 .redundancy = v->nr_redundant, 1317 .idx = s->key.k.p.offset, 1318 }; 1319 1320 __extent_entry_insert(n, 1321 (union bch_extent_entry *) ec_ptr, 1322 (union bch_extent_entry *) &stripe_ptr); 1323 1324 ret = bch2_trans_update(trans, &iter, n, 0); 1325 out: 1326 bch2_trans_iter_exit(trans, &iter); 1327 return ret; 1328 } 1329 1330 static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s, 1331 unsigned block) 1332 { 1333 struct bch_fs *c = trans->c; 1334 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; 1335 struct bch_extent_ptr ptr = v->ptrs[block]; 1336 struct bpos bp_pos = POS_MIN; 1337 int ret = 0; 1338 1339 struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev); 1340 if (!ca) 1341 return -EIO; 1342 1343 struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr); 1344 1345 while (1) { 1346 ret = commit_do(trans, NULL, NULL, 1347 BCH_TRANS_COMMIT_no_check_rw| 1348 BCH_TRANS_COMMIT_no_enospc, 1349 ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, &bp_pos)); 1350 if (ret) 1351 break; 1352 if (bkey_eq(bp_pos, POS_MAX)) 1353 break; 1354 1355 bp_pos = bpos_nosnap_successor(bp_pos); 1356 } 1357 1358 bch2_dev_put(ca); 1359 return ret; 1360 } 1361 1362 static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s) 1363 { 1364 struct btree_trans *trans = bch2_trans_get(c); 1365 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; 1366 unsigned i, nr_data = v->nr_blocks - v->nr_redundant; 1367 int ret = 0; 1368 1369 ret = bch2_btree_write_buffer_flush_sync(trans); 1370 if (ret) 1371 goto err; 1372 1373 for (i = 0; i < nr_data; i++) { 1374 ret = ec_stripe_update_bucket(trans, s, i); 1375 if (ret) 1376 break; 1377 } 1378 err: 1379 bch2_trans_put(trans); 1380 1381 return ret; 1382 } 1383 1384 static void zero_out_rest_of_ec_bucket(struct bch_fs *c, 1385 struct ec_stripe_new *s, 1386 unsigned block, 1387 struct open_bucket *ob) 1388 { 1389 struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE); 1390 if (!ca) { 1391 s->err = -BCH_ERR_erofs_no_writes; 1392 return; 1393 } 1394 1395 unsigned offset = ca->mi.bucket_size - ob->sectors_free; 1396 memset(s->new_stripe.data[block] + (offset << 9), 1397 0, 1398 ob->sectors_free << 9); 1399 1400 int ret = blkdev_issue_zeroout(ca->disk_sb.bdev, 1401 ob->bucket * ca->mi.bucket_size + offset, 1402 ob->sectors_free, 1403 GFP_KERNEL, 0); 1404 1405 percpu_ref_put(&ca->io_ref); 1406 1407 if (ret) 1408 s->err = ret; 1409 } 1410 1411 void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s) 1412 { 1413 if (s->idx) 1414 bch2_stripe_close(c, s); 1415 kfree(s); 1416 } 1417 1418 /* 1419 * data buckets of new stripe all written: create the stripe 1420 */ 1421 static void ec_stripe_create(struct ec_stripe_new *s) 1422 { 1423 struct bch_fs *c = s->c; 1424 struct open_bucket *ob; 1425 struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v; 1426 unsigned i, nr_data = v->nr_blocks - v->nr_redundant; 1427 int ret; 1428 1429 BUG_ON(s->h->s == s); 1430 1431 closure_sync(&s->iodone); 1432 1433 if (!s->err) { 1434 for (i = 0; i < nr_data; i++) 1435 if (s->blocks[i]) { 1436 ob = c->open_buckets + s->blocks[i]; 1437 1438 if (ob->sectors_free) 1439 zero_out_rest_of_ec_bucket(c, s, i, ob); 1440 } 1441 } 1442 1443 if (s->err) { 1444 if (!bch2_err_matches(s->err, EROFS)) 1445 bch_err(c, "error creating stripe: error writing data buckets"); 1446 goto err; 1447 } 1448 1449 if (s->have_existing_stripe) { 1450 ec_validate_checksums(c, &s->existing_stripe); 1451 1452 if (ec_do_recov(c, &s->existing_stripe)) { 1453 bch_err(c, "error creating stripe: error reading existing stripe"); 1454 goto err; 1455 } 1456 1457 for (i = 0; i < nr_data; i++) 1458 if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i)) 1459 swap(s->new_stripe.data[i], 1460 s->existing_stripe.data[i]); 1461 1462 ec_stripe_buf_exit(&s->existing_stripe); 1463 } 1464 1465 BUG_ON(!s->allocated); 1466 BUG_ON(!s->idx); 1467 1468 ec_generate_ec(&s->new_stripe); 1469 1470 ec_generate_checksums(&s->new_stripe); 1471 1472 /* write p/q: */ 1473 for (i = nr_data; i < v->nr_blocks; i++) 1474 ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone); 1475 closure_sync(&s->iodone); 1476 1477 if (ec_nr_failed(&s->new_stripe)) { 1478 bch_err(c, "error creating stripe: error writing redundancy buckets"); 1479 goto err; 1480 } 1481 1482 ret = bch2_trans_do(c, &s->res, NULL, 1483 BCH_TRANS_COMMIT_no_check_rw| 1484 BCH_TRANS_COMMIT_no_enospc, 1485 ec_stripe_key_update(trans, 1486 bkey_i_to_stripe(&s->new_stripe.key), 1487 !s->have_existing_stripe)); 1488 bch_err_msg(c, ret, "creating stripe key"); 1489 if (ret) { 1490 goto err; 1491 } 1492 1493 ret = ec_stripe_update_extents(c, &s->new_stripe); 1494 bch_err_msg(c, ret, "error updating extents"); 1495 if (ret) 1496 goto err; 1497 err: 1498 bch2_disk_reservation_put(c, &s->res); 1499 1500 for (i = 0; i < v->nr_blocks; i++) 1501 if (s->blocks[i]) { 1502 ob = c->open_buckets + s->blocks[i]; 1503 1504 if (i < nr_data) { 1505 ob->ec = NULL; 1506 __bch2_open_bucket_put(c, ob); 1507 } else { 1508 bch2_open_bucket_put(c, ob); 1509 } 1510 } 1511 1512 mutex_lock(&c->ec_stripe_new_lock); 1513 list_del(&s->list); 1514 mutex_unlock(&c->ec_stripe_new_lock); 1515 wake_up(&c->ec_stripe_new_wait); 1516 1517 ec_stripe_buf_exit(&s->existing_stripe); 1518 ec_stripe_buf_exit(&s->new_stripe); 1519 closure_debug_destroy(&s->iodone); 1520 1521 ec_stripe_new_put(c, s, STRIPE_REF_stripe); 1522 } 1523 1524 static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c) 1525 { 1526 struct ec_stripe_new *s; 1527 1528 mutex_lock(&c->ec_stripe_new_lock); 1529 list_for_each_entry(s, &c->ec_stripe_new_list, list) 1530 if (!atomic_read(&s->ref[STRIPE_REF_io])) 1531 goto out; 1532 s = NULL; 1533 out: 1534 mutex_unlock(&c->ec_stripe_new_lock); 1535 1536 return s; 1537 } 1538 1539 static void ec_stripe_create_work(struct work_struct *work) 1540 { 1541 struct bch_fs *c = container_of(work, 1542 struct bch_fs, ec_stripe_create_work); 1543 struct ec_stripe_new *s; 1544 1545 while ((s = get_pending_stripe(c))) 1546 ec_stripe_create(s); 1547 1548 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create); 1549 } 1550 1551 void bch2_ec_do_stripe_creates(struct bch_fs *c) 1552 { 1553 bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create); 1554 1555 if (!queue_work(system_long_wq, &c->ec_stripe_create_work)) 1556 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create); 1557 } 1558 1559 static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h) 1560 { 1561 struct ec_stripe_new *s = h->s; 1562 1563 BUG_ON(!s->allocated && !s->err); 1564 1565 h->s = NULL; 1566 s->pending = true; 1567 1568 mutex_lock(&c->ec_stripe_new_lock); 1569 list_add(&s->list, &c->ec_stripe_new_list); 1570 mutex_unlock(&c->ec_stripe_new_lock); 1571 1572 ec_stripe_new_put(c, s, STRIPE_REF_io); 1573 } 1574 1575 void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob) 1576 { 1577 struct ec_stripe_new *s = ob->ec; 1578 1579 s->err = -EIO; 1580 } 1581 1582 void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp) 1583 { 1584 struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs); 1585 if (!ob) 1586 return NULL; 1587 1588 BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]); 1589 1590 struct bch_dev *ca = ob_dev(c, ob); 1591 unsigned offset = ca->mi.bucket_size - ob->sectors_free; 1592 1593 return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9); 1594 } 1595 1596 static int unsigned_cmp(const void *_l, const void *_r) 1597 { 1598 unsigned l = *((const unsigned *) _l); 1599 unsigned r = *((const unsigned *) _r); 1600 1601 return cmp_int(l, r); 1602 } 1603 1604 /* pick most common bucket size: */ 1605 static unsigned pick_blocksize(struct bch_fs *c, 1606 struct bch_devs_mask *devs) 1607 { 1608 unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX]; 1609 struct { 1610 unsigned nr, size; 1611 } cur = { 0, 0 }, best = { 0, 0 }; 1612 1613 for_each_member_device_rcu(c, ca, devs) 1614 sizes[nr++] = ca->mi.bucket_size; 1615 1616 sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL); 1617 1618 for (unsigned i = 0; i < nr; i++) { 1619 if (sizes[i] != cur.size) { 1620 if (cur.nr > best.nr) 1621 best = cur; 1622 1623 cur.nr = 0; 1624 cur.size = sizes[i]; 1625 } 1626 1627 cur.nr++; 1628 } 1629 1630 if (cur.nr > best.nr) 1631 best = cur; 1632 1633 return best.size; 1634 } 1635 1636 static bool may_create_new_stripe(struct bch_fs *c) 1637 { 1638 return false; 1639 } 1640 1641 static void ec_stripe_key_init(struct bch_fs *c, 1642 struct bkey_i *k, 1643 unsigned nr_data, 1644 unsigned nr_parity, 1645 unsigned stripe_size) 1646 { 1647 struct bkey_i_stripe *s = bkey_stripe_init(k); 1648 unsigned u64s; 1649 1650 s->v.sectors = cpu_to_le16(stripe_size); 1651 s->v.algorithm = 0; 1652 s->v.nr_blocks = nr_data + nr_parity; 1653 s->v.nr_redundant = nr_parity; 1654 s->v.csum_granularity_bits = ilog2(c->opts.encoded_extent_max >> 9); 1655 s->v.csum_type = BCH_CSUM_crc32c; 1656 s->v.pad = 0; 1657 1658 while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) { 1659 BUG_ON(1 << s->v.csum_granularity_bits >= 1660 le16_to_cpu(s->v.sectors) || 1661 s->v.csum_granularity_bits == U8_MAX); 1662 s->v.csum_granularity_bits++; 1663 } 1664 1665 set_bkey_val_u64s(&s->k, u64s); 1666 } 1667 1668 static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h) 1669 { 1670 struct ec_stripe_new *s; 1671 1672 lockdep_assert_held(&h->lock); 1673 1674 s = kzalloc(sizeof(*s), GFP_KERNEL); 1675 if (!s) 1676 return -BCH_ERR_ENOMEM_ec_new_stripe_alloc; 1677 1678 mutex_init(&s->lock); 1679 closure_init(&s->iodone, NULL); 1680 atomic_set(&s->ref[STRIPE_REF_stripe], 1); 1681 atomic_set(&s->ref[STRIPE_REF_io], 1); 1682 s->c = c; 1683 s->h = h; 1684 s->nr_data = min_t(unsigned, h->nr_active_devs, 1685 BCH_BKEY_PTRS_MAX) - h->redundancy; 1686 s->nr_parity = h->redundancy; 1687 1688 ec_stripe_key_init(c, &s->new_stripe.key, 1689 s->nr_data, s->nr_parity, h->blocksize); 1690 1691 h->s = s; 1692 return 0; 1693 } 1694 1695 static struct ec_stripe_head * 1696 ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target, 1697 unsigned algo, unsigned redundancy, 1698 enum bch_watermark watermark) 1699 { 1700 struct ec_stripe_head *h; 1701 1702 h = kzalloc(sizeof(*h), GFP_KERNEL); 1703 if (!h) 1704 return NULL; 1705 1706 mutex_init(&h->lock); 1707 BUG_ON(!mutex_trylock(&h->lock)); 1708 1709 h->target = target; 1710 h->algo = algo; 1711 h->redundancy = redundancy; 1712 h->watermark = watermark; 1713 1714 rcu_read_lock(); 1715 h->devs = target_rw_devs(c, BCH_DATA_user, target); 1716 1717 for_each_member_device_rcu(c, ca, &h->devs) 1718 if (!ca->mi.durability) 1719 __clear_bit(ca->dev_idx, h->devs.d); 1720 1721 h->blocksize = pick_blocksize(c, &h->devs); 1722 1723 for_each_member_device_rcu(c, ca, &h->devs) 1724 if (ca->mi.bucket_size == h->blocksize) 1725 h->nr_active_devs++; 1726 1727 rcu_read_unlock(); 1728 1729 /* 1730 * If we only have redundancy + 1 devices, we're better off with just 1731 * replication: 1732 */ 1733 if (h->nr_active_devs < h->redundancy + 2) 1734 bch_err(c, "insufficient devices available to create stripe (have %u, need %u) - mismatched bucket sizes?", 1735 h->nr_active_devs, h->redundancy + 2); 1736 1737 list_add(&h->list, &c->ec_stripe_head_list); 1738 return h; 1739 } 1740 1741 void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h) 1742 { 1743 if (h->s && 1744 h->s->allocated && 1745 bitmap_weight(h->s->blocks_allocated, 1746 h->s->nr_data) == h->s->nr_data) 1747 ec_stripe_set_pending(c, h); 1748 1749 mutex_unlock(&h->lock); 1750 } 1751 1752 static struct ec_stripe_head * 1753 __bch2_ec_stripe_head_get(struct btree_trans *trans, 1754 unsigned target, 1755 unsigned algo, 1756 unsigned redundancy, 1757 enum bch_watermark watermark) 1758 { 1759 struct bch_fs *c = trans->c; 1760 struct ec_stripe_head *h; 1761 int ret; 1762 1763 if (!redundancy) 1764 return NULL; 1765 1766 ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock); 1767 if (ret) 1768 return ERR_PTR(ret); 1769 1770 if (test_bit(BCH_FS_going_ro, &c->flags)) { 1771 h = ERR_PTR(-BCH_ERR_erofs_no_writes); 1772 goto found; 1773 } 1774 1775 list_for_each_entry(h, &c->ec_stripe_head_list, list) 1776 if (h->target == target && 1777 h->algo == algo && 1778 h->redundancy == redundancy && 1779 h->watermark == watermark) { 1780 ret = bch2_trans_mutex_lock(trans, &h->lock); 1781 if (ret) 1782 h = ERR_PTR(ret); 1783 goto found; 1784 } 1785 1786 h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark); 1787 found: 1788 if (!IS_ERR_OR_NULL(h) && 1789 h->nr_active_devs < h->redundancy + 2) { 1790 mutex_unlock(&h->lock); 1791 h = NULL; 1792 } 1793 mutex_unlock(&c->ec_stripe_head_lock); 1794 return h; 1795 } 1796 1797 static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h, 1798 enum bch_watermark watermark, struct closure *cl) 1799 { 1800 struct bch_fs *c = trans->c; 1801 struct bch_devs_mask devs = h->devs; 1802 struct open_bucket *ob; 1803 struct open_buckets buckets; 1804 struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v; 1805 unsigned i, j, nr_have_parity = 0, nr_have_data = 0; 1806 bool have_cache = true; 1807 int ret = 0; 1808 1809 BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity); 1810 BUG_ON(v->nr_redundant != h->s->nr_parity); 1811 1812 /* * We bypass the sector allocator which normally does this: */ 1813 bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX); 1814 1815 for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) { 1816 __clear_bit(v->ptrs[i].dev, devs.d); 1817 if (i < h->s->nr_data) 1818 nr_have_data++; 1819 else 1820 nr_have_parity++; 1821 } 1822 1823 BUG_ON(nr_have_data > h->s->nr_data); 1824 BUG_ON(nr_have_parity > h->s->nr_parity); 1825 1826 buckets.nr = 0; 1827 if (nr_have_parity < h->s->nr_parity) { 1828 ret = bch2_bucket_alloc_set_trans(trans, &buckets, 1829 &h->parity_stripe, 1830 &devs, 1831 h->s->nr_parity, 1832 &nr_have_parity, 1833 &have_cache, 0, 1834 BCH_DATA_parity, 1835 watermark, 1836 cl); 1837 1838 open_bucket_for_each(c, &buckets, ob, i) { 1839 j = find_next_zero_bit(h->s->blocks_gotten, 1840 h->s->nr_data + h->s->nr_parity, 1841 h->s->nr_data); 1842 BUG_ON(j >= h->s->nr_data + h->s->nr_parity); 1843 1844 h->s->blocks[j] = buckets.v[i]; 1845 v->ptrs[j] = bch2_ob_ptr(c, ob); 1846 __set_bit(j, h->s->blocks_gotten); 1847 } 1848 1849 if (ret) 1850 return ret; 1851 } 1852 1853 buckets.nr = 0; 1854 if (nr_have_data < h->s->nr_data) { 1855 ret = bch2_bucket_alloc_set_trans(trans, &buckets, 1856 &h->block_stripe, 1857 &devs, 1858 h->s->nr_data, 1859 &nr_have_data, 1860 &have_cache, 0, 1861 BCH_DATA_user, 1862 watermark, 1863 cl); 1864 1865 open_bucket_for_each(c, &buckets, ob, i) { 1866 j = find_next_zero_bit(h->s->blocks_gotten, 1867 h->s->nr_data, 0); 1868 BUG_ON(j >= h->s->nr_data); 1869 1870 h->s->blocks[j] = buckets.v[i]; 1871 v->ptrs[j] = bch2_ob_ptr(c, ob); 1872 __set_bit(j, h->s->blocks_gotten); 1873 } 1874 1875 if (ret) 1876 return ret; 1877 } 1878 1879 return 0; 1880 } 1881 1882 /* XXX: doesn't obey target: */ 1883 static s64 get_existing_stripe(struct bch_fs *c, 1884 struct ec_stripe_head *head) 1885 { 1886 ec_stripes_heap *h = &c->ec_stripes_heap; 1887 struct stripe *m; 1888 size_t heap_idx; 1889 u64 stripe_idx; 1890 s64 ret = -1; 1891 1892 if (may_create_new_stripe(c)) 1893 return -1; 1894 1895 mutex_lock(&c->ec_stripes_heap_lock); 1896 for (heap_idx = 0; heap_idx < h->nr; heap_idx++) { 1897 /* No blocks worth reusing, stripe will just be deleted: */ 1898 if (!h->data[heap_idx].blocks_nonempty) 1899 continue; 1900 1901 stripe_idx = h->data[heap_idx].idx; 1902 1903 m = genradix_ptr(&c->stripes, stripe_idx); 1904 1905 if (m->algorithm == head->algo && 1906 m->nr_redundant == head->redundancy && 1907 m->sectors == head->blocksize && 1908 m->blocks_nonempty < m->nr_blocks - m->nr_redundant && 1909 bch2_try_open_stripe(c, head->s, stripe_idx)) { 1910 ret = stripe_idx; 1911 break; 1912 } 1913 } 1914 mutex_unlock(&c->ec_stripes_heap_lock); 1915 return ret; 1916 } 1917 1918 static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h) 1919 { 1920 struct bch_fs *c = trans->c; 1921 struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v; 1922 struct bch_stripe *existing_v; 1923 unsigned i; 1924 s64 idx; 1925 int ret; 1926 1927 /* 1928 * If we can't allocate a new stripe, and there's no stripes with empty 1929 * blocks for us to reuse, that means we have to wait on copygc: 1930 */ 1931 idx = get_existing_stripe(c, h); 1932 if (idx < 0) 1933 return -BCH_ERR_stripe_alloc_blocked; 1934 1935 ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe); 1936 bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c, 1937 "reading stripe key: %s", bch2_err_str(ret)); 1938 if (ret) { 1939 bch2_stripe_close(c, h->s); 1940 return ret; 1941 } 1942 1943 existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v; 1944 1945 BUG_ON(existing_v->nr_redundant != h->s->nr_parity); 1946 h->s->nr_data = existing_v->nr_blocks - 1947 existing_v->nr_redundant; 1948 1949 ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize); 1950 if (ret) { 1951 bch2_stripe_close(c, h->s); 1952 return ret; 1953 } 1954 1955 BUG_ON(h->s->existing_stripe.size != h->blocksize); 1956 BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors)); 1957 1958 /* 1959 * Free buckets we initially allocated - they might conflict with 1960 * blocks from the stripe we're reusing: 1961 */ 1962 for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) { 1963 bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]); 1964 h->s->blocks[i] = 0; 1965 } 1966 memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten)); 1967 memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated)); 1968 1969 for (i = 0; i < existing_v->nr_blocks; i++) { 1970 if (stripe_blockcount_get(existing_v, i)) { 1971 __set_bit(i, h->s->blocks_gotten); 1972 __set_bit(i, h->s->blocks_allocated); 1973 } 1974 1975 ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone); 1976 } 1977 1978 bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key); 1979 h->s->have_existing_stripe = true; 1980 1981 return 0; 1982 } 1983 1984 static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h) 1985 { 1986 struct bch_fs *c = trans->c; 1987 struct btree_iter iter; 1988 struct bkey_s_c k; 1989 struct bpos min_pos = POS(0, 1); 1990 struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint)); 1991 int ret; 1992 1993 if (!h->s->res.sectors) { 1994 ret = bch2_disk_reservation_get(c, &h->s->res, 1995 h->blocksize, 1996 h->s->nr_parity, 1997 BCH_DISK_RESERVATION_NOFAIL); 1998 if (ret) 1999 return ret; 2000 } 2001 2002 for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos, 2003 BTREE_ITER_slots|BTREE_ITER_intent, k, ret) { 2004 if (bkey_gt(k.k->p, POS(0, U32_MAX))) { 2005 if (start_pos.offset) { 2006 start_pos = min_pos; 2007 bch2_btree_iter_set_pos(&iter, start_pos); 2008 continue; 2009 } 2010 2011 ret = -BCH_ERR_ENOSPC_stripe_create; 2012 break; 2013 } 2014 2015 if (bkey_deleted(k.k) && 2016 bch2_try_open_stripe(c, h->s, k.k->p.offset)) 2017 break; 2018 } 2019 2020 c->ec_stripe_hint = iter.pos.offset; 2021 2022 if (ret) 2023 goto err; 2024 2025 ret = ec_stripe_mem_alloc(trans, &iter); 2026 if (ret) { 2027 bch2_stripe_close(c, h->s); 2028 goto err; 2029 } 2030 2031 h->s->new_stripe.key.k.p = iter.pos; 2032 out: 2033 bch2_trans_iter_exit(trans, &iter); 2034 return ret; 2035 err: 2036 bch2_disk_reservation_put(c, &h->s->res); 2037 goto out; 2038 } 2039 2040 struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans, 2041 unsigned target, 2042 unsigned algo, 2043 unsigned redundancy, 2044 enum bch_watermark watermark, 2045 struct closure *cl) 2046 { 2047 struct bch_fs *c = trans->c; 2048 struct ec_stripe_head *h; 2049 bool waiting = false; 2050 int ret; 2051 2052 h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark); 2053 if (IS_ERR_OR_NULL(h)) 2054 return h; 2055 2056 if (!h->s) { 2057 ret = ec_new_stripe_alloc(c, h); 2058 if (ret) { 2059 bch_err(c, "failed to allocate new stripe"); 2060 goto err; 2061 } 2062 } 2063 2064 if (h->s->allocated) 2065 goto allocated; 2066 2067 if (h->s->have_existing_stripe) 2068 goto alloc_existing; 2069 2070 /* First, try to allocate a full stripe: */ 2071 ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?: 2072 __bch2_ec_stripe_head_reserve(trans, h); 2073 if (!ret) 2074 goto allocate_buf; 2075 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || 2076 bch2_err_matches(ret, ENOMEM)) 2077 goto err; 2078 2079 /* 2080 * Not enough buckets available for a full stripe: we must reuse an 2081 * existing stripe: 2082 */ 2083 while (1) { 2084 ret = __bch2_ec_stripe_head_reuse(trans, h); 2085 if (!ret) 2086 break; 2087 if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked) 2088 goto err; 2089 2090 if (watermark == BCH_WATERMARK_copygc) { 2091 ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?: 2092 __bch2_ec_stripe_head_reserve(trans, h); 2093 if (ret) 2094 goto err; 2095 goto allocate_buf; 2096 } 2097 2098 /* XXX freelist_wait? */ 2099 closure_wait(&c->freelist_wait, cl); 2100 waiting = true; 2101 } 2102 2103 if (waiting) 2104 closure_wake_up(&c->freelist_wait); 2105 alloc_existing: 2106 /* 2107 * Retry allocating buckets, with the watermark for this 2108 * particular write: 2109 */ 2110 ret = new_stripe_alloc_buckets(trans, h, watermark, cl); 2111 if (ret) 2112 goto err; 2113 2114 allocate_buf: 2115 ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize); 2116 if (ret) 2117 goto err; 2118 2119 h->s->allocated = true; 2120 allocated: 2121 BUG_ON(!h->s->idx); 2122 BUG_ON(!h->s->new_stripe.data[0]); 2123 BUG_ON(trans->restarted); 2124 return h; 2125 err: 2126 bch2_ec_stripe_head_put(c, h); 2127 return ERR_PTR(ret); 2128 } 2129 2130 static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca) 2131 { 2132 struct ec_stripe_head *h; 2133 struct open_bucket *ob; 2134 unsigned i; 2135 2136 mutex_lock(&c->ec_stripe_head_lock); 2137 list_for_each_entry(h, &c->ec_stripe_head_list, list) { 2138 mutex_lock(&h->lock); 2139 if (!h->s) 2140 goto unlock; 2141 2142 if (!ca) 2143 goto found; 2144 2145 for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) { 2146 if (!h->s->blocks[i]) 2147 continue; 2148 2149 ob = c->open_buckets + h->s->blocks[i]; 2150 if (ob->dev == ca->dev_idx) 2151 goto found; 2152 } 2153 goto unlock; 2154 found: 2155 h->s->err = -BCH_ERR_erofs_no_writes; 2156 ec_stripe_set_pending(c, h); 2157 unlock: 2158 mutex_unlock(&h->lock); 2159 } 2160 mutex_unlock(&c->ec_stripe_head_lock); 2161 } 2162 2163 void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca) 2164 { 2165 __bch2_ec_stop(c, ca); 2166 } 2167 2168 void bch2_fs_ec_stop(struct bch_fs *c) 2169 { 2170 __bch2_ec_stop(c, NULL); 2171 } 2172 2173 static bool bch2_fs_ec_flush_done(struct bch_fs *c) 2174 { 2175 bool ret; 2176 2177 mutex_lock(&c->ec_stripe_new_lock); 2178 ret = list_empty(&c->ec_stripe_new_list); 2179 mutex_unlock(&c->ec_stripe_new_lock); 2180 2181 return ret; 2182 } 2183 2184 void bch2_fs_ec_flush(struct bch_fs *c) 2185 { 2186 wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c)); 2187 } 2188 2189 int bch2_stripes_read(struct bch_fs *c) 2190 { 2191 int ret = bch2_trans_run(c, 2192 for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN, 2193 BTREE_ITER_prefetch, k, ({ 2194 if (k.k->type != KEY_TYPE_stripe) 2195 continue; 2196 2197 ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL); 2198 if (ret) 2199 break; 2200 2201 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v; 2202 2203 struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset); 2204 m->sectors = le16_to_cpu(s->sectors); 2205 m->algorithm = s->algorithm; 2206 m->nr_blocks = s->nr_blocks; 2207 m->nr_redundant = s->nr_redundant; 2208 m->blocks_nonempty = 0; 2209 2210 for (unsigned i = 0; i < s->nr_blocks; i++) 2211 m->blocks_nonempty += !!stripe_blockcount_get(s, i); 2212 2213 bch2_stripes_heap_insert(c, m, k.k->p.offset); 2214 0; 2215 }))); 2216 bch_err_fn(c, ret); 2217 return ret; 2218 } 2219 2220 void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c) 2221 { 2222 ec_stripes_heap *h = &c->ec_stripes_heap; 2223 struct stripe *m; 2224 size_t i; 2225 2226 mutex_lock(&c->ec_stripes_heap_lock); 2227 for (i = 0; i < min_t(size_t, h->nr, 50); i++) { 2228 m = genradix_ptr(&c->stripes, h->data[i].idx); 2229 2230 prt_printf(out, "%zu %u/%u+%u", h->data[i].idx, 2231 h->data[i].blocks_nonempty, 2232 m->nr_blocks - m->nr_redundant, 2233 m->nr_redundant); 2234 if (bch2_stripe_is_open(c, h->data[i].idx)) 2235 prt_str(out, " open"); 2236 prt_newline(out); 2237 } 2238 mutex_unlock(&c->ec_stripes_heap_lock); 2239 } 2240 2241 static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c, 2242 struct ec_stripe_new *s) 2243 { 2244 prt_printf(out, "\tidx %llu blocks %u+%u allocated %u ref %u %u %s obs", 2245 s->idx, s->nr_data, s->nr_parity, 2246 bitmap_weight(s->blocks_allocated, s->nr_data), 2247 atomic_read(&s->ref[STRIPE_REF_io]), 2248 atomic_read(&s->ref[STRIPE_REF_stripe]), 2249 bch2_watermarks[s->h->watermark]); 2250 2251 struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v; 2252 unsigned i; 2253 for_each_set_bit(i, s->blocks_gotten, v->nr_blocks) 2254 prt_printf(out, " %u", s->blocks[i]); 2255 prt_newline(out); 2256 } 2257 2258 void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c) 2259 { 2260 struct ec_stripe_head *h; 2261 struct ec_stripe_new *s; 2262 2263 mutex_lock(&c->ec_stripe_head_lock); 2264 list_for_each_entry(h, &c->ec_stripe_head_list, list) { 2265 prt_printf(out, "target %u algo %u redundancy %u %s:\n", 2266 h->target, h->algo, h->redundancy, 2267 bch2_watermarks[h->watermark]); 2268 2269 if (h->s) 2270 bch2_new_stripe_to_text(out, c, h->s); 2271 } 2272 mutex_unlock(&c->ec_stripe_head_lock); 2273 2274 prt_printf(out, "in flight:\n"); 2275 2276 mutex_lock(&c->ec_stripe_new_lock); 2277 list_for_each_entry(s, &c->ec_stripe_new_list, list) 2278 bch2_new_stripe_to_text(out, c, s); 2279 mutex_unlock(&c->ec_stripe_new_lock); 2280 } 2281 2282 void bch2_fs_ec_exit(struct bch_fs *c) 2283 { 2284 struct ec_stripe_head *h; 2285 unsigned i; 2286 2287 while (1) { 2288 mutex_lock(&c->ec_stripe_head_lock); 2289 h = list_first_entry_or_null(&c->ec_stripe_head_list, 2290 struct ec_stripe_head, list); 2291 if (h) 2292 list_del(&h->list); 2293 mutex_unlock(&c->ec_stripe_head_lock); 2294 if (!h) 2295 break; 2296 2297 if (h->s) { 2298 for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) 2299 BUG_ON(h->s->blocks[i]); 2300 2301 kfree(h->s); 2302 } 2303 kfree(h); 2304 } 2305 2306 BUG_ON(!list_empty(&c->ec_stripe_new_list)); 2307 2308 free_heap(&c->ec_stripes_heap); 2309 genradix_free(&c->stripes); 2310 bioset_exit(&c->ec_bioset); 2311 } 2312 2313 void bch2_fs_ec_init_early(struct bch_fs *c) 2314 { 2315 spin_lock_init(&c->ec_stripes_new_lock); 2316 mutex_init(&c->ec_stripes_heap_lock); 2317 2318 INIT_LIST_HEAD(&c->ec_stripe_head_list); 2319 mutex_init(&c->ec_stripe_head_lock); 2320 2321 INIT_LIST_HEAD(&c->ec_stripe_new_list); 2322 mutex_init(&c->ec_stripe_new_lock); 2323 init_waitqueue_head(&c->ec_stripe_new_wait); 2324 2325 INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work); 2326 INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work); 2327 } 2328 2329 int bch2_fs_ec_init(struct bch_fs *c) 2330 { 2331 return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio), 2332 BIOSET_NEED_BVECS); 2333 } 2334