// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "keylist.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
#include "util.h"

#include <linux/sort.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>

static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}

static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}

static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}

#else

#include <raid/raid.h>

#endif

struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};

/* Stripes btree keys: */

int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
			enum bkey_invalid_flags flags,
			struct printbuf *err)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;

	if (bkey_eq(k.k->p, POS_MIN)) {
		prt_printf(err, "stripe at POS_MIN");
		return -BCH_ERR_invalid_bkey;
	}

	if (k.k->p.inode) {
		prt_printf(err, "nonzero inode field");
		return -BCH_ERR_invalid_bkey;
	}

	if (bkey_val_u64s(k.k) < stripe_val_u64s(s)) {
		prt_printf(err, "incorrect value size (%zu < %u)",
			   bkey_val_u64s(k.k), stripe_val_u64s(s));
		return -BCH_ERR_invalid_bkey;
	}

	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}

void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
		   s->algorithm,
		   le16_to_cpu(s->sectors),
		   nr_data,
		   s->nr_redundant,
		   s->csum_type,
		   1U << s->csum_granularity_bits);

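	/*
	 * Each block is printed as dev:bucket:offset; data blocks also get a
	 * "#count" of the sectors currently accounted to that block.
	 */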
	for (i = 0; i < s->nr_blocks; i++) {
		const struct bch_extent_ptr *ptr = s->ptrs + i;
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
		u32 offset;
		u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

		prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
		if (i < nr_data)
			prt_printf(out, "#%u", stripe_blockcount_get(s, i));
		if (ptr_stale(ca, ptr))
			prt_printf(out, " stale");
	}
}

/* returns blocknr in stripe that we matched: */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
						struct bkey_s_c k, unsigned *block)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
						      le16_to_cpu(s->sectors))) {
				*block = i;
				return ptr;
			}

	return NULL;
}

static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;

		break;
	}
	}

	return false;
}

/* Stripe bufs: */

static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	if (buf->key.k.type == KEY_TYPE_stripe) {
		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
		unsigned i;

		for (i = 0; i < s->v.nr_blocks; i++) {
			kvpfree(buf->data[i], buf->size << 9);
			buf->data[i] = NULL;
		}
	}
}

/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
			      unsigned offset, unsigned size)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1U << v->csum_granularity_bits;
	unsigned end = offset + size;
	unsigned i;

	BUG_ON(end > le16_to_cpu(v->sectors));

	offset	= round_down(offset, csum_granularity);
	end	= min_t(unsigned, le16_to_cpu(v->sectors),
			round_up(end, csum_granularity));

	buf->offset	= offset;
	buf->size	= end - offset;

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < v->nr_blocks; i++) {
		buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
		if (!buf->data[i])
			goto err;
	}

	return 0;
err:
	ec_stripe_buf_exit(buf);
	return -BCH_ERR_ENOMEM_stripe_buf;
}

/* Checksumming: */

static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
					 unsigned block, unsigned offset)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned end = buf->offset + buf->size;
	unsigned len = min(csum_granularity, end - offset);

	BUG_ON(offset >= end);
	BUG_ON(offset <  buf->offset);
	BUG_ON(offset & (csum_granularity - 1));
	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
	       (len & (csum_granularity - 1)));

	return bch2_checksum(NULL, v->csum_type,
			     null_nonce(),
			     buf->data[block] + ((offset - buf->offset) << 9),
			     len << 9);
}

static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, j, csums_per_device = stripe_csums_per_device(v);

	if (!v->csum_type)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

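	/*
	 * One checksum per csum_granularity-sized chunk, for every block in
	 * the stripe (data and parity alike):
	 */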
	for (i = 0; i < v->nr_blocks; i++)
		for (j = 0; j < csums_per_device; j++)
			stripe_csum_set(v, i, j,
				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}

static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned i;

	if (!v->csum_type)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum want = stripe_csum_get(v, i, j);
			struct bch_csum got = ec_block_checksum(buf, i, offset);

			if (bch2_crc_cmp(want, got)) {
				struct printbuf buf2 = PRINTBUF;

				bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key));

				bch_err_ratelimited(c,
					"stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
					(void *) _RET_IP_, i, j, v->csum_type,
					want.lo, got.lo, buf2.buf);
				printbuf_exit(&buf2);
				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}

/* Erasure coding: */

static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;

	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}

static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
	return 0;
}

/* IO: */

static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
			       bio_data_dir(bio) ? "write" : "read",
			       bch2_blk_status_to_str(bio->bi_status)))
		clear_bit(ec_bio->idx, ec_bio->buf->valid);

	if (ptr_stale(ca, ptr)) {
		bch_err_ratelimited(ca->fs,
				    "error %s stripe: stale pointer after io",
				    bio_data_dir(bio) == READ ? "reading from" : "writing to");
		clear_bit(ec_bio->idx, ec_bio->buf->valid);
	}

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref);
	closure_put(cl);
}

static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			blk_opf_t opf, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;
	int rw = op_is_write(opf);

	if (ptr_stale(ca, ptr)) {
		bch_err_ratelimited(c,
				    "error %s stripe: stale pointer",
				    rw == READ ? "reading from" : "writing to");
		clear_bit(idx, buf->valid);
		return;
	}

	if (!bch2_dev_get_ioref(ca, rw)) {
		clear_bit(idx, buf->valid);
		return;
	}

	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
						       nr_iovecs,
						       opf,
						       GFP_KERNEL,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca			= ca;
		ec_bio->buf			= buf;
		ec_bio->idx			= idx;

		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io		= ec_block_endio;
		ec_bio->bio.bi_private		= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref);
}

static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
				struct ec_stripe_buf *stripe)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
			       POS(0, idx), BTREE_ITER_SLOTS);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_stripe) {
		ret = -ENOENT;
		goto err;
	}
	bkey_reassemble(&stripe->key, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
{
	return bch2_trans_run(c, get_stripe_key_trans(trans, idx, stripe));
}

/* recovery read path: */
int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
{
	struct ec_stripe_buf *buf;
	struct closure cl;
	struct bch_stripe *v;
	unsigned i, offset;
	int ret = 0;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	buf = kzalloc(sizeof(*buf), GFP_NOFS);
	if (!buf)
		return -BCH_ERR_ENOMEM_ec_read_extent;

	ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
	if (ret) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: error %i looking up stripe", ret);
		kfree(buf);
		return -EIO;
	}

	v = &bkey_i_to_stripe(&buf->key)->v;

	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: pointer doesn't match stripe");
		ret = -EIO;
		goto err;
	}

	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: read is bigger than stripe");
		ret = -EIO;
		goto err;
	}

	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
	if (ret)
		goto err;

	for (i = 0; i < v->nr_blocks; i++)
		ec_block_io(c, buf, REQ_OP_READ, i, &cl);

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		ret = -EIO;
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
err:
	ec_stripe_buf_exit(buf);
	kfree(buf);
	return ret;
}

/* stripe bucket accounting: */

static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

		mutex_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
			n.used = h->used;
			swap(*h, n);
		}
		mutex_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	return 0;
}
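
/*
 * Runs __ec_stripe_mem_alloc() via allocate_dropping_locks_errcode(), which
 * supplies the gfp flags (_gfp) and may drop btree locks to retry the
 * allocation if the initial nowait attempt fails:
 */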
static int ec_stripe_mem_alloc(struct btree_trans *trans,
			       struct btree_iter *iter)
{
	return allocate_dropping_locks_errcode(trans,
			__ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
}

/*
 * Hash table of open stripes:
 * Stripes that are being created or modified are kept in a hash table, so that
 * stripe deletion can skip them.
 */

static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
	struct ec_stripe_new *s;

	hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
		if (s->idx == idx)
			return true;
	return false;
}

static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	bool ret = false;

	spin_lock(&c->ec_stripes_new_lock);
	ret = __bch2_stripe_is_open(c, idx);
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static bool bch2_try_open_stripe(struct bch_fs *c,
				 struct ec_stripe_new *s,
				 u64 idx)
{
	bool ret;

	spin_lock(&c->ec_stripes_new_lock);
	ret = !__bch2_stripe_is_open(c, idx);
	if (ret) {
		unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));

		s->idx = idx;
		hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
	}
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(!s->idx);

	spin_lock(&c->ec_stripes_new_lock);
	hlist_del_init(&s->hash);
	spin_unlock(&c->ec_stripes_new_lock);

	s->idx = 0;
}

/* Heap of all existing stripes, ordered by blocks_nonempty */

static u64 stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	lockdep_assert_held(&c->ec_stripes_heap_lock);

	if (h->used &&
	    h->data[0].blocks_nonempty == 0 &&
	    !bch2_stripe_is_open(c, h->data[0].idx))
		return h->data[0].idx;

	return 0;
}

static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
				      struct ec_stripe_heap_entry l,
				      struct ec_stripe_heap_entry r)
{
	return ((l.blocks_nonempty > r.blocks_nonempty) -
		(l.blocks_nonempty < r.blocks_nonempty));
}

static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}

static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes, idx);

	BUG_ON(m->heap_idx >= h->used);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}

void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	heap_del(&c->ec_stripes_heap, m->heap_idx,
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);
	mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	mutex_lock(&c->ec_stripes_heap_lock);
	BUG_ON(heap_full(&c->ec_stripes_heap));

	heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
			.idx = idx,
			.blocks_nonempty = m->blocks_nonempty,
		}),
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);
	mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	bool do_deletes;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
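
	/*
	 * blocks_nonempty may have gone up or down, so the entry may need to
	 * move in either direction within the heap:
	 */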
	i = m->heap_idx;
	heap_sift_up(h,	  i, ec_stripes_heap_cmp,
		     ec_stripes_heap_set_backpointer);
	heap_sift_down(h, i, ec_stripes_heap_cmp,
		       ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);

	do_deletes = stripe_idx_to_delete(c) != 0;
	mutex_unlock(&c->ec_stripes_heap_lock);

	if (do_deletes)
		bch2_do_stripe_deletes(c);
}

/* stripe deletion */

static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_stripe s;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
			       BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
		ret = -EINVAL;
		goto err;
	}

	s = bkey_s_c_to_stripe(k);
	for (unsigned i = 0; i < s.v->nr_blocks; i++)
		if (stripe_blockcount_get(s.v, i)) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, k);
			bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
			printbuf_exit(&buf);
			ret = -EINVAL;
			goto err;
		}

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret;
	u64 idx;

	while (1) {
		mutex_lock(&c->ec_stripes_heap_lock);
		idx = stripe_idx_to_delete(c);
		mutex_unlock(&c->ec_stripes_heap_lock);

		if (!idx)
			break;

		ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
				ec_stripe_delete(trans, idx));
		if (ret) {
			bch_err_fn(c, ret);
			break;
		}
	}

	bch2_trans_put(trans);

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}

void bch2_do_stripe_deletes(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
	    !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}

/* stripe creation: */

static int ec_stripe_key_update(struct btree_trans *trans,
				struct bkey_i_stripe *new,
				bool create)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
			       new->k.p, BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
		bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
				     create ? "creating" : "updating",
				     bch2_bkey_types[k.k->type]);
		ret = -EINVAL;
		goto err;
	}

	if (k.k->type == KEY_TYPE_stripe) {
		const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
		unsigned i;

		if (old->nr_blocks != new->v.nr_blocks) {
			bch_err(c, "error updating stripe: nr_blocks does not match");
			ret = -EINVAL;
			goto err;
		}

		for (i = 0; i < new->v.nr_blocks; i++) {
			unsigned v = stripe_blockcount_get(old, i);

			BUG_ON(v &&
			       (old->ptrs[i].dev != new->v.ptrs[i].dev ||
				old->ptrs[i].gen != new->v.ptrs[i].gen ||
				old->ptrs[i].offset != new->v.ptrs[i].offset));

			stripe_blockcount_set(&new->v, i, v);
		}
	}

	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int ec_stripe_update_extent(struct btree_trans *trans,
				   struct bpos bucket, u8 gen,
				   struct ec_stripe_buf *s,
				   struct bpos *bp_pos)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_fs *c = trans->c;
	struct bch_backpointer bp;
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_extent_ptr *ptr_c;
	struct bch_extent_ptr *ptr, *ec_ptr = NULL;
	struct bch_extent_stripe_ptr stripe_ptr;
	struct bkey_i *n;
	int ret, dev, block;

	ret = bch2_get_next_backpointer(trans, bucket, gen,
				bp_pos, &bp, BTREE_ITER_CACHED);
	if (ret)
		return ret;
	if (bpos_eq(*bp_pos, SPOS_MAX))
		return 0;

	if (bp.level) {
		struct printbuf buf = PRINTBUF;
		struct btree_iter node_iter;
		struct btree *b;

		b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
		bch2_trans_iter_exit(trans, &node_iter);

		if (!b)
			return 0;

		prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
		bch2_backpointer_to_text(&buf, &bp);

		bch2_fs_inconsistent(c, "%s", buf.buf);
		printbuf_exit(&buf);
		return -EIO;
	}

	k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (ret)
		return ret;
	if (!k.k) {
		/*
		 * extent no longer exists - we could flush the btree
		 * write buffer and retry to verify, but no need:
		 */
		return 0;
	}

	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
		goto out;

	ptr_c = bkey_matches_stripe(v, k, &block);
	/*
	 * It doesn't generally make sense to erasure code cached ptrs:
	 * XXX: should we be incrementing a counter?
	 */
	if (!ptr_c || ptr_c->cached)
		goto out;

	dev = v->ptrs[block].dev;

	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto out;

	bkey_reassemble(n, k);

	bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
	ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
	BUG_ON(!ec_ptr);

	stripe_ptr = (struct bch_extent_stripe_ptr) {
		.type		= 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block		= block,
		.redundancy	= v->nr_redundant,
		.idx		= s->key.k.p.offset,
	};

	__extent_entry_insert(n,
			(union bch_extent_entry *) ec_ptr,
			(union bch_extent_entry *) &stripe_ptr);

	ret = bch2_trans_update(trans, &iter, n, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
				   unsigned block)
{
	struct bch_fs *c = trans->c;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_extent_ptr bucket = v->ptrs[block];
	struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
	struct bpos bp_pos = POS_MIN;
	int ret = 0;

	while (1) {
		ret = commit_do(trans, NULL, NULL,
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL,
			ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
						s, &bp_pos));
		if (ret)
			break;
		if (bkey_eq(bp_pos, POS_MAX))
			break;

		bp_pos = bpos_nosnap_successor(bp_pos);
	}

	return ret;
}

static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret = 0;

	ret = bch2_btree_write_buffer_flush(trans);
	if (ret)
		goto err;

	for (i = 0; i < nr_data; i++) {
		ret = ec_stripe_update_bucket(trans, s, i);
		if (ret)
			break;
	}
err:
	bch2_trans_put(trans);

	return ret;
}

static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
				       struct ec_stripe_new *s,
				       unsigned block,
				       struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
	unsigned offset = ca->mi.bucket_size - ob->sectors_free;
	int ret;

	if (!bch2_dev_get_ioref(ca, WRITE)) {
		s->err = -BCH_ERR_erofs_no_writes;
		return;
	}

	memset(s->new_stripe.data[block] + (offset << 9),
	       0,
	       ob->sectors_free << 9);

	ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
				   ob->bucket * ca->mi.bucket_size + offset,
				   ob->sectors_free,
				   GFP_KERNEL, 0);

	percpu_ref_put(&ca->io_ref);

	if (ret)
		s->err = ret;
}

void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
{
	if (s->idx)
		bch2_stripe_close(c, s);
	kfree(s);
}

/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret;

	BUG_ON(s->h->s == s);

	closure_sync(&s->iodone);

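	/*
	 * Parity is generated over whole buckets, so any unwritten space at
	 * the end of a data bucket has to be zeroed, both in memory and on
	 * disk:
	 */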
	if (!s->err) {
		for (i = 0; i < nr_data; i++)
			if (s->blocks[i]) {
				ob = c->open_buckets + s->blocks[i];

				if (ob->sectors_free)
					zero_out_rest_of_ec_bucket(c, s, i, ob);
			}
	}

	if (s->err) {
		if (!bch2_err_matches(s->err, EROFS))
			bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	if (s->have_existing_stripe) {
		ec_validate_checksums(c, &s->existing_stripe);

		if (ec_do_recov(c, &s->existing_stripe)) {
			bch_err(c, "error creating stripe: error reading existing stripe");
			goto err;
		}

		for (i = 0; i < nr_data; i++)
			if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
				swap(s->new_stripe.data[i],
				     s->existing_stripe.data[i]);

		ec_stripe_buf_exit(&s->existing_stripe);
	}

	BUG_ON(!s->allocated);
	BUG_ON(!s->idx);

	ec_generate_ec(&s->new_stripe);

	ec_generate_checksums(&s->new_stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
	closure_sync(&s->iodone);

	if (ec_nr_failed(&s->new_stripe)) {
		bch_err(c, "error creating stripe: error writing redundancy buckets");
		goto err;
	}

	ret = bch2_trans_do(c, &s->res, NULL,
			    BTREE_INSERT_NOCHECK_RW|
			    BTREE_INSERT_NOFAIL,
			    ec_stripe_key_update(trans,
					bkey_i_to_stripe(&s->new_stripe.key),
					!s->have_existing_stripe));
	if (ret) {
		bch_err(c, "error creating stripe: error creating stripe key");
		goto err;
	}

	ret = ec_stripe_update_extents(c, &s->new_stripe);
	if (ret) {
		bch_err_msg(c, ret, "creating stripe: error updating pointers");
		goto err;
	}
err:
	bch2_disk_reservation_put(c, &s->res);

	for (i = 0; i < v->nr_blocks; i++)
		if (s->blocks[i]) {
			ob = c->open_buckets + s->blocks[i];

			if (i < nr_data) {
				ob->ec = NULL;
				__bch2_open_bucket_put(c, ob);
			} else {
				bch2_open_bucket_put(c, ob);
			}
		}

	mutex_lock(&c->ec_stripe_new_lock);
	list_del(&s->list);
	mutex_unlock(&c->ec_stripe_new_lock);
	wake_up(&c->ec_stripe_new_wait);

	ec_stripe_buf_exit(&s->existing_stripe);
	ec_stripe_buf_exit(&s->new_stripe);
	closure_debug_destroy(&s->iodone);

	ec_stripe_new_put(c, s, STRIPE_REF_stripe);
}

static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
{
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->ref[STRIPE_REF_io]))
			goto out;
	s = NULL;
out:
	mutex_unlock(&c->ec_stripe_new_lock);

	return s;
}

static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
		struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s;

	while ((s = get_pending_stripe(c)))
		ec_stripe_create(s);

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}

void bch2_ec_do_stripe_creates(struct bch_fs *c)
{
	bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);

	if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}

static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	BUG_ON(!s->allocated && !s->err);

	h->s		= NULL;
	s->pending	= true;

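	/*
	 * Hand the stripe off to the creation path: entries on
	 * ec_stripe_new_list are picked up by ec_stripe_create_work() once
	 * their STRIPE_REF_io count drops to zero (see get_pending_stripe()):
	 */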
	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s, STRIPE_REF_io);
}

void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}

void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct bch_dev *ca;
	unsigned offset;

	if (!ob)
		return NULL;

	BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);

	ca	= bch_dev_bkey_exists(c, ob->dev);
	offset	= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}

static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}

/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(ca, c, i, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}

static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}

static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i *k,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size)
{
	struct bkey_i_stripe *s = bkey_stripe_init(k);
	unsigned u64s;

	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->opts.encoded_extent_max >> 9);
	s->v.csum_type			= BCH_CSUM_crc32c;
	s->v.pad			= 0;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}

static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;

	mutex_init(&s->lock);
	closure_init(&s->iodone, NULL);
	atomic_set(&s->ref[STRIPE_REF_stripe], 1);
	atomic_set(&s->ref[STRIPE_REF_io], 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				BCH_BKEY_PTRS_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	ec_stripe_key_init(c, &s->new_stripe.key,
			   s->nr_data, s->nr_parity, h->blocksize);

	h->s = s;
	return 0;
}
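
/*
 * Stripe heads track in-progress stripe creation; one head exists per
 * (target, algorithm, redundancy, watermark) combination, see
 * __bch2_ec_stripe_head_get():
 */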
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy,
			 enum bch_watermark watermark)
{
	struct ec_stripe_head *h;
	struct bch_dev *ca;
	unsigned i;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	BUG_ON(!mutex_trylock(&h->lock));

	h->target	= target;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->watermark	= watermark;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, target);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(i, h->devs.d);

	h->blocksize = pick_blocksize(c, &h->devs);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();
	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}

void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->nr_data) == h->s->nr_data)
		ec_stripe_set_pending(c, h);

	mutex_unlock(&h->lock);
}

static struct ec_stripe_head *
__bch2_ec_stripe_head_get(struct btree_trans *trans,
			  unsigned target,
			  unsigned algo,
			  unsigned redundancy,
			  enum bch_watermark watermark)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	int ret;

	if (!redundancy)
		return NULL;

	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
	if (ret)
		return ERR_PTR(ret);

	if (test_bit(BCH_FS_GOING_RO, &c->flags)) {
		h = ERR_PTR(-BCH_ERR_erofs_no_writes);
		goto found;
	}

	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->target		== target &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
		    h->watermark	== watermark) {
			ret = bch2_trans_mutex_lock(trans, &h->lock);
			if (ret)
				h = ERR_PTR(ret);
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
found:
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}

static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
				    enum bch_watermark watermark, struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs = h->devs;
	struct open_bucket *ob;
	struct open_buckets buckets;
	struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
	bool have_cache = true;
	int ret = 0;

	BUG_ON(v->nr_blocks	!= h->s->nr_data + h->s->nr_parity);
	BUG_ON(v->nr_redundant	!= h->s->nr_parity);

	for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
		__clear_bit(v->ptrs[i].dev, devs.d);
		if (i < h->s->nr_data)
			nr_have_data++;
		else
			nr_have_parity++;
	}

	BUG_ON(nr_have_data	> h->s->nr_data);
	BUG_ON(nr_have_parity	> h->s->nr_parity);

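	/* Allocate any missing parity buckets first, then data buckets: */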
	buckets.nr = 0;
	if (nr_have_parity < h->s->nr_parity) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->parity_stripe,
					    &devs,
					    h->s->nr_parity,
					    &nr_have_parity,
					    &have_cache, 0,
					    BCH_DATA_parity,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data + h->s->nr_parity,
					       h->s->nr_data);
			BUG_ON(j >= h->s->nr_data + h->s->nr_parity);

			h->s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	buckets.nr = 0;
	if (nr_have_data < h->s->nr_data) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->block_stripe,
					    &devs,
					    h->s->nr_data,
					    &nr_have_data,
					    &have_cache, 0,
					    BCH_DATA_user,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data, 0);
			BUG_ON(j >= h->s->nr_data);

			h->s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	return 0;
}

/* XXX: doesn't obey target: */
static s64 get_existing_stripe(struct bch_fs *c,
			       struct ec_stripe_head *head)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t heap_idx;
	u64 stripe_idx;
	s64 ret = -1;

	if (may_create_new_stripe(c))
		return -1;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
		/* No blocks worth reusing, stripe will just be deleted: */
		if (!h->data[heap_idx].blocks_nonempty)
			continue;

		stripe_idx = h->data[heap_idx].idx;

		m = genradix_ptr(&c->stripes, stripe_idx);

		if (m->algorithm	== head->algo &&
		    m->nr_redundant	== head->redundancy &&
		    m->sectors		== head->blocksize &&
		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant &&
		    bch2_try_open_stripe(c, head->s, stripe_idx)) {
			ret = stripe_idx;
			break;
		}
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
	return ret;
}

static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
{
	struct bch_fs *c = trans->c;
	struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
	struct bch_stripe *existing_v;
	unsigned i;
	s64 idx;
	int ret;

	/*
	 * If we can't allocate a new stripe, and there's no stripes with empty
	 * blocks for us to reuse, that means we have to wait on copygc:
	 */
	idx = get_existing_stripe(c, h);
	if (idx < 0)
		return -BCH_ERR_stripe_alloc_blocked;

	ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
	if (ret) {
		bch2_stripe_close(c, h->s);
		if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
			bch2_fs_fatal_error(c, "error reading stripe key: %s", bch2_err_str(ret));
		return ret;
	}

	existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;

	BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
	h->s->nr_data = existing_v->nr_blocks -
		existing_v->nr_redundant;

	ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
	if (ret) {
		bch2_stripe_close(c, h->s);
		return ret;
	}

	BUG_ON(h->s->existing_stripe.size != h->blocksize);
	BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));

	/*
	 * Free buckets we initially allocated - they might conflict with
	 * blocks from the stripe we're reusing:
	 */
	for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
		bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
		h->s->blocks[i] = 0;
	}
	memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
	memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));

	for (i = 0; i < existing_v->nr_blocks; i++) {
		if (stripe_blockcount_get(existing_v, i)) {
			__set_bit(i, h->s->blocks_gotten);
			__set_bit(i, h->s->blocks_allocated);
		}

		ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
	}

	bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
	h->s->have_existing_stripe = true;

	return 0;
}

static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos min_pos = POS(0, 1);
	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
	int ret;

	if (!h->s->res.sectors) {
		ret = bch2_disk_reservation_get(c, &h->s->res,
					h->blocksize,
					h->s->nr_parity,
					BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			return ret;
	}

	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
			if (start_pos.offset) {
				start_pos = min_pos;
				bch2_btree_iter_set_pos(&iter, start_pos);
				continue;
			}

			ret = -BCH_ERR_ENOSPC_stripe_create;
			break;
		}

		if (bkey_deleted(k.k) &&
		    bch2_try_open_stripe(c, h->s, k.k->p.offset))
			break;
	}

	c->ec_stripe_hint = iter.pos.offset;

	if (ret)
		goto err;

	ret = ec_stripe_mem_alloc(trans, &iter);
	if (ret) {
		bch2_stripe_close(c, h->s);
		goto err;
	}

	h->s->new_stripe.key.k.p = iter.pos;
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
err:
	bch2_disk_reservation_put(c, &h->s->res);
	goto out;
}

struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
					       enum bch_watermark watermark,
					       struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	bool waiting = false;
	int ret;

	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
	if (!h)
		bch_err(c, "no stripe head");
	if (IS_ERR_OR_NULL(h))
		return h;

	if (!h->s) {
		ret = ec_new_stripe_alloc(c, h);
		if (ret) {
			bch_err(c, "failed to allocate new stripe");
			goto err;
		}
	}

	if (h->s->allocated)
		goto allocated;

	if (h->s->have_existing_stripe)
		goto alloc_existing;

	/* First, try to allocate a full stripe: */
	ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
		__bch2_ec_stripe_head_reserve(trans, h);
	if (!ret)
		goto allocate_buf;
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
	    bch2_err_matches(ret, ENOMEM))
		goto err;

	/*
	 * Not enough buckets available for a full stripe: we must reuse an
	 * existing stripe:
	 */
	while (1) {
		ret = __bch2_ec_stripe_head_reuse(trans, h);
		if (!ret)
			break;
		if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
			goto err;

		if (watermark == BCH_WATERMARK_copygc) {
			ret =   new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
				__bch2_ec_stripe_head_reserve(trans, h);
			if (ret)
				goto err;
			goto allocate_buf;
		}

		/* XXX freelist_wait? */
		closure_wait(&c->freelist_wait, cl);
		waiting = true;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc_existing:
	/*
	 * Retry allocating buckets, with the watermark for this
	 * particular write:
	 */
	ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
	if (ret)
		goto err;

allocate_buf:
	ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
	if (ret)
		goto err;

	h->s->allocated = true;
allocated:
	BUG_ON(!h->s->idx);
	BUG_ON(!h->s->new_stripe.data[0]);
	BUG_ON(trans->restarted);
	return h;
err:
	bch2_ec_stripe_head_put(c, h);
	return ERR_PTR(ret);
}

static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		if (!ca)
			goto found;

		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
			if (!h->s->blocks[i])
				continue;

			ob = c->open_buckets + h->s->blocks[i];
			if (ob->dev == ca->dev_idx)
				goto found;
		}
		goto unlock;
found:
		h->s->err = -BCH_ERR_erofs_no_writes;
		ec_stripe_set_pending(c, h);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}

void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	__bch2_ec_stop(c, ca);
}

void bch2_fs_ec_stop(struct bch_fs *c)
{
	__bch2_ec_stop(c, NULL);
}

static bool bch2_fs_ec_flush_done(struct bch_fs *c)
{
	bool ret;

	mutex_lock(&c->ec_stripe_new_lock);
	ret = list_empty(&c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	return ret;
}

void bch2_fs_ec_flush(struct bch_fs *c)
{
	wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
}

int bch2_stripes_read(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_stripe *s;
	struct stripe *m;
	unsigned i;
	int ret;

	for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		if (k.k->type != KEY_TYPE_stripe)
			continue;

		ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
		if (ret)
			break;

		s = bkey_s_c_to_stripe(k).v;

		m = genradix_ptr(&c->stripes, k.k->p.offset);
		m->sectors	= le16_to_cpu(s->sectors);
		m->algorithm	= s->algorithm;
		m->nr_blocks	= s->nr_blocks;
		m->nr_redundant	= s->nr_redundant;
		m->blocks_nonempty = 0;

		for (i = 0; i < s->nr_blocks; i++)
			m->blocks_nonempty += !!stripe_blockcount_get(s, i);

		bch2_stripes_heap_insert(c, m, k.k->p.offset);
	}
	bch2_trans_iter_exit(trans, &iter);

	bch2_trans_put(trans);

	if (ret)
		bch_err_fn(c, ret);

	return ret;
}

void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (i = 0; i < min_t(size_t, h->used, 50); i++) {
		m = genradix_ptr(&c->stripes, h->data[i].idx);

		prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
			   h->data[i].blocks_nonempty,
			   m->nr_blocks - m->nr_redundant,
			   m->nr_redundant);
		if (bch2_stripe_is_open(c, h->data[i].idx))
			prt_str(out, " open");
		prt_newline(out);
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		prt_printf(out, "target %u algo %u redundancy %u %s:\n",
			   h->target, h->algo, h->redundancy,
			   bch2_watermarks[h->watermark]);

		if (h->s)
			prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
				   h->s->idx, h->s->nr_data, h->s->nr_parity,
				   bitmap_weight(h->s->blocks_allocated,
						 h->s->nr_data));
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	prt_printf(out, "in flight:\n");

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list) {
		prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
			   s->idx, s->nr_data, s->nr_parity,
			   atomic_read(&s->ref[STRIPE_REF_io]),
			   atomic_read(&s->ref[STRIPE_REF_stripe]),
			   bch2_watermarks[s->h->watermark]);
	}
	mutex_unlock(&c->ec_stripe_new_lock);
}

void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;
	unsigned i;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_first_entry_or_null(&c->ec_stripe_head_list,
					     struct ec_stripe_head, list);
		if (h)
			list_del(&h->list);
		mutex_unlock(&c->ec_stripe_head_lock);
		if (!h)
			break;

		if (h->s) {
			for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
				BUG_ON(h->s->blocks[i]);

			kfree(h->s);
		}
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes);
	bioset_exit(&c->ec_bioset);
}

void bch2_fs_ec_init_early(struct bch_fs *c)
{
	spin_lock_init(&c->ec_stripes_new_lock);
	mutex_init(&c->ec_stripes_heap_lock);

	INIT_LIST_HEAD(&c->ec_stripe_head_list);
	mutex_init(&c->ec_stripe_head_lock);

	INIT_LIST_HEAD(&c->ec_stripe_new_list);
	mutex_init(&c->ec_stripe_new_lock);
	init_waitqueue_head(&c->ec_stripe_new_wait);

	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
}

int bch2_fs_ec_init(struct bch_fs *c)
{
	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}