// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "keylist.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
#include "util.h"

#include <linux/sort.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>

static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}

static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}

static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}

#else

#include <raid/raid.h>

#endif
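
/*
 * Buffer layout convention for the raid helpers above (an informal sketch,
 * inferred from how ec_generate_ec()/ec_do_recov() below call them): v[]
 * holds the nd data buffers followed by the parity buffers, so v[nd] is p
 * and v[nd + 1] is q.  E.g. with nd = 4, np = 2, losing blocks 1 and 4 gives
 * ir[] = { 1, 4 }; block 4 is p, so raid_rec() takes the "data + p failure"
 * path and raid6_datap_recov() rebuilds both from q.
 */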

struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};

/* Stripes btree keys: */

int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k,
			enum bch_validate_flags flags,
			struct printbuf *err)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	int ret = 0;

	bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
			 bpos_gt(k.k->p, POS(0, U32_MAX)), c, err,
			 stripe_pos_bad,
			 "stripe at bad pos");

	bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err,
			 stripe_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(k.k), stripe_val_u64s(s));

	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
	return ret;
}

void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
	struct bch_stripe s = {};

	memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));

	unsigned nr_data = s.nr_blocks - s.nr_redundant;

	prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
		   s.algorithm,
		   le16_to_cpu(s.sectors),
		   nr_data,
		   s.nr_redundant);
	bch2_prt_csum_type(out, s.csum_type);
	prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);

	for (unsigned i = 0; i < s.nr_blocks; i++) {
		const struct bch_extent_ptr *ptr = sp->ptrs + i;

		if ((void *) ptr >= bkey_val_end(k))
			break;

		bch2_extent_ptr_to_text(out, c, ptr);

		if (s.csum_type < BCH_CSUM_NR &&
		    i < nr_data &&
		    stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
			prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
	}
}

/* Triggers: */

static int __mark_stripe_bucket(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c_stripe s,
				unsigned ptr_idx, bool deleting,
				struct bpos bucket,
				struct bch_alloc_v4 *a,
				enum btree_iter_update_trigger_flags flags)
{
	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
	unsigned nr_data = s.v->nr_blocks - s.v->nr_redundant;
	bool parity = ptr_idx >= nr_data;
	enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
	s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_fs *c = trans->c;

	if (deleting)
		sectors = -sectors;

	if (!deleting) {
		if (bch2_trans_inconsistent_on(a->stripe ||
					       a->stripe_redundancy, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				a->dirty_sectors,
				a->stripe, s.k->p.offset,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -EIO;
			goto err;
		}

		if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				a->dirty_sectors,
				a->cached_sectors,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -EIO;
			goto err;
		}
	} else {
		if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset ||
					       a->stripe_redundancy != s.v->nr_redundant, trans,
				"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s",
				bucket.inode, bucket.offset, a->gen,
				a->stripe,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -EIO;
			goto err;
		}

		if (bch2_trans_inconsistent_on(a->data_type != data_type, trans,
				"bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				bch2_data_type_str(data_type),
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -EIO;
			goto err;
		}

		if (bch2_trans_inconsistent_on(parity &&
					       (a->dirty_sectors != -sectors ||
						a->cached_sectors), trans,
				"bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s",
				bucket.inode, bucket.offset, a->gen,
				a->dirty_sectors,
				a->cached_sectors,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -EIO;
			goto err;
		}
	}

	if (sectors) {
		ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type,
					     a->gen, a->data_type, &a->dirty_sectors);
		if (ret)
			goto err;
	}

	if (!deleting) {
		a->stripe		= s.k->p.offset;
		a->stripe_redundancy	= s.v->nr_redundant;
	} else {
		a->stripe		= 0;
		a->stripe_redundancy	= 0;
	}

	alloc_data_type_set(a, data_type);
err:
	printbuf_exit(&buf);
	return ret;
}
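
/*
 * Informal summary of the helper above: parity buckets are charged the full
 * stripe size in dirty_sectors (data buckets are accounted via the extents
 * that point into them, so sectors is 0 here), and every block gains or
 * loses the a->stripe / a->stripe_redundancy back-reference depending on
 * @deleting.
 */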

static int mark_stripe_bucket(struct btree_trans *trans,
			      struct bkey_s_c_stripe s,
			      unsigned ptr_idx, bool deleting,
			      enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
	if (unlikely(!ca)) {
		if (!(flags & BTREE_TRIGGER_overwrite))
			ret = -EIO;
		goto err;
	}

	struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

	if (flags & BTREE_TRIGGER_transactional) {
		struct bkey_i_alloc_v4 *a =
			bch2_trans_start_alloc_update(trans, bucket);
		ret = PTR_ERR_OR_ZERO(a) ?:
			__mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags);
	}

	if (flags & BTREE_TRIGGER_gc) {
		percpu_down_read(&c->mark_lock);
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
					    ptr->dev,
					    (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -EIO;
			goto err_unlock;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
		if (!ret) {
			alloc_to_bucket(g, new);
			bch2_dev_usage_update(c, ca, &old, &new, 0, true);
		}
		bucket_unlock(g);
err_unlock:
		percpu_up_read(&c->mark_lock);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

static int mark_stripe_buckets(struct btree_trans *trans,
			       struct bkey_s_c old, struct bkey_s_c new,
			       enum btree_iter_update_trigger_flags flags)
{
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	BUG_ON(old_s && new_s && old_s->nr_blocks != new_s->nr_blocks);

	unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;

	for (unsigned i = 0; i < nr_blocks; i++) {
		if (new_s && old_s &&
		    !memcmp(&new_s->ptrs[i],
			    &old_s->ptrs[i],
			    sizeof(new_s->ptrs[i])))
			continue;

		if (new_s) {
			int ret = mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(new), i, false, flags);
			if (ret)
				return ret;
		}

		if (old_s) {
			int ret = mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(old), i, true, flags);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int bch2_trigger_stripe(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s _new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bkey_s_c new = _new.s_c;
	struct bch_fs *c = trans->c;
	u64 idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, _new.s_c, flags);

	if (flags & BTREE_TRIGGER_transactional) {
		/*
		 * If the pointers aren't changing, we don't need to do anything:
		 */
		if (new_s && old_s &&
		    new_s->nr_blocks	== old_s->nr_blocks &&
		    new_s->nr_redundant	== old_s->nr_redundant &&
		    !memcmp(old_s->ptrs, new_s->ptrs,
			    new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
			return 0;

		BUG_ON(new_s && old_s &&
		       (new_s->nr_blocks	!= old_s->nr_blocks ||
			new_s->nr_redundant	!= old_s->nr_redundant));

		if (new_s) {
			s64 sectors = le16_to_cpu(new_s->sectors);

			struct bch_replicas_padded r;
			bch2_bkey_to_replicas(&r.e, new);
			int ret = bch2_update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
			if (ret)
				return ret;
		}

		if (old_s) {
			s64 sectors = -((s64) le16_to_cpu(old_s->sectors));

			struct bch_replicas_padded r;
			bch2_bkey_to_replicas(&r.e, old);
			int ret = bch2_update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
			if (ret)
				return ret;
		}

		int ret = mark_stripe_buckets(trans, old, new, flags);
		if (ret)
			return ret;
	}

	if (flags & BTREE_TRIGGER_atomic) {
		struct stripe *m = genradix_ptr(&c->stripes, idx);

		if (!m) {
			struct printbuf buf1 = PRINTBUF;
			struct printbuf buf2 = PRINTBUF;

			bch2_bkey_val_to_text(&buf1, c, old);
			bch2_bkey_val_to_text(&buf2, c, new);
			bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
					    "old %s\n"
					    "new %s", idx, buf1.buf, buf2.buf);
			printbuf_exit(&buf2);
			printbuf_exit(&buf1);
			bch2_inconsistent_error(c);
			return -1;
		}

		if (!new_s) {
			bch2_stripes_heap_del(c, m, idx);

			memset(m, 0, sizeof(*m));
		} else {
			m->sectors	= le16_to_cpu(new_s->sectors);
			m->algorithm	= new_s->algorithm;
			m->nr_blocks	= new_s->nr_blocks;
			m->nr_redundant	= new_s->nr_redundant;
			m->blocks_nonempty = 0;

			for (unsigned i = 0; i < new_s->nr_blocks; i++)
				m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);

			if (!old_s)
				bch2_stripes_heap_insert(c, m, idx);
			else
				bch2_stripes_heap_update(c, m, idx);
		}
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct gc_stripe *m =
			genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);

		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				idx);
			return -BCH_ERR_ENOMEM_mark_stripe;
		}
		/*
		 * This will be wrong when we bring back runtime gc: we should
		 * be unmarking the old key and then marking the new key
		 */
		m->alive	= true;
		m->sectors	= le16_to_cpu(new_s->sectors);
		m->nr_blocks	= new_s->nr_blocks;
		m->nr_redundant	= new_s->nr_redundant;

		for (unsigned i = 0; i < new_s->nr_blocks; i++)
			m->ptrs[i] = new_s->ptrs[i];

		bch2_bkey_to_replicas(&m->r.e, new);

		/*
		 * gc recalculates this field from stripe ptr
		 * references:
		 */
		memset(m->block_sectors, 0, sizeof(m->block_sectors));

		int ret = mark_stripe_buckets(trans, old, new, flags);
		if (ret)
			return ret;

		ret = bch2_update_replicas(c, new, &m->r.e,
					   ((s64) m->sectors * m->nr_redundant),
					   0, true);
		if (ret) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, new);
			bch2_fs_fatal_error(c, ": no replicas entry for %s", buf.buf);
			printbuf_exit(&buf);
			return ret;
		}
	}

	return 0;
}

/* returns blocknr in stripe that we matched: */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
						struct bkey_s_c k, unsigned *block)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
						      le16_to_cpu(s->sectors))) {
				*block = i;
				return ptr;
			}

	return NULL;
}

static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;

		break;
	}
	}

	return false;
}

/* Stripe bufs: */

static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	if (buf->key.k.type == KEY_TYPE_stripe) {
		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
		unsigned i;

		for (i = 0; i < s->v.nr_blocks; i++) {
			kvfree(buf->data[i]);
			buf->data[i] = NULL;
		}
	}
}

/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
			      unsigned offset, unsigned size)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1U << v->csum_granularity_bits;
	unsigned end = offset + size;
	unsigned i;

	BUG_ON(end > le16_to_cpu(v->sectors));

	offset	= round_down(offset, csum_granularity);
	end	= min_t(unsigned, le16_to_cpu(v->sectors),
			round_up(end, csum_granularity));

	buf->offset	= offset;
	buf->size	= end - offset;

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < v->nr_blocks; i++) {
		buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
		if (!buf->data[i])
			goto err;
	}

	return 0;
err:
	ec_stripe_buf_exit(buf);
	return -BCH_ERR_ENOMEM_stripe_buf;
}

/* Checksumming: */

static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
					 unsigned block, unsigned offset)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned end = buf->offset + buf->size;
	unsigned len = min(csum_granularity, end - offset);

	BUG_ON(offset >= end);
	BUG_ON(offset < buf->offset);
	BUG_ON(offset & (csum_granularity - 1));
	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
	       (len & (csum_granularity - 1)));

	return bch2_checksum(NULL, v->csum_type,
			     null_nonce(),
			     buf->data[block] + ((offset - buf->offset) << 9),
			     len << 9);
}

static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, j, csums_per_device = stripe_csums_per_device(v);

	if (!v->csum_type)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++)
		for (j = 0; j < csums_per_device; j++)
			stripe_csum_set(v, i, j,
				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}
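
/*
 * Worked example for the checksum layout (illustrative numbers only): with
 * sectors = 128 and csum_granularity_bits = 3, each checksum covers
 * 1 << 3 = 8 sectors (4KiB), every block carries 128 / 8 = 16 checksums,
 * and the checksum covering sector offset 24 is slot j = 24 >> 3 = 3.
 */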

static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned i;

	if (!v->csum_type)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum want = stripe_csum_get(v, i, j);
			struct bch_csum got = ec_block_checksum(buf, i, offset);

			if (bch2_crc_cmp(want, got)) {
				struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev);
				if (ca) {
					struct printbuf err = PRINTBUF;

					prt_str(&err, "stripe ");
					bch2_csum_err_msg(&err, v->csum_type, want, got);
					prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
					bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
					bch_err_ratelimited(ca, "%s", err.buf);
					printbuf_exit(&err);

					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
				}

				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}

/* Erasure coding: */

static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;

	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}

static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
	return 0;
}
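
/*
 * Reconstruction sketch (illustrative): with nr_data = 4 and nr_redundant = 2,
 * failing to read data blocks 1 and 3 gives failed[] = { 1, 3 }, nr_failed = 2,
 * and raid_rec() rebuilds both from the two surviving data blocks plus p and
 * q; more than nr_redundant failures is unrecoverable and errors out above.
 */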
"reading from" : "writing to", 732 stale); 733 clear_bit(ec_bio->idx, ec_bio->buf->valid); 734 } 735 736 bio_put(&ec_bio->bio); 737 percpu_ref_put(&ca->io_ref); 738 closure_put(cl); 739 } 740 741 static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, 742 blk_opf_t opf, unsigned idx, struct closure *cl) 743 { 744 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; 745 unsigned offset = 0, bytes = buf->size << 9; 746 struct bch_extent_ptr *ptr = &v->ptrs[idx]; 747 enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant 748 ? BCH_DATA_user 749 : BCH_DATA_parity; 750 int rw = op_is_write(opf); 751 752 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw); 753 if (!ca) { 754 clear_bit(idx, buf->valid); 755 return; 756 } 757 758 int stale = dev_ptr_stale(ca, ptr); 759 if (stale) { 760 bch_err_ratelimited(c, 761 "error %s stripe: stale pointer (%i)", 762 rw == READ ? "reading from" : "writing to", 763 stale); 764 clear_bit(idx, buf->valid); 765 return; 766 } 767 768 769 this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size); 770 771 while (offset < bytes) { 772 unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS, 773 DIV_ROUND_UP(bytes, PAGE_SIZE)); 774 unsigned b = min_t(size_t, bytes - offset, 775 nr_iovecs << PAGE_SHIFT); 776 struct ec_bio *ec_bio; 777 778 ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 779 nr_iovecs, 780 opf, 781 GFP_KERNEL, 782 &c->ec_bioset), 783 struct ec_bio, bio); 784 785 ec_bio->ca = ca; 786 ec_bio->buf = buf; 787 ec_bio->idx = idx; 788 789 ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9); 790 ec_bio->bio.bi_end_io = ec_block_endio; 791 ec_bio->bio.bi_private = cl; 792 793 bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b); 794 795 closure_get(cl); 796 percpu_ref_get(&ca->io_ref); 797 798 submit_bio(&ec_bio->bio); 799 800 offset += b; 801 } 802 803 percpu_ref_put(&ca->io_ref); 804 } 805 806 static int get_stripe_key_trans(struct btree_trans *trans, u64 idx, 807 struct ec_stripe_buf *stripe) 808 { 809 struct btree_iter iter; 810 struct bkey_s_c k; 811 int ret; 812 813 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, 814 POS(0, idx), BTREE_ITER_slots); 815 ret = bkey_err(k); 816 if (ret) 817 goto err; 818 if (k.k->type != KEY_TYPE_stripe) { 819 ret = -ENOENT; 820 goto err; 821 } 822 bkey_reassemble(&stripe->key, k); 823 err: 824 bch2_trans_iter_exit(trans, &iter); 825 return ret; 826 } 827 828 /* recovery read path: */ 829 int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio) 830 { 831 struct bch_fs *c = trans->c; 832 struct ec_stripe_buf *buf; 833 struct closure cl; 834 struct bch_stripe *v; 835 unsigned i, offset; 836 int ret = 0; 837 838 closure_init_stack(&cl); 839 840 BUG_ON(!rbio->pick.has_ec); 841 842 buf = kzalloc(sizeof(*buf), GFP_NOFS); 843 if (!buf) 844 return -BCH_ERR_ENOMEM_ec_read_extent; 845 846 ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf)); 847 if (ret) { 848 bch_err_ratelimited(c, 849 "error doing reconstruct read: error %i looking up stripe", ret); 850 kfree(buf); 851 return -EIO; 852 } 853 854 v = &bkey_i_to_stripe(&buf->key)->v; 855 856 if (!bch2_ptr_matches_stripe(v, rbio->pick)) { 857 bch_err_ratelimited(c, 858 "error doing reconstruct read: pointer doesn't match stripe"); 859 ret = -EIO; 860 goto err; 861 } 862 863 offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset; 864 if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) { 865 bch_err_ratelimited(c, 866 "error doing reconstruct 

static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
				struct ec_stripe_buf *stripe)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
			       POS(0, idx), BTREE_ITER_slots);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_stripe) {
		ret = -ENOENT;
		goto err;
	}
	bkey_reassemble(&stripe->key, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* recovery read path: */
int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_buf *buf;
	struct closure cl;
	struct bch_stripe *v;
	unsigned i, offset;
	int ret = 0;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	buf = kzalloc(sizeof(*buf), GFP_NOFS);
	if (!buf)
		return -BCH_ERR_ENOMEM_ec_read_extent;

	ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
	if (ret) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: error %i looking up stripe", ret);
		kfree(buf);
		return -EIO;
	}

	v = &bkey_i_to_stripe(&buf->key)->v;

	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: pointer doesn't match stripe");
		ret = -EIO;
		goto err;
	}

	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: read is bigger than stripe");
		ret = -EIO;
		goto err;
	}

	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
	if (ret)
		goto err;

	for (i = 0; i < v->nr_blocks; i++)
		ec_block_io(c, buf, REQ_OP_READ, i, &cl);

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		ret = -EIO;
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
err:
	ec_stripe_buf_exit(buf);
	kfree(buf);
	return ret;
}

/* stripe bucket accounting: */

static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

		mutex_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
			n.used = h->used;
			swap(*h, n);
		}
		mutex_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	if (c->gc_pos.phase != GC_PHASE_not_running &&
	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	return 0;
}

static int ec_stripe_mem_alloc(struct btree_trans *trans,
			       struct btree_iter *iter)
{
	return allocate_dropping_locks_errcode(trans,
			__ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
}
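
/*
 * Sizing example for __ec_stripe_mem_alloc() above (illustrative): the first
 * use of stripe index 1500 grows the heap to
 * max(1024, roundup_pow_of_two(1501)) = 2048 entries; live entries are
 * copied over under ec_stripes_heap_lock and the old array is freed.
 */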

/*
 * Hash table of open stripes:
 * Stripes that are being created or modified are kept in a hash table, so that
 * stripe deletion can skip them.
 */

static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
	struct ec_stripe_new *s;

	hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
		if (s->idx == idx)
			return true;
	return false;
}

static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	bool ret = false;

	spin_lock(&c->ec_stripes_new_lock);
	ret = __bch2_stripe_is_open(c, idx);
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static bool bch2_try_open_stripe(struct bch_fs *c,
				 struct ec_stripe_new *s,
				 u64 idx)
{
	bool ret;

	spin_lock(&c->ec_stripes_new_lock);
	ret = !__bch2_stripe_is_open(c, idx);
	if (ret) {
		unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));

		s->idx = idx;
		hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
	}
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(!s->idx);

	spin_lock(&c->ec_stripes_new_lock);
	hlist_del_init(&s->hash);
	spin_unlock(&c->ec_stripes_new_lock);

	s->idx = 0;
}

/* Heap of all existing stripes, ordered by blocks_nonempty */

static u64 stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	lockdep_assert_held(&c->ec_stripes_heap_lock);

	if (h->used &&
	    h->data[0].blocks_nonempty == 0 &&
	    !bch2_stripe_is_open(c, h->data[0].idx))
		return h->data[0].idx;

	return 0;
}

static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
				      struct ec_stripe_heap_entry l,
				      struct ec_stripe_heap_entry r)
{
	return ((l.blocks_nonempty > r.blocks_nonempty) -
		(l.blocks_nonempty < r.blocks_nonempty));
}

static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}

static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes, idx);

	BUG_ON(m->heap_idx >= h->used);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}

void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	heap_del(&c->ec_stripes_heap, m->heap_idx,
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);
	mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	mutex_lock(&c->ec_stripes_heap_lock);
	BUG_ON(heap_full(&c->ec_stripes_heap));

	heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
			.idx = idx,
			.blocks_nonempty = m->blocks_nonempty,
		}),
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);
	mutex_unlock(&c->ec_stripes_heap_lock);
}
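
/*
 * Invariant maintained by the heap helpers (informally): for every live
 * stripe m at index idx, h->data[m->heap_idx].idx == idx - which is why
 * every sift passes ec_stripes_heap_set_backpointer(). Ordering by
 * blocks_nonempty keeps completely empty stripes at the top, ready for
 * deletion.
 */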

void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	bool do_deletes;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

	i = m->heap_idx;
	heap_sift_up(h, i, ec_stripes_heap_cmp,
		     ec_stripes_heap_set_backpointer);
	heap_sift_down(h, i, ec_stripes_heap_cmp,
		       ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);

	do_deletes = stripe_idx_to_delete(c) != 0;
	mutex_unlock(&c->ec_stripes_heap_lock);

	if (do_deletes)
		bch2_do_stripe_deletes(c);
}

/* stripe deletion */
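
/*
 * Informal sketch of the deletion path: once the last extent pointing into a
 * stripe is dropped, its blocks_nonempty hits zero and it floats to the top
 * of the heap; stripe_idx_to_delete() then feeds it to the work item below,
 * one btree transaction per stripe.
 */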
"creating" : "updating", 1184 bch2_bkey_types[k.k->type]); 1185 ret = -EINVAL; 1186 goto err; 1187 } 1188 1189 if (k.k->type == KEY_TYPE_stripe) { 1190 const struct bch_stripe *old = bkey_s_c_to_stripe(k).v; 1191 unsigned i; 1192 1193 if (old->nr_blocks != new->v.nr_blocks) { 1194 bch_err(c, "error updating stripe: nr_blocks does not match"); 1195 ret = -EINVAL; 1196 goto err; 1197 } 1198 1199 for (i = 0; i < new->v.nr_blocks; i++) { 1200 unsigned v = stripe_blockcount_get(old, i); 1201 1202 BUG_ON(v && 1203 (old->ptrs[i].dev != new->v.ptrs[i].dev || 1204 old->ptrs[i].gen != new->v.ptrs[i].gen || 1205 old->ptrs[i].offset != new->v.ptrs[i].offset)); 1206 1207 stripe_blockcount_set(&new->v, i, v); 1208 } 1209 } 1210 1211 ret = bch2_trans_update(trans, &iter, &new->k_i, 0); 1212 err: 1213 bch2_trans_iter_exit(trans, &iter); 1214 return ret; 1215 } 1216 1217 static int ec_stripe_update_extent(struct btree_trans *trans, 1218 struct bch_dev *ca, 1219 struct bpos bucket, u8 gen, 1220 struct ec_stripe_buf *s, 1221 struct bpos *bp_pos) 1222 { 1223 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; 1224 struct bch_fs *c = trans->c; 1225 struct bch_backpointer bp; 1226 struct btree_iter iter; 1227 struct bkey_s_c k; 1228 const struct bch_extent_ptr *ptr_c; 1229 struct bch_extent_ptr *ec_ptr = NULL; 1230 struct bch_extent_stripe_ptr stripe_ptr; 1231 struct bkey_i *n; 1232 int ret, dev, block; 1233 1234 ret = bch2_get_next_backpointer(trans, ca, bucket, gen, 1235 bp_pos, &bp, BTREE_ITER_cached); 1236 if (ret) 1237 return ret; 1238 if (bpos_eq(*bp_pos, SPOS_MAX)) 1239 return 0; 1240 1241 if (bp.level) { 1242 struct printbuf buf = PRINTBUF; 1243 struct btree_iter node_iter; 1244 struct btree *b; 1245 1246 b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp); 1247 bch2_trans_iter_exit(trans, &node_iter); 1248 1249 if (!b) 1250 return 0; 1251 1252 prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b); 1253 bch2_backpointer_to_text(&buf, &bp); 1254 1255 bch2_fs_inconsistent(c, "%s", buf.buf); 1256 printbuf_exit(&buf); 1257 return -EIO; 1258 } 1259 1260 k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent); 1261 ret = bkey_err(k); 1262 if (ret) 1263 return ret; 1264 if (!k.k) { 1265 /* 1266 * extent no longer exists - we could flush the btree 1267 * write buffer and retry to verify, but no need: 1268 */ 1269 return 0; 1270 } 1271 1272 if (extent_has_stripe_ptr(k, s->key.k.p.offset)) 1273 goto out; 1274 1275 ptr_c = bkey_matches_stripe(v, k, &block); 1276 /* 1277 * It doesn't generally make sense to erasure code cached ptrs: 1278 * XXX: should we be incrementing a counter? 
1279 */ 1280 if (!ptr_c || ptr_c->cached) 1281 goto out; 1282 1283 dev = v->ptrs[block].dev; 1284 1285 n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr)); 1286 ret = PTR_ERR_OR_ZERO(n); 1287 if (ret) 1288 goto out; 1289 1290 bkey_reassemble(n, k); 1291 1292 bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev); 1293 ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev); 1294 BUG_ON(!ec_ptr); 1295 1296 stripe_ptr = (struct bch_extent_stripe_ptr) { 1297 .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr, 1298 .block = block, 1299 .redundancy = v->nr_redundant, 1300 .idx = s->key.k.p.offset, 1301 }; 1302 1303 __extent_entry_insert(n, 1304 (union bch_extent_entry *) ec_ptr, 1305 (union bch_extent_entry *) &stripe_ptr); 1306 1307 ret = bch2_trans_update(trans, &iter, n, 0); 1308 out: 1309 bch2_trans_iter_exit(trans, &iter); 1310 return ret; 1311 } 1312 1313 static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s, 1314 unsigned block) 1315 { 1316 struct bch_fs *c = trans->c; 1317 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; 1318 struct bch_extent_ptr ptr = v->ptrs[block]; 1319 struct bpos bp_pos = POS_MIN; 1320 int ret = 0; 1321 1322 struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev); 1323 if (!ca) 1324 return -EIO; 1325 1326 struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr); 1327 1328 while (1) { 1329 ret = commit_do(trans, NULL, NULL, 1330 BCH_TRANS_COMMIT_no_check_rw| 1331 BCH_TRANS_COMMIT_no_enospc, 1332 ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, &bp_pos)); 1333 if (ret) 1334 break; 1335 if (bkey_eq(bp_pos, POS_MAX)) 1336 break; 1337 1338 bp_pos = bpos_nosnap_successor(bp_pos); 1339 } 1340 1341 bch2_dev_put(ca); 1342 return ret; 1343 } 1344 1345 static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s) 1346 { 1347 struct btree_trans *trans = bch2_trans_get(c); 1348 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; 1349 unsigned i, nr_data = v->nr_blocks - v->nr_redundant; 1350 int ret = 0; 1351 1352 ret = bch2_btree_write_buffer_flush_sync(trans); 1353 if (ret) 1354 goto err; 1355 1356 for (i = 0; i < nr_data; i++) { 1357 ret = ec_stripe_update_bucket(trans, s, i); 1358 if (ret) 1359 break; 1360 } 1361 err: 1362 bch2_trans_put(trans); 1363 1364 return ret; 1365 } 1366 1367 static void zero_out_rest_of_ec_bucket(struct bch_fs *c, 1368 struct ec_stripe_new *s, 1369 unsigned block, 1370 struct open_bucket *ob) 1371 { 1372 struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE); 1373 if (!ca) { 1374 s->err = -BCH_ERR_erofs_no_writes; 1375 return; 1376 } 1377 1378 unsigned offset = ca->mi.bucket_size - ob->sectors_free; 1379 memset(s->new_stripe.data[block] + (offset << 9), 1380 0, 1381 ob->sectors_free << 9); 1382 1383 int ret = blkdev_issue_zeroout(ca->disk_sb.bdev, 1384 ob->bucket * ca->mi.bucket_size + offset, 1385 ob->sectors_free, 1386 GFP_KERNEL, 0); 1387 1388 percpu_ref_put(&ca->io_ref); 1389 1390 if (ret) 1391 s->err = ret; 1392 } 1393 1394 void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s) 1395 { 1396 if (s->idx) 1397 bch2_stripe_close(c, s); 1398 kfree(s); 1399 } 1400 1401 /* 1402 * data buckets of new stripe all written: create the stripe 1403 */ 1404 static void ec_stripe_create(struct ec_stripe_new *s) 1405 { 1406 struct bch_fs *c = s->c; 1407 struct open_bucket *ob; 1408 struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v; 1409 unsigned i, nr_data = v->nr_blocks - v->nr_redundant; 1410 int ret; 1411 1412 BUG_ON(s->h->s == s); 1413 1414 closure_sync(&s->iodone); 

static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
				       struct ec_stripe_new *s,
				       unsigned block,
				       struct open_bucket *ob)
{
	struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
	if (!ca) {
		s->err = -BCH_ERR_erofs_no_writes;
		return;
	}

	unsigned offset = ca->mi.bucket_size - ob->sectors_free;
	memset(s->new_stripe.data[block] + (offset << 9),
	       0,
	       ob->sectors_free << 9);

	int ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
			ob->bucket * ca->mi.bucket_size + offset,
			ob->sectors_free,
			GFP_KERNEL, 0);

	percpu_ref_put(&ca->io_ref);

	if (ret)
		s->err = ret;
}

void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
{
	if (s->idx)
		bch2_stripe_close(c, s);
	kfree(s);
}

/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret;

	BUG_ON(s->h->s == s);

	closure_sync(&s->iodone);

	if (!s->err) {
		for (i = 0; i < nr_data; i++)
			if (s->blocks[i]) {
				ob = c->open_buckets + s->blocks[i];

				if (ob->sectors_free)
					zero_out_rest_of_ec_bucket(c, s, i, ob);
			}
	}

	if (s->err) {
		if (!bch2_err_matches(s->err, EROFS))
			bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	if (s->have_existing_stripe) {
		ec_validate_checksums(c, &s->existing_stripe);

		if (ec_do_recov(c, &s->existing_stripe)) {
			bch_err(c, "error creating stripe: error reading existing stripe");
			goto err;
		}

		for (i = 0; i < nr_data; i++)
			if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
				swap(s->new_stripe.data[i],
				     s->existing_stripe.data[i]);

		ec_stripe_buf_exit(&s->existing_stripe);
	}

	BUG_ON(!s->allocated);
	BUG_ON(!s->idx);

	ec_generate_ec(&s->new_stripe);

	ec_generate_checksums(&s->new_stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
	closure_sync(&s->iodone);

	if (ec_nr_failed(&s->new_stripe)) {
		bch_err(c, "error creating stripe: error writing redundancy buckets");
		goto err;
	}

	ret = bch2_trans_do(c, &s->res, NULL,
			    BCH_TRANS_COMMIT_no_check_rw|
			    BCH_TRANS_COMMIT_no_enospc,
			    ec_stripe_key_update(trans,
					bkey_i_to_stripe(&s->new_stripe.key),
					!s->have_existing_stripe));
	bch_err_msg(c, ret, "creating stripe key");
	if (ret)
		goto err;

	ret = ec_stripe_update_extents(c, &s->new_stripe);
	bch_err_msg(c, ret, "error updating extents");
	if (ret)
		goto err;
err:
	bch2_disk_reservation_put(c, &s->res);

	for (i = 0; i < v->nr_blocks; i++)
		if (s->blocks[i]) {
			ob = c->open_buckets + s->blocks[i];

			if (i < nr_data) {
				ob->ec = NULL;
				__bch2_open_bucket_put(c, ob);
			} else {
				bch2_open_bucket_put(c, ob);
			}
		}

	mutex_lock(&c->ec_stripe_new_lock);
	list_del(&s->list);
	mutex_unlock(&c->ec_stripe_new_lock);
	wake_up(&c->ec_stripe_new_wait);

	ec_stripe_buf_exit(&s->existing_stripe);
	ec_stripe_buf_exit(&s->new_stripe);
	closure_debug_destroy(&s->iodone);

	ec_stripe_new_put(c, s, STRIPE_REF_stripe);
}

static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
{
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->ref[STRIPE_REF_io]))
			goto out;
	s = NULL;
out:
	mutex_unlock(&c->ec_stripe_new_lock);

	return s;
}

static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
			struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s;

	while ((s = get_pending_stripe(c)))
		ec_stripe_create(s);

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}

void bch2_ec_do_stripe_creates(struct bch_fs *c)
{
	bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);

	if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}
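
/*
 * Reference counting sketch (informal): STRIPE_REF_io is held while writes
 * to the new stripe's data buckets are outstanding - ec_stripe_set_pending()
 * below drops it, and get_pending_stripe() above only returns stripes whose
 * io ref has reached zero. STRIPE_REF_stripe keeps the struct itself alive
 * until ec_stripe_create() finishes with it.
 */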

static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	BUG_ON(!s->allocated && !s->err);

	h->s = NULL;
	s->pending = true;

	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s, STRIPE_REF_io);
}

void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}

void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	if (!ob)
		return NULL;

	BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);

	struct bch_dev *ca = ob_dev(c, ob);
	unsigned offset = ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}

static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}

/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(c, ca, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (unsigned i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}

static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}

static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i *k,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size)
{
	struct bkey_i_stripe *s = bkey_stripe_init(k);
	unsigned u64s;

	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->opts.encoded_extent_max >> 9);
	s->v.csum_type			= BCH_CSUM_crc32c;
	s->v.pad			= 0;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}

static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;

	mutex_init(&s->lock);
	closure_init(&s->iodone, NULL);
	atomic_set(&s->ref[STRIPE_REF_stripe], 1);
	atomic_set(&s->ref[STRIPE_REF_io], 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				BCH_BKEY_PTRS_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	ec_stripe_key_init(c, &s->new_stripe.key,
			   s->nr_data, s->nr_parity, h->blocksize);

	h->s = s;
	return 0;
}
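
/*
 * Shape example for ec_new_stripe_alloc() above (illustrative): with 10
 * usable devices of matching bucket size and redundancy 2, the new stripe
 * gets nr_data = min(10, BCH_BKEY_PTRS_MAX) - 2 = 8 data blocks plus 2
 * parity blocks, each spanning one bucket of h->blocksize sectors.
 */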

static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy,
			 enum bch_watermark watermark)
{
	struct ec_stripe_head *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	BUG_ON(!mutex_trylock(&h->lock));

	h->target	= target;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->watermark	= watermark;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, target);

	for_each_member_device_rcu(c, ca, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(ca->dev_idx, h->devs.d);

	h->blocksize = pick_blocksize(c, &h->devs);

	for_each_member_device_rcu(c, ca, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();

	/*
	 * If we only have redundancy + 1 devices, we're better off with just
	 * replication:
	 */
	if (h->nr_active_devs < h->redundancy + 2)
		bch_err(c, "insufficient devices available to create stripe (have %u, need %u) - mismatched bucket sizes?",
			h->nr_active_devs, h->redundancy + 2);

	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}

void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->nr_data) == h->s->nr_data)
		ec_stripe_set_pending(c, h);

	mutex_unlock(&h->lock);
}

static struct ec_stripe_head *
__bch2_ec_stripe_head_get(struct btree_trans *trans,
			  unsigned target,
			  unsigned algo,
			  unsigned redundancy,
			  enum bch_watermark watermark)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	int ret;

	if (!redundancy)
		return NULL;

	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
	if (ret)
		return ERR_PTR(ret);

	if (test_bit(BCH_FS_going_ro, &c->flags)) {
		h = ERR_PTR(-BCH_ERR_erofs_no_writes);
		goto found;
	}

	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->target		== target &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
		    h->watermark	== watermark) {
			ret = bch2_trans_mutex_lock(trans, &h->lock);
			if (ret)
				h = ERR_PTR(ret);
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
found:
	if (!IS_ERR_OR_NULL(h) &&
	    h->nr_active_devs < h->redundancy + 2) {
		mutex_unlock(&h->lock);
		h = NULL;
	}
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}
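
/*
 * Note on the stripe head cache above (informal): heads are keyed by the
 * (target, algo, redundancy, watermark) tuple, so e.g. copygc and normal
 * user writes, which use different watermarks, fill separate in-progress
 * stripes rather than sharing one.
 */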

static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
				    enum bch_watermark watermark, struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs = h->devs;
	struct open_bucket *ob;
	struct open_buckets buckets;
	struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
	bool have_cache = true;
	int ret = 0;

	BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity);
	BUG_ON(v->nr_redundant != h->s->nr_parity);

	for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
		__clear_bit(v->ptrs[i].dev, devs.d);
		if (i < h->s->nr_data)
			nr_have_data++;
		else
			nr_have_parity++;
	}

	BUG_ON(nr_have_data > h->s->nr_data);
	BUG_ON(nr_have_parity > h->s->nr_parity);

	buckets.nr = 0;
	if (nr_have_parity < h->s->nr_parity) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
						  &h->parity_stripe,
						  &devs,
						  h->s->nr_parity,
						  &nr_have_parity,
						  &have_cache, 0,
						  BCH_DATA_parity,
						  watermark,
						  cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data + h->s->nr_parity,
					       h->s->nr_data);
			BUG_ON(j >= h->s->nr_data + h->s->nr_parity);

			h->s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	buckets.nr = 0;
	if (nr_have_data < h->s->nr_data) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
						  &h->block_stripe,
						  &devs,
						  h->s->nr_data,
						  &nr_have_data,
						  &have_cache, 0,
						  BCH_DATA_user,
						  watermark,
						  cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data, 0);
			BUG_ON(j >= h->s->nr_data);

			h->s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	return 0;
}

/* XXX: doesn't obey target: */
static s64 get_existing_stripe(struct bch_fs *c,
			       struct ec_stripe_head *head)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t heap_idx;
	u64 stripe_idx;
	s64 ret = -1;

	if (may_create_new_stripe(c))
		return -1;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
		/* No blocks worth reusing, stripe will just be deleted: */
		if (!h->data[heap_idx].blocks_nonempty)
			continue;

		stripe_idx = h->data[heap_idx].idx;

		m = genradix_ptr(&c->stripes, stripe_idx);

		if (m->algorithm	== head->algo &&
		    m->nr_redundant	== head->redundancy &&
		    m->sectors		== head->blocksize &&
		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant &&
		    bch2_try_open_stripe(c, head->s, stripe_idx)) {
			ret = stripe_idx;
			break;
		}
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
	return ret;
}
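
/*
 * Reuse criteria used above, summarized: a candidate stripe must match the
 * head's algorithm, redundancy and blocksize, still have at least one
 * nonempty block (else it's about to be deleted) and at least one empty data
 * block to fill, and must not already be open; the scan takes the first
 * match from the top of the heap, i.e. the emptiest qualifying stripe.
 */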

static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
{
	struct bch_fs *c = trans->c;
	struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
	struct bch_stripe *existing_v;
	unsigned i;
	s64 idx;
	int ret;

	/*
	 * If we can't allocate a new stripe, and there's no stripes with empty
	 * blocks for us to reuse, that means we have to wait on copygc:
	 */
	idx = get_existing_stripe(c, h);
	if (idx < 0)
		return -BCH_ERR_stripe_alloc_blocked;

	ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
	bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c,
			     "reading stripe key: %s", bch2_err_str(ret));
	if (ret) {
		bch2_stripe_close(c, h->s);
		return ret;
	}

	existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;

	BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
	h->s->nr_data = existing_v->nr_blocks -
		existing_v->nr_redundant;

	ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
	if (ret) {
		bch2_stripe_close(c, h->s);
		return ret;
	}

	BUG_ON(h->s->existing_stripe.size != h->blocksize);
	BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));

	/*
	 * Free buckets we initially allocated - they might conflict with
	 * blocks from the stripe we're reusing:
	 */
	for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
		bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
		h->s->blocks[i] = 0;
	}
	memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
	memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));

	for (i = 0; i < existing_v->nr_blocks; i++) {
		if (stripe_blockcount_get(existing_v, i)) {
			__set_bit(i, h->s->blocks_gotten);
			__set_bit(i, h->s->blocks_allocated);
		}

		ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
	}

	bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
	h->s->have_existing_stripe = true;

	return 0;
}

static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos min_pos = POS(0, 1);
	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
	int ret;

	if (!h->s->res.sectors) {
		ret = bch2_disk_reservation_get(c, &h->s->res,
					h->blocksize,
					h->s->nr_parity,
					BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			return ret;
	}

	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
				     BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
			if (start_pos.offset) {
				start_pos = min_pos;
				bch2_btree_iter_set_pos(&iter, start_pos);
				continue;
			}

			ret = -BCH_ERR_ENOSPC_stripe_create;
			break;
		}

		if (bkey_deleted(k.k) &&
		    bch2_try_open_stripe(c, h->s, k.k->p.offset))
			break;
	}

	c->ec_stripe_hint = iter.pos.offset;

	if (ret)
		goto err;

	ret = ec_stripe_mem_alloc(trans, &iter);
	if (ret) {
		bch2_stripe_close(c, h->s);
		goto err;
	}

	h->s->new_stripe.key.k.p = iter.pos;
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
err:
	bch2_disk_reservation_put(c, &h->s->res);
	goto out;
}
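
/*
 * Index allocation example for __bch2_ec_stripe_head_reserve() above
 * (illustrative): with ec_stripe_hint = 4000 the scan for a free slot starts
 * at POS(0, 4000); on running past POS(0, U32_MAX) it restarts from
 * POS(0, 1), and -BCH_ERR_ENOSPC_stripe_create is returned if no unused
 * index can be opened.
 */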

struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
					       enum bch_watermark watermark,
					       struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	bool waiting = false;
	int ret;

	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
	if (IS_ERR_OR_NULL(h))
		return h;

	if (!h->s) {
		ret = ec_new_stripe_alloc(c, h);
		if (ret) {
			bch_err(c, "failed to allocate new stripe");
			goto err;
		}
	}

	if (h->s->allocated)
		goto allocated;

	if (h->s->have_existing_stripe)
		goto alloc_existing;

	/* First, try to allocate a full stripe: */
	ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
		__bch2_ec_stripe_head_reserve(trans, h);
	if (!ret)
		goto allocate_buf;
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
	    bch2_err_matches(ret, ENOMEM))
		goto err;

	/*
	 * Not enough buckets available for a full stripe: we must reuse an
	 * existing stripe:
	 */
	while (1) {
		ret = __bch2_ec_stripe_head_reuse(trans, h);
		if (!ret)
			break;
		if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
			goto err;

		if (watermark == BCH_WATERMARK_copygc) {
			ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
				__bch2_ec_stripe_head_reserve(trans, h);
			if (ret)
				goto err;
			goto allocate_buf;
		}

		/* XXX freelist_wait? */
		closure_wait(&c->freelist_wait, cl);
		waiting = true;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc_existing:
	/*
	 * Retry allocating buckets, with the watermark for this
	 * particular write:
	 */
	ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
	if (ret)
		goto err;

allocate_buf:
	ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
	if (ret)
		goto err;

	h->s->allocated = true;
allocated:
	BUG_ON(!h->s->idx);
	BUG_ON(!h->s->new_stripe.data[0]);
	BUG_ON(trans->restarted);
	return h;
err:
	bch2_ec_stripe_head_put(c, h);
	return ERR_PTR(ret);
}

static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		if (!ca)
			goto found;

		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
			if (!h->s->blocks[i])
				continue;

			ob = c->open_buckets + h->s->blocks[i];
			if (ob->dev == ca->dev_idx)
				goto found;
		}
		goto unlock;
found:
		h->s->err = -BCH_ERR_erofs_no_writes;
		ec_stripe_set_pending(c, h);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}

void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	__bch2_ec_stop(c, ca);
}

void bch2_fs_ec_stop(struct bch_fs *c)
{
	__bch2_ec_stop(c, NULL);
}

static bool bch2_fs_ec_flush_done(struct bch_fs *c)
{
	bool ret;

	mutex_lock(&c->ec_stripe_new_lock);
	ret = list_empty(&c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	return ret;
}

void bch2_fs_ec_flush(struct bch_fs *c)
{
	wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
}

int bch2_stripes_read(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
				   BTREE_ITER_prefetch, k, ({
			if (k.k->type != KEY_TYPE_stripe)
				continue;

			ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
			if (ret)
				break;

			const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;

			struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
			m->sectors	= le16_to_cpu(s->sectors);
			m->algorithm	= s->algorithm;
			m->nr_blocks	= s->nr_blocks;
			m->nr_redundant	= s->nr_redundant;
			m->blocks_nonempty = 0;

			for (unsigned i = 0; i < s->nr_blocks; i++)
				m->blocks_nonempty += !!stripe_blockcount_get(s, i);

			bch2_stripes_heap_insert(c, m, k.k->p.offset);
			0;
		})));
	bch_err_fn(c, ret);
	return ret;
}

void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (i = 0; i < min_t(size_t, h->used, 50); i++) {
		m = genradix_ptr(&c->stripes, h->data[i].idx);

		prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
			   h->data[i].blocks_nonempty,
			   m->nr_blocks - m->nr_redundant,
			   m->nr_redundant);
		if (bch2_stripe_is_open(c, h->data[i].idx))
			prt_str(out, " open");
		prt_newline(out);
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		prt_printf(out, "target %u algo %u redundancy %u %s:\n",
			   h->target, h->algo, h->redundancy,
			   bch2_watermarks[h->watermark]);

		if (h->s)
			prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
				   h->s->idx, h->s->nr_data, h->s->nr_parity,
				   bitmap_weight(h->s->blocks_allocated,
						 h->s->nr_data));
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	prt_printf(out, "in flight:\n");

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list) {
		prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
			   s->idx, s->nr_data, s->nr_parity,
			   atomic_read(&s->ref[STRIPE_REF_io]),
			   atomic_read(&s->ref[STRIPE_REF_stripe]),
			   bch2_watermarks[s->h->watermark]);
	}
	mutex_unlock(&c->ec_stripe_new_lock);
}

void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;
	unsigned i;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_first_entry_or_null(&c->ec_stripe_head_list,
					     struct ec_stripe_head, list);
		if (h)
			list_del(&h->list);
		mutex_unlock(&c->ec_stripe_head_lock);
		if (!h)
			break;

		if (h->s) {
			for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
				BUG_ON(h->s->blocks[i]);

			kfree(h->s);
		}
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes);
	bioset_exit(&c->ec_bioset);
}

void bch2_fs_ec_init_early(struct bch_fs *c)
{
	spin_lock_init(&c->ec_stripes_new_lock);
	mutex_init(&c->ec_stripes_heap_lock);

	INIT_LIST_HEAD(&c->ec_stripe_head_list);
	mutex_init(&c->ec_stripe_head_lock);

	INIT_LIST_HEAD(&c->ec_stripe_new_list);
	mutex_init(&c->ec_stripe_new_lock);
	init_waitqueue_head(&c->ec_stripe_new_wait);

	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
}

int bch2_fs_ec_init(struct bch_fs *c)
{
	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}