// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_accounting.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "io_write.h"
#include "keylist.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
#include "util.h"

#include <linux/sort.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>

static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}

static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}

static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}

#else

#include <raid/raid.h>

#endif
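
/*
 * Note on the conventions above (a summary of the code, not new behaviour):
 * raid_gen() and raid_rec() take nd data buffers followed by np parity
 * buffers in v[]: p lives at v[nd], and q (when np == 2) at v[nd + 1].
 * raid5_recov() reconstructs v[failed_idx] as the XOR of every other
 * buffer - e.g. with nd = 2, np = 1, p = d0 ^ d1, so a lost d1 is
 * recovered as d0 ^ p.
 */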

struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};

/* Stripes btree keys: */

int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k,
			 enum bch_validate_flags flags)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	int ret = 0;

	bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
			 bpos_gt(k.k->p, POS(0, U32_MAX)),
			 c, stripe_pos_bad,
			 "stripe at bad pos");

	bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s),
			 c, stripe_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(k.k), stripe_val_u64s(s));

	ret = bch2_bkey_ptrs_validate(c, k, flags);
fsck_err:
	return ret;
}

void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
	struct bch_stripe s = {};

	memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));

	unsigned nr_data = s.nr_blocks - s.nr_redundant;

	prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
		   s.algorithm,
		   le16_to_cpu(s.sectors),
		   nr_data,
		   s.nr_redundant);
	bch2_prt_csum_type(out, s.csum_type);
	prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);

	if (s.disk_label) {
		prt_str(out, " label");
		bch2_disk_path_to_text(out, c, s.disk_label - 1);
	}

	for (unsigned i = 0; i < s.nr_blocks; i++) {
		const struct bch_extent_ptr *ptr = sp->ptrs + i;

		if ((void *) ptr >= bkey_val_end(k))
			break;

		prt_char(out, ' ');
		bch2_extent_ptr_to_text(out, c, ptr);

		if (s.csum_type < BCH_CSUM_NR &&
		    i < nr_data &&
		    stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
			prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
	}
}

/* Triggers: */

static int __mark_stripe_bucket(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c_stripe s,
				unsigned ptr_idx, bool deleting,
				struct bpos bucket,
				struct bch_alloc_v4 *a,
				enum btree_iter_update_trigger_flags flags)
{
	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
	unsigned nr_data = s.v->nr_blocks - s.v->nr_redundant;
	bool parity = ptr_idx >= nr_data;
	enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
	s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_fs *c = trans->c;
	if (deleting)
		sectors = -sectors;

	if (!deleting) {
		if (bch2_trans_inconsistent_on(a->stripe ||
					       a->stripe_redundancy, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				a->dirty_sectors,
				a->stripe, s.k->p.offset,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				a->dirty_sectors,
				a->cached_sectors,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}
	} else {
		if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset ||
					       a->stripe_redundancy != s.v->nr_redundant, trans,
				"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s",
				bucket.inode, bucket.offset, a->gen,
				a->stripe,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		if (bch2_trans_inconsistent_on(a->data_type != data_type, trans,
				"bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				bch2_data_type_str(data_type),
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		if (bch2_trans_inconsistent_on(parity &&
					       (a->dirty_sectors != -sectors ||
						a->cached_sectors), trans,
				"bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s",
				bucket.inode, bucket.offset, a->gen,
				a->dirty_sectors,
				a->cached_sectors,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}
	}

	if (sectors) {
		ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type,
					     a->gen, a->data_type, &a->dirty_sectors);
		if (ret)
			goto err;
	}

	if (!deleting) {
		a->stripe		= s.k->p.offset;
		a->stripe_redundancy	= s.v->nr_redundant;
	} else {
		a->stripe		= 0;
		a->stripe_redundancy	= 0;
	}

	alloc_data_type_set(a, data_type);
err:
	printbuf_exit(&buf);
	return ret;
}
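
/*
 * mark_stripe_bucket() below applies the above in one of two ways depending
 * on the trigger flags: transactionally, via an update to the alloc btree,
 * or against the in-memory gc bucket state. Both paths go through
 * __mark_stripe_bucket(), which also keeps the bucket's back-references to
 * the stripe (a->stripe, a->stripe_redundancy) in sync.
 */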

static int mark_stripe_bucket(struct btree_trans *trans,
			      struct bkey_s_c_stripe s,
			      unsigned ptr_idx, bool deleting,
			      enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
	if (unlikely(!ca)) {
		if (ptr->dev != BCH_SB_MEMBER_INVALID && !(flags & BTREE_TRIGGER_overwrite))
			ret = -BCH_ERR_mark_stripe;
		goto err;
	}

	struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

	if (flags & BTREE_TRIGGER_transactional) {
		struct bkey_i_alloc_v4 *a =
			bch2_trans_start_alloc_update(trans, bucket, 0);
		ret = PTR_ERR_OR_ZERO(a) ?:
			__mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags);
	}

	if (flags & BTREE_TRIGGER_gc) {
		percpu_down_read(&c->mark_lock);
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
					    ptr->dev,
					    (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err_unlock;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
		alloc_to_bucket(g, new);
		bucket_unlock(g);
err_unlock:
		percpu_up_read(&c->mark_lock);
		if (!ret)
			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

static int mark_stripe_buckets(struct btree_trans *trans,
			       struct bkey_s_c old, struct bkey_s_c new,
			       enum btree_iter_update_trigger_flags flags)
{
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	BUG_ON(old_s && new_s && old_s->nr_blocks != new_s->nr_blocks);

	unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;

	for (unsigned i = 0; i < nr_blocks; i++) {
		if (new_s && old_s &&
		    !memcmp(&new_s->ptrs[i],
			    &old_s->ptrs[i],
			    sizeof(new_s->ptrs[i])))
			continue;

		if (new_s) {
			int ret = mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(new), i, false, flags);
			if (ret)
				return ret;
		}

		if (old_s) {
			int ret = mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(old), i, true, flags);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static inline void stripe_to_mem(struct stripe *m, const struct bch_stripe *s)
{
	m->sectors	= le16_to_cpu(s->sectors);
	m->algorithm	= s->algorithm;
	m->nr_blocks	= s->nr_blocks;
	m->nr_redundant	= s->nr_redundant;
	m->disk_label	= s->disk_label;
	m->blocks_nonempty = 0;

	for (unsigned i = 0; i < s->nr_blocks; i++)
		m->blocks_nonempty += !!stripe_blockcount_get(s, i);
}
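
/*
 * The stripe trigger below runs in up to three phases, selected by flags:
 * transactional and gc do the disk accounting and bucket marking, while
 * atomic updates the in-memory struct stripe and its position in the
 * blocks_nonempty heap; check_repair is delegated to bch2_check_fix_ptrs().
 */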

int bch2_trigger_stripe(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s _new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bkey_s_c new = _new.s_c;
	struct bch_fs *c = trans->c;
	u64 idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, _new.s_c, flags);

	BUG_ON(new_s && old_s &&
	       (new_s->nr_blocks	!= old_s->nr_blocks ||
		new_s->nr_redundant	!= old_s->nr_redundant));

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		/*
		 * If the pointers aren't changing, we don't need to do anything:
		 */
		if (new_s && old_s &&
		    new_s->nr_blocks	== old_s->nr_blocks &&
		    new_s->nr_redundant	== old_s->nr_redundant &&
		    !memcmp(old_s->ptrs, new_s->ptrs,
			    new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
			return 0;

		struct gc_stripe *gc = NULL;
		if (flags & BTREE_TRIGGER_gc) {
			gc = genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
			if (!gc) {
				bch_err(c, "error allocating memory for gc_stripes, idx %llu", idx);
				return -BCH_ERR_ENOMEM_mark_stripe;
			}

			/*
			 * This will be wrong when we bring back runtime gc: we should
			 * be unmarking the old key and then marking the new key
			 *
			 * Also: when we bring back runtime gc, locking
			 */
			gc->alive	= true;
			gc->sectors	= le16_to_cpu(new_s->sectors);
			gc->nr_blocks	= new_s->nr_blocks;
			gc->nr_redundant = new_s->nr_redundant;

			for (unsigned i = 0; i < new_s->nr_blocks; i++)
				gc->ptrs[i] = new_s->ptrs[i];

			/*
			 * gc recalculates this field from stripe ptr
			 * references:
			 */
			memset(gc->block_sectors, 0, sizeof(gc->block_sectors));
		}

		if (new_s) {
			s64 sectors = (u64) le16_to_cpu(new_s->sectors) * new_s->nr_redundant;

			struct disk_accounting_pos acc = {
				.type = BCH_DISK_ACCOUNTING_replicas,
			};
			bch2_bkey_to_replicas(&acc.replicas, new);
			int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
			if (ret)
				return ret;

			if (gc)
				memcpy(&gc->r.e, &acc.replicas, replicas_entry_bytes(&acc.replicas));
		}

		if (old_s) {
			s64 sectors = -((s64) le16_to_cpu(old_s->sectors)) * old_s->nr_redundant;

			struct disk_accounting_pos acc = {
				.type = BCH_DISK_ACCOUNTING_replicas,
			};
			bch2_bkey_to_replicas(&acc.replicas, old);
			int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
			if (ret)
				return ret;
		}

		int ret = mark_stripe_buckets(trans, old, new, flags);
		if (ret)
			return ret;
	}

	if (flags & BTREE_TRIGGER_atomic) {
		struct stripe *m = genradix_ptr(&c->stripes, idx);

		if (!m) {
			struct printbuf buf1 = PRINTBUF;
			struct printbuf buf2 = PRINTBUF;

			bch2_bkey_val_to_text(&buf1, c, old);
			bch2_bkey_val_to_text(&buf2, c, new);
			bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
					    "old %s\n"
					    "new %s", idx, buf1.buf, buf2.buf);
			printbuf_exit(&buf2);
			printbuf_exit(&buf1);
			bch2_inconsistent_error(c);
			return -1;
		}

		if (!new_s) {
			bch2_stripes_heap_del(c, m, idx);

			memset(m, 0, sizeof(*m));
		} else {
			stripe_to_mem(m, new_s);

			if (!old_s)
				bch2_stripes_heap_insert(c, m, idx);
			else
				bch2_stripes_heap_update(c, m, idx);
		}
	}

	return 0;
}

/* returns blocknr in stripe that we matched: */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
							struct bkey_s_c k, unsigned *block)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
						      le16_to_cpu(s->sectors))) {
				*block = i;
				return ptr;
			}

	return NULL;
}

static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;

		break;
	}
	}

	return false;
}

/* Stripe bufs: */

static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	if (buf->key.k.type == KEY_TYPE_stripe) {
		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
		unsigned i;

		for (i = 0; i < s->v.nr_blocks; i++) {
			kvfree(buf->data[i]);
			buf->data[i] = NULL;
		}
	}
}

/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
			      unsigned offset, unsigned size)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1U << v->csum_granularity_bits;
	unsigned end = offset + size;
	unsigned i;

	BUG_ON(end > le16_to_cpu(v->sectors));

	offset	= round_down(offset, csum_granularity);
	end	= min_t(unsigned, le16_to_cpu(v->sectors),
			round_up(end, csum_granularity));

	buf->offset	= offset;
	buf->size	= end - offset;

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < v->nr_blocks; i++) {
		buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
		if (!buf->data[i])
			goto err;
	}

	return 0;
err:
	ec_stripe_buf_exit(buf);
	return -BCH_ERR_ENOMEM_stripe_buf;
}
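
/*
 * Example of the rounding in ec_stripe_buf_init(): with
 * csum_granularity_bits = 3 (8 sector granules), a request for offset 5,
 * size 10 rounds out to cover sectors [0, 16), so that only whole checksum
 * granules ever need to be verified.
 */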

/* Checksumming: */

static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
					 unsigned block, unsigned offset)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned end = buf->offset + buf->size;
	unsigned len = min(csum_granularity, end - offset);

	BUG_ON(offset >= end);
	BUG_ON(offset < buf->offset);
	BUG_ON(offset & (csum_granularity - 1));
	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
	       (len & (csum_granularity - 1)));

	return bch2_checksum(NULL, v->csum_type,
			     null_nonce(),
			     buf->data[block] + ((offset - buf->offset) << 9),
			     len << 9);
}

static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, j, csums_per_device = stripe_csums_per_device(v);

	if (!v->csum_type)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++)
		for (j = 0; j < csums_per_device; j++)
			stripe_csum_set(v, i, j,
				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}

static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned i;

	if (!v->csum_type)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum want = stripe_csum_get(v, i, j);
			struct bch_csum got = ec_block_checksum(buf, i, offset);

			if (bch2_crc_cmp(want, got)) {
				struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev);
				if (ca) {
					struct printbuf err = PRINTBUF;

					prt_str(&err, "stripe ");
					bch2_csum_err_msg(&err, v->csum_type, want, got);
					prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
					bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
					bch_err_ratelimited(ca, "%s", err.buf);
					printbuf_exit(&err);

					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
				}

				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}

/* Erasure coding: */

static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;

	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}

static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
	return 0;
}
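
/*
 * Recovery in a nutshell: callers read whatever blocks they can, clearing
 * bits in buf->valid for failures, and ec_do_recov() rebuilds the rest -
 * e.g. a single failed data block in a two-parity stripe goes through
 * raid_rec() with nr = 1. More failures than nr_redundant is unrecoverable.
 */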

/* IO: */

static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;

	if (bch2_dev_io_err_on(bio->bi_status, ca,
			       bio_data_dir(bio)
			       ? BCH_MEMBER_ERROR_write
			       : BCH_MEMBER_ERROR_read,
			       "erasure coding %s error: %s",
			       bio_data_dir(bio) ? "write" : "read",
			       bch2_blk_status_to_str(bio->bi_status)))
		clear_bit(ec_bio->idx, ec_bio->buf->valid);

	int stale = dev_ptr_stale(ca, ptr);
	if (stale) {
		bch_err_ratelimited(ca->fs,
				    "error %s stripe: stale/invalid pointer (%i) after io",
				    bio_data_dir(bio) == READ ? "reading from" : "writing to",
				    stale);
		clear_bit(ec_bio->idx, ec_bio->buf->valid);
	}

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref);
	closure_put(cl);
}

static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			blk_opf_t opf, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;
	int rw = op_is_write(opf);

	struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw);
	if (!ca) {
		clear_bit(idx, buf->valid);
		return;
	}

	int stale = dev_ptr_stale(ca, ptr);
	if (stale) {
		bch_err_ratelimited(c,
				    "error %s stripe: stale pointer (%i)",
				    rw == READ ? "reading from" : "writing to",
				    stale);
		clear_bit(idx, buf->valid);
		return;
	}

	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
						       nr_iovecs,
						       opf,
						       GFP_KERNEL,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca	= ca;
		ec_bio->buf	= buf;
		ec_bio->idx	= idx;

		ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io	= ec_block_endio;
		ec_bio->bio.bi_private	= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref);
}

static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
				struct ec_stripe_buf *stripe)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
			       POS(0, idx), BTREE_ITER_slots);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_stripe) {
		ret = -ENOENT;
		goto err;
	}
	bkey_reassemble(&stripe->key, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* recovery read path: */
int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
			struct bkey_s_c orig_k)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_buf *buf = NULL;
	struct closure cl;
	struct bch_stripe *v;
	unsigned i, offset;
	const char *msg = NULL;
	struct printbuf msgbuf = PRINTBUF;
	int ret = 0;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	buf = kzalloc(sizeof(*buf), GFP_NOFS);
	if (!buf)
		return -BCH_ERR_ENOMEM_ec_read_extent;

	ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
	if (ret) {
		msg = "stripe not found";
		goto err;
	}

	v = &bkey_i_to_stripe(&buf->key)->v;

	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
		msg = "pointer doesn't match stripe";
		goto err;
	}

	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
		msg = "read is bigger than stripe";
		goto err;
	}

	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
	if (ret) {
		msg = "-ENOMEM";
		goto err;
	}

	for (i = 0; i < v->nr_blocks; i++)
		ec_block_io(c, buf, REQ_OP_READ, i, &cl);

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		msg = "unable to read enough blocks";
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
out:
	ec_stripe_buf_exit(buf);
	kfree(buf);
	return ret;
err:
	bch2_bkey_val_to_text(&msgbuf, c, orig_k);
	bch_err_ratelimited(c,
			    "error doing reconstruct read: %s\n %s", msg, msgbuf.buf);
	printbuf_exit(&msgbuf);
	ret = -BCH_ERR_stripe_reconstruct;
	goto out;
}

/* stripe bucket accounting: */

static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

		mutex_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->nr * sizeof(h->data[0]));
			n.nr = h->nr;
			swap(*h, n);
		}
		mutex_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	if (c->gc_pos.phase != GC_PHASE_not_running &&
	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	return 0;
}

static int ec_stripe_mem_alloc(struct btree_trans *trans,
			       struct btree_iter *iter)
{
	return allocate_dropping_locks_errcode(trans,
			__ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
}
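
/*
 * Note the resize pattern in __ec_stripe_mem_alloc() above: the bigger heap
 * is allocated without ec_stripes_heap_lock held (the allocation may sleep),
 * swapped in under the lock only if it is still larger than the current
 * heap, and whichever heap loses is freed afterwards.
 */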

/*
 * Hash table of open stripes:
 * Stripes that are being created or modified are kept in a hash table, so that
 * stripe deletion can skip them.
 */

static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
	struct ec_stripe_new *s;

	hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
		if (s->idx == idx)
			return true;
	return false;
}

static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	bool ret = false;

	spin_lock(&c->ec_stripes_new_lock);
	ret = __bch2_stripe_is_open(c, idx);
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static bool bch2_try_open_stripe(struct bch_fs *c,
				 struct ec_stripe_new *s,
				 u64 idx)
{
	bool ret;

	spin_lock(&c->ec_stripes_new_lock);
	ret = !__bch2_stripe_is_open(c, idx);
	if (ret) {
		unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));

		s->idx = idx;
		hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
	}
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(!s->idx);

	spin_lock(&c->ec_stripes_new_lock);
	hlist_del_init(&s->hash);
	spin_unlock(&c->ec_stripes_new_lock);

	s->idx = 0;
}

/* Heap of all existing stripes, ordered by blocks_nonempty */

static u64 stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	lockdep_assert_held(&c->ec_stripes_heap_lock);

	if (h->nr &&
	    h->data[0].blocks_nonempty == 0 &&
	    !bch2_stripe_is_open(c, h->data[0].idx))
		return h->data[0].idx;

	return 0;
}

static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}

static inline bool ec_stripes_heap_cmp(const void *l, const void *r, void __always_unused *args)
{
	struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
	struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;

	return ((_l->blocks_nonempty > _r->blocks_nonempty) <
		(_l->blocks_nonempty < _r->blocks_nonempty));
}

static inline void ec_stripes_heap_swap(void *l, void *r, void *h)
{
	struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
	struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
	ec_stripes_heap *_h = (ec_stripes_heap *)h;
	size_t i = _l - _h->data;
	size_t j = _r - _h->data;

	swap(*_l, *_r);

	ec_stripes_heap_set_backpointer(_h, i);
	ec_stripes_heap_set_backpointer(_h, j);
}
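
/*
 * The comparison above is the branchless idiom (a > b) < (a < b), i.e. plain
 * "less than" on blocks_nonempty - with a = 1, b = 2 it evaluates to
 * (0) < (1) = true. The result is a min-heap on blocks_nonempty, keeping the
 * emptiest stripe - the best candidate for deletion or reuse - at the top.
 */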

static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes, idx);

	BUG_ON(m->heap_idx >= h->nr);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}

void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	const struct min_heap_callbacks callbacks = {
		.less = ec_stripes_heap_cmp,
		.swp = ec_stripes_heap_swap,
	};

	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	min_heap_del(&c->ec_stripes_heap, m->heap_idx, &callbacks, &c->ec_stripes_heap);
	mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	const struct min_heap_callbacks callbacks = {
		.less = ec_stripes_heap_cmp,
		.swp = ec_stripes_heap_swap,
	};

	mutex_lock(&c->ec_stripes_heap_lock);
	BUG_ON(min_heap_full(&c->ec_stripes_heap));

	genradix_ptr(&c->stripes, idx)->heap_idx = c->ec_stripes_heap.nr;
	min_heap_push(&c->ec_stripes_heap, &((struct ec_stripe_heap_entry) {
				.idx = idx,
				.blocks_nonempty = m->blocks_nonempty,
			}),
			&callbacks,
			&c->ec_stripes_heap);

	heap_verify_backpointer(c, idx);
	mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	const struct min_heap_callbacks callbacks = {
		.less = ec_stripes_heap_cmp,
		.swp = ec_stripes_heap_swap,
	};
	ec_stripes_heap *h = &c->ec_stripes_heap;
	bool do_deletes;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

	i = m->heap_idx;
	min_heap_sift_up(h, i, &callbacks, &c->ec_stripes_heap);
	min_heap_sift_down(h, i, &callbacks, &c->ec_stripes_heap);

	heap_verify_backpointer(c, idx);

	do_deletes = stripe_idx_to_delete(c) != 0;
	mutex_unlock(&c->ec_stripes_heap_lock);

	if (do_deletes)
		bch2_do_stripe_deletes(c);
}

/* stripe deletion */

static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_stripe s;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
			       BTREE_ITER_intent);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
		ret = -EINVAL;
		goto err;
	}

	s = bkey_s_c_to_stripe(k);
	for (unsigned i = 0; i < s.v->nr_blocks; i++)
		if (stripe_blockcount_get(s.v, i)) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, k);
			bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
			printbuf_exit(&buf);
			ret = -EINVAL;
			goto err;
		}

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);

	while (1) {
		mutex_lock(&c->ec_stripes_heap_lock);
		u64 idx = stripe_idx_to_delete(c);
		mutex_unlock(&c->ec_stripes_heap_lock);

		if (!idx)
			break;

		int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					ec_stripe_delete(trans, idx));
		bch_err_fn(c, ret);
		if (ret)
			break;
	}

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}

void bch2_do_stripe_deletes(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
	    !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}
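
/*
 * Deletion is driven from the heap: once the stripe at the top has no
 * blocks with data left (blocks_nonempty == 0) and isn't currently open,
 * stripe_idx_to_delete() reports it and the work item above deletes the
 * stripe keys one at a time.
 */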

/* stripe creation: */

static int ec_stripe_key_update(struct btree_trans *trans,
				struct bkey_i_stripe *new,
				bool create)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
			       new->k.p, BTREE_ITER_intent);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
		bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
				     create ? "creating" : "updating",
				     bch2_bkey_types[k.k->type]);
		ret = -EINVAL;
		goto err;
	}

	if (k.k->type == KEY_TYPE_stripe) {
		const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
		unsigned i;

		if (old->nr_blocks != new->v.nr_blocks) {
			bch_err(c, "error updating stripe: nr_blocks does not match");
			ret = -EINVAL;
			goto err;
		}

		for (i = 0; i < new->v.nr_blocks; i++) {
			unsigned v = stripe_blockcount_get(old, i);

			BUG_ON(v &&
			       (old->ptrs[i].dev != new->v.ptrs[i].dev ||
				old->ptrs[i].gen != new->v.ptrs[i].gen ||
				old->ptrs[i].offset != new->v.ptrs[i].offset));

			stripe_blockcount_set(&new->v, i, v);
		}
	}

	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int ec_stripe_update_extent(struct btree_trans *trans,
				   struct bch_dev *ca,
				   struct bpos bucket, u8 gen,
				   struct ec_stripe_buf *s,
				   struct bpos *bp_pos)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_fs *c = trans->c;
	struct bch_backpointer bp;
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_extent_ptr *ptr_c;
	struct bch_extent_ptr *ec_ptr = NULL;
	struct bch_extent_stripe_ptr stripe_ptr;
	struct bkey_i *n;
	int ret, dev, block;

	ret = bch2_get_next_backpointer(trans, ca, bucket, gen,
					bp_pos, &bp, BTREE_ITER_cached);
	if (ret)
		return ret;
	if (bpos_eq(*bp_pos, SPOS_MAX))
		return 0;

	if (bp.level) {
		struct printbuf buf = PRINTBUF;
		struct btree_iter node_iter;
		struct btree *b;

		b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
		bch2_trans_iter_exit(trans, &node_iter);

		if (!b)
			return 0;

		prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
		bch2_backpointer_to_text(&buf, &bp);

		bch2_fs_inconsistent(c, "%s", buf.buf);
		printbuf_exit(&buf);
		return -EIO;
	}

	k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent);
	ret = bkey_err(k);
	if (ret)
		return ret;
	if (!k.k) {
		/*
		 * extent no longer exists - we could flush the btree
		 * write buffer and retry to verify, but no need:
		 */
		return 0;
	}

	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
		goto out;

	ptr_c = bkey_matches_stripe(v, k, &block);
	/*
	 * It doesn't generally make sense to erasure code cached ptrs:
	 * XXX: should we be incrementing a counter?
	 */
	if (!ptr_c || ptr_c->cached)
		goto out;

	dev = v->ptrs[block].dev;

	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto out;

	bkey_reassemble(n, k);

	bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, ptr->dev != dev);
	ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
	BUG_ON(!ec_ptr);

	stripe_ptr = (struct bch_extent_stripe_ptr) {
		.type		= 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block		= block,
		.redundancy	= v->nr_redundant,
		.idx		= s->key.k.p.offset,
	};

	__extent_entry_insert(n,
			(union bch_extent_entry *) ec_ptr,
			(union bch_extent_entry *) &stripe_ptr);

	ret = bch2_trans_update(trans, &iter, n, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
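
/*
 * The rewrite in ec_stripe_update_extent() turns a replicated extent into an
 * erasure coded one: every pointer to a device other than the stripe block's
 * device is dropped, and a bch_extent_stripe_ptr entry carrying the stripe
 * index, block number and redundancy is inserted next to the surviving
 * pointer.
 */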

static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
				   unsigned block)
{
	struct bch_fs *c = trans->c;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_extent_ptr ptr = v->ptrs[block];
	struct bpos bp_pos = POS_MIN;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
	if (!ca)
		return -EIO;

	struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);

	while (1) {
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_check_rw|
				BCH_TRANS_COMMIT_no_enospc,
			ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, &bp_pos));
		if (ret)
			break;
		if (bkey_eq(bp_pos, POS_MAX))
			break;

		bp_pos = bpos_nosnap_successor(bp_pos);
	}

	bch2_dev_put(ca);
	return ret;
}

static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret = 0;

	ret = bch2_btree_write_buffer_flush_sync(trans);
	if (ret)
		goto err;

	for (i = 0; i < nr_data; i++) {
		ret = ec_stripe_update_bucket(trans, s, i);
		if (ret)
			break;
	}
err:
	bch2_trans_put(trans);

	return ret;
}

static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
				       struct ec_stripe_new *s,
				       unsigned block,
				       struct open_bucket *ob)
{
	struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
	if (!ca) {
		s->err = -BCH_ERR_erofs_no_writes;
		return;
	}

	unsigned offset = ca->mi.bucket_size - ob->sectors_free;
	memset(s->new_stripe.data[block] + (offset << 9),
	       0,
	       ob->sectors_free << 9);

	int ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
			ob->bucket * ca->mi.bucket_size + offset,
			ob->sectors_free,
			GFP_KERNEL, 0);

	percpu_ref_put(&ca->io_ref);

	if (ret)
		s->err = ret;
}

void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
{
	if (s->idx)
		bch2_stripe_close(c, s);
	kfree(s);
}
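
/*
 * Rough lifecycle sketch (as implemented below): a new stripe's data buckets
 * are filled by the write path; once they're all written the stripe moves to
 * ec_stripe_new_list, and ec_stripe_create() then computes and writes the
 * p/q blocks, creates the stripe key and rewrites the extents to point into
 * the stripe.
 */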

/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret;

	BUG_ON(s->h->s == s);

	closure_sync(&s->iodone);

	if (!s->err) {
		for (i = 0; i < nr_data; i++)
			if (s->blocks[i]) {
				ob = c->open_buckets + s->blocks[i];

				if (ob->sectors_free)
					zero_out_rest_of_ec_bucket(c, s, i, ob);
			}
	}

	if (s->err) {
		if (!bch2_err_matches(s->err, EROFS))
			bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	if (s->have_existing_stripe) {
		ec_validate_checksums(c, &s->existing_stripe);

		if (ec_do_recov(c, &s->existing_stripe)) {
			bch_err(c, "error creating stripe: error reading existing stripe");
			goto err;
		}

		for (i = 0; i < nr_data; i++)
			if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
				swap(s->new_stripe.data[i],
				     s->existing_stripe.data[i]);

		ec_stripe_buf_exit(&s->existing_stripe);
	}

	BUG_ON(!s->allocated);
	BUG_ON(!s->idx);

	ec_generate_ec(&s->new_stripe);

	ec_generate_checksums(&s->new_stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
	closure_sync(&s->iodone);

	if (ec_nr_failed(&s->new_stripe)) {
		bch_err(c, "error creating stripe: error writing redundancy buckets");
		goto err;
	}

	ret = bch2_trans_do(c, &s->res, NULL,
			    BCH_TRANS_COMMIT_no_check_rw|
			    BCH_TRANS_COMMIT_no_enospc,
			    ec_stripe_key_update(trans,
					bkey_i_to_stripe(&s->new_stripe.key),
					!s->have_existing_stripe));
	bch_err_msg(c, ret, "creating stripe key");
	if (ret)
		goto err;

	ret = ec_stripe_update_extents(c, &s->new_stripe);
	bch_err_msg(c, ret, "error updating extents");
	if (ret)
		goto err;
err:
	bch2_disk_reservation_put(c, &s->res);

	for (i = 0; i < v->nr_blocks; i++)
		if (s->blocks[i]) {
			ob = c->open_buckets + s->blocks[i];

			if (i < nr_data) {
				ob->ec = NULL;
				__bch2_open_bucket_put(c, ob);
			} else {
				bch2_open_bucket_put(c, ob);
			}
		}

	mutex_lock(&c->ec_stripe_new_lock);
	list_del(&s->list);
	mutex_unlock(&c->ec_stripe_new_lock);
	wake_up(&c->ec_stripe_new_wait);

	ec_stripe_buf_exit(&s->existing_stripe);
	ec_stripe_buf_exit(&s->new_stripe);
	closure_debug_destroy(&s->iodone);

	ec_stripe_new_put(c, s, STRIPE_REF_stripe);
}

static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
{
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->ref[STRIPE_REF_io]))
			goto out;
	s = NULL;
out:
	mutex_unlock(&c->ec_stripe_new_lock);

	return s;
}

static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
			struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s;

	while ((s = get_pending_stripe(c)))
		ec_stripe_create(s);

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}

void bch2_ec_do_stripe_creates(struct bch_fs *c)
{
	bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);

	if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}

static void ec_stripe_new_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	lockdep_assert_held(&h->lock);

	BUG_ON(!s->allocated && !s->err);

	h->s		= NULL;
	s->pending	= true;

	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s, STRIPE_REF_io);
}

static void ec_stripe_new_cancel(struct bch_fs *c, struct ec_stripe_head *h, int err)
{
	h->s->err = err;
	ec_stripe_new_set_pending(c, h);
}

void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}

void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	if (!ob)
		return NULL;

	BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);

	struct bch_dev *ca	= ob_dev(c, ob);
	unsigned offset		= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}

static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}

/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(c, ca, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (unsigned i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}

static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}

static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i *k,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size,
			       unsigned disk_label)
{
	struct bkey_i_stripe *s = bkey_stripe_init(k);
	unsigned u64s;

	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->opts.encoded_extent_max >> 9);
	s->v.csum_type			= BCH_CSUM_crc32c;
	s->v.disk_label			= disk_label;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}
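
/*
 * Example of the sizing loop above: each bump of csum_granularity_bits
 * halves the number of checksum granules, shrinking the per-block checksum
 * arrays, so the granularity is coarsened just until the stripe key fits in
 * BKEY_VAL_U64s_MAX u64s.
 */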

static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;

	mutex_init(&s->lock);
	closure_init(&s->iodone, NULL);
	atomic_set(&s->ref[STRIPE_REF_stripe], 1);
	atomic_set(&s->ref[STRIPE_REF_io], 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				BCH_BKEY_PTRS_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	ec_stripe_key_init(c, &s->new_stripe.key,
			   s->nr_data, s->nr_parity,
			   h->blocksize, h->disk_label);

	h->s = s;
	h->nr_created++;
	return 0;
}

static void ec_stripe_head_devs_update(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct bch_devs_mask devs = h->devs;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, h->disk_label
				 ? group_to_target(h->disk_label - 1)
				 : 0);
	unsigned nr_devs = dev_mask_nr(&h->devs);

	for_each_member_device_rcu(c, ca, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(ca->dev_idx, h->devs.d);
	unsigned nr_devs_with_durability = dev_mask_nr(&h->devs);

	h->blocksize = pick_blocksize(c, &h->devs);

	h->nr_active_devs = 0;
	for_each_member_device_rcu(c, ca, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();

	/*
	 * If we only have redundancy + 1 devices, we're better off with just
	 * replication:
	 */
	h->insufficient_devs = h->nr_active_devs < h->redundancy + 2;

	if (h->insufficient_devs) {
		const char *err;

		if (nr_devs < h->redundancy + 2)
			err = NULL;
		else if (nr_devs_with_durability < h->redundancy + 2)
			err = "cannot use durability=0 devices";
		else
			err = "mismatched bucket sizes";

		if (err)
			bch_err(c, "insufficient devices available to create stripe (have %u, need %u): %s",
				h->nr_active_devs, h->redundancy + 2, err);
	}

	struct bch_devs_mask devs_leaving;
	bitmap_andnot(devs_leaving.d, devs.d, h->devs.d, BCH_SB_MEMBERS_MAX);

	if (h->s && !h->s->allocated && dev_mask_nr(&devs_leaving))
		ec_stripe_new_cancel(c, h, -EINTR);

	h->rw_devs_change_count = c->rw_devs_change_count;
}

static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
			 unsigned algo, unsigned redundancy,
			 enum bch_watermark watermark)
{
	struct ec_stripe_head *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	BUG_ON(!mutex_trylock(&h->lock));

	h->disk_label	= disk_label;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->watermark	= watermark;

	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}

void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->nr_data) == h->s->nr_data)
		ec_stripe_new_set_pending(c, h);

	mutex_unlock(&h->lock);
}

static struct ec_stripe_head *
__bch2_ec_stripe_head_get(struct btree_trans *trans,
			  unsigned disk_label,
			  unsigned algo,
			  unsigned redundancy,
			  enum bch_watermark watermark)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	int ret;

	if (!redundancy)
		return NULL;

	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
	if (ret)
		return ERR_PTR(ret);

	if (test_bit(BCH_FS_going_ro, &c->flags)) {
		h = ERR_PTR(-BCH_ERR_erofs_no_writes);
		goto err;
	}

	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->disk_label	== disk_label &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
		    h->watermark	== watermark) {
			ret = bch2_trans_mutex_lock(trans, &h->lock);
			if (ret) {
				h = ERR_PTR(ret);
				goto err;
			}
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, disk_label, algo, redundancy, watermark);
found:
	if (h->rw_devs_change_count != c->rw_devs_change_count)
		ec_stripe_head_devs_update(c, h);

	if (h->insufficient_devs) {
		mutex_unlock(&h->lock);
		h = NULL;
	}
err:
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}
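
/*
 * Stripe heads are cached per (disk_label, algo, redundancy, watermark)
 * tuple: the lookup above reuses an existing head when all four match,
 * allocates a new one otherwise, and re-checks the device list whenever
 * c->rw_devs_change_count has moved on.
 */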

static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
				    enum bch_watermark watermark, struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs = h->devs;
	struct open_bucket *ob;
	struct open_buckets buckets;
	struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
	bool have_cache = true;
	int ret = 0;

	BUG_ON(v->nr_blocks	!= h->s->nr_data + h->s->nr_parity);
	BUG_ON(v->nr_redundant	!= h->s->nr_parity);

	/* We bypass the sector allocator which normally does this: */
	bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX);

	for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
		__clear_bit(v->ptrs[i].dev, devs.d);
		if (i < h->s->nr_data)
			nr_have_data++;
		else
			nr_have_parity++;
	}

	BUG_ON(nr_have_data	> h->s->nr_data);
	BUG_ON(nr_have_parity	> h->s->nr_parity);

	buckets.nr = 0;
	if (nr_have_parity < h->s->nr_parity) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->parity_stripe,
					    &devs,
					    h->s->nr_parity,
					    &nr_have_parity,
					    &have_cache, 0,
					    BCH_DATA_parity,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data + h->s->nr_parity,
					       h->s->nr_data);
			BUG_ON(j >= h->s->nr_data + h->s->nr_parity);

			h->s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	buckets.nr = 0;
	if (nr_have_data < h->s->nr_data) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->block_stripe,
					    &devs,
					    h->s->nr_data,
					    &nr_have_data,
					    &have_cache, 0,
					    BCH_DATA_user,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data, 0);
			BUG_ON(j >= h->s->nr_data);

			h->s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	return 0;
}

static s64 get_existing_stripe(struct bch_fs *c,
			       struct ec_stripe_head *head)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t heap_idx;
	u64 stripe_idx;
	s64 ret = -1;

	if (may_create_new_stripe(c))
		return -1;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (heap_idx = 0; heap_idx < h->nr; heap_idx++) {
		/* No blocks worth reusing, stripe will just be deleted: */
		if (!h->data[heap_idx].blocks_nonempty)
			continue;

		stripe_idx = h->data[heap_idx].idx;

		m = genradix_ptr(&c->stripes, stripe_idx);

		if (m->disk_label	== head->disk_label &&
		    m->algorithm	== head->algo &&
		    m->nr_redundant	== head->redundancy &&
		    m->sectors		== head->blocksize &&
		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant &&
		    bch2_try_open_stripe(c, head->s, stripe_idx)) {
			ret = stripe_idx;
			break;
		}
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
	return ret;
}

static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
{
	struct bch_fs *c = trans->c;
	struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
	struct bch_stripe *existing_v;
	unsigned i;
	s64 idx;
	int ret;

	/*
	 * If we can't allocate a new stripe, and there's no stripes with empty
	 * blocks for us to reuse, that means we have to wait on copygc:
	 */
	idx = get_existing_stripe(c, h);
	if (idx < 0)
		return -BCH_ERR_stripe_alloc_blocked;

	ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
	bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c,
			     "reading stripe key: %s", bch2_err_str(ret));
	if (ret) {
		bch2_stripe_close(c, h->s);
		return ret;
	}

	existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;

	BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
	h->s->nr_data = existing_v->nr_blocks -
		existing_v->nr_redundant;

	ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
	if (ret) {
		bch2_stripe_close(c, h->s);
		return ret;
	}

	BUG_ON(h->s->existing_stripe.size != h->blocksize);
	BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));

	/*
	 * Free buckets we initially allocated - they might conflict with
	 * blocks from the stripe we're reusing:
	 */
	for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
		bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
		h->s->blocks[i] = 0;
	}
	memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
	memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));

	for (i = 0; i < existing_v->nr_blocks; i++) {
		if (stripe_blockcount_get(existing_v, i)) {
			__set_bit(i, h->s->blocks_gotten);
			__set_bit(i, h->s->blocks_allocated);
		}

		ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
	}

	bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
	h->s->have_existing_stripe = true;

	return 0;
}

static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos min_pos = POS(0, 1);
	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
	int ret;

	if (!h->s->res.sectors) {
		ret = bch2_disk_reservation_get(c, &h->s->res,
					h->blocksize,
					h->s->nr_parity,
					BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			return ret;
	}

	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
				     BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
			if (start_pos.offset) {
				start_pos = min_pos;
				bch2_btree_iter_set_pos(&iter, start_pos);
				continue;
			}

			ret = -BCH_ERR_ENOSPC_stripe_create;
			break;
		}

		if (bkey_deleted(k.k) &&
		    bch2_try_open_stripe(c, h->s, k.k->p.offset))
			break;
	}

	c->ec_stripe_hint = iter.pos.offset;

	if (ret)
		goto err;

	ret = ec_stripe_mem_alloc(trans, &iter);
	if (ret) {
		bch2_stripe_close(c, h->s);
		goto err;
	}

	h->s->new_stripe.key.k.p = iter.pos;
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
err:
	bch2_disk_reservation_put(c, &h->s->res);
	goto out;
}

static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos min_pos = POS(0, 1);
	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
	int ret;

	if (!h->s->res.sectors) {
		ret = bch2_disk_reservation_get(c, &h->s->res,
						h->blocksize,
						h->s->nr_parity,
						BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			return ret;
	}

	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
				     BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
			if (start_pos.offset) {
				start_pos = min_pos;
				bch2_btree_iter_set_pos(&iter, start_pos);
				continue;
			}

			ret = -BCH_ERR_ENOSPC_stripe_create;
			break;
		}

		if (bkey_deleted(k.k) &&
		    bch2_try_open_stripe(c, h->s, k.k->p.offset))
			break;
	}

	c->ec_stripe_hint = iter.pos.offset;

	if (ret)
		goto err;

	ret = ec_stripe_mem_alloc(trans, &iter);
	if (ret) {
		bch2_stripe_close(c, h->s);
		goto err;
	}

	h->s->new_stripe.key.k.p = iter.pos;
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
err:
	bch2_disk_reservation_put(c, &h->s->res);
	goto out;
}
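
/*
 * The slot search above is a classic hint + wrap-around scan: start at the
 * cached hint, scan upward, and on hitting the end of the allowed range
 * (stripe indices are 32 bit, hence the POS(0, U32_MAX) limit) restart once
 * from the bottom. A minimal sketch of the same pattern over a plain array,
 * purely illustrative:
 *
 *	static int find_free_slot(const bool *used, unsigned nr, unsigned hint)
 *	{
 *		for (unsigned pass = 0; pass < 2; pass++, hint = 0)
 *			for (unsigned i = hint; i < nr; i++)
 *				if (!used[i])
 *					return i;
 *		return -1;	// caller maps this to ENOSPC
 *	}
 */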

struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
					       enum bch_watermark watermark,
					       struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	bool waiting = false;
	unsigned disk_label = 0;
	struct target t = target_decode(target);
	int ret;

	if (t.type == TARGET_GROUP) {
		if (t.group > U8_MAX) {
			bch_err(c, "cannot create a stripe when disk_label > U8_MAX");
			return NULL;
		}
		disk_label = t.group + 1; /* 0 == no label */
	}

	h = __bch2_ec_stripe_head_get(trans, disk_label, algo, redundancy, watermark);
	if (IS_ERR_OR_NULL(h))
		return h;

	if (!h->s) {
		ret = ec_new_stripe_alloc(c, h);
		if (ret) {
			bch_err(c, "failed to allocate new stripe");
			goto err;
		}
	}

	if (h->s->allocated)
		goto allocated;

	if (h->s->have_existing_stripe)
		goto alloc_existing;

	/* First, try to allocate a full stripe: */
	ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
		__bch2_ec_stripe_head_reserve(trans, h);
	if (!ret)
		goto allocate_buf;
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
	    bch2_err_matches(ret, ENOMEM))
		goto err;

	/*
	 * Not enough buckets available for a full stripe: we must reuse an
	 * existing stripe:
	 */
	while (1) {
		ret = __bch2_ec_stripe_head_reuse(trans, h);
		if (!ret)
			break;
		if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
			goto err;

		if (watermark == BCH_WATERMARK_copygc) {
			ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
				__bch2_ec_stripe_head_reserve(trans, h);
			if (ret)
				goto err;
			goto allocate_buf;
		}

		/* XXX freelist_wait? */
		closure_wait(&c->freelist_wait, cl);
		waiting = true;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc_existing:
	/*
	 * Retry allocating buckets, with the watermark for this
	 * particular write:
	 */
	ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
	if (ret)
		goto err;

allocate_buf:
	ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
	if (ret)
		goto err;

	h->s->allocated = true;
allocated:
	BUG_ON(!h->s->idx);
	BUG_ON(!h->s->new_stripe.data[0]);
	BUG_ON(trans->restarted);
	return h;
err:
	bch2_ec_stripe_head_put(c, h);
	return ERR_PTR(ret);
}
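
/*
 * Hedged usage sketch (invented caller, error handling trimmed; the real
 * callers live in the foreground allocator): take a stripe head, consume an
 * unallocated data block from it, then drop the reference:
 *
 *	struct ec_stripe_head *h =
 *		bch2_ec_stripe_head_get(trans, target, 0, redundancy,
 *					watermark, cl);
 *	if (IS_ERR_OR_NULL(h))
 *		return h ? PTR_ERR(h) : 0;	// NULL: EC not possible here
 *
 *	// ... pick an unallocated data block from h->s, stash its open bucket
 *
 *	bch2_ec_stripe_head_put(c, h);
 */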

/* device removal */

static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_s_c k_a)
{
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k_a, &a_convert);

	if (!a->stripe)
		return 0;

	if (a->stripe_sectors) {
		bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data");
		return -BCH_ERR_invalidate_stripe_to_dev;
	}

	struct btree_iter iter;
	struct bkey_i_stripe *s =
		bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe),
					BTREE_ITER_slots, stripe);
	int ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		return ret;

	struct disk_accounting_pos acc = {
		.type = BCH_DISK_ACCOUNTING_replicas,
	};

	s64 sectors = 0;
	for (unsigned i = 0; i < s->v.nr_blocks; i++)
		sectors -= stripe_blockcount_get(&s->v, i);

	bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
	acc.replicas.data_type = BCH_DATA_user;
	ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
	if (ret)
		goto err;

	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(&s->k_i));
	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == k_a.k->p.inode)
			ptr->dev = BCH_SB_MEMBER_INVALID;

	sectors = -sectors;

	bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
	acc.replicas.data_type = BCH_DATA_user;
	ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx)
{
	return bch2_trans_run(c,
		for_each_btree_key_upto_commit(trans, iter,
				BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX),
				BTREE_ITER_intent, k,
				NULL, NULL, 0, ({
			bch2_invalidate_stripe_to_dev(trans, k);
		})));
}
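
/*
 * The paired bch2_disk_accounting_mod() calls above implement a replicas
 * entry move: subtract the stripe's live sectors from the accounting entry
 * describing the old device set, mark the dying device's pointer
 * BCH_SB_MEMBER_INVALID, then add the same sectors back under the new,
 * degraded replicas entry. Schematically (hypothetical 4+2 stripe):
 *
 *	old entry: user 4+2 on devs {0,1,2,3,4,5}	-> -sectors
 *	new entry: user 4+2 on devs {0,1,2,3,4,inv}	-> +sectors
 *
 * Total accounted sectors are unchanged; only the membership changes.
 */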

/* startup/shutdown */

static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		if (!ca)
			goto found;

		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
			if (!h->s->blocks[i])
				continue;

			ob = c->open_buckets + h->s->blocks[i];
			if (ob->dev == ca->dev_idx)
				goto found;
		}
		goto unlock;
found:
		ec_stripe_new_cancel(c, h, -BCH_ERR_erofs_no_writes);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}

void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	__bch2_ec_stop(c, ca);
}

void bch2_fs_ec_stop(struct bch_fs *c)
{
	__bch2_ec_stop(c, NULL);
}

static bool bch2_fs_ec_flush_done(struct bch_fs *c)
{
	bool ret;

	mutex_lock(&c->ec_stripe_new_lock);
	ret = list_empty(&c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	return ret;
}

void bch2_fs_ec_flush(struct bch_fs *c)
{
	wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
}

int bch2_stripes_read(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
				   BTREE_ITER_prefetch, k, ({
			if (k.k->type != KEY_TYPE_stripe)
				continue;

			ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
			if (ret)
				break;

			struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);

			stripe_to_mem(m, bkey_s_c_to_stripe(k).v);

			bch2_stripes_heap_insert(c, m, k.k->p.offset);
			0;
		})));
	bch_err_fn(c, ret);
	return ret;
}

void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (i = 0; i < min_t(size_t, h->nr, 50); i++) {
		m = genradix_ptr(&c->stripes, h->data[i].idx);

		prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
			   h->data[i].blocks_nonempty,
			   m->nr_blocks - m->nr_redundant,
			   m->nr_redundant);
		if (bch2_stripe_is_open(c, h->data[i].idx))
			prt_str(out, " open");
		prt_newline(out);
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
}

static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c,
				    struct ec_stripe_new *s)
{
	prt_printf(out, "\tidx %llu blocks %u+%u allocated %u ref %u %u %s obs",
		   s->idx, s->nr_data, s->nr_parity,
		   bitmap_weight(s->blocks_allocated, s->nr_data),
		   atomic_read(&s->ref[STRIPE_REF_io]),
		   atomic_read(&s->ref[STRIPE_REF_stripe]),
		   bch2_watermarks[s->h->watermark]);

	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i;
	for_each_set_bit(i, s->blocks_gotten, v->nr_blocks)
		prt_printf(out, " %u", s->blocks[i]);
	prt_newline(out);
	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&s->new_stripe.key));
	prt_newline(out);
}

void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		prt_printf(out, "disk label %u algo %u redundancy %u %s nr created %llu:\n",
			   h->disk_label, h->algo, h->redundancy,
			   bch2_watermarks[h->watermark],
			   h->nr_created);

		if (h->s)
			bch2_new_stripe_to_text(out, c, h->s);
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	prt_printf(out, "in flight:\n");

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list)
		bch2_new_stripe_to_text(out, c, s);
	mutex_unlock(&c->ec_stripe_new_lock);
}

void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;
	unsigned i;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_first_entry_or_null(&c->ec_stripe_head_list,
					     struct ec_stripe_head, list);
		if (h)
			list_del(&h->list);
		mutex_unlock(&c->ec_stripe_head_lock);
		if (!h)
			break;

		if (h->s) {
			for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
				BUG_ON(h->s->blocks[i]);

			kfree(h->s);
		}
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes);
	bioset_exit(&c->ec_bioset);
}

void bch2_fs_ec_init_early(struct bch_fs *c)
{
	spin_lock_init(&c->ec_stripes_new_lock);
	mutex_init(&c->ec_stripes_heap_lock);

	INIT_LIST_HEAD(&c->ec_stripe_head_list);
	mutex_init(&c->ec_stripe_head_lock);

	INIT_LIST_HEAD(&c->ec_stripe_new_list);
	mutex_init(&c->ec_stripe_new_lock);
	init_waitqueue_head(&c->ec_stripe_new_wait);

	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
}

int bch2_fs_ec_init(struct bch_fs *c)
{
	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}
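
/*
 * Note on the bioset above (illustrative): ec_bioset is created with
 * offsetof(struct ec_bio, bio) bytes of front padding, so a bio allocated
 * from it is the tail member of a struct ec_bio and the container can be
 * recovered in the completion handler. A minimal sketch of the pattern,
 * assuming a bio allocated with bio_alloc_bioset() from this set -
 * example_endio() is a hypothetical name:
 *
 *	static void example_endio(struct bio *bio)
 *	{
 *		struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
 *
 *		// ec_bio->ca, ec_bio->buf and ec_bio->idx are now accessible
 *	}
 */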