// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "inode.h"
#include "movinggc.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/preempt.h>

void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
	for (unsigned i = 0; i < BCH_DATA_NR; i++)
		usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets);
}

void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
{
	memset(usage, 0, sizeof(*usage));
	acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage,
			sizeof(struct bch_dev_usage_full) / sizeof(u64));
}

static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		percpu_u64_get(&c->usage->hidden);

	data		= percpu_u64_get(&c->usage->data) +
		percpu_u64_get(&c->usage->btree);
	reserved	= percpu_u64_get(&c->usage->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= percpu_u64_get(&c->usage->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *out,
			    struct bch_dev *ca,
			    struct bch_dev_usage_full *usage)
{
	if (out->nr_tabstops < 5) {
		printbuf_tabstops_reset(out);
		printbuf_tabstop_push(out, 12);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
	}

	prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");

	for (unsigned i = 0; i < BCH_DATA_NR; i++) {
		bch2_prt_data_type(out, i);
		prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
			   usage->d[i].buckets,
			   usage->d[i].sectors,
			   usage->d[i].fragmented);
	}

	prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets);
}

static int bch2_check_fix_ptr(struct btree_trans *trans,
			      struct bkey_s_c k,
			      struct extent_ptr_decoded p,
			      const union bch_extent_entry *entry,
			      bool *do_update)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (!ca) {
		if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
				trans, ptr_to_invalid_device,
				"pointer to missing device %u\n"
				"while marking %s",
				p.ptr.dev,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		return 0;
	}

	struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
	if (!g) {
		if (fsck_err(trans, ptr_to_invalid_device,
			     "pointer to invalid bucket on device %u\n"
			     "while marking %s",
			     p.ptr.dev,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		goto out;
	}
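	/*
	 * Note: the checks below only validate a single pointer against the
	 * in-memory GC state, repairing the GC bucket where that's safe and
	 * setting *do_update otherwise; bch2_check_fix_ptrs() further down is
	 * the driver that actually rewrites and reinserts the key.
	 */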
	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);

	if (fsck_err_on(!g->gen_valid,
			trans, ptr_to_missing_alloc_key,
			"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached) {
			g->gen_valid = true;
			g->gen = p.ptr.gen;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached &&
		    (g->data_type != BCH_DATA_btree ||
		     data_type == BCH_DATA_btree)) {
			g->gen_valid = true;
			g->gen = p.ptr.gen;
			g->data_type = 0;
			g->stripe_sectors = 0;
			g->dirty_sectors = 0;
			g->cached_sectors = 0;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
			trans, stale_dirty_ptr,
			"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
		goto out;

	if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
			trans, ptr_bucket_data_type_mismatch,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type),
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (data_type == BCH_DATA_btree) {
			g->gen_valid = true;
			g->gen = p.ptr.gen;
			g->data_type = data_type;
			g->stripe_sectors = 0;
			g->dirty_sectors = 0;
			g->cached_sectors = 0;
		} else {
			*do_update = true;
		}
	}

	if (p.has_ec) {
		struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);

		if (fsck_err_on(!m || !m->alive,
				trans, ptr_to_missing_stripe,
				"pointer to nonexistent stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;

		if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
				trans, ptr_to_incorrect_stripe,
				"pointer does not match stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

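/*
 * Bucket generation numbers are 8 bits and wrap: comparisons throughout this
 * file go through gen_cmp()/gen_after() rather than plain relational
 * operators. A minimal sketch of the idea, assuming the helpers in buckets.h
 * look roughly like this:
 *
 *	static inline int gen_cmp(u8 a, u8 b)	{ return (s8) (a - b); }
 *
 * i.e. subtraction in u8 reinterpreted as signed, so e.g. gen_cmp(1, 255) > 0
 * and gen 1 is correctly treated as newer than gen 255 after wraparound.
 */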
%llu\n" 247 "while marking %s", 248 (u64) p.ec.idx, 249 (printbuf_reset(&buf), 250 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 251 *do_update = true; 252 } 253 out: 254 fsck_err: 255 bch2_dev_put(ca); 256 printbuf_exit(&buf); 257 return ret; 258 } 259 260 int bch2_check_fix_ptrs(struct btree_trans *trans, 261 enum btree_id btree, unsigned level, struct bkey_s_c k, 262 enum btree_iter_update_trigger_flags flags) 263 { 264 struct bch_fs *c = trans->c; 265 struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k); 266 const union bch_extent_entry *entry_c; 267 struct extent_ptr_decoded p = { 0 }; 268 bool do_update = false; 269 struct printbuf buf = PRINTBUF; 270 int ret = 0; 271 272 bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) { 273 ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update); 274 if (ret) 275 goto err; 276 } 277 278 if (do_update) { 279 if (flags & BTREE_TRIGGER_is_root) { 280 bch_err(c, "cannot update btree roots yet"); 281 ret = -EINVAL; 282 goto err; 283 } 284 285 struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); 286 ret = PTR_ERR_OR_ZERO(new); 287 if (ret) 288 goto err; 289 290 rcu_read_lock(); 291 bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev)); 292 rcu_read_unlock(); 293 294 if (level) { 295 /* 296 * We don't want to drop btree node pointers - if the 297 * btree node isn't there anymore, the read path will 298 * sort it out: 299 */ 300 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); 301 rcu_read_lock(); 302 bkey_for_each_ptr(ptrs, ptr) { 303 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev); 304 struct bucket *g = PTR_GC_BUCKET(ca, ptr); 305 306 ptr->gen = g->gen; 307 } 308 rcu_read_unlock(); 309 } else { 310 struct bkey_ptrs ptrs; 311 union bch_extent_entry *entry; 312 313 rcu_read_lock(); 314 restart_drop_ptrs: 315 ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); 316 bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) { 317 struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev); 318 struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr); 319 enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry); 320 321 if ((p.ptr.cached && 322 (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) || 323 (!p.ptr.cached && 324 gen_cmp(p.ptr.gen, g->gen) < 0) || 325 gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX || 326 (g->data_type && 327 g->data_type != data_type)) { 328 bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr); 329 goto restart_drop_ptrs; 330 } 331 } 332 rcu_read_unlock(); 333 again: 334 ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); 335 bkey_extent_entry_for_each(ptrs, entry) { 336 if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) { 337 struct gc_stripe *m = genradix_ptr(&c->gc_stripes, 338 entry->stripe_ptr.idx); 339 union bch_extent_entry *next_ptr; 340 341 bkey_extent_entry_for_each_from(ptrs, next_ptr, entry) 342 if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr) 343 goto found; 344 next_ptr = NULL; 345 found: 346 if (!next_ptr) { 347 bch_err(c, "aieee, found stripe ptr with no data ptr"); 348 continue; 349 } 350 351 if (!m || !m->alive || 352 !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block], 353 &next_ptr->ptr, 354 m->sectors)) { 355 bch2_bkey_extent_entry_drop(new, entry); 356 goto again; 357 } 358 } 359 } 360 } 361 362 if (0) { 363 printbuf_reset(&buf); 364 bch2_bkey_val_to_text(&buf, c, k); 365 bch_info(c, "updated %s", buf.buf); 366 367 printbuf_reset(&buf); 368 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new)); 369 bch_info(c, "new key %s", buf.buf); 370 } 371 372 struct btree_iter iter; 
		bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
					  BTREE_ITER_intent|BTREE_ITER_all_snapshots);
		ret = bch2_btree_iter_traverse(trans, &iter) ?:
			bch2_trans_update(trans, &iter, new,
					  BTREE_UPDATE_internal_snapshot_node|
					  BTREE_TRIGGER_norun);
		bch2_trans_iter_exit(trans, &iter);
		if (ret)
			goto err;

		if (level)
			bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
	}
err:
	printbuf_exit(&buf);
	return ret;
}

static int bucket_ref_update_err(struct btree_trans *trans, struct printbuf *buf,
				 struct bkey_s_c k, bool insert, enum bch_sb_error_id id)
{
	struct bch_fs *c = trans->c;
	bool repeat = false, print = true, suppress = false;

	prt_printf(buf, "\nwhile marking ");
	bch2_bkey_val_to_text(buf, c, k);
	prt_newline(buf);

	__bch2_count_fsck_err(c, id, buf->buf, &repeat, &print, &suppress);

	int ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);

	if (insert) {
		print = true;
		suppress = false;

		bch2_trans_updates_to_text(buf, trans);
		__bch2_inconsistent_error(c, buf);
		ret = -BCH_ERR_bucket_ref_update;
	}

	if (suppress)
		prt_printf(buf, "Ratelimiting new instances of previous error\n");
	if (print)
		bch2_print_string_as_lines(KERN_ERR, buf->buf);
	return ret;
}

int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
			   struct bkey_s_c k,
			   const struct bch_extent_ptr *ptr,
			   s64 sectors, enum bch_data_type ptr_data_type,
			   u8 b_gen, u8 bucket_data_type,
			   u32 *bucket_sectors)
{
	struct bch_fs *c = trans->c;
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	struct printbuf buf = PRINTBUF;
	bool inserting = sectors > 0;
	int ret = 0;

	BUG_ON(!sectors);

	if (unlikely(gen_after(ptr->gen, b_gen))) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			   "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen",
			   ptr->dev, bucket_nr, b_gen,
			   bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			   ptr->gen);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen);
		goto out;
	}

	if (unlikely(gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX)) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			   "bucket %u:%zu gen %u data type %s: ptr gen %u too stale",
			   ptr->dev, bucket_nr, b_gen,
			   bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			   ptr->gen);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_ptr_too_stale);
		goto out;
	}

	if (b_gen != ptr->gen && ptr->cached) {
		ret = 1;
		goto out;
	}

	if (unlikely(b_gen != ptr->gen)) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			   "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)",
			   ptr->dev, bucket_nr, b_gen,
			   bucket_gen_get(ca, bucket_nr),
			   bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			   ptr->gen);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_stale_dirty_ptr);
		goto out;
	}

	if (unlikely(bucket_data_type_mismatch(bucket_data_type, ptr_data_type))) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf, "bucket %u:%zu gen %u different types of data in same bucket: %s, %s",
			   ptr->dev, bucket_nr, b_gen,
			   bch2_data_type_str(bucket_data_type),
			   bch2_data_type_str(ptr_data_type));

		ret =
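		/*
		 * Note: returning 1 above for a stale cached pointer is not an
		 * error; callers (see __trigger_extent()) treat it as "this
		 * pointer no longer counts" and skip the accounting for it.
		 */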
		      bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_ptr_bucket_data_type_mismatch);
		goto out;
	}

	if (unlikely((u64) *bucket_sectors + sectors > U32_MAX)) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			   "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX",
			   ptr->dev, bucket_nr, b_gen,
			   bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			   *bucket_sectors, sectors);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_bucket_sector_count_overflow);
		sectors = -*bucket_sectors;
		goto out;
	}

	*bucket_sectors += sectors;
out:
	printbuf_exit(&buf);
	return ret;
}

void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	static int warned_disk_usage = 0;
	bool warn = false;

	percpu_down_read(&c->mark_lock);
	struct bch_fs_usage_base *src = &trans->fs_usage_delta;

	s64 added = src->btree + src->data + src->reserved;

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	s64 should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new;

		old = atomic64_read(&c->sectors_available);
		do {
			new = max_t(s64, 0, old - should_not_have_added);
		} while (!atomic64_try_cmpxchg(&c->sectors_available,
					       &old, new));

		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	preempt_disable();
	struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
	acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64));
	preempt_enable();
	percpu_up_read(&c->mark_lock);

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		bch2_trans_inconsistent(trans,
					"disk usage increased %lli more than %llu sectors reserved",
					should_not_have_added, disk_res_sectors);
}

/* KEY_TYPE_extent: */

static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
			  struct bkey_s_c k,
			  const struct extent_ptr_decoded *p,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  struct bch_alloc_v4 *a,
			  bool insert)
{
	u32 *dst_sectors = p->has_ec	? &a->stripe_sectors
		: !p->ptr.cached	? &a->dirty_sectors
					: &a->cached_sectors;
	int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
					 a->gen, a->data_type, dst_sectors);

	if (ret)
		return ret;
	if (insert)
		alloc_data_type_set(a, ptr_data_type);
	return 0;
}

static int bch2_trigger_pointer(struct btree_trans *trans,
				enum btree_id btree_id, unsigned level,
				struct bkey_s_c k, struct extent_ptr_decoded p,
				const union bch_extent_entry *entry,
				s64 *sectors,
				enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	bool insert = !(flags & BTREE_TRIGGER_overwrite);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bkey_i_backpointer bp;
	bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp);

	*sectors = insert ? bp.v.bucket_len : -(s64) bp.v.bucket_len;
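
	/*
	 * One pointer, two trigger paths: under BTREE_TRIGGER_transactional
	 * we update the alloc key and backpointer through the btree; under
	 * BTREE_TRIGGER_gc we update the in-memory GC bucket directly. Both
	 * funnel the sector delta through __mark_pointer() above, which picks
	 * stripe_sectors, dirty_sectors or cached_sectors as the destination.
	 */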
	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (unlikely(!ca)) {
		if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
			ret = -BCH_ERR_trigger_pointer;
		goto err;
	}

	struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);

	if (flags & BTREE_TRIGGER_transactional) {
		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
		ret = PTR_ERR_OR_ZERO(a) ?:
			__mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert);
		if (ret)
			goto err;

		ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert);
		if (ret)
			goto err;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
					    p.ptr.dev,
					    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			ret = -BCH_ERR_trigger_pointer;
			goto err;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert);
		alloc_to_bucket(g, new);
		bucket_unlock(g);

		if (!ret)
			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
				   struct bkey_s_c k,
				   struct extent_ptr_decoded p,
				   enum bch_data_type data_type,
				   s64 sectors,
				   enum btree_iter_update_trigger_flags flags)
{
	if (flags & BTREE_TRIGGER_transactional) {
		struct btree_iter iter;
		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
								  BTREE_ID_stripes, POS(0, p.ec.idx),
								  BTREE_ITER_with_updates, stripe);
		int ret = PTR_ERR_OR_ZERO(s);
		if (unlikely(ret)) {
			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
						   "pointer to nonexistent stripe %llu",
						   (u64) p.ec.idx);
			goto err;
		}

		if (!bch2_ptr_matches_stripe(&s->v, p)) {
			bch2_trans_inconsistent(trans,
						"stripe pointer doesn't match stripe %llu",
						(u64) p.ec.idx);
			ret = -BCH_ERR_trigger_stripe_pointer;
			goto err;
		}

		stripe_blockcount_set(&s->v, p.ec.block,
				      stripe_blockcount_get(&s->v, p.ec.block) +
				      sectors);

		struct disk_accounting_pos acc;
		memset(&acc, 0, sizeof(acc));
		acc.type = BCH_DISK_ACCOUNTING_replicas;
		bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
		acc.replicas.data_type = data_type;
		ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
err:
		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bch_fs *c = trans->c;

		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				(u64) p.ec.idx);
			return -BCH_ERR_ENOMEM_mark_stripe_ptr;
		}

		gc_stripe_lock(m);

		if (!m || !m->alive) {
			gc_stripe_unlock(m);
			struct printbuf buf = PRINTBUF;
			bch2_log_msg_start(c, &buf);
			prt_printf(&buf, "pointer to nonexistent stripe %llu\n while marking ",
				   (u64) p.ec.idx);
			bch2_bkey_val_to_text(&buf, c, k);
			__bch2_inconsistent_error(c, &buf);
			bch2_print_string_as_lines(KERN_ERR, buf.buf);
			printbuf_exit(&buf);
			return -BCH_ERR_trigger_stripe_pointer;
		}

		m->block_sectors[p.ec.block] += sectors;

		struct
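		/*
		 * Copy the replicas entry out under the stripe lock, then
		 * drop the lock before calling into the accounting code: the
		 * disk accounting update doesn't need the stripe to stay
		 * pinned.
		 */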
		       disk_accounting_pos acc;
		memset(&acc, 0, sizeof(acc));
		acc.type = BCH_DISK_ACCOUNTING_replicas;
		unsafe_memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e), "VLA");
		gc_stripe_unlock(m);

		acc.replicas.data_type = data_type;
		int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
		if (ret)
			return ret;
	}

	return 0;
}

static int __trigger_extent(struct btree_trans *trans,
			    enum btree_id btree_id, unsigned level,
			    struct bkey_s_c k,
			    enum btree_iter_update_trigger_flags flags,
			    s64 *replicas_sectors)
{
	bool gc = flags & BTREE_TRIGGER_gc;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	int ret = 0;

	struct disk_accounting_pos acc_replicas_key;
	memset(&acc_replicas_key, 0, sizeof(acc_replicas_key));
	acc_replicas_key.type = BCH_DISK_ACCOUNTING_replicas;
	acc_replicas_key.replicas.data_type	= data_type;
	acc_replicas_key.replicas.nr_devs	= 0;
	acc_replicas_key.replicas.nr_required	= 1;

	unsigned cur_compression_type = 0;
	u64 compression_acct[3] = { 1, 0, 0 };

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = 0;
		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
		if (ret < 0)
			return ret;

		bool stale = ret > 0;

		if (p.ptr.cached && stale)
			continue;

		if (p.ptr.cached) {
			ret = bch2_mod_dev_cached_sectors(trans, p.ptr.dev, disk_sectors, gc);
			if (ret)
				return ret;
		} else if (!p.has_ec) {
			*replicas_sectors += disk_sectors;
			replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
		} else {
			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			acc_replicas_key.replicas.nr_required = 0;
		}

		if (cur_compression_type &&
		    cur_compression_type != p.crc.compression_type) {
			if (flags & BTREE_TRIGGER_overwrite)
				bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

			ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
							compression, cur_compression_type);
			if (ret)
				return ret;

			compression_acct[0] = 1;
			compression_acct[1] = 0;
			compression_acct[2] = 0;
		}

		cur_compression_type = p.crc.compression_type;
		if (p.crc.compression_type) {
			compression_acct[1] += p.crc.uncompressed_size;
			compression_acct[2] += p.crc.compressed_size;
		}
	}

	if (acc_replicas_key.replicas.nr_devs) {
		ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) {
		ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, snapshot, k.k->p.snapshot);
		if (ret)
			return ret;
	}

	if (cur_compression_type) {
		if (flags & BTREE_TRIGGER_overwrite)
			bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

		ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
						compression, cur_compression_type);
		if (ret)
			return ret;
	}

	if (level) {
		ret =
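		/*
		 * compression_acct[] above accumulates
		 * { nr_extents, uncompressed_sectors, compressed_sectors }
		 * per compression type, flushed each time the type changes
		 * between pointers; interior nodes account to the per-btree
		 * counter below, leaf extents to the per-inode counter.
		 */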
		      bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, btree, btree_id);
		if (ret)
			return ret;
	} else {
		bool insert = !(flags & BTREE_TRIGGER_overwrite);

		s64 v[3] = {
			insert ? 1 : -1,
			insert ? k.k->size : -((s64) k.k->size),
			*replicas_sectors,
		};
		ret = bch2_disk_accounting_mod2(trans, gc, v, inum, k.k->p.inode);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trigger_extent(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
	unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);

	/* if pointers aren't changing - nothing to do: */
	if (new_ptrs_bytes == old_ptrs_bytes &&
	    !memcmp(new_ptrs.start,
		    old_ptrs.start,
		    new_ptrs_bytes))
		return 0;

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 old_replicas_sectors = 0, new_replicas_sectors = 0;

		if (old.k->type) {
			int ret = __trigger_extent(trans, btree, level, old,
						   flags & ~BTREE_TRIGGER_insert,
						   &old_replicas_sectors);
			if (ret)
				return ret;
		}

		if (new.k->type) {
			int ret = __trigger_extent(trans, btree, level, new.s_c,
						   flags & ~BTREE_TRIGGER_overwrite,
						   &new_replicas_sectors);
			if (ret)
				return ret;
		}

		int need_rebalance_delta = 0;
		s64 need_rebalance_sectors_delta[1] = { 0 };

		s64 s = bch2_bkey_sectors_need_rebalance(c, old);
		need_rebalance_delta -= s != 0;
		need_rebalance_sectors_delta[0] -= s;

		s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
		need_rebalance_delta += s != 0;
		need_rebalance_sectors_delta[0] += s;

		if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
			int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
							      new.k->p, need_rebalance_delta > 0);
			if (ret)
				return ret;
		}

		if (need_rebalance_sectors_delta[0]) {
			int ret = bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
							    need_rebalance_sectors_delta, rebalance_work);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* KEY_TYPE_reservation */

static int __trigger_reservation(struct btree_trans *trans,
				 enum btree_id btree_id, unsigned level, struct bkey_s_c k,
				 enum btree_iter_update_trigger_flags flags)
{
	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 sectors[1] = { k.k->size };

		if (flags & BTREE_TRIGGER_overwrite)
			sectors[0] = -sectors[0];

		return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc, sectors,
						 persistent_reserved, bkey_s_c_to_reservation(k).v->nr_replicas);
	}

	return 0;
}

int bch2_trigger_reservation(struct btree_trans *trans,
			     enum btree_id btree_id, unsigned level,
			     struct bkey_s_c old, struct bkey_s new,
			     enum btree_iter_update_trigger_flags flags)
{
	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}

/* Mark superblocks: */

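/*
 * Superblock and journal buckets aren't covered by keys in the extents or
 * btree node trees, so they're marked explicitly here: through an alloc
 * btree update in the transactional case, or directly against the in-memory
 * GC buckets under BTREE_TRIGGER_gc.
 */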
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
					     struct bch_dev *ca, u64 b,
					     enum bch_data_type type,
					     unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	int ret = 0;

	struct bkey_i_alloc_v4 *a =
		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (a->v.data_type && type && a->v.data_type != type) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, bucket_metadata_type_mismatch,
			     "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			     "while marking %s",
			     iter.pos.inode, iter.pos.offset, a->v.gen,
			     bch2_data_type_str(a->v.data_type),
			     bch2_data_type_str(type),
			     bch2_data_type_str(type));
		ret = -BCH_ERR_metadata_bucket_inconsistency;
		goto err;
	}

	if (a->v.data_type != type ||
	    a->v.dirty_sectors != sectors) {
		a->v.data_type = type;
		a->v.dirty_sectors = sectors;
		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca,
				     u64 b, enum bch_data_type data_type, unsigned sectors,
				     enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	struct bucket *g = gc_bucket(ca, b);
	if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
				    ca->dev_idx, bch2_data_type_str(data_type)))
		goto err;

	bucket_lock(g);
	struct bch_alloc_v4 old = bucket_m_to_alloc(*g);

	if (bch2_fs_inconsistent_on(g->data_type &&
				    g->data_type != data_type, c,
				    "different types of data in same bucket: %s, %s",
				    bch2_data_type_str(g->data_type),
				    bch2_data_type_str(data_type)))
		goto err_unlock;

	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
				    "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
				    ca->dev_idx, b, g->gen,
				    bch2_data_type_str(g->data_type ?: data_type),
				    g->dirty_sectors, sectors))
		goto err_unlock;

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
	bucket_unlock(g);
	ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	return ret;
err_unlock:
	bucket_unlock(g);
err:
	return -BCH_ERR_metadata_bucket_inconsistency;
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, u64 b,
				    enum bch_data_type type, unsigned sectors,
				    enum btree_iter_update_trigger_flags flags)
{
	BUG_ON(type != BCH_DATA_free &&
	       type != BCH_DATA_sb &&
	       type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	if (flags & BTREE_TRIGGER_gc)
		return bch2_mark_metadata_bucket(trans, ca, b, type, sectors, flags);
	else if (flags & BTREE_TRIGGER_transactional)
		return commit_do(trans, NULL, NULL, 0,
				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
	else
		BUG();
}

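/*
 * Walk a sector range, flushing accumulated sectors to
 * bch2_trans_mark_metadata_bucket() each time the range crosses into a new
 * bucket; *bucket and *bucket_sectors carry state between calls so adjacent
 * ranges (e.g. consecutive superblock copies) coalesce into one update per
 * bucket.
 */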
static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
					    struct bch_dev *ca, u64 start, u64 end,
					    enum bch_data_type type,
					    u64 *bucket, unsigned *bucket_sectors,
					    enum btree_iter_update_trigger_flags flags)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
								  type, *bucket_sectors, flags);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket = b;
		*bucket_sectors += sectors;
		start += sectors;
	} while (start < end);

	return 0;
}

static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
				    enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;

	mutex_lock(&c->sb_lock);
	struct bch_sb_layout layout = ca->disk_sb.sb->layout;
	mutex_unlock(&c->sb_lock);

	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout.nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout.sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
							       0, BCH_SB_SECTOR,
							       BCH_DATA_sb, &bucket, &bucket_sectors, flags);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
						       offset + (1 << layout.sb_max_size_bits),
						       BCH_DATA_sb, &bucket, &bucket_sectors, flags);
		if (ret)
			return ret;
	}

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
						      bucket, BCH_DATA_sb, bucket_sectors, flags);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
						      ca->journal.buckets[i],
						      BCH_DATA_journal, ca->mi.bucket_size, flags);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
			   enum btree_iter_update_trigger_flags flags)
{
	int ret = bch2_trans_run(c,
			__bch2_trans_mark_dev_sb(trans, ca, flags));
	bch_err_fn(c, ret);
	return ret;
}

int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
				  enum btree_iter_update_trigger_flags flags)
{
	for_each_online_member(c, ca) {
		int ret = bch2_trans_mark_dev_sb(c, ca, flags);
		if (ret) {
			percpu_ref_put(&ca->io_ref[READ]);
			return ret;
		}
	}

	return 0;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
}

bool bch2_is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	for (i = 0; i < ca->journal.nr; i++)
		if (b == ca->journal.buckets[i])
			return true;

	return false;
}

/* Disk reservations: */

#define SECTORS_CACHE	1024

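/*
 * Slow path for disk reservations: the per-cpu fast path (the
 * bch2_disk_reservation_add() wrapper in buckets.h) falls through to here
 * when the cpu-local sectors_available cache can't satisfy the request. We
 * refill that cache from c->sectors_available in SECTORS_CACHE sized grabs,
 * and only take sectors_available_lock to recalculate from filesystem usage
 * as a last resort. A rough caller-side sketch, assuming the wrapper and
 * bch2_disk_reservation_put() from buckets.h:
 *
 *	struct disk_reservation res = { 0 };
 *
 *	if (!bch2_disk_reservation_add(c, &res, sectors, 0)) {
 *		... do the write ...
 *		bch2_disk_reservation_put(c, &res);
 *	}
 */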
int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
				u64 sectors, enum bch_reservation_flags flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, get;
	u64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	old = atomic64_read(&c->sectors_available);
	do {
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while (!atomic64_try_cmpxchg(&c->sectors_available,
				       &old, old - get));

	pcpu->sectors_available	+= get;

out:
	pcpu->sectors_available	-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors		+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors_available && (flags & BCH_DISK_RESERVATION_PARTIAL))
		sectors = min(sectors, sectors_available);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors		+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -BCH_ERR_ENOSPC_disk_reservation;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}

/* Startup/shutdown: */

void bch2_buckets_nouse_free(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		kvfree_rcu_mightsleep(ca->buckets_nouse);
		ca->buckets_nouse = NULL;
	}
}

int bch2_buckets_nouse_alloc(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		BUG_ON(ca->buckets_nouse);

		ca->buckets_nouse = bch2_kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
						  sizeof(unsigned long),
						  GFP_KERNEL|__GFP_ZERO);
		if (!ca->buckets_nouse) {
			bch2_dev_put(ca);
			return -BCH_ERR_ENOMEM_buckets_nouse;
		}
	}

	return 0;
}

static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
	struct bucket_gens *buckets =
		container_of(rcu, struct bucket_gens, rcu);

	kvfree(buckets);
}

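/*
 * ca->bucket_gens is RCU protected: readers dereference it under
 * rcu_read_lock(), so resizing copies into a fresh allocation, publishes the
 * new array with rcu_assign_pointer(), and frees the old one through
 * call_rcu() / bucket_gens_free_rcu() above.
 */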
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	bool resize = ca->bucket_gens != NULL;
	int ret;

	if (resize)
		lockdep_assert_held(&c->state_lock);

	if (resize && ca->buckets_nouse)
		return -BCH_ERR_no_resize_with_buckets_nouse;

	bucket_gens = bch2_kvmalloc(struct_size(bucket_gens, b, nbuckets),
				    GFP_KERNEL|__GFP_ZERO);
	if (!bucket_gens) {
		ret = -BCH_ERR_ENOMEM_bucket_gens;
		goto err;
	}

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets	= nbuckets;
	bucket_gens->nbuckets_minus_first =
		bucket_gens->nbuckets - bucket_gens->first_bucket;

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

	if (resize) {
		bucket_gens->nbuckets = min(bucket_gens->nbuckets,
					    old_bucket_gens->nbuckets);
		bucket_gens->nbuckets_minus_first =
			bucket_gens->nbuckets - bucket_gens->first_bucket;
		memcpy(bucket_gens->b,
		       old_bucket_gens->b,
		       bucket_gens->nbuckets);
	}

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	bucket_gens = old_bucket_gens;

	nbuckets = ca->mi.nbuckets;

	ret = 0;
err:
	if (bucket_gens)
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

	return ret;
}

void bch2_dev_buckets_free(struct bch_dev *ca)
{
	kvfree(ca->buckets_nouse);
	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
	free_percpu(ca->usage);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	ca->usage = alloc_percpu(struct bch_dev_usage_full);
	if (!ca->usage)
		return -BCH_ERR_ENOMEM_usage_init;

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}