1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Code for manipulating bucket marks for garbage collection. 4 * 5 * Copyright 2014 Datera, Inc. 6 */ 7 8 #include "bcachefs.h" 9 #include "alloc_background.h" 10 #include "backpointers.h" 11 #include "bset.h" 12 #include "btree_gc.h" 13 #include "btree_update.h" 14 #include "buckets.h" 15 #include "buckets_waiting_for_journal.h" 16 #include "ec.h" 17 #include "error.h" 18 #include "inode.h" 19 #include "movinggc.h" 20 #include "recovery.h" 21 #include "reflink.h" 22 #include "replicas.h" 23 #include "subvolume.h" 24 #include "trace.h" 25 26 #include <linux/preempt.h> 27 28 static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage, 29 enum bch_data_type data_type, 30 s64 sectors) 31 { 32 switch (data_type) { 33 case BCH_DATA_btree: 34 fs_usage->btree += sectors; 35 break; 36 case BCH_DATA_user: 37 case BCH_DATA_parity: 38 fs_usage->data += sectors; 39 break; 40 case BCH_DATA_cached: 41 fs_usage->cached += sectors; 42 break; 43 default: 44 break; 45 } 46 } 47 48 void bch2_fs_usage_initialize(struct bch_fs *c) 49 { 50 percpu_down_write(&c->mark_lock); 51 struct bch_fs_usage *usage = c->usage_base; 52 53 for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++) 54 bch2_fs_usage_acc_to_base(c, i); 55 56 for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++) 57 usage->b.reserved += usage->persistent_reserved[i]; 58 59 for (unsigned i = 0; i < c->replicas.nr; i++) { 60 struct bch_replicas_entry_v1 *e = 61 cpu_replicas_entry(&c->replicas, i); 62 63 fs_usage_data_type_to_base(&usage->b, e->data_type, usage->replicas[i]); 64 } 65 66 for_each_member_device(c, ca) { 67 struct bch_dev_usage dev = bch2_dev_usage_read(ca); 68 69 usage->b.hidden += (dev.d[BCH_DATA_sb].buckets + 70 dev.d[BCH_DATA_journal].buckets) * 71 ca->mi.bucket_size; 72 } 73 74 percpu_up_write(&c->mark_lock); 75 } 76 77 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca, 78 unsigned journal_seq, 79 bool gc) 80 { 81 BUG_ON(!gc && !journal_seq); 82 83 return this_cpu_ptr(gc 84 ? ca->usage_gc 85 : ca->usage[journal_seq & JOURNAL_BUF_MASK]); 86 } 87 88 void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage) 89 { 90 struct bch_fs *c = ca->fs; 91 unsigned seq, i, u64s = dev_usage_u64s(); 92 93 do { 94 seq = read_seqcount_begin(&c->usage_lock); 95 memcpy(usage, ca->usage_base, u64s * sizeof(u64)); 96 for (i = 0; i < ARRAY_SIZE(ca->usage); i++) 97 acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s); 98 } while (read_seqcount_retry(&c->usage_lock, seq)); 99 } 100 101 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v) 102 { 103 ssize_t offset = v - (u64 *) c->usage_base; 104 unsigned i, seq; 105 u64 ret; 106 107 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c)); 108 percpu_rwsem_assert_held(&c->mark_lock); 109 110 do { 111 seq = read_seqcount_begin(&c->usage_lock); 112 ret = *v; 113 114 for (i = 0; i < ARRAY_SIZE(c->usage); i++) 115 ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset); 116 } while (read_seqcount_retry(&c->usage_lock, seq)); 117 118 return ret; 119 } 120 121 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c) 122 { 123 struct bch_fs_usage_online *ret; 124 unsigned nr_replicas = READ_ONCE(c->replicas.nr); 125 unsigned seq, i; 126 retry: 127 ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL); 128 if (unlikely(!ret)) 129 return NULL; 130 131 percpu_down_read(&c->mark_lock); 132 133 if (nr_replicas != c->replicas.nr) { 134 nr_replicas = c->replicas.nr; 135 percpu_up_read(&c->mark_lock); 136 kfree(ret); 137 goto retry; 138 } 139 140 ret->online_reserved = percpu_u64_get(c->online_reserved); 141 142 do { 143 seq = read_seqcount_begin(&c->usage_lock); 144 unsafe_memcpy(&ret->u, c->usage_base, 145 __fs_usage_u64s(nr_replicas) * sizeof(u64), 146 "embedded variable length struct"); 147 for (i = 0; i < ARRAY_SIZE(c->usage); i++) 148 acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], 149 __fs_usage_u64s(nr_replicas)); 150 } while (read_seqcount_retry(&c->usage_lock, seq)); 151 152 return ret; 153 } 154 155 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx) 156 { 157 unsigned u64s = fs_usage_u64s(c); 158 159 BUG_ON(idx >= ARRAY_SIZE(c->usage)); 160 161 preempt_disable(); 162 write_seqcount_begin(&c->usage_lock); 163 164 acc_u64s_percpu((u64 *) c->usage_base, 165 (u64 __percpu *) c->usage[idx], u64s); 166 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64)); 167 168 rcu_read_lock(); 169 for_each_member_device_rcu(c, ca, NULL) { 170 u64s = dev_usage_u64s(); 171 172 acc_u64s_percpu((u64 *) ca->usage_base, 173 (u64 __percpu *) ca->usage[idx], u64s); 174 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64)); 175 } 176 rcu_read_unlock(); 177 178 write_seqcount_end(&c->usage_lock); 179 preempt_enable(); 180 } 181 182 void bch2_fs_usage_to_text(struct printbuf *out, 183 struct bch_fs *c, 184 struct bch_fs_usage_online *fs_usage) 185 { 186 unsigned i; 187 188 prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity); 189 190 prt_printf(out, "hidden:\t\t\t\t%llu\n", 191 fs_usage->u.b.hidden); 192 prt_printf(out, "data:\t\t\t\t%llu\n", 193 fs_usage->u.b.data); 194 prt_printf(out, "cached:\t\t\t\t%llu\n", 195 fs_usage->u.b.cached); 196 prt_printf(out, "reserved:\t\t\t%llu\n", 197 fs_usage->u.b.reserved); 198 prt_printf(out, "nr_inodes:\t\t\t%llu\n", 199 fs_usage->u.b.nr_inodes); 200 prt_printf(out, "online reserved:\t\t%llu\n", 201 fs_usage->online_reserved); 202 203 for (i = 0; 204 i < ARRAY_SIZE(fs_usage->u.persistent_reserved); 205 i++) { 206 prt_printf(out, "%u replicas:\n", i + 1); 207 prt_printf(out, "\treserved:\t\t%llu\n", 208 fs_usage->u.persistent_reserved[i]); 209 } 210 211 for (i = 0; i < c->replicas.nr; i++) { 212 struct bch_replicas_entry_v1 *e = 213 cpu_replicas_entry(&c->replicas, i); 214 215 prt_printf(out, "\t"); 216 bch2_replicas_entry_to_text(out, e); 217 prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]); 218 } 219 } 220 221 static u64 reserve_factor(u64 r) 222 { 223 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR); 224 } 225 226 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage) 227 { 228 return min(fs_usage->u.b.hidden + 229 fs_usage->u.b.btree + 230 fs_usage->u.b.data + 231 reserve_factor(fs_usage->u.b.reserved + 232 fs_usage->online_reserved), 233 c->capacity); 234 } 235 236 static struct bch_fs_usage_short 237 __bch2_fs_usage_read_short(struct bch_fs *c) 238 { 239 struct bch_fs_usage_short ret; 240 u64 data, reserved; 241 242 ret.capacity = c->capacity - 243 bch2_fs_usage_read_one(c, &c->usage_base->b.hidden); 244 245 data = bch2_fs_usage_read_one(c, &c->usage_base->b.data) + 246 bch2_fs_usage_read_one(c, &c->usage_base->b.btree); 247 reserved = bch2_fs_usage_read_one(c, &c->usage_base->b.reserved) + 248 percpu_u64_get(c->online_reserved); 249 250 ret.used = min(ret.capacity, data + reserve_factor(reserved)); 251 ret.free = ret.capacity - ret.used; 252 253 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes); 254 255 return ret; 256 } 257 258 struct bch_fs_usage_short 259 bch2_fs_usage_read_short(struct bch_fs *c) 260 { 261 struct bch_fs_usage_short ret; 262 263 percpu_down_read(&c->mark_lock); 264 ret = __bch2_fs_usage_read_short(c); 265 percpu_up_read(&c->mark_lock); 266 267 return ret; 268 } 269 270 void bch2_dev_usage_init(struct bch_dev *ca) 271 { 272 ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket; 273 } 274 275 void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage) 276 { 277 prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n"); 278 279 for (unsigned i = 0; i < BCH_DATA_NR; i++) { 280 bch2_prt_data_type(out, i); 281 prt_printf(out, "\t%llu\r%llu\r%llu\r\n", 282 usage->d[i].buckets, 283 usage->d[i].sectors, 284 usage->d[i].fragmented); 285 } 286 } 287 288 void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca, 289 const struct bch_alloc_v4 *old, 290 const struct bch_alloc_v4 *new, 291 u64 journal_seq, bool gc) 292 { 293 struct bch_fs_usage *fs_usage; 294 struct bch_dev_usage *u; 295 296 preempt_disable(); 297 fs_usage = fs_usage_ptr(c, journal_seq, gc); 298 299 if (data_type_is_hidden(old->data_type)) 300 fs_usage->b.hidden -= ca->mi.bucket_size; 301 if (data_type_is_hidden(new->data_type)) 302 fs_usage->b.hidden += ca->mi.bucket_size; 303 304 u = dev_usage_ptr(ca, journal_seq, gc); 305 306 u->d[old->data_type].buckets--; 307 u->d[new->data_type].buckets++; 308 309 u->d[old->data_type].sectors -= bch2_bucket_sectors_dirty(*old); 310 u->d[new->data_type].sectors += bch2_bucket_sectors_dirty(*new); 311 312 u->d[BCH_DATA_cached].sectors += new->cached_sectors; 313 u->d[BCH_DATA_cached].sectors -= old->cached_sectors; 314 315 u->d[old->data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, *old); 316 u->d[new->data_type].fragmented += bch2_bucket_sectors_fragmented(ca, *new); 317 318 preempt_enable(); 319 } 320 321 static inline int __update_replicas(struct bch_fs *c, 322 struct bch_fs_usage *fs_usage, 323 struct bch_replicas_entry_v1 *r, 324 s64 sectors) 325 { 326 int idx = bch2_replicas_entry_idx(c, r); 327 328 if (idx < 0) 329 return -1; 330 331 fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors); 332 fs_usage->replicas[idx] += sectors; 333 return 0; 334 } 335 336 int bch2_update_replicas(struct bch_fs *c, struct bkey_s_c k, 337 struct bch_replicas_entry_v1 *r, s64 sectors, 338 unsigned journal_seq, bool gc) 339 { 340 struct bch_fs_usage *fs_usage; 341 int idx, ret = 0; 342 struct printbuf buf = PRINTBUF; 343 344 percpu_down_read(&c->mark_lock); 345 346 idx = bch2_replicas_entry_idx(c, r); 347 if (idx < 0 && 348 fsck_err(c, ptr_to_missing_replicas_entry, 349 "no replicas entry\n while marking %s", 350 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 351 percpu_up_read(&c->mark_lock); 352 ret = bch2_mark_replicas(c, r); 353 percpu_down_read(&c->mark_lock); 354 355 if (ret) 356 goto err; 357 idx = bch2_replicas_entry_idx(c, r); 358 } 359 if (idx < 0) { 360 ret = -1; 361 goto err; 362 } 363 364 preempt_disable(); 365 fs_usage = fs_usage_ptr(c, journal_seq, gc); 366 fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors); 367 fs_usage->replicas[idx] += sectors; 368 preempt_enable(); 369 err: 370 fsck_err: 371 percpu_up_read(&c->mark_lock); 372 printbuf_exit(&buf); 373 return ret; 374 } 375 376 static inline int update_cached_sectors(struct bch_fs *c, 377 struct bkey_s_c k, 378 unsigned dev, s64 sectors, 379 unsigned journal_seq, bool gc) 380 { 381 struct bch_replicas_padded r; 382 383 bch2_replicas_entry_cached(&r.e, dev); 384 385 return bch2_update_replicas(c, k, &r.e, sectors, journal_seq, gc); 386 } 387 388 static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more, 389 gfp_t gfp) 390 { 391 struct replicas_delta_list *d = trans->fs_usage_deltas; 392 unsigned new_size = d ? (d->size + more) * 2 : 128; 393 unsigned alloc_size = sizeof(*d) + new_size; 394 395 WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX); 396 397 if (!d || d->used + more > d->size) { 398 d = krealloc(d, alloc_size, gfp|__GFP_ZERO); 399 400 if (unlikely(!d)) { 401 if (alloc_size > REPLICAS_DELTA_LIST_MAX) 402 return -ENOMEM; 403 404 d = mempool_alloc(&trans->c->replicas_delta_pool, gfp); 405 if (!d) 406 return -ENOMEM; 407 408 memset(d, 0, REPLICAS_DELTA_LIST_MAX); 409 410 if (trans->fs_usage_deltas) 411 memcpy(d, trans->fs_usage_deltas, 412 trans->fs_usage_deltas->size + sizeof(*d)); 413 414 new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d); 415 kfree(trans->fs_usage_deltas); 416 } 417 418 d->size = new_size; 419 trans->fs_usage_deltas = d; 420 } 421 422 return 0; 423 } 424 425 int bch2_replicas_deltas_realloc(struct btree_trans *trans, unsigned more) 426 { 427 return allocate_dropping_locks_errcode(trans, 428 __replicas_deltas_realloc(trans, more, _gfp)); 429 } 430 431 int bch2_update_replicas_list(struct btree_trans *trans, 432 struct bch_replicas_entry_v1 *r, 433 s64 sectors) 434 { 435 struct replicas_delta_list *d; 436 struct replicas_delta *n; 437 unsigned b; 438 int ret; 439 440 if (!sectors) 441 return 0; 442 443 b = replicas_entry_bytes(r) + 8; 444 ret = bch2_replicas_deltas_realloc(trans, b); 445 if (ret) 446 return ret; 447 448 d = trans->fs_usage_deltas; 449 n = (void *) d->d + d->used; 450 n->delta = sectors; 451 unsafe_memcpy((void *) n + offsetof(struct replicas_delta, r), 452 r, replicas_entry_bytes(r), 453 "flexible array member embedded in strcuct with padding"); 454 bch2_replicas_entry_sort(&n->r); 455 d->used += b; 456 return 0; 457 } 458 459 int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64 sectors) 460 { 461 struct bch_replicas_padded r; 462 463 bch2_replicas_entry_cached(&r.e, dev); 464 465 return bch2_update_replicas_list(trans, &r.e, sectors); 466 } 467 468 int bch2_check_fix_ptrs(struct btree_trans *trans, 469 enum btree_id btree, unsigned level, struct bkey_s_c k, 470 enum btree_iter_update_trigger_flags flags) 471 { 472 struct bch_fs *c = trans->c; 473 struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k); 474 const union bch_extent_entry *entry_c; 475 struct extent_ptr_decoded p = { 0 }; 476 bool do_update = false; 477 struct printbuf buf = PRINTBUF; 478 int ret = 0; 479 480 percpu_down_read(&c->mark_lock); 481 482 bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) { 483 struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev); 484 if (!ca) { 485 if (fsck_err(c, ptr_to_invalid_device, 486 "pointer to missing device %u\n" 487 "while marking %s", 488 p.ptr.dev, 489 (printbuf_reset(&buf), 490 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 491 do_update = true; 492 continue; 493 } 494 495 struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr); 496 enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry_c); 497 498 if (fsck_err_on(!g->gen_valid, 499 c, ptr_to_missing_alloc_key, 500 "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" 501 "while marking %s", 502 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 503 bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 504 p.ptr.gen, 505 (printbuf_reset(&buf), 506 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 507 if (!p.ptr.cached) { 508 g->gen_valid = true; 509 g->gen = p.ptr.gen; 510 } else { 511 do_update = true; 512 } 513 } 514 515 if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, 516 c, ptr_gen_newer_than_bucket_gen, 517 "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" 518 "while marking %s", 519 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 520 bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 521 p.ptr.gen, g->gen, 522 (printbuf_reset(&buf), 523 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 524 if (!p.ptr.cached && 525 (g->data_type != BCH_DATA_btree || 526 data_type == BCH_DATA_btree)) { 527 g->gen_valid = true; 528 g->gen = p.ptr.gen; 529 g->data_type = 0; 530 g->dirty_sectors = 0; 531 g->cached_sectors = 0; 532 } else { 533 do_update = true; 534 } 535 } 536 537 if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, 538 c, ptr_gen_newer_than_bucket_gen, 539 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" 540 "while marking %s", 541 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, 542 bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 543 p.ptr.gen, 544 (printbuf_reset(&buf), 545 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 546 do_update = true; 547 548 if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0, 549 c, stale_dirty_ptr, 550 "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" 551 "while marking %s", 552 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 553 bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 554 p.ptr.gen, g->gen, 555 (printbuf_reset(&buf), 556 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 557 do_update = true; 558 559 if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen) 560 goto next; 561 562 if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type), 563 c, ptr_bucket_data_type_mismatch, 564 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n" 565 "while marking %s", 566 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, 567 bch2_data_type_str(g->data_type), 568 bch2_data_type_str(data_type), 569 (printbuf_reset(&buf), 570 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 571 if (data_type == BCH_DATA_btree) { 572 g->gen_valid = true; 573 g->gen = p.ptr.gen; 574 g->data_type = data_type; 575 g->dirty_sectors = 0; 576 g->cached_sectors = 0; 577 } else { 578 do_update = true; 579 } 580 } 581 582 if (p.has_ec) { 583 struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx); 584 585 if (fsck_err_on(!m || !m->alive, c, 586 ptr_to_missing_stripe, 587 "pointer to nonexistent stripe %llu\n" 588 "while marking %s", 589 (u64) p.ec.idx, 590 (printbuf_reset(&buf), 591 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 592 do_update = true; 593 594 if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c, 595 ptr_to_incorrect_stripe, 596 "pointer does not match stripe %llu\n" 597 "while marking %s", 598 (u64) p.ec.idx, 599 (printbuf_reset(&buf), 600 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 601 do_update = true; 602 } 603 next: 604 bch2_dev_put(ca); 605 } 606 607 if (do_update) { 608 if (flags & BTREE_TRIGGER_is_root) { 609 bch_err(c, "cannot update btree roots yet"); 610 ret = -EINVAL; 611 goto err; 612 } 613 614 struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); 615 ret = PTR_ERR_OR_ZERO(new); 616 if (ret) 617 goto err; 618 619 rcu_read_lock(); 620 bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_rcu(c, ptr->dev)); 621 rcu_read_unlock(); 622 623 if (level) { 624 /* 625 * We don't want to drop btree node pointers - if the 626 * btree node isn't there anymore, the read path will 627 * sort it out: 628 */ 629 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); 630 rcu_read_lock(); 631 bkey_for_each_ptr(ptrs, ptr) { 632 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev); 633 struct bucket *g = PTR_GC_BUCKET(ca, ptr); 634 635 ptr->gen = g->gen; 636 } 637 rcu_read_unlock(); 638 } else { 639 struct bkey_ptrs ptrs; 640 union bch_extent_entry *entry; 641 642 rcu_read_lock(); 643 restart_drop_ptrs: 644 ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); 645 bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) { 646 struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev); 647 struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr); 648 enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry); 649 650 if ((p.ptr.cached && 651 (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) || 652 (!p.ptr.cached && 653 gen_cmp(p.ptr.gen, g->gen) < 0) || 654 gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX || 655 (g->data_type && 656 g->data_type != data_type)) { 657 bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr); 658 goto restart_drop_ptrs; 659 } 660 } 661 rcu_read_unlock(); 662 again: 663 ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); 664 bkey_extent_entry_for_each(ptrs, entry) { 665 if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) { 666 struct gc_stripe *m = genradix_ptr(&c->gc_stripes, 667 entry->stripe_ptr.idx); 668 union bch_extent_entry *next_ptr; 669 670 bkey_extent_entry_for_each_from(ptrs, next_ptr, entry) 671 if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr) 672 goto found; 673 next_ptr = NULL; 674 found: 675 if (!next_ptr) { 676 bch_err(c, "aieee, found stripe ptr with no data ptr"); 677 continue; 678 } 679 680 if (!m || !m->alive || 681 !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block], 682 &next_ptr->ptr, 683 m->sectors)) { 684 bch2_bkey_extent_entry_drop(new, entry); 685 goto again; 686 } 687 } 688 } 689 } 690 691 if (0) { 692 printbuf_reset(&buf); 693 bch2_bkey_val_to_text(&buf, c, k); 694 bch_info(c, "updated %s", buf.buf); 695 696 printbuf_reset(&buf); 697 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new)); 698 bch_info(c, "new key %s", buf.buf); 699 } 700 701 percpu_up_read(&c->mark_lock); 702 struct btree_iter iter; 703 bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level, 704 BTREE_ITER_intent|BTREE_ITER_all_snapshots); 705 ret = bch2_btree_iter_traverse(&iter) ?: 706 bch2_trans_update(trans, &iter, new, 707 BTREE_UPDATE_internal_snapshot_node| 708 BTREE_TRIGGER_norun); 709 bch2_trans_iter_exit(trans, &iter); 710 percpu_down_read(&c->mark_lock); 711 712 if (ret) 713 goto err; 714 715 if (level) 716 bch2_btree_node_update_key_early(trans, btree, level - 1, k, new); 717 } 718 err: 719 fsck_err: 720 percpu_up_read(&c->mark_lock); 721 printbuf_exit(&buf); 722 return ret; 723 } 724 725 int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, 726 struct bkey_s_c k, 727 const struct bch_extent_ptr *ptr, 728 s64 sectors, enum bch_data_type ptr_data_type, 729 u8 b_gen, u8 bucket_data_type, 730 u32 *bucket_sectors) 731 { 732 struct bch_fs *c = trans->c; 733 size_t bucket_nr = PTR_BUCKET_NR(ca, ptr); 734 struct printbuf buf = PRINTBUF; 735 bool inserting = sectors > 0; 736 int ret = 0; 737 738 BUG_ON(!sectors); 739 740 if (gen_after(ptr->gen, b_gen)) { 741 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 742 BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, 743 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n" 744 "while marking %s", 745 ptr->dev, bucket_nr, b_gen, 746 bch2_data_type_str(bucket_data_type ?: ptr_data_type), 747 ptr->gen, 748 (bch2_bkey_val_to_text(&buf, c, k), buf.buf)); 749 if (inserting) 750 goto err; 751 goto out; 752 } 753 754 if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) { 755 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 756 BCH_FSCK_ERR_ptr_too_stale, 757 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" 758 "while marking %s", 759 ptr->dev, bucket_nr, b_gen, 760 bch2_data_type_str(bucket_data_type ?: ptr_data_type), 761 ptr->gen, 762 (printbuf_reset(&buf), 763 bch2_bkey_val_to_text(&buf, c, k), buf.buf)); 764 if (inserting) 765 goto err; 766 goto out; 767 } 768 769 if (b_gen != ptr->gen && ptr->cached) { 770 ret = 1; 771 goto out; 772 } 773 774 if (b_gen != ptr->gen) { 775 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 776 BCH_FSCK_ERR_stale_dirty_ptr, 777 "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n" 778 "while marking %s", 779 ptr->dev, bucket_nr, b_gen, 780 *bucket_gen(ca, bucket_nr), 781 bch2_data_type_str(bucket_data_type ?: ptr_data_type), 782 ptr->gen, 783 (printbuf_reset(&buf), 784 bch2_bkey_val_to_text(&buf, c, k), buf.buf)); 785 if (inserting) 786 goto err; 787 goto out; 788 } 789 790 if (bucket_data_type_mismatch(bucket_data_type, ptr_data_type)) { 791 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 792 BCH_FSCK_ERR_ptr_bucket_data_type_mismatch, 793 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n" 794 "while marking %s", 795 ptr->dev, bucket_nr, b_gen, 796 bch2_data_type_str(bucket_data_type), 797 bch2_data_type_str(ptr_data_type), 798 (printbuf_reset(&buf), 799 bch2_bkey_val_to_text(&buf, c, k), buf.buf)); 800 if (inserting) 801 goto err; 802 goto out; 803 } 804 805 if ((u64) *bucket_sectors + sectors > U32_MAX) { 806 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 807 BCH_FSCK_ERR_bucket_sector_count_overflow, 808 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n" 809 "while marking %s", 810 ptr->dev, bucket_nr, b_gen, 811 bch2_data_type_str(bucket_data_type ?: ptr_data_type), 812 *bucket_sectors, sectors, 813 (printbuf_reset(&buf), 814 bch2_bkey_val_to_text(&buf, c, k), buf.buf)); 815 if (inserting) 816 goto err; 817 sectors = -*bucket_sectors; 818 } 819 820 *bucket_sectors += sectors; 821 out: 822 printbuf_exit(&buf); 823 return ret; 824 err: 825 bch2_dump_trans_updates(trans); 826 ret = -EIO; 827 goto out; 828 } 829 830 void bch2_trans_fs_usage_revert(struct btree_trans *trans, 831 struct replicas_delta_list *deltas) 832 { 833 struct bch_fs *c = trans->c; 834 struct bch_fs_usage *dst; 835 struct replicas_delta *d, *top = (void *) deltas->d + deltas->used; 836 s64 added = 0; 837 unsigned i; 838 839 percpu_down_read(&c->mark_lock); 840 preempt_disable(); 841 dst = fs_usage_ptr(c, trans->journal_res.seq, false); 842 843 /* revert changes: */ 844 for (d = deltas->d; d != top; d = replicas_delta_next(d)) { 845 switch (d->r.data_type) { 846 case BCH_DATA_btree: 847 case BCH_DATA_user: 848 case BCH_DATA_parity: 849 added += d->delta; 850 } 851 BUG_ON(__update_replicas(c, dst, &d->r, -d->delta)); 852 } 853 854 dst->b.nr_inodes -= deltas->nr_inodes; 855 856 for (i = 0; i < BCH_REPLICAS_MAX; i++) { 857 added -= deltas->persistent_reserved[i]; 858 dst->b.reserved -= deltas->persistent_reserved[i]; 859 dst->persistent_reserved[i] -= deltas->persistent_reserved[i]; 860 } 861 862 if (added > 0) { 863 trans->disk_res->sectors += added; 864 this_cpu_add(*c->online_reserved, added); 865 } 866 867 preempt_enable(); 868 percpu_up_read(&c->mark_lock); 869 } 870 871 void bch2_trans_account_disk_usage_change(struct btree_trans *trans) 872 { 873 struct bch_fs *c = trans->c; 874 u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0; 875 static int warned_disk_usage = 0; 876 bool warn = false; 877 878 percpu_down_read(&c->mark_lock); 879 preempt_disable(); 880 struct bch_fs_usage_base *dst = &fs_usage_ptr(c, trans->journal_res.seq, false)->b; 881 struct bch_fs_usage_base *src = &trans->fs_usage_delta; 882 883 s64 added = src->btree + src->data + src->reserved; 884 885 /* 886 * Not allowed to reduce sectors_available except by getting a 887 * reservation: 888 */ 889 s64 should_not_have_added = added - (s64) disk_res_sectors; 890 if (unlikely(should_not_have_added > 0)) { 891 u64 old, new, v = atomic64_read(&c->sectors_available); 892 893 do { 894 old = v; 895 new = max_t(s64, 0, old - should_not_have_added); 896 } while ((v = atomic64_cmpxchg(&c->sectors_available, 897 old, new)) != old); 898 899 added -= should_not_have_added; 900 warn = true; 901 } 902 903 if (added > 0) { 904 trans->disk_res->sectors -= added; 905 this_cpu_sub(*c->online_reserved, added); 906 } 907 908 dst->hidden += src->hidden; 909 dst->btree += src->btree; 910 dst->data += src->data; 911 dst->cached += src->cached; 912 dst->reserved += src->reserved; 913 dst->nr_inodes += src->nr_inodes; 914 915 preempt_enable(); 916 percpu_up_read(&c->mark_lock); 917 918 if (unlikely(warn) && !xchg(&warned_disk_usage, 1)) 919 bch2_trans_inconsistent(trans, 920 "disk usage increased %lli more than %llu sectors reserved)", 921 should_not_have_added, disk_res_sectors); 922 } 923 924 int bch2_trans_fs_usage_apply(struct btree_trans *trans, 925 struct replicas_delta_list *deltas) 926 { 927 struct bch_fs *c = trans->c; 928 struct replicas_delta *d, *d2; 929 struct replicas_delta *top = (void *) deltas->d + deltas->used; 930 struct bch_fs_usage *dst; 931 unsigned i; 932 933 percpu_down_read(&c->mark_lock); 934 preempt_disable(); 935 dst = fs_usage_ptr(c, trans->journal_res.seq, false); 936 937 for (d = deltas->d; d != top; d = replicas_delta_next(d)) 938 if (__update_replicas(c, dst, &d->r, d->delta)) 939 goto need_mark; 940 941 dst->b.nr_inodes += deltas->nr_inodes; 942 943 for (i = 0; i < BCH_REPLICAS_MAX; i++) { 944 dst->b.reserved += deltas->persistent_reserved[i]; 945 dst->persistent_reserved[i] += deltas->persistent_reserved[i]; 946 } 947 948 preempt_enable(); 949 percpu_up_read(&c->mark_lock); 950 return 0; 951 need_mark: 952 /* revert changes: */ 953 for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2)) 954 BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta)); 955 956 preempt_enable(); 957 percpu_up_read(&c->mark_lock); 958 return -1; 959 } 960 961 /* KEY_TYPE_extent: */ 962 963 static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca, 964 struct bkey_s_c k, 965 const struct bch_extent_ptr *ptr, 966 s64 sectors, enum bch_data_type ptr_data_type, 967 struct bch_alloc_v4 *a) 968 { 969 u32 *dst_sectors = !ptr->cached 970 ? &a->dirty_sectors 971 : &a->cached_sectors; 972 int ret = bch2_bucket_ref_update(trans, ca, k, ptr, sectors, ptr_data_type, 973 a->gen, a->data_type, dst_sectors); 974 975 if (ret) 976 return ret; 977 978 alloc_data_type_set(a, ptr_data_type); 979 return 0; 980 } 981 982 static int bch2_trigger_pointer(struct btree_trans *trans, 983 enum btree_id btree_id, unsigned level, 984 struct bkey_s_c k, struct extent_ptr_decoded p, 985 const union bch_extent_entry *entry, 986 s64 *sectors, 987 enum btree_iter_update_trigger_flags flags) 988 { 989 bool insert = !(flags & BTREE_TRIGGER_overwrite); 990 int ret = 0; 991 992 struct bch_fs *c = trans->c; 993 struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev); 994 if (unlikely(!ca)) { 995 if (insert) 996 ret = -EIO; 997 goto err; 998 } 999 1000 struct bpos bucket; 1001 struct bch_backpointer bp; 1002 bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp); 1003 *sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len); 1004 1005 if (flags & BTREE_TRIGGER_transactional) { 1006 struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket); 1007 ret = PTR_ERR_OR_ZERO(a) ?: 1008 __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &a->v); 1009 if (ret) 1010 goto err; 1011 1012 if (!p.ptr.cached) { 1013 ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, k, insert); 1014 if (ret) 1015 goto err; 1016 } 1017 } 1018 1019 if (flags & BTREE_TRIGGER_gc) { 1020 percpu_down_read(&c->mark_lock); 1021 struct bucket *g = gc_bucket(ca, bucket.offset); 1022 bucket_lock(g); 1023 struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; 1024 ret = __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &new); 1025 if (!ret) { 1026 alloc_to_bucket(g, new); 1027 bch2_dev_usage_update(c, ca, &old, &new, 0, true); 1028 } 1029 bucket_unlock(g); 1030 percpu_up_read(&c->mark_lock); 1031 } 1032 err: 1033 bch2_dev_put(ca); 1034 return ret; 1035 } 1036 1037 static int bch2_trigger_stripe_ptr(struct btree_trans *trans, 1038 struct bkey_s_c k, 1039 struct extent_ptr_decoded p, 1040 enum bch_data_type data_type, 1041 s64 sectors, 1042 enum btree_iter_update_trigger_flags flags) 1043 { 1044 if (flags & BTREE_TRIGGER_transactional) { 1045 struct btree_iter iter; 1046 struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter, 1047 BTREE_ID_stripes, POS(0, p.ec.idx), 1048 BTREE_ITER_with_updates, stripe); 1049 int ret = PTR_ERR_OR_ZERO(s); 1050 if (unlikely(ret)) { 1051 bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans, 1052 "pointer to nonexistent stripe %llu", 1053 (u64) p.ec.idx); 1054 goto err; 1055 } 1056 1057 if (!bch2_ptr_matches_stripe(&s->v, p)) { 1058 bch2_trans_inconsistent(trans, 1059 "stripe pointer doesn't match stripe %llu", 1060 (u64) p.ec.idx); 1061 ret = -EIO; 1062 goto err; 1063 } 1064 1065 stripe_blockcount_set(&s->v, p.ec.block, 1066 stripe_blockcount_get(&s->v, p.ec.block) + 1067 sectors); 1068 1069 struct bch_replicas_padded r; 1070 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i)); 1071 r.e.data_type = data_type; 1072 ret = bch2_update_replicas_list(trans, &r.e, sectors); 1073 err: 1074 bch2_trans_iter_exit(trans, &iter); 1075 return ret; 1076 } 1077 1078 if (flags & BTREE_TRIGGER_gc) { 1079 struct bch_fs *c = trans->c; 1080 1081 BUG_ON(!(flags & BTREE_TRIGGER_gc)); 1082 1083 struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL); 1084 if (!m) { 1085 bch_err(c, "error allocating memory for gc_stripes, idx %llu", 1086 (u64) p.ec.idx); 1087 return -BCH_ERR_ENOMEM_mark_stripe_ptr; 1088 } 1089 1090 mutex_lock(&c->ec_stripes_heap_lock); 1091 1092 if (!m || !m->alive) { 1093 mutex_unlock(&c->ec_stripes_heap_lock); 1094 struct printbuf buf = PRINTBUF; 1095 bch2_bkey_val_to_text(&buf, c, k); 1096 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n while marking %s", 1097 (u64) p.ec.idx, buf.buf); 1098 printbuf_exit(&buf); 1099 bch2_inconsistent_error(c); 1100 return -EIO; 1101 } 1102 1103 m->block_sectors[p.ec.block] += sectors; 1104 1105 struct bch_replicas_padded r = m->r; 1106 mutex_unlock(&c->ec_stripes_heap_lock); 1107 1108 r.e.data_type = data_type; 1109 bch2_update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true); 1110 } 1111 1112 return 0; 1113 } 1114 1115 static int __trigger_extent(struct btree_trans *trans, 1116 enum btree_id btree_id, unsigned level, 1117 struct bkey_s_c k, 1118 enum btree_iter_update_trigger_flags flags) 1119 { 1120 bool gc = flags & BTREE_TRIGGER_gc; 1121 struct bch_fs *c = trans->c; 1122 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1123 const union bch_extent_entry *entry; 1124 struct extent_ptr_decoded p; 1125 struct bch_replicas_padded r; 1126 enum bch_data_type data_type = bkey_is_btree_ptr(k.k) 1127 ? BCH_DATA_btree 1128 : BCH_DATA_user; 1129 s64 replicas_sectors = 0; 1130 int ret = 0; 1131 1132 r.e.data_type = data_type; 1133 r.e.nr_devs = 0; 1134 r.e.nr_required = 1; 1135 1136 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { 1137 s64 disk_sectors; 1138 ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags); 1139 if (ret < 0) 1140 return ret; 1141 1142 bool stale = ret > 0; 1143 1144 if (p.ptr.cached) { 1145 if (!stale) { 1146 ret = !gc 1147 ? bch2_update_cached_sectors_list(trans, p.ptr.dev, disk_sectors) 1148 : update_cached_sectors(c, k, p.ptr.dev, disk_sectors, 0, true); 1149 bch2_fs_fatal_err_on(ret && gc, c, "%s: no replicas entry while updating cached sectors", 1150 bch2_err_str(ret)); 1151 if (ret) 1152 return ret; 1153 } 1154 } else if (!p.has_ec) { 1155 replicas_sectors += disk_sectors; 1156 r.e.devs[r.e.nr_devs++] = p.ptr.dev; 1157 } else { 1158 ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags); 1159 if (ret) 1160 return ret; 1161 1162 /* 1163 * There may be other dirty pointers in this extent, but 1164 * if so they're not required for mounting if we have an 1165 * erasure coded pointer in this extent: 1166 */ 1167 r.e.nr_required = 0; 1168 } 1169 } 1170 1171 if (r.e.nr_devs) { 1172 ret = !gc 1173 ? bch2_update_replicas_list(trans, &r.e, replicas_sectors) 1174 : bch2_update_replicas(c, k, &r.e, replicas_sectors, 0, true); 1175 if (unlikely(ret && gc)) { 1176 struct printbuf buf = PRINTBUF; 1177 1178 bch2_bkey_val_to_text(&buf, c, k); 1179 bch2_fs_fatal_error(c, ": no replicas entry for %s", buf.buf); 1180 printbuf_exit(&buf); 1181 } 1182 if (ret) 1183 return ret; 1184 } 1185 1186 return 0; 1187 } 1188 1189 int bch2_trigger_extent(struct btree_trans *trans, 1190 enum btree_id btree, unsigned level, 1191 struct bkey_s_c old, struct bkey_s new, 1192 enum btree_iter_update_trigger_flags flags) 1193 { 1194 struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c); 1195 struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old); 1196 unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start; 1197 unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start; 1198 1199 if (unlikely(flags & BTREE_TRIGGER_check_repair)) 1200 return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags); 1201 1202 /* if pointers aren't changing - nothing to do: */ 1203 if (new_ptrs_bytes == old_ptrs_bytes && 1204 !memcmp(new_ptrs.start, 1205 old_ptrs.start, 1206 new_ptrs_bytes)) 1207 return 0; 1208 1209 if (flags & BTREE_TRIGGER_transactional) { 1210 struct bch_fs *c = trans->c; 1211 int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) - 1212 (int) bch2_bkey_needs_rebalance(c, old); 1213 1214 if (mod) { 1215 int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work, 1216 new.k->p, mod > 0); 1217 if (ret) 1218 return ret; 1219 } 1220 } 1221 1222 if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) 1223 return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree, level, old, new, flags); 1224 1225 return 0; 1226 } 1227 1228 /* KEY_TYPE_reservation */ 1229 1230 static int __trigger_reservation(struct btree_trans *trans, 1231 enum btree_id btree_id, unsigned level, struct bkey_s_c k, 1232 enum btree_iter_update_trigger_flags flags) 1233 { 1234 struct bch_fs *c = trans->c; 1235 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas; 1236 s64 sectors = (s64) k.k->size * replicas; 1237 1238 if (flags & BTREE_TRIGGER_overwrite) 1239 sectors = -sectors; 1240 1241 if (flags & BTREE_TRIGGER_transactional) { 1242 int ret = bch2_replicas_deltas_realloc(trans, 0); 1243 if (ret) 1244 return ret; 1245 1246 struct replicas_delta_list *d = trans->fs_usage_deltas; 1247 replicas = min(replicas, ARRAY_SIZE(d->persistent_reserved)); 1248 1249 d->persistent_reserved[replicas - 1] += sectors; 1250 } 1251 1252 if (flags & BTREE_TRIGGER_gc) { 1253 percpu_down_read(&c->mark_lock); 1254 preempt_disable(); 1255 1256 struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage_gc); 1257 1258 replicas = min(replicas, ARRAY_SIZE(fs_usage->persistent_reserved)); 1259 fs_usage->b.reserved += sectors; 1260 fs_usage->persistent_reserved[replicas - 1] += sectors; 1261 1262 preempt_enable(); 1263 percpu_up_read(&c->mark_lock); 1264 } 1265 1266 return 0; 1267 } 1268 1269 int bch2_trigger_reservation(struct btree_trans *trans, 1270 enum btree_id btree_id, unsigned level, 1271 struct bkey_s_c old, struct bkey_s new, 1272 enum btree_iter_update_trigger_flags flags) 1273 { 1274 return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags); 1275 } 1276 1277 /* Mark superblocks: */ 1278 1279 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, 1280 struct bch_dev *ca, u64 b, 1281 enum bch_data_type type, 1282 unsigned sectors) 1283 { 1284 struct bch_fs *c = trans->c; 1285 struct btree_iter iter; 1286 int ret = 0; 1287 1288 struct bkey_i_alloc_v4 *a = 1289 bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b)); 1290 if (IS_ERR(a)) 1291 return PTR_ERR(a); 1292 1293 if (a->v.data_type && type && a->v.data_type != type) { 1294 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 1295 BCH_FSCK_ERR_bucket_metadata_type_mismatch, 1296 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n" 1297 "while marking %s", 1298 iter.pos.inode, iter.pos.offset, a->v.gen, 1299 bch2_data_type_str(a->v.data_type), 1300 bch2_data_type_str(type), 1301 bch2_data_type_str(type)); 1302 ret = -EIO; 1303 goto err; 1304 } 1305 1306 if (a->v.data_type != type || 1307 a->v.dirty_sectors != sectors) { 1308 a->v.data_type = type; 1309 a->v.dirty_sectors = sectors; 1310 ret = bch2_trans_update(trans, &iter, &a->k_i, 0); 1311 } 1312 err: 1313 bch2_trans_iter_exit(trans, &iter); 1314 return ret; 1315 } 1316 1317 static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, 1318 u64 b, enum bch_data_type data_type, unsigned sectors, 1319 enum btree_iter_update_trigger_flags flags) 1320 { 1321 int ret = 0; 1322 1323 percpu_down_read(&c->mark_lock); 1324 struct bucket *g = gc_bucket(ca, b); 1325 1326 bucket_lock(g); 1327 struct bch_alloc_v4 old = bucket_m_to_alloc(*g); 1328 1329 if (bch2_fs_inconsistent_on(g->data_type && 1330 g->data_type != data_type, c, 1331 "different types of data in same bucket: %s, %s", 1332 bch2_data_type_str(g->data_type), 1333 bch2_data_type_str(data_type))) { 1334 ret = -EIO; 1335 goto err; 1336 } 1337 1338 if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c, 1339 "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size", 1340 ca->dev_idx, b, g->gen, 1341 bch2_data_type_str(g->data_type ?: data_type), 1342 g->dirty_sectors, sectors)) { 1343 ret = -EIO; 1344 goto err; 1345 } 1346 1347 g->data_type = data_type; 1348 g->dirty_sectors += sectors; 1349 struct bch_alloc_v4 new = bucket_m_to_alloc(*g); 1350 err: 1351 bucket_unlock(g); 1352 if (!ret) 1353 bch2_dev_usage_update(c, ca, &old, &new, 0, true); 1354 percpu_up_read(&c->mark_lock); 1355 return ret; 1356 } 1357 1358 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans, 1359 struct bch_dev *ca, u64 b, 1360 enum bch_data_type type, unsigned sectors, 1361 enum btree_iter_update_trigger_flags flags) 1362 { 1363 BUG_ON(type != BCH_DATA_free && 1364 type != BCH_DATA_sb && 1365 type != BCH_DATA_journal); 1366 1367 /* 1368 * Backup superblock might be past the end of our normal usable space: 1369 */ 1370 if (b >= ca->mi.nbuckets) 1371 return 0; 1372 1373 if (flags & BTREE_TRIGGER_gc) 1374 return bch2_mark_metadata_bucket(trans->c, ca, b, type, sectors, flags); 1375 else if (flags & BTREE_TRIGGER_transactional) 1376 return commit_do(trans, NULL, NULL, 0, 1377 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors)); 1378 else 1379 BUG(); 1380 } 1381 1382 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans, 1383 struct bch_dev *ca, u64 start, u64 end, 1384 enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors, 1385 enum btree_iter_update_trigger_flags flags) 1386 { 1387 do { 1388 u64 b = sector_to_bucket(ca, start); 1389 unsigned sectors = 1390 min_t(u64, bucket_to_sector(ca, b + 1), end) - start; 1391 1392 if (b != *bucket && *bucket_sectors) { 1393 int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket, 1394 type, *bucket_sectors, flags); 1395 if (ret) 1396 return ret; 1397 1398 *bucket_sectors = 0; 1399 } 1400 1401 *bucket = b; 1402 *bucket_sectors += sectors; 1403 start += sectors; 1404 } while (start < end); 1405 1406 return 0; 1407 } 1408 1409 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca, 1410 enum btree_iter_update_trigger_flags flags) 1411 { 1412 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout; 1413 u64 bucket = 0; 1414 unsigned i, bucket_sectors = 0; 1415 int ret; 1416 1417 for (i = 0; i < layout->nr_superblocks; i++) { 1418 u64 offset = le64_to_cpu(layout->sb_offset[i]); 1419 1420 if (offset == BCH_SB_SECTOR) { 1421 ret = bch2_trans_mark_metadata_sectors(trans, ca, 1422 0, BCH_SB_SECTOR, 1423 BCH_DATA_sb, &bucket, &bucket_sectors, flags); 1424 if (ret) 1425 return ret; 1426 } 1427 1428 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset, 1429 offset + (1 << layout->sb_max_size_bits), 1430 BCH_DATA_sb, &bucket, &bucket_sectors, flags); 1431 if (ret) 1432 return ret; 1433 } 1434 1435 if (bucket_sectors) { 1436 ret = bch2_trans_mark_metadata_bucket(trans, ca, 1437 bucket, BCH_DATA_sb, bucket_sectors, flags); 1438 if (ret) 1439 return ret; 1440 } 1441 1442 for (i = 0; i < ca->journal.nr; i++) { 1443 ret = bch2_trans_mark_metadata_bucket(trans, ca, 1444 ca->journal.buckets[i], 1445 BCH_DATA_journal, ca->mi.bucket_size, flags); 1446 if (ret) 1447 return ret; 1448 } 1449 1450 return 0; 1451 } 1452 1453 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca, 1454 enum btree_iter_update_trigger_flags flags) 1455 { 1456 int ret = bch2_trans_run(c, 1457 __bch2_trans_mark_dev_sb(trans, ca, flags)); 1458 bch_err_fn(c, ret); 1459 return ret; 1460 } 1461 1462 int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c, 1463 enum btree_iter_update_trigger_flags flags) 1464 { 1465 for_each_online_member(c, ca) { 1466 int ret = bch2_trans_mark_dev_sb(c, ca, flags); 1467 if (ret) { 1468 percpu_ref_put(&ca->io_ref); 1469 return ret; 1470 } 1471 } 1472 1473 return 0; 1474 } 1475 1476 int bch2_trans_mark_dev_sbs(struct bch_fs *c) 1477 { 1478 return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional); 1479 } 1480 1481 /* Disk reservations: */ 1482 1483 #define SECTORS_CACHE 1024 1484 1485 int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, 1486 u64 sectors, int flags) 1487 { 1488 struct bch_fs_pcpu *pcpu; 1489 u64 old, v, get; 1490 s64 sectors_available; 1491 int ret; 1492 1493 percpu_down_read(&c->mark_lock); 1494 preempt_disable(); 1495 pcpu = this_cpu_ptr(c->pcpu); 1496 1497 if (sectors <= pcpu->sectors_available) 1498 goto out; 1499 1500 v = atomic64_read(&c->sectors_available); 1501 do { 1502 old = v; 1503 get = min((u64) sectors + SECTORS_CACHE, old); 1504 1505 if (get < sectors) { 1506 preempt_enable(); 1507 goto recalculate; 1508 } 1509 } while ((v = atomic64_cmpxchg(&c->sectors_available, 1510 old, old - get)) != old); 1511 1512 pcpu->sectors_available += get; 1513 1514 out: 1515 pcpu->sectors_available -= sectors; 1516 this_cpu_add(*c->online_reserved, sectors); 1517 res->sectors += sectors; 1518 1519 preempt_enable(); 1520 percpu_up_read(&c->mark_lock); 1521 return 0; 1522 1523 recalculate: 1524 mutex_lock(&c->sectors_available_lock); 1525 1526 percpu_u64_set(&c->pcpu->sectors_available, 0); 1527 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free); 1528 1529 if (sectors <= sectors_available || 1530 (flags & BCH_DISK_RESERVATION_NOFAIL)) { 1531 atomic64_set(&c->sectors_available, 1532 max_t(s64, 0, sectors_available - sectors)); 1533 this_cpu_add(*c->online_reserved, sectors); 1534 res->sectors += sectors; 1535 ret = 0; 1536 } else { 1537 atomic64_set(&c->sectors_available, sectors_available); 1538 ret = -BCH_ERR_ENOSPC_disk_reservation; 1539 } 1540 1541 mutex_unlock(&c->sectors_available_lock); 1542 percpu_up_read(&c->mark_lock); 1543 1544 return ret; 1545 } 1546 1547 /* Startup/shutdown: */ 1548 1549 void bch2_buckets_nouse_free(struct bch_fs *c) 1550 { 1551 for_each_member_device(c, ca) { 1552 kvfree_rcu_mightsleep(ca->buckets_nouse); 1553 ca->buckets_nouse = NULL; 1554 } 1555 } 1556 1557 int bch2_buckets_nouse_alloc(struct bch_fs *c) 1558 { 1559 for_each_member_device(c, ca) { 1560 BUG_ON(ca->buckets_nouse); 1561 1562 ca->buckets_nouse = kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) * 1563 sizeof(unsigned long), 1564 GFP_KERNEL|__GFP_ZERO); 1565 if (!ca->buckets_nouse) { 1566 bch2_dev_put(ca); 1567 return -BCH_ERR_ENOMEM_buckets_nouse; 1568 } 1569 } 1570 1571 return 0; 1572 } 1573 1574 static void bucket_gens_free_rcu(struct rcu_head *rcu) 1575 { 1576 struct bucket_gens *buckets = 1577 container_of(rcu, struct bucket_gens, rcu); 1578 1579 kvfree(buckets); 1580 } 1581 1582 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) 1583 { 1584 struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL; 1585 bool resize = ca->bucket_gens != NULL; 1586 int ret; 1587 1588 BUG_ON(resize && ca->buckets_nouse); 1589 1590 if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets, 1591 GFP_KERNEL|__GFP_ZERO))) { 1592 ret = -BCH_ERR_ENOMEM_bucket_gens; 1593 goto err; 1594 } 1595 1596 bucket_gens->first_bucket = ca->mi.first_bucket; 1597 bucket_gens->nbuckets = nbuckets; 1598 1599 if (resize) { 1600 down_write(&c->gc_lock); 1601 down_write(&ca->bucket_lock); 1602 percpu_down_write(&c->mark_lock); 1603 } 1604 1605 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1); 1606 1607 if (resize) { 1608 size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets); 1609 1610 memcpy(bucket_gens->b, 1611 old_bucket_gens->b, 1612 n); 1613 } 1614 1615 rcu_assign_pointer(ca->bucket_gens, bucket_gens); 1616 bucket_gens = old_bucket_gens; 1617 1618 nbuckets = ca->mi.nbuckets; 1619 1620 if (resize) { 1621 percpu_up_write(&c->mark_lock); 1622 up_write(&ca->bucket_lock); 1623 up_write(&c->gc_lock); 1624 } 1625 1626 ret = 0; 1627 err: 1628 if (bucket_gens) 1629 call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu); 1630 1631 return ret; 1632 } 1633 1634 void bch2_dev_buckets_free(struct bch_dev *ca) 1635 { 1636 kvfree(ca->buckets_nouse); 1637 kvfree(rcu_dereference_protected(ca->bucket_gens, 1)); 1638 1639 for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) 1640 free_percpu(ca->usage[i]); 1641 kfree(ca->usage_base); 1642 } 1643 1644 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca) 1645 { 1646 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL); 1647 if (!ca->usage_base) 1648 return -BCH_ERR_ENOMEM_usage_init; 1649 1650 for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) { 1651 ca->usage[i] = alloc_percpu(struct bch_dev_usage); 1652 if (!ca->usage[i]) 1653 return -BCH_ERR_ENOMEM_usage_init; 1654 } 1655 1656 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets); 1657 } 1658