// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "ec.h"
#include "error.h"
#include "inode.h"
#include "movinggc.h"
#include "recovery.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/preempt.h>

static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
					      enum bch_data_type data_type,
					      s64 sectors)
{
	switch (data_type) {
	case BCH_DATA_btree:
		fs_usage->btree += sectors;
		break;
	case BCH_DATA_user:
	case BCH_DATA_parity:
		fs_usage->data += sectors;
		break;
	case BCH_DATA_cached:
		fs_usage->cached += sectors;
		break;
	default:
		break;
	}
}

void bch2_fs_usage_initialize(struct bch_fs *c)
{
	percpu_down_write(&c->mark_lock);
	struct bch_fs_usage *usage = c->usage_base;

	for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
		bch2_fs_usage_acc_to_base(c, i);

	for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
		usage->b.reserved += usage->persistent_reserved[i];

	for (unsigned i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry_v1 *e =
			cpu_replicas_entry(&c->replicas, i);

		fs_usage_data_type_to_base(&usage->b, e->data_type, usage->replicas[i]);
	}

	for_each_member_device(c, ca) {
		struct bch_dev_usage dev = bch2_dev_usage_read(ca);

		usage->b.hidden += (dev.d[BCH_DATA_sb].buckets +
				    dev.d[BCH_DATA_journal].buckets) *
			ca->mi.bucket_size;
	}

	percpu_up_write(&c->mark_lock);
}

static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
						  unsigned journal_seq,
						  bool gc)
{
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    ? ca->usage_gc
			    : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
}

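/*
 * Read a device's usage counters without taking mark_lock: the copy of
 * usage_base plus the per-cpu accumulators is made under the usage_lock
 * seqcount and retried if it raced with bch2_fs_usage_acc_to_base(), so the
 * caller always sees a consistent snapshot.
 */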
void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
	struct bch_fs *c = ca->fs;
	unsigned seq, i, u64s = dev_usage_u64s();

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(usage, ca->usage_base, u64s * sizeof(u64));
		for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
			acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));
}

u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
	ssize_t offset = v - (u64 *) c->usage_base;
	unsigned i, seq;
	u64 ret;

	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
	percpu_rwsem_assert_held(&c->mark_lock);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		ret = *v;

		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
	struct bch_fs_usage_online *ret;
	unsigned nr_replicas = READ_ONCE(c->replicas.nr);
	unsigned seq, i;
retry:
	ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL);
	if (unlikely(!ret))
		return NULL;

	percpu_down_read(&c->mark_lock);

	if (nr_replicas != c->replicas.nr) {
		nr_replicas = c->replicas.nr;
		percpu_up_read(&c->mark_lock);
		kfree(ret);
		goto retry;
	}

	ret->online_reserved = percpu_u64_get(c->online_reserved);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		unsafe_memcpy(&ret->u, c->usage_base,
			      __fs_usage_u64s(nr_replicas) * sizeof(u64),
			      "embedded variable length struct");
		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
					__fs_usage_u64s(nr_replicas));
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
{
	unsigned u64s = fs_usage_u64s(c);

	BUG_ON(idx >= ARRAY_SIZE(c->usage));

	preempt_disable();
	write_seqcount_begin(&c->usage_lock);

	acc_u64s_percpu((u64 *) c->usage_base,
			(u64 __percpu *) c->usage[idx], u64s);
	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL) {
		u64s = dev_usage_u64s();

		acc_u64s_percpu((u64 *) ca->usage_base,
				(u64 __percpu *) ca->usage[idx], u64s);
		percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
	}
	rcu_read_unlock();

	write_seqcount_end(&c->usage_lock);
	preempt_enable();
}

void bch2_fs_usage_to_text(struct printbuf *out,
			   struct bch_fs *c,
			   struct bch_fs_usage_online *fs_usage)
{
	unsigned i;

	prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);

	prt_printf(out, "hidden:\t\t\t\t%llu\n",
		   fs_usage->u.b.hidden);
	prt_printf(out, "data:\t\t\t\t%llu\n",
		   fs_usage->u.b.data);
	prt_printf(out, "cached:\t\t\t\t%llu\n",
		   fs_usage->u.b.cached);
	prt_printf(out, "reserved:\t\t\t%llu\n",
		   fs_usage->u.b.reserved);
	prt_printf(out, "nr_inodes:\t\t\t%llu\n",
		   fs_usage->u.b.nr_inodes);
	prt_printf(out, "online reserved:\t\t%llu\n",
		   fs_usage->online_reserved);

	for (i = 0;
	     i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
	     i++) {
		prt_printf(out, "%u replicas:\n", i + 1);
		prt_printf(out, "\treserved:\t\t%llu\n",
			   fs_usage->u.persistent_reserved[i]);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry_v1 *e =
			cpu_replicas_entry(&c->replicas, i);

		prt_printf(out, "\t");
		bch2_replicas_entry_to_text(out, e);
		prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
	}
}

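/*
 * Pad a reservation by roughly 1/2^RESERVE_FACTOR (rounded up to a whole
 * increment); used below so reserved and online-reserved sectors are counted
 * with a small safety margin when computing total sectors used.
 */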
static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
	return min(fs_usage->u.b.hidden +
		   fs_usage->u.b.btree +
		   fs_usage->u.b.data +
		   reserve_factor(fs_usage->u.b.reserved +
				  fs_usage->online_reserved),
		   c->capacity);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		bch2_fs_usage_read_one(c, &c->usage_base->b.hidden);

	data		= bch2_fs_usage_read_one(c, &c->usage_base->b.data) +
		bch2_fs_usage_read_one(c, &c->usage_base->b.btree);
	reserved	= bch2_fs_usage_read_one(c, &c->usage_base->b.reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}

void bch2_dev_usage_init(struct bch_dev *ca)
{
	ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
}

void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
{
	prt_tab(out);
	prt_str(out, "buckets");
	prt_tab_rjust(out);
	prt_str(out, "sectors");
	prt_tab_rjust(out);
	prt_str(out, "fragmented");
	prt_tab_rjust(out);
	prt_newline(out);

	for (unsigned i = 0; i < BCH_DATA_NR; i++) {
		bch2_prt_data_type(out, i);
		prt_tab(out);
		prt_u64(out, usage->d[i].buckets);
		prt_tab_rjust(out);
		prt_u64(out, usage->d[i].sectors);
		prt_tab_rjust(out);
		prt_u64(out, usage->d[i].fragmented);
		prt_tab_rjust(out);
		prt_newline(out);
	}
}

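/*
 * Apply the delta between an old and a new alloc key to the usage counters:
 * bucket counts, dirty/cached sectors and fragmentation are adjusted per data
 * type on the device, and buckets of hidden data types are accounted against
 * the filesystem-level hidden counter.
 */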
void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
			   const struct bch_alloc_v4 *old,
			   const struct bch_alloc_v4 *new,
			   u64 journal_seq, bool gc)
{
	struct bch_fs_usage *fs_usage;
	struct bch_dev_usage *u;

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, gc);

	if (data_type_is_hidden(old->data_type))
		fs_usage->b.hidden -= ca->mi.bucket_size;
	if (data_type_is_hidden(new->data_type))
		fs_usage->b.hidden += ca->mi.bucket_size;

	u = dev_usage_ptr(ca, journal_seq, gc);

	u->d[old->data_type].buckets--;
	u->d[new->data_type].buckets++;

	u->d[old->data_type].sectors -= bch2_bucket_sectors_dirty(*old);
	u->d[new->data_type].sectors += bch2_bucket_sectors_dirty(*new);

	u->d[BCH_DATA_cached].sectors += new->cached_sectors;
	u->d[BCH_DATA_cached].sectors -= old->cached_sectors;

	u->d[old->data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, *old);
	u->d[new->data_type].fragmented += bch2_bucket_sectors_fragmented(ca, *new);

	preempt_enable();
}

static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
{
	return (struct bch_alloc_v4) {
		.gen		= b.gen,
		.data_type	= b.data_type,
		.dirty_sectors	= b.dirty_sectors,
		.cached_sectors	= b.cached_sectors,
		.stripe		= b.stripe,
	};
}

void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
			     struct bucket *old, struct bucket *new)
{
	struct bch_alloc_v4 old_a = bucket_m_to_alloc(*old);
	struct bch_alloc_v4 new_a = bucket_m_to_alloc(*new);

	bch2_dev_usage_update(c, ca, &old_a, &new_a, 0, true);
}

static inline int __update_replicas(struct bch_fs *c,
				    struct bch_fs_usage *fs_usage,
				    struct bch_replicas_entry_v1 *r,
				    s64 sectors)
{
	int idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0)
		return -1;

	fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
	fs_usage->replicas[idx] += sectors;
	return 0;
}

int bch2_update_replicas(struct bch_fs *c, struct bkey_s_c k,
			 struct bch_replicas_entry_v1 *r, s64 sectors,
			 unsigned journal_seq, bool gc)
{
	struct bch_fs_usage *fs_usage;
	int idx, ret = 0;
	struct printbuf buf = PRINTBUF;

	percpu_down_read(&c->mark_lock);

	idx = bch2_replicas_entry_idx(c, r);
	if (idx < 0 &&
	    fsck_err(c, ptr_to_missing_replicas_entry,
		     "no replicas entry\n while marking %s",
		     (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		percpu_up_read(&c->mark_lock);
		ret = bch2_mark_replicas(c, r);
		percpu_down_read(&c->mark_lock);

		if (ret)
			goto err;
		idx = bch2_replicas_entry_idx(c, r);
	}
	if (idx < 0) {
		ret = -1;
		goto err;
	}

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, gc);
	fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
	fs_usage->replicas[idx] += sectors;
	preempt_enable();
err:
fsck_err:
	percpu_up_read(&c->mark_lock);
	printbuf_exit(&buf);
	return ret;
}

static inline int update_cached_sectors(struct bch_fs *c,
					struct bkey_s_c k,
					unsigned dev, s64 sectors,
					unsigned journal_seq, bool gc)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	return bch2_update_replicas(c, k, &r.e, sectors, journal_seq, gc);
}

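/*
 * Grow the transaction's replicas delta list: normally by doubling via
 * krealloc(), but if that fails (and the requested size still fits in
 * REPLICAS_DELTA_LIST_MAX) fall back to a fixed-size allocation from the
 * replicas_delta_pool mempool so forward progress is still possible.
 */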
static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
				     gfp_t gfp)
{
	struct replicas_delta_list *d = trans->fs_usage_deltas;
	unsigned new_size = d ? (d->size + more) * 2 : 128;
	unsigned alloc_size = sizeof(*d) + new_size;

	WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);

	if (!d || d->used + more > d->size) {
		d = krealloc(d, alloc_size, gfp|__GFP_ZERO);

		if (unlikely(!d)) {
			if (alloc_size > REPLICAS_DELTA_LIST_MAX)
				return -ENOMEM;

			d = mempool_alloc(&trans->c->replicas_delta_pool, gfp);
			if (!d)
				return -ENOMEM;

			memset(d, 0, REPLICAS_DELTA_LIST_MAX);

			if (trans->fs_usage_deltas)
				memcpy(d, trans->fs_usage_deltas,
				       trans->fs_usage_deltas->size + sizeof(*d));

			new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
			kfree(trans->fs_usage_deltas);
		}

		d->size = new_size;
		trans->fs_usage_deltas = d;
	}

	return 0;
}

int bch2_replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
{
	return allocate_dropping_locks_errcode(trans,
				__replicas_deltas_realloc(trans, more, _gfp));
}

int bch2_update_replicas_list(struct btree_trans *trans,
			      struct bch_replicas_entry_v1 *r,
			      s64 sectors)
{
	struct replicas_delta_list *d;
	struct replicas_delta *n;
	unsigned b;
	int ret;

	if (!sectors)
		return 0;

	b = replicas_entry_bytes(r) + 8;
	ret = bch2_replicas_deltas_realloc(trans, b);
	if (ret)
		return ret;

	d = trans->fs_usage_deltas;
	n = (void *) d->d + d->used;
	n->delta = sectors;
	unsafe_memcpy((void *) n + offsetof(struct replicas_delta, r),
		      r, replicas_entry_bytes(r),
		      "flexible array member embedded in struct with padding");
	bch2_replicas_entry_sort(&n->r);
	d->used += b;
	return 0;
}

int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	return bch2_update_replicas_list(trans, &r.e, sectors);
}

int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
			      size_t b, enum bch_data_type data_type,
			      unsigned sectors, struct gc_pos pos,
			      unsigned flags)
{
	struct bucket old, new, *g;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));
	BUG_ON(data_type != BCH_DATA_sb &&
	       data_type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	percpu_down_read(&c->mark_lock);
	g = gc_bucket(ca, b);

	bucket_lock(g);
	old = *g;

	if (bch2_fs_inconsistent_on(g->data_type &&
			g->data_type != data_type, c,
			"different types of data in same bucket: %s, %s",
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type))) {
		ret = -EIO;
		goto err;
	}

	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
			ca->dev_idx, b, g->gen,
			bch2_data_type_str(g->data_type ?: data_type),
			g->dirty_sectors, sectors)) {
		ret = -EIO;
		goto err;
	}

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	new = *g;
err:
	bucket_unlock(g);
	if (!ret)
		bch2_dev_usage_update_m(c, ca, &old, &new);
	percpu_up_read(&c->mark_lock);
	return ret;
}

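/*
 * Validate an extent pointer against the bucket it points into before its
 * sectors are accounted: catches pointer generations newer than the bucket
 * gen, stale pointers, mismatched data types and sector count overflow.
 * Returns 0 on success, 1 if a cached pointer is stale, -EIO on
 * inconsistency.
 */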
int bch2_check_bucket_ref(struct btree_trans *trans,
			  struct bkey_s_c k,
			  const struct bch_extent_ptr *ptr,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  u8 b_gen, u8 bucket_data_type,
			  u32 bucket_sectors)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (bucket_data_type == BCH_DATA_cached)
		bucket_data_type = BCH_DATA_user;

	if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
	    (bucket_data_type == BCH_DATA_user   && ptr_data_type == BCH_DATA_stripe))
		bucket_data_type = ptr_data_type = BCH_DATA_stripe;

	if (gen_after(ptr->gen, b_gen)) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen,
			      "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			      "while marking %s",
			      ptr->dev, bucket_nr, b_gen,
			      bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			      ptr->gen,
			      (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}

	if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      BCH_FSCK_ERR_ptr_too_stale,
			      "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			      "while marking %s",
			      ptr->dev, bucket_nr, b_gen,
			      bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			      ptr->gen,
			      (printbuf_reset(&buf),
			       bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}

	if (b_gen != ptr->gen && !ptr->cached) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      BCH_FSCK_ERR_stale_dirty_ptr,
			      "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
			      "while marking %s",
			      ptr->dev, bucket_nr, b_gen,
			      *bucket_gen(ca, bucket_nr),
			      bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			      ptr->gen,
			      (printbuf_reset(&buf),
			       bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}

	if (b_gen != ptr->gen) {
		ret = 1;
		goto out;
	}

	if (!data_type_is_empty(bucket_data_type) &&
	    ptr_data_type &&
	    bucket_data_type != ptr_data_type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      BCH_FSCK_ERR_ptr_bucket_data_type_mismatch,
			      "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			      "while marking %s",
			      ptr->dev, bucket_nr, b_gen,
			      bch2_data_type_str(bucket_data_type),
			      bch2_data_type_str(ptr_data_type),
			      (printbuf_reset(&buf),
			       bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}

	if ((u64) bucket_sectors + sectors > U32_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      BCH_FSCK_ERR_bucket_sector_count_overflow,
			      "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
			      "while marking %s",
			      ptr->dev, bucket_nr, b_gen,
			      bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			      bucket_sectors, sectors,
			      (printbuf_reset(&buf),
			       bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}
out:
	printbuf_exit(&buf);
	return ret;
err:
	bch2_dump_trans_updates(trans);
	goto out;
}

void bch2_trans_fs_usage_revert(struct btree_trans *trans,
				struct replicas_delta_list *deltas)
{
	struct bch_fs *c = trans->c;
	struct bch_fs_usage *dst;
	struct replicas_delta *d, *top = (void *) deltas->d + deltas->used;
	s64 added = 0;
	unsigned i;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	dst = fs_usage_ptr(c, trans->journal_res.seq, false);

	/* revert changes: */
	for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
		switch (d->r.data_type) {
		case BCH_DATA_btree:
		case BCH_DATA_user:
		case BCH_DATA_parity:
			added += d->delta;
		}
		BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
	}

	dst->b.nr_inodes -= deltas->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		added				-= deltas->persistent_reserved[i];
		dst->b.reserved			-= deltas->persistent_reserved[i];
		dst->persistent_reserved[i]	-= deltas->persistent_reserved[i];
	}

	if (added > 0) {
		trans->disk_res->sectors += added;
		this_cpu_add(*c->online_reserved, added);
	}

	preempt_enable();
	percpu_up_read(&c->mark_lock);
}

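/*
 * Fold a transaction's accumulated usage delta into the filesystem usage
 * counters at commit time. If the transaction consumed more sectors than its
 * disk reservation covered, the shortfall is taken back out of
 * c->sectors_available and we warn (once) about the accounting mismatch.
 */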
void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	static int warned_disk_usage = 0;
	bool warn = false;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	struct bch_fs_usage_base *dst = &fs_usage_ptr(c, trans->journal_res.seq, false)->b;
	struct bch_fs_usage_base *src = &trans->fs_usage_delta;

	s64 added = src->btree + src->data + src->reserved;

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	s64 should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new, v = atomic64_read(&c->sectors_available);

		do {
			old = v;
			new = max_t(s64, 0, old - should_not_have_added);
		} while ((v = atomic64_cmpxchg(&c->sectors_available,
					       old, new)) != old);

		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	dst->hidden	+= src->hidden;
	dst->btree	+= src->btree;
	dst->data	+= src->data;
	dst->cached	+= src->cached;
	dst->reserved	+= src->reserved;
	dst->nr_inodes	+= src->nr_inodes;

	preempt_enable();
	percpu_up_read(&c->mark_lock);

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		bch2_trans_inconsistent(trans,
					"disk usage increased %lli more than %llu sectors reserved",
					should_not_have_added, disk_res_sectors);
}

int bch2_trans_fs_usage_apply(struct btree_trans *trans,
			      struct replicas_delta_list *deltas)
{
	struct bch_fs *c = trans->c;
	struct replicas_delta *d, *d2;
	struct replicas_delta *top = (void *) deltas->d + deltas->used;
	struct bch_fs_usage *dst;
	unsigned i;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	dst = fs_usage_ptr(c, trans->journal_res.seq, false);

	for (d = deltas->d; d != top; d = replicas_delta_next(d))
		if (__update_replicas(c, dst, &d->r, d->delta))
			goto need_mark;

	dst->b.nr_inodes += deltas->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		dst->b.reserved			+= deltas->persistent_reserved[i];
		dst->persistent_reserved[i]	+= deltas->persistent_reserved[i];
	}

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;
need_mark:
	/* revert changes: */
	for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
		BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return -1;
}

/* KEY_TYPE_extent: */

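/*
 * Account one extent pointer's sectors against its bucket: dirty sectors for
 * dirty pointers, cached sectors for cached pointers, updating the bucket's
 * data type as it becomes empty or non-empty. Generation and data type
 * sanity checks are delegated to bch2_check_bucket_ref().
 */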
static int __mark_pointer(struct btree_trans *trans,
			  struct bkey_s_c k,
			  const struct bch_extent_ptr *ptr,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  u8 bucket_gen, u8 *bucket_data_type,
			  u32 *dirty_sectors, u32 *cached_sectors)
{
	u32 *dst_sectors = !ptr->cached
		? dirty_sectors
		: cached_sectors;
	int ret = bch2_check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
					bucket_gen, *bucket_data_type, *dst_sectors);

	if (ret)
		return ret;

	*dst_sectors += sectors;

	if (!*dirty_sectors && !*cached_sectors)
		*bucket_data_type = 0;
	else if (*bucket_data_type != BCH_DATA_stripe)
		*bucket_data_type = ptr_data_type;

	return 0;
}

static int bch2_trigger_pointer(struct btree_trans *trans,
				enum btree_id btree_id, unsigned level,
				struct bkey_s_c k, struct extent_ptr_decoded p,
				s64 *sectors,
				unsigned flags)
{
	bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
	struct bpos bucket;
	struct bch_backpointer bp;

	bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
	*sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);

	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		struct btree_iter iter;
		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, &iter, bucket);
		int ret = PTR_ERR_OR_ZERO(a);
		if (ret)
			return ret;

		ret = __mark_pointer(trans, k, &p.ptr, *sectors, bp.data_type,
				     a->v.gen, &a->v.data_type,
				     &a->v.dirty_sectors, &a->v.cached_sectors) ?:
			bch2_trans_update(trans, &iter, &a->k_i, 0);
		bch2_trans_iter_exit(trans, &iter);

		if (ret)
			return ret;

		if (!p.ptr.cached) {
			ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
			if (ret)
				return ret;
		}
	}

	if (flags & BTREE_TRIGGER_GC) {
		struct bch_fs *c = trans->c;
		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
		enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);

		percpu_down_read(&c->mark_lock);
		struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
		bucket_lock(g);
		struct bucket old = *g;

		u8 bucket_data_type = g->data_type;
		int ret = __mark_pointer(trans, k, &p.ptr, *sectors,
					 data_type, g->gen,
					 &bucket_data_type,
					 &g->dirty_sectors,
					 &g->cached_sectors);
		if (ret) {
			bucket_unlock(g);
			percpu_up_read(&c->mark_lock);
			return ret;
		}

		g->data_type = bucket_data_type;
		struct bucket new = *g;
		bucket_unlock(g);
		bch2_dev_usage_update_m(c, ca, &old, &new);
		percpu_up_read(&c->mark_lock);
	}

	return 0;
}

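/*
 * A pointer into an erasure coded stripe is accounted against the stripe
 * rather than directly: the transactional path updates the stripe key's
 * per-block sector counts, the gc path updates the in-memory gc_stripes
 * radix tree.
 */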
static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
				   struct bkey_s_c k,
				   struct extent_ptr_decoded p,
				   enum bch_data_type data_type,
				   s64 sectors, unsigned flags)
{
	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		struct btree_iter iter;
		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
				BTREE_ID_stripes, POS(0, p.ec.idx),
				BTREE_ITER_WITH_UPDATES, stripe);
		int ret = PTR_ERR_OR_ZERO(s);
		if (unlikely(ret)) {
			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
						   "pointer to nonexistent stripe %llu",
						   (u64) p.ec.idx);
			goto err;
		}

		if (!bch2_ptr_matches_stripe(&s->v, p)) {
			bch2_trans_inconsistent(trans,
						"stripe pointer doesn't match stripe %llu",
						(u64) p.ec.idx);
			ret = -EIO;
			goto err;
		}

		stripe_blockcount_set(&s->v, p.ec.block,
				      stripe_blockcount_get(&s->v, p.ec.block) +
				      sectors);

		struct bch_replicas_padded r;
		bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
		r.e.data_type = data_type;
		ret = bch2_update_replicas_list(trans, &r.e, sectors);
err:
		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}

	if (flags & BTREE_TRIGGER_GC) {
		struct bch_fs *c = trans->c;

		BUG_ON(!(flags & BTREE_TRIGGER_GC));

		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				(u64) p.ec.idx);
			return -BCH_ERR_ENOMEM_mark_stripe_ptr;
		}

		mutex_lock(&c->ec_stripes_heap_lock);

		if (!m || !m->alive) {
			mutex_unlock(&c->ec_stripes_heap_lock);
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);
			bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n while marking %s",
					    (u64) p.ec.idx, buf.buf);
			printbuf_exit(&buf);
			bch2_inconsistent_error(c);
			return -EIO;
		}

		m->block_sectors[p.ec.block] += sectors;

		struct bch_replicas_padded r = m->r;
		mutex_unlock(&c->ec_stripes_heap_lock);

		r.e.data_type = data_type;
		bch2_update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
	}

	return 0;
}

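/*
 * Shared helper for the transactional and gc extent triggers: walk each
 * pointer in the extent, account its sectors against the owning bucket (or
 * stripe), and build up a replicas entry for the non-stale dirty pointers so
 * the replicas counters can be updated once at the end.
 */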
static int __trigger_extent(struct btree_trans *trans,
			    enum btree_id btree_id, unsigned level,
			    struct bkey_s_c k, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	s64 dirty_sectors = 0;
	int ret = 0;

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors;
		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, &disk_sectors, flags);
		if (ret < 0)
			return ret;

		bool stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale) {
				ret = !gc
					? bch2_update_cached_sectors_list(trans, p.ptr.dev, disk_sectors)
					: update_cached_sectors(c, k, p.ptr.dev, disk_sectors, 0, true);
				bch2_fs_fatal_err_on(ret && gc, c, "%s: no replicas entry while updating cached sectors",
						     bch2_err_str(ret));
				if (ret)
					return ret;
			}
		} else if (!p.has_ec) {
			dirty_sectors		+= disk_sectors;
			r.e.devs[r.e.nr_devs++]	 = p.ptr.dev;
		} else {
			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs) {
		ret = !gc
			? bch2_update_replicas_list(trans, &r.e, dirty_sectors)
			: bch2_update_replicas(c, k, &r.e, dirty_sectors, 0, true);
		if (unlikely(ret && gc)) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, k);
			bch2_fs_fatal_error(c, ": no replicas entry for %s", buf.buf);
			printbuf_exit(&buf);
		}
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trigger_extent(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c old, struct bkey_s new,
			unsigned flags)
{
	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
	unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;

	/* if pointers aren't changing - nothing to do: */
	if (new_ptrs_bytes == old_ptrs_bytes &&
	    !memcmp(new_ptrs.start,
		    old_ptrs.start,
		    new_ptrs_bytes))
		return 0;

	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		struct bch_fs *c = trans->c;
		int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
			(int) bch2_bkey_needs_rebalance(c, old);

		if (mod) {
			int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
							      new.k->p, mod > 0);
			if (ret)
				return ret;
		}
	}

	if (flags & (BTREE_TRIGGER_TRANSACTIONAL|BTREE_TRIGGER_GC))
		return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree_id, level, old, new, flags);

	return 0;
}

/* KEY_TYPE_reservation */

static int __trigger_reservation(struct btree_trans *trans,
				 enum btree_id btree_id, unsigned level,
				 struct bkey_s_c k, unsigned flags)
{
	struct bch_fs *c = trans->c;
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size * replicas;

	if (flags & BTREE_TRIGGER_OVERWRITE)
		sectors = -sectors;

	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		int ret = bch2_replicas_deltas_realloc(trans, 0);
		if (ret)
			return ret;

		struct replicas_delta_list *d = trans->fs_usage_deltas;
		replicas = min(replicas, ARRAY_SIZE(d->persistent_reserved));

		d->persistent_reserved[replicas - 1] += sectors;
	}

	if (flags & BTREE_TRIGGER_GC) {
		percpu_down_read(&c->mark_lock);
		preempt_disable();

		struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage_gc);

		replicas = min(replicas, ARRAY_SIZE(fs_usage->persistent_reserved));
		fs_usage->b.reserved				+= sectors;
		fs_usage->persistent_reserved[replicas - 1]	+= sectors;

		preempt_enable();
		percpu_up_read(&c->mark_lock);
	}

	return 0;
}

int bch2_trigger_reservation(struct btree_trans *trans,
			     enum btree_id btree_id, unsigned level,
			     struct bkey_s_c old, struct bkey_s new,
			     unsigned flags)
{
	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}

/* Mark superblocks: */

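/*
 * Transactionally mark a bucket as holding superblock or journal data by
 * updating its key in the alloc btree; it's an error if the bucket already
 * holds a different type of data.
 */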
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
					     struct bch_dev *ca, size_t b,
					     enum bch_data_type type,
					     unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	int ret = 0;

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (a->v.data_type && type && a->v.data_type != type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      BCH_FSCK_ERR_bucket_metadata_type_mismatch,
			      "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			      "while marking %s",
			      iter.pos.inode, iter.pos.offset, a->v.gen,
			      bch2_data_type_str(a->v.data_type),
			      bch2_data_type_str(type),
			      bch2_data_type_str(type));
		ret = -EIO;
		goto err;
	}

	if (a->v.data_type	!= type ||
	    a->v.dirty_sectors	!= sectors) {
		a->v.data_type		= type;
		a->v.dirty_sectors	= sectors;
		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, size_t b,
				    enum bch_data_type type,
				    unsigned sectors)
{
	return commit_do(trans, NULL, NULL, 0,
			 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
}

static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
					    struct bch_dev *ca,
					    u64 start, u64 end,
					    enum bch_data_type type,
					    u64 *bucket, unsigned *bucket_sectors)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
								  type, *bucket_sectors);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket		= b;
		*bucket_sectors	+= sectors;
		start += sectors;
	} while (start < end);

	return 0;
}

static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
				    struct bch_dev *ca)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
						0, BCH_SB_SECTOR,
						BCH_DATA_sb, &bucket, &bucket_sectors);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				      offset + (1 << layout->sb_max_size_bits),
				      BCH_DATA_sb, &bucket, &bucket_sectors);
		if (ret)
			return ret;
	}

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
{
	int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));

	bch_err_fn(c, ret);
	return ret;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
	for_each_online_member(c, ca) {
		int ret = bch2_trans_mark_dev_sb(c, ca);
		if (ret) {
			percpu_ref_put(&ca->ref);
			return ret;
		}
	}

	return 0;
}

/* Disk reservations: */

#define SECTORS_CACHE	1024

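/*
 * Take a disk space reservation. The fast path hands out sectors from a
 * per-cpu cache that is refilled in SECTORS_CACHE sized chunks from
 * c->sectors_available; only when the global pool looks exhausted do we take
 * sectors_available_lock and recompute it from actual usage.
 */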
int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
				u64 sectors, int flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, v, get;
	s64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	v = atomic64_read(&c->sectors_available);
	do {
		old = v;
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while ((v = atomic64_cmpxchg(&c->sectors_available,
				       old, old - get)) != old);

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors			+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -BCH_ERR_ENOSPC_disk_reservation;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}

/* Startup/shutdown: */

static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
	struct bucket_gens *buckets =
		container_of(rcu, struct bucket_gens, rcu);

	kvfree(buckets);
}

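/*
 * Allocate (or reallocate) the bucket gens array and optional buckets_nouse
 * bitmap for a new bucket count. When resizing a live device the old
 * contents are copied across while gc_lock, the device's bucket_lock and
 * mark_lock are held for write, and the old array is freed via RCU.
 */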
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	unsigned long *buckets_nouse = NULL;
	bool resize = ca->bucket_gens != NULL;
	int ret;

	if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets,
				     GFP_KERNEL|__GFP_ZERO))) {
		ret = -BCH_ERR_ENOMEM_bucket_gens;
		goto err;
	}

	if ((c->opts.buckets_nouse &&
	     !(buckets_nouse = kvmalloc(BITS_TO_LONGS(nbuckets) *
					sizeof(unsigned long),
					GFP_KERNEL|__GFP_ZERO)))) {
		ret = -BCH_ERR_ENOMEM_buckets_nouse;
		goto err;
	}

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets	  = nbuckets;

	if (resize) {
		down_write(&c->gc_lock);
		down_write(&ca->bucket_lock);
		percpu_down_write(&c->mark_lock);
	}

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

	if (resize) {
		size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);

		memcpy(bucket_gens->b,
		       old_bucket_gens->b,
		       n);
		if (buckets_nouse)
			memcpy(buckets_nouse,
			       ca->buckets_nouse,
			       BITS_TO_LONGS(n) * sizeof(unsigned long));
	}

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	bucket_gens = old_bucket_gens;

	swap(ca->buckets_nouse, buckets_nouse);

	nbuckets = ca->mi.nbuckets;

	if (resize) {
		percpu_up_write(&c->mark_lock);
		up_write(&ca->bucket_lock);
		up_write(&c->gc_lock);
	}

	ret = 0;
err:
	kvfree(buckets_nouse);
	if (bucket_gens)
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

	return ret;
}

void bch2_dev_buckets_free(struct bch_dev *ca)
{
	kvfree(ca->buckets_nouse);
	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));

	for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++)
		free_percpu(ca->usage[i]);
	kfree(ca->usage_base);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
	if (!ca->usage_base)
		return -BCH_ERR_ENOMEM_usage_init;

	for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) {
		ca->usage[i] = alloc_percpu(struct bch_dev_usage);
		if (!ca->usage[i])
			return -BCH_ERR_ENOMEM_usage_init;
	}

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}