// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_accounting.h"
#include "ec.h"
#include "enumerated_ref.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/jiffies.h>

static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);

/* Persistent alloc info: */

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef x
};

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits)	out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef x
}

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k,
			   struct bkey_validate_context from)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	int ret = 0;

	/* allow for unknown fields */
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v),
			 c, alloc_v1_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
	return ret;
}

int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k,
			   struct bkey_validate_context from)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k),
			 c, alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
			   struct bkey_validate_context from)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k),
			 c, alloc_v3_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
			   struct bkey_validate_context from)
{
	struct bch_alloc_v4 a;
	int ret = 0;

	bkey_val_copy(&a, bkey_s_c_to_alloc_v4(k));

	bkey_fsck_err_on(alloc_v4_u64s_noerror(&a) > bkey_val_u64s(k.k),
			 c, alloc_v4_val_size_bad,
			 "bad val size (%u > %zu)",
			 alloc_v4_u64s_noerror(&a), bkey_val_u64s(k.k));

	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(&a) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(&a),
			 c, alloc_v4_backpointers_start_bad,
			 "invalid backpointers_start");

	bkey_fsck_err_on(alloc_data_type(a, a.data_type) != a.data_type,
			 c, alloc_key_data_type_bad,
			 "invalid data type (got %u should be %u)",
			 a.data_type, alloc_data_type(a, a.data_type));

	for (unsigned i = 0; i < 2; i++)
		bkey_fsck_err_on(a.io_time[i] > LRU_TIME_MAX,
				 c, alloc_key_io_time_bad,
				 "invalid io_time[%s]: %llu, max %llu",
				 i == READ ? "read" : "write",
				 a.io_time[i], LRU_TIME_MAX);

	unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(&a) * sizeof(u64) >
		offsetof(struct bch_alloc_v4, stripe_sectors)
		? a.stripe_sectors
		: 0;

	switch (a.data_type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		bkey_fsck_err_on(stripe_sectors ||
				 a.dirty_sectors ||
				 a.cached_sectors ||
				 a.stripe,
				 c, alloc_key_empty_but_have_data,
				 "empty data type free but have data %u.%u.%u %u",
				 stripe_sectors,
				 a.dirty_sectors,
				 a.cached_sectors,
				 a.stripe);
		break;
	case BCH_DATA_sb:
	case BCH_DATA_journal:
	case BCH_DATA_btree:
	case BCH_DATA_user:
	case BCH_DATA_parity:
		bkey_fsck_err_on(!a.dirty_sectors &&
				 !stripe_sectors,
				 c, alloc_key_dirty_sectors_0,
				 "data_type %s but dirty_sectors==0",
				 bch2_data_type_str(a.data_type));
		break;
	case BCH_DATA_cached:
		bkey_fsck_err_on(!a.cached_sectors ||
				 a.dirty_sectors ||
				 stripe_sectors ||
				 a.stripe,
				 c, alloc_key_cached_inconsistency,
				 "data type inconsistency");

		bkey_fsck_err_on(!a.io_time[READ] &&
				 !(c->recovery.passes_to_run &
				   BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs)),
				 c, alloc_key_cached_but_read_time_zero,
				 "cached bucket with read_time == 0");
		break;
	case BCH_DATA_stripe:
		break;
	}
fsck_err:
	return ret;
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;

	a->journal_seq_nonempty	= swab64(a->journal_seq_nonempty);
	a->journal_seq_empty	= swab64(a->journal_seq_empty);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->stripe_sectors	= swab32(a->stripe_sectors);
}

static inline void __bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *c,
					   unsigned dev, const struct bch_alloc_v4 *a)
{
	struct bch_dev *ca = c ? bch2_dev_tryget_noerror(c, dev) : NULL;

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_newline(out);
	prt_printf(out, "journal_seq_nonempty %llu\n",	a->journal_seq_nonempty);
	prt_printf(out, "journal_seq_empty %llu\n",	a->journal_seq_empty);
	prt_printf(out, "need_discard %llu\n",		BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_printf(out, "need_inc_gen %llu\n",		BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_printf(out, "dirty_sectors %u\n",		a->dirty_sectors);
	prt_printf(out, "stripe_sectors %u\n",		a->stripe_sectors);
	prt_printf(out, "cached_sectors %u\n",		a->cached_sectors);
	prt_printf(out, "stripe %u\n",			a->stripe);
	prt_printf(out, "stripe_redundancy %u\n",	a->stripe_redundancy);
	prt_printf(out, "io_time[READ] %llu\n",		a->io_time[READ]);
	prt_printf(out, "io_time[WRITE] %llu\n",	a->io_time[WRITE]);

	if (ca)
		prt_printf(out, "fragmentation %llu\n",	alloc_lru_idx_fragmentation(*a, ca));
	prt_printf(out, "bp_start %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);

	bch2_dev_put(ca);
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);

	__bch2_alloc_v4_to_text(out, c, k.k->p.inode, a);
}

void bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	__bch2_alloc_v4_to_text(out, c, k.k->p.inode, bkey_s_c_to_alloc_v4(k).v);
}

void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq_nonempty	= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}
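/*
 * Convert an alloc key to a mutable alloc_v4 key: if it's already alloc_v4
 * with no inline backpointers we can copy it directly; otherwise fall back to
 * the out-of-line conversion in __bch2_alloc_to_v4_mut().
 */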
static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter,
				       struct bpos pos)
{
	struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
					       BTREE_ITER_with_updates|
					       BTREE_ITER_cached|
					       BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}

__flatten
struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos,
						      enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, pos,
					       BTREE_ITER_with_updates|
					       BTREE_ITER_cached|
					       BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	if ((void *) k.v >= trans->mem &&
	    (void *) k.v <  trans->mem + trans->mem_top) {
		bch2_trans_iter_exit(trans, &iter);
		return container_of(bkey_s_c_to_alloc_v4(k).v, struct bkey_i_alloc_v4, v);
	}

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
	if (IS_ERR(a)) {
		bch2_trans_iter_exit(trans, &iter);
		return a;
	}

	ret = bch2_trans_update_ip(trans, &iter, &a->k_i, flags, _RET_IP_);
	bch2_trans_iter_exit(trans, &iter);
	return unlikely(ret) ? ERR_PTR(ret) : a;
}

static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}

int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k,
			      struct bkey_validate_context from)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens),
			 c, bucket_gens_val_size_bad,
			 "bad val size (%zu != %zu)",
			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
	return ret;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}

int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_prefetch, k, ({
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		struct bch_alloc_v4 a;
		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
		unsigned offset;
		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
		int ret2 = 0;

		if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) {
			ret2 =  bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
			if (ret2)
				goto iter_err;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
iter_err:
		ret2;
	}));

	if (have_bucket_gens_key && !ret)
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

int bch2_alloc_read(struct bch_fs *c)
{
	down_read(&c->state_lock);

	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_dev *ca = NULL;
	int ret;

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
			0;
		}));
	} else {
		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			if (k.k->p.offset < ca->mi.first_bucket) {
				bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket));
				continue;
			}

			if (k.k->p.offset >= ca->mi.nbuckets) {
				bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			struct bch_alloc_v4 a;
			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
			0;
		}));
	}

	bch2_dev_put(ca);
	bch2_trans_put(trans);

	up_read(&c->state_lock);
	bch_err_fn(c, ret);
	return ret;
}

/* Free space/discard btree: */

static int __need_discard_or_freespace_err(struct btree_trans *trans,
					   struct bkey_s_c alloc_k,
					   bool set, bool discard, bool repair)
{
	struct bch_fs *c = trans->c;
	enum bch_fsck_flags flags = FSCK_CAN_IGNORE|(repair ? FSCK_CAN_FIX : 0);
	enum bch_sb_error_id err_id = discard
		? BCH_FSCK_ERR_need_discard_key_wrong
		: BCH_FSCK_ERR_freespace_key_wrong;
	enum btree_id btree = discard ? BTREE_ID_need_discard : BTREE_ID_freespace;
	struct printbuf buf = PRINTBUF;

	bch2_bkey_val_to_text(&buf, c, alloc_k);

	int ret = __bch2_fsck_err(NULL, trans, flags, err_id,
				  "bucket incorrectly %sset in %s btree\n%s",
				  set ? "" : "un",
				  bch2_btree_id_str(btree),
				  buf.buf);
	if (bch2_err_matches(ret, BCH_ERR_fsck_ignore) ||
	    bch2_err_matches(ret, BCH_ERR_fsck_errors_not_fixed))
		ret = 0;

	printbuf_exit(&buf);
	return ret;
}

#define need_discard_or_freespace_err(...)		\
	fsck_err_wrap(__need_discard_or_freespace_err(__VA_ARGS__))

#define need_discard_or_freespace_err_on(cond, ...)	\
	(unlikely(cond) ? need_discard_or_freespace_err(__VA_ARGS__) : false)

static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	enum btree_id btree;
	struct bpos pos;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		pos = alloc_freespace_pos(alloc_k.k->p, *a);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		pos = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	struct btree_iter iter;
	struct bkey_s_c old = bch2_bkey_get_iter(trans, &iter, btree, pos, BTREE_ITER_intent);
	int ret = bkey_err(old);
	if (ret)
		return ret;

	need_discard_or_freespace_err_on(ca->mi.freespace_initialized &&
					 !old.k->type != set,
					 trans, alloc_k, set,
					 btree == BTREE_ID_need_discard, false);

	ret = bch2_btree_bit_mod_iter(trans, &iter, set);
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
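/*
 * Keep the bucket_gens btree in sync when a bucket's generation number
 * changes: each bucket_gens key covers a group of buckets (see
 * alloc_gens_pos()), so read-modify-write the key that holds this bucket's
 * gen.
 */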
static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_intent|
			       BTREE_ITER_with_updates);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca,
						    enum bch_data_type data_type,
						    s64 delta_buckets,
						    s64 delta_sectors,
						    s64 delta_fragmented, unsigned flags)
{
	s64 d[3] = { delta_buckets, delta_sectors, delta_fragmented };

	return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
					 d, dev_data_type,
					 .dev		= ca->dev_idx,
					 .data_type	= data_type);
}

int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca,
				   const struct bch_alloc_v4 *old,
				   const struct bch_alloc_v4 *new,
				   unsigned flags)
{
	s64 old_sectors = bch2_bucket_sectors(*old);
	s64 new_sectors = bch2_bucket_sectors(*new);
	if (old->data_type != new->data_type) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
				 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?:
			  bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
				-1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	} else if (old_sectors != new_sectors) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
					 0,
					 new_sectors - old_sectors,
					 bch2_bucket_sectors_fragmented(ca, *new) -
					 bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	}

	s64 old_unstriped = bch2_bucket_sectors_unstriped(*old);
	s64 new_unstriped = bch2_bucket_sectors_unstriped(*new);
	if (old_unstriped != new_unstriped) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped,
					 !!new_unstriped - !!old_unstriped,
					 new_unstriped - old_unstriped,
					 0,
					 flags);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
	if (!ca)
		return bch_err_throw(c, trigger_alloc);

	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

	struct bch_alloc_v4 *new_a;
	if (likely(new.k->type == KEY_TYPE_alloc_v4)) {
		new_a = bkey_s_to_alloc_v4(new).v;
	} else {
		BUG_ON(!(flags & (BTREE_TRIGGER_gc|BTREE_TRIGGER_check_repair)));

		struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c);
		ret = PTR_ERR_OR_ZERO(new_ka);
		if (unlikely(ret))
			goto err;
		new_a = &new_ka->v;
	}

	if (flags & BTREE_TRIGGER_transactional) {
		alloc_data_type_set(new_a, new_a->data_type);

		int is_empty_delta = (int) data_type_is_empty(new_a->data_type) -
				     (int) data_type_is_empty(old_a->data_type);

		if (is_empty_delta < 0) {
			new_a->io_time[READ] = bch2_current_io_time(c, READ);
			new_a->io_time[WRITE]= bch2_current_io_time(c, WRITE);
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
		}

		if (data_type_is_empty(new_a->data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
			if (new_a->oldest_gen == new_a->gen &&
			    !bch2_bucket_sectors_total(*new_a))
				new_a->oldest_gen++;
			new_a->gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
			alloc_data_type_set(new_a, new_a->data_type);
		}

		if (old_a->data_type != new_a->data_type ||
		    (new_a->data_type == BCH_DATA_free &&
		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
			ret =   bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
				bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
			if (ret)
				goto err;
		}

		if (new_a->data_type == BCH_DATA_cached &&
		    !new_a->io_time[READ])
			new_a->io_time[READ] = bch2_current_io_time(c, READ);

		ret = bch2_lru_change(trans, new.k->p.inode,
				      bucket_to_u64(new.k->p),
				      alloc_lru_idx_read(*old_a),
				      alloc_lru_idx_read(*new_a));
		if (ret)
			goto err;

		ret = bch2_lru_change(trans,
				      BCH_LRU_BUCKET_FRAGMENTATION,
				      bucket_to_u64(new.k->p),
				      alloc_lru_idx_fragmentation(*old_a, ca),
				      alloc_lru_idx_fragmentation(*new_a, ca));
		if (ret)
			goto err;

		if (old_a->gen != new_a->gen) {
			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
			if (ret)
				goto err;
		}

		ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags);
		if (ret)
			goto err;
	}

	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
		u64 transaction_seq = trans->journal_res.seq;
		BUG_ON(!transaction_seq);

		if (log_fsck_err_on(transaction_seq && new_a->journal_seq_nonempty > transaction_seq,
				    trans, alloc_key_journal_seq_in_future,
				    "bucket journal seq in future (currently at %llu)\n%s",
				    journal_cur_seq(&c->journal),
				    (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf)))
			new_a->journal_seq_nonempty = transaction_seq;

		int is_empty_delta = (int) data_type_is_empty(new_a->data_type) -
				     (int) data_type_is_empty(old_a->data_type);

		/*
		 * Record journal sequence number of empty -> nonempty transition:
		 * Note that there may be multiple empty -> nonempty
		 * transitions, data in a bucket may be overwritten while we're
		 * still writing to it - so be careful to only record the first:
		 */
		if (is_empty_delta < 0 &&
		    new_a->journal_seq_empty <= c->journal.flushed_seq_ondisk) {
			new_a->journal_seq_nonempty	= transaction_seq;
			new_a->journal_seq_empty	= 0;
		}

		/*
		 * Bucket becomes empty: mark it as waiting for a journal flush,
		 * unless updates since empty -> nonempty transition were never
		 * flushed - we may need to ask the journal not to flush
		 * intermediate sequence numbers:
		 */
		if (is_empty_delta > 0) {
			if (new_a->journal_seq_nonempty == transaction_seq ||
			    bch2_journal_noflush_seq(&c->journal,
						     new_a->journal_seq_nonempty,
						     transaction_seq)) {
				new_a->journal_seq_nonempty = new_a->journal_seq_empty = 0;
			} else {
				new_a->journal_seq_empty = transaction_seq;

				ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
									   c->journal.flushed_seq_ondisk,
									   new.k->p.inode, new.k->p.offset,
									   transaction_seq);
				if (bch2_fs_fatal_err_on(ret, c,
							 "setting bucket_needs_journal_commit: %s",
							 bch2_err_str(ret)))
					goto err;
			}
		}

		if (new_a->gen != old_a->gen) {
			guard(rcu)();
			u8 *gen = bucket_gen(ca, new.k->p.offset);
			if (unlikely(!gen))
				goto invalid_bucket;
			*gen = new_a->gen;
		}

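		/*
		 * On data_type transitions, kick off the background tasks that
		 * care about the new state: wake freelist waiters, queue a
		 * fast discard, start invalidating cached data, or schedule a
		 * gc_gens pass.
		 */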
#define eval_state(_a, expr)	({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr)	!eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a)	(a->journal_seq_empty <= c->journal.flushed_seq_ondisk)

		if (statechange(a->data_type == BCH_DATA_free) &&
		    bucket_flushed(new_a))
			closure_wake_up(&c->freelist_wait);

		if (statechange(a->data_type == BCH_DATA_need_discard) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
		    bucket_flushed(new_a))
			bch2_discard_one_bucket_fast(ca, new.k->p.offset);

		if (statechange(a->data_type == BCH_DATA_cached) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
			bch2_dev_do_invalidates(ca);

		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
			bch2_gc_gens_async(c);
	}

	if ((flags & BTREE_TRIGGER_gc) && (flags & BTREE_TRIGGER_insert)) {
		guard(rcu)();
		struct bucket *g = gc_bucket(ca, new.k->p.offset);
		if (unlikely(!g))
			goto invalid_bucket;
		g->gen_valid	= 1;
		g->gen		= new_a->gen;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	bch2_dev_put(ca);
	return ret;
invalid_bucket:
	bch2_fs_inconsistent(c, "reference to invalid bucket\n%s",
			     (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
	ret = bch_err_throw(c, trigger_alloc);
	goto err;
}

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
 * extents style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct btree_iter *iter,
					    struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(trans, &iter2, iter);

		struct btree_path *path = btree_iter_path(trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, upto takes a half
		 * open interval:
		 */
		k = bch2_btree_iter_peek_max(trans, &iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}

static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
{
	if (*ca) {
		if (bucket->offset < (*ca)->mi.first_bucket)
			bucket->offset = (*ca)->mi.first_bucket;

		if (bucket->offset < (*ca)->mi.nbuckets)
			return true;

		bch2_dev_put(*ca);
		*ca = NULL;
		bucket->inode++;
		bucket->offset = 0;
	}

	guard(rcu)();
	*ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	if (*ca) {
		*bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
		bch2_dev_get(*ca);
	}

	return *ca != NULL;
}
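/*
 * Like bch2_get_key_or_hole(), but only returns holes that correspond to real
 * buckets: holes are clamped to the owning device's bucket range, and we skip
 * ahead to the next device when the position falls outside any device.
 */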
static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_trans *trans,
							struct btree_iter *iter,
							struct bch_dev **ca, struct bkey *hole)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(trans, iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);

	if (!k.k->type) {
		struct bpos hole_start = bkey_start_pos(k.k);

		if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
			if (!next_bucket(c, ca, &hole_start))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(trans, iter, hole_start);
			goto again;
		}

		if (k.k->p.offset > (*ca)->mi.nbuckets)
			bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
	}

	return k;
}

static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
	if (fsck_err_on(!ca,
			trans, alloc_key_to_missing_dev_bucket,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		ret = bch2_btree_delete_at(trans, alloc_iter, 0);
	if (!ca)
		return ret;

	if (!ca->mi.freespace_initialized)
		goto out;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	bch2_btree_iter_set_pos(trans, discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(trans, discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bool is_discarded = a->data_type == BCH_DATA_need_discard;
	if (need_discard_or_freespace_err_on(!!k.k->type != is_discarded,
					     trans, alloc_k, !is_discarded, true, true)) {
		ret = bch2_btree_bit_mod_iter(trans, discard_iter, is_discarded);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(trans, freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bool is_free = a->data_type == BCH_DATA_free;
	if (need_discard_or_freespace_err_on(!!k.k->type != is_free,
					     trans, alloc_k, !is_free, false, true)) {
		ret = bch2_btree_bit_mod_iter(trans, freespace_iter, is_free);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
			trans, bucket_gens_key_wrong,
			"incorrect gen in bucket_gens btree (got %u should be %u)\n%s",
			alloc_gen(k, gens_offset), a->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}
		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
out:
err:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bch_dev *ca,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(trans, freespace_iter, start);

	k = bch2_btree_iter_peek_slot(trans, freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (fsck_err_on(k.k->type != KEY_TYPE_set,
			trans, freespace_hole_missing,
			"hole in alloc btree missing in freespace btree\n"
			"device %llu buckets %llu-%llu",
			freespace_iter->pos.inode,
			freespace_iter->pos.offset,
			end->offset)) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= KEY_TYPE_set;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end, &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], trans,
					bucket_gens_hole_wrong,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			memcpy(u, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

struct check_discard_freespace_key_async {
	struct work_struct	work;
	struct bch_fs		*c;
	struct bbpos		pos;
};
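/*
 * Async repair path for need_discard/freespace keys: re-run the check from a
 * work item, where committing the repair won't recurse back into the
 * allocator (see bch2_check_discard_freespace_key() below).
 */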
static int bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct bbpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, pos.btree, pos.pos, 0);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	u8 gen;
	ret = k.k->type != KEY_TYPE_set
		? bch2_check_discard_freespace_key(trans, &iter, &gen, false)
		: 0;
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static void check_discard_freespace_key_work(struct work_struct *work)
{
	struct check_discard_freespace_key_async *w =
		container_of(work, struct check_discard_freespace_key_async, work);

	bch2_trans_do(w->c, bch2_recheck_discard_freespace_key(trans, w->pos));
	enumerated_ref_put(&w->c->writes, BCH_WRITE_REF_check_discard_freespace_key);
	kfree(w);
}

int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_iter *iter, u8 *gen,
				     bool async_repair)
{
	struct bch_fs *c = trans->c;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;

	struct bpos bucket = iter->pos;
	bucket.offset &= ~(~0ULL << 56);
	u64 genbits = iter->pos.offset & (~0ULL << 56);

	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter,
						     BTREE_ID_alloc, bucket,
						     async_repair ? BTREE_ITER_cached : 0);
	int ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		if (fsck_err(trans, need_discard_freespace_key_to_invalid_dev_bucket,
			     "entry in %s btree for nonexistant dev:bucket %llu:%llu",
			     bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset))
			goto delete;
		ret = 1;
		goto out;
	}

	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (a->data_type != state ||
	    (state == BCH_DATA_free &&
	     genbits != alloc_freespace_genbits(*a))) {
		if (fsck_err(trans, need_discard_freespace_key_bad,
			     "%s\nincorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
			     (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			     bch2_btree_id_str(iter->btree_id),
			     iter->pos.inode,
			     iter->pos.offset,
			     a->data_type == state,
			     genbits >> 56, alloc_freespace_genbits(*a) >> 56))
			goto delete;
		ret = 1;
		goto out;
	}

	*gen = a->gen;
out:
fsck_err:
	bch2_set_btree_iter_dontneed(trans, &alloc_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	if (!async_repair) {
		ret =   bch2_btree_bit_mod_iter(trans, iter, false) ?:
			bch2_trans_commit(trans, NULL, NULL,
					  BCH_TRANS_COMMIT_no_enospc) ?:
			bch_err_throw(c, transaction_restart_commit);
		goto out;
	} else {
		/*
		 * We can't repair here when called from the allocator path: the
		 * commit will recurse back into the allocator
		 */
		struct check_discard_freespace_key_async *w =
			kzalloc(sizeof(*w), GFP_KERNEL);
		if (!w)
			goto out;

		if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_check_discard_freespace_key)) {
			kfree(w);
			goto out;
		}

		INIT_WORK(&w->work, check_discard_freespace_key_work);
		w->c = c;
		w->pos = BBPOS(iter->btree_id, iter->pos);
		queue_work(c->write_ref_wq, &w->work);
		ret = 1; /* don't allocate from this bucket */
		goto out;
	}
}

static int bch2_check_discard_freespace_key_fsck(struct btree_trans *trans, struct btree_iter *iter)
{
	u8 gen;
	int ret = bch2_check_discard_freespace_key(trans, iter, &gen, false);
	return ret < 0 ? ret : 0;
}

/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static noinline_for_stack
int bch2_check_bucket_gens_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
	if (!ca) {
		if (fsck_err(trans, bucket_gens_to_invalid_dev,
			     "bucket_gens key for invalid device:\n%s",
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets,
			trans, bucket_gens_to_invalid_buckets,
			"bucket_gens key for invalid buckets:\n%s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
				trans, bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
				trans, bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto out;

		memcpy(u, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, u, 0);
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bch_dev *ca = NULL;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_prefetch);

	while (1) {
		struct bpos next;

		bch2_trans_begin(trans);

		k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret = bch2_check_alloc_hole_freespace(trans, ca,
						    bkey_start_pos(k.k),
						    &next,
						    &freespace_iter) ?:
				bch2_check_alloc_hole_bucket_gens(trans,
						    bkey_start_pos(k.k),
						    &next,
						    &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(trans, &iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &bucket_gens_iter);
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	bch2_trans_iter_exit(trans, &iter);
	bch2_dev_put(ca);
	ca = NULL;

	if (ret < 0)
		goto err;

	ret = for_each_btree_key(trans, iter,
			BTREE_ID_need_discard, POS_MIN,
			BTREE_ITER_prefetch, k,
		bch2_check_discard_freespace_key_fsck(trans, &iter));
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	while (1) {
		bch2_trans_begin(trans);
		k = bch2_btree_iter_peek(trans, &iter);
		if (!k.k)
			break;

		ret = bkey_err(k) ?:
			bch2_check_discard_freespace_key_fsck(trans, &iter);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			ret = 0;
			continue;
		}
		if (ret) {
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);

			bch_err(c, "while checking %s", buf.buf);
			printbuf_exit(&buf);
			break;
		}

		bch2_btree_iter_set_pos(trans, &iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		goto err;

	ret = for_each_btree_key_commit(trans, iter,
			BTREE_ID_bucket_gens, POS_MIN,
			BTREE_ITER_prefetch, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_check_bucket_gens_key(trans, &iter, k));
err:
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter,
				       struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(trans, alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode);
	if (!ca)
		return 0;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
	if (lru_idx) {
		ret = bch2_lru_check_set(trans, BCH_LRU_BUCKET_FRAGMENTATION,
					 bucket_to_u64(alloc_k.k->p),
					 lru_idx, alloc_k, last_flushed);
		if (ret)
			goto err;
	}

	if (a->data_type != BCH_DATA_cached)
		goto err;

	if (fsck_err_on(!a->io_time[READ],
			trans, alloc_key_cached_but_read_time_zero,
			"cached bucket with read_time 0\n%s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_alloc_v4 *a_mut =
			bch2_alloc_to_v4_mut(trans, alloc_k);
		ret = PTR_ERR_OR_ZERO(a_mut);
		if (ret)
			goto err;

		a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
		ret = bch2_trans_update(trans, alloc_iter,
					&a_mut->k_i, BTREE_TRIGGER_norun);
		if (ret)
			goto err;

		a = &a_mut->v;
	}

	ret = bch2_lru_check_set(trans, alloc_k.k->p.inode,
				 bucket_to_u64(alloc_k.k->p),
				 a->io_time[READ],
				 alloc_k, last_flushed);
	if (ret)
		goto err;
err:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	struct bkey_buf last_flushed;

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed))) ?:
		bch2_check_stripe_to_lru_refs(c);

	bch2_bkey_buf_exit(&last_flushed, c);
	bch_err_fn(c, ret);
	return ret;
}

static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
{
	struct bch_fs *c = ca->fs;
	int ret;

	mutex_lock(&ca->discard_buckets_in_flight_lock);
	struct discard_in_flight *i =
		darray_find_p(ca->discard_buckets_in_flight, i, i->bucket == bucket);
	if (i) {
		ret = bch_err_throw(c, EEXIST_discard_in_flight_add);
		goto out;
	}

	ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
			   .in_progress = in_progress,
			   .bucket	= bucket,
	}));
out:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
	return ret;
}

static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
{
	mutex_lock(&ca->discard_buckets_in_flight_lock);
	struct discard_in_flight *i =
		darray_find_p(ca->discard_buckets_in_flight, i, i->bucket == bucket);
	BUG_ON(!i || !i->in_progress);

	darray_remove_item(&ca->discard_buckets_in_flight, i);
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
}

struct discard_buckets_state {
	u64		seen;
	u64		open;
	u64		need_journal_commit;
	u64		discarded;
};

static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct bch_dev *ca,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   struct discard_buckets_state *s,
				   bool fastpath)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = {};
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	bool discard_locked = false;
	int ret = 0;

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		s->open++;
		goto out;
	}

	u64 seq_ready = bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal,
						      pos.inode, pos.offset);
	if (seq_ready > c->journal.flushed_seq_ondisk) {
		if (seq_ready > c->journal.flushing_seq)
			s->need_journal_commit++;
		goto out;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       need_discard_iter->pos,
			       BTREE_ITER_cached);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (need_discard_or_freespace_err(trans, k, true, true, true)) {
			ret = bch2_btree_bit_mod_iter(trans, need_discard_iter, false);
			if (ret)
				goto out;
			goto commit;
		}

		goto out;
	}

	if (!fastpath) {
		if (discard_in_flight_add(ca, iter.pos.offset, true))
			goto out;

		discard_locked = true;
	}

	if (!bkey_eq(*discard_pos_done, iter.pos)) {
		s->discarded++;
		*discard_pos_done = iter.pos;

		if (bch2_discard_opt_enabled(c, ca) && !c->opts.nochanges) {
			/*
			 * This works without any other locks because this is the only
			 * thread that removes items from the need_discard tree
			 */
			bch2_trans_unlock_long(trans);
			blkdev_issue_discard(ca->disk_sb.bdev,
					     k.k->p.offset * ca->mi.bucket_size,
					     ca->mi.bucket_size,
					     GFP_KERNEL);
			ret = bch2_trans_relock_notrace(trans);
			if (ret)
				goto out;
		}
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	alloc_data_type_set(&a->v, a->v.data_type);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	if (ret)
		goto out;
commit:
	ret = bch2_trans_commit(trans, NULL, NULL,
				BCH_WATERMARK_btree|
				BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	if (!fastpath)
		count_event(c, bucket_discard);
	else
		count_event(c, bucket_discard_fast);
out:
fsck_err:
	if (discard_locked)
		discard_in_flight_remove(ca, iter.pos.offset);
	if (!ret)
		s->seen++;
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, discard_work);
	struct bch_fs *c = ca->fs;
	struct discard_buckets_state s = {};
	struct bpos discard_pos_done = POS_MAX;
	int ret;

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = bch2_trans_run(c,
		for_each_btree_key_max(trans, iter,
				   BTREE_ID_need_discard,
				   POS(ca->dev_idx, 0),
				   POS(ca->dev_idx, U64_MAX), 0, k,
			bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s, false)));

	if (s.need_journal_commit > dev_buckets_available(ca, BCH_WATERMARK_normal))
		bch2_journal_flush_async(&c->journal, NULL);

	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
			      bch2_err_str(ret));

	enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_dev_do_discards);
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard);
}

void bch2_dev_do_discards(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_discard))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_dev_do_discards))
		goto put_write_ref;

	if (queue_work(c->write_ref_wq, &ca->discard_work))
		return;

	enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_dev_do_discards);
put_write_ref:
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard);
}

void bch2_do_discards(struct bch_fs *c)
{
	for_each_member_device(c, ca)
		bch2_dev_do_discards(ca);
}
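/*
 * Fast path: discard a single bucket that was queued by the alloc trigger,
 * by looking up its need_discard key and reusing bch2_discard_one_bucket().
 */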

static int bch2_do_discards_fast_one(struct btree_trans *trans,
				     struct bch_dev *ca,
				     u64 bucket,
				     struct bpos *discard_pos_done,
				     struct discard_buckets_state *s)
{
	struct btree_iter need_discard_iter;
	struct bkey_s_c discard_k = bch2_bkey_get_iter(trans, &need_discard_iter,
					BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0);
	int ret = bkey_err(discard_k);
	if (ret)
		return ret;

	if (log_fsck_err_on(discard_k.k->type != KEY_TYPE_set,
			    trans, discarding_bucket_not_in_need_discard_btree,
			    "attempting to discard bucket %u:%llu not in need_discard btree",
			    ca->dev_idx, bucket))
		goto out;

	ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true);
out:
fsck_err:
	bch2_trans_iter_exit(trans, &need_discard_iter);
	return ret;
}

static void bch2_do_discards_fast_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
	struct bch_fs *c = ca->fs;
	struct discard_buckets_state s = {};
	struct bpos discard_pos_done = POS_MAX;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	while (1) {
		bool got_bucket = false;
		u64 bucket;

		mutex_lock(&ca->discard_buckets_in_flight_lock);
		darray_for_each(ca->discard_buckets_in_flight, i) {
			if (i->in_progress)
				continue;

			got_bucket = true;
			bucket = i->bucket;
			i->in_progress = true;
			break;
		}
		mutex_unlock(&ca->discard_buckets_in_flight_lock);

		if (!got_bucket)
			break;

		ret = lockrestart_do(trans,
			bch2_do_discards_fast_one(trans, ca, bucket, &discard_pos_done, &s));
		bch_err_fn(c, ret);

		discard_in_flight_remove(ca, bucket);

		if (ret)
			break;
	}

	trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));

	bch2_trans_put(trans);
	enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_discard_one_bucket_fast);
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard_fast);
}

static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
{
	struct bch_fs *c = ca->fs;

	if (discard_in_flight_add(ca, bucket, false))
		return;

	if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_discard_fast))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_discard_one_bucket_fast))
		goto put_ref;

	if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
		return;

	enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_discard_one_bucket_fast);
put_ref:
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard_fast);
}
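
/*
 * Bucket invalidation: to reuse a bucket that only holds cached (clean,
 * evictable) data, we walk its backpointers and drop every extent pointer
 * that still points into it; once no pointers remain the bucket can be
 * reallocated without moving any data.
 */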

static int invalidate_one_bp(struct btree_trans *trans,
			     struct bch_dev *ca,
			     struct bkey_s_c_backpointer bp,
			     struct bkey_buf *last_flushed)
{
	struct btree_iter extent_iter;
	struct bkey_s_c extent_k =
		bch2_backpointer_get_key(trans, bp, &extent_iter, 0, last_flushed);
	int ret = bkey_err(extent_k);
	if (ret)
		return ret;

	if (!extent_k.k)
		return 0;

	struct bkey_i *n =
		bch2_bkey_make_mut(trans, &extent_iter, &extent_k,
				   BTREE_UPDATE_internal_snapshot_node);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto err;

	bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx);
err:
	bch2_trans_iter_exit(trans, &extent_iter);
	return ret;
}

static int invalidate_one_bucket_by_bps(struct btree_trans *trans,
					struct bch_dev *ca,
					struct bpos bucket,
					u8 gen,
					struct bkey_buf *last_flushed)
{
	struct bpos bp_start	= bucket_pos_to_bp_start(ca, bucket);
	struct bpos bp_end	= bucket_pos_to_bp_end(ca, bucket);

	return for_each_btree_key_max_commit(trans, iter, BTREE_ID_backpointers,
					     bp_start, bp_end, 0, k,
					     NULL, NULL,
					     BCH_WATERMARK_btree|
					     BCH_TRANS_COMMIT_no_enospc, ({
		if (k.k->type != KEY_TYPE_backpointer)
			continue;

		struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);

		/* filter out bps with gens that don't match */
		if (bp.v->bucket_gen != gen)
			continue;

		invalidate_one_bp(trans, ca, bp, last_flushed);
	}));
}
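
/*
 * Called once per LRU entry: returns 0 to continue scanning, a positive
 * value to stop once *nr_to_invalidate buckets have been invalidated, or a
 * negative error / transaction restart code.
 */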

noinline_for_stack
static int invalidate_one_bucket(struct btree_trans *trans,
				 struct bch_dev *ca,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 struct bkey_buf *last_flushed,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	struct btree_iter alloc_iter = {};
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		if (fsck_err(trans, lru_entry_to_invalid_bucket,
			     "lru key points to nonexistent device:bucket %llu:%llu",
			     bucket.inode, bucket.offset))
			return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
		goto out;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter,
						     BTREE_ID_alloc, bucket,
						     BTREE_ITER_cached);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(*a))
		goto out;

	/*
	 * Impossible since alloc_lru_idx_read() only returns nonzero if the
	 * bucket is supposed to be on the cached bucket LRU (i.e.
	 * BCH_DATA_cached)
	 *
	 * bch2_lru_validate() also disallows lru keys with lru_pos_time() == 0
	 */
	BUG_ON(a->data_type != BCH_DATA_cached);
	BUG_ON(a->dirty_sectors);

	if (!a->cached_sectors) {
		bch2_check_bucket_backpointer_mismatch(trans, ca, bucket.offset,
						       true, last_flushed);
		goto out;
	}

	unsigned cached_sectors = a->cached_sectors;
	u8 gen = a->gen;

	ret = invalidate_one_bucket_by_bps(trans, ca, bucket, gen, last_flushed);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
}

static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
				    struct bch_dev *ca, bool *wrapped)
{
	struct bkey_s_c k;
again:
	k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
	if (!k.k && !*wrapped) {
		bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0));
		*wrapped = true;
		goto again;
	}

	return k;
}

static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
	struct bch_fs *c = ca->fs;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	struct bkey_buf last_flushed;
	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (ret)
		goto err;

	s64 nr_to_invalidate =
		should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
	struct btree_iter iter;
	bool wrapped = false;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
			     lru_pos(ca->dev_idx, 0,
				     ((bch2_current_io_time(c, READ) + U32_MAX) &
				      LRU_TIME_MAX)), 0);

	while (true) {
		bch2_trans_begin(trans);

		struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
		ret = bkey_err(k);
		if (ret)
			goto restart_err;
		if (!k.k)
			break;

		ret = invalidate_one_bucket(trans, ca, &iter, k, &last_flushed, &nr_to_invalidate);
restart_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_btree_iter_advance(trans, &iter);
	}
	bch2_trans_iter_exit(trans, &iter);
err:
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&last_flushed, c);
	enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates);
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_invalidate);
}

void bch2_dev_do_invalidates(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_invalidate))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_do_invalidates))
		goto put_ref;

	if (queue_work(c->write_ref_wq, &ca->invalidate_work))
		return;

	enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates);
put_ref:
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_invalidate);
}

void bch2_do_invalidates(struct bch_fs *c)
{
	for_each_member_device(c, ca)
		bch2_dev_do_invalidates(ca);
}
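
/*
 * bch2_dev_freespace_init(): walk the alloc btree for one device and
 * (re)create the freespace/need_discard index entries that mirror it.
 * Normally run when a device is added; bch2_fs_freespace_init() below
 * re-runs it at mount time for any device where it didn't complete
 * (e.g. after a crash during device add).
 */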

int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
			    u64 bucket_start, u64 bucket_end)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, bucket_end);
	struct bch_member *m;
	unsigned long last_updated = jiffies;
	int ret;

	BUG_ON(bucket_start > bucket_end);
	BUG_ON(bucket_end > ca->mi.nbuckets);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
			     BTREE_ITER_prefetch);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		if (time_after(jiffies, last_updated + HZ * 10)) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			last_updated = jiffies;
		}

		bch2_trans_begin(trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(trans, &iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret =   bch2_bucket_do_index(trans, ca, k, a, true) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(trans, &iter);
		} else {
			/*
			 * No alloc keys for this range of buckets, i.e. they
			 * have never been used: insert a single KEY_TYPE_set
			 * key covering the whole hole into the freespace
			 * btree.
			 */
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type	= KEY_TYPE_set;
			freespace->k.p		= k.k->p;
			freespace->k.size	= k.k->size;

			ret =   bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(trans, &iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

	if (ret < 0) {
		bch_err_msg(ca, ret, "initializing free space");
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}
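
/*
 * Freespace initialization is recorded per member device in the superblock
 * (BCH_MEMBER_FREESPACE_INITIALIZED), so the scan above only needs to run
 * for devices that haven't completed it yet:
 */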

int bch2_fs_freespace_init(struct bch_fs *c)
{
	if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image))
		return 0;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	bool doing_init = false;
	for_each_member_device(c, ca) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		int ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		if (ret) {
			bch2_dev_put(ca);
			bch_err_fn(c, ret);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
		bch_verbose(c, "done initializing freespace");
	}

	return 0;
}

/* device removal */

int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	struct bpos start	= POS(ca->dev_idx, 0);
	struct bpos end		= POS(ca->dev_idx, U64_MAX);
	int ret;

	/*
	 * We clear the LRU and need_discard btrees first so that we don't race
	 * with bch2_do_invalidates() and bch2_do_discards()
	 */
	ret =   bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_dev_usage_remove(c, ca->dev_idx);
	bch_err_msg(ca, ret, "removing dev alloc info");
	return ret;
}

/* Bucket IO clocks: */

static int __bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
				       size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;

	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a =
		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(dev, bucket_nr));
	int ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	u64 now = bch2_current_io_time(c, rw);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	if (bch2_trans_relock(trans))
		bch2_trans_begin(trans);

	return nested_lockrestart_do(trans, __bch2_bucket_io_time_reset(trans, dev, bucket_nr, rw));
}

/* Startup/shutdown (ro/rw): */
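
/*
 * Rough worked example of the reserve arithmetic below, with made-up
 * numbers (nr_btree_reserve = 8 and gc_reserve_percent = 8 are illustrative
 * assumptions, not necessarily the real defaults; first_bucket is ignored):
 *
 * One rw device, nbuckets = 65536, bucket_size = 512 sectors:
 *   dev_reserve       = 8 * 2 + (65536 >> 6) + 3 = 1043 buckets
 *                     = 1043 * 512               = 534016 sectors
 *   reserved_sectors  = 534016 * 2               = 1068032 sectors
 *   capacity          = 65536 * 512              = 33554432 sectors (16 GiB)
 *   gc_reserve        = 33554432 * 8 / 100       = 2684354 sectors
 *   c->capacity       = 33554432 - max(2684354, 1068032) ~= 14.7 GiB
 */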

void bch2_recalc_capacity(struct bch_fs *c)
{
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	guard(rcu)();
	for_each_member_device_rcu(c, ca, NULL) {
		struct block_device *bdev = READ_ONCE(ca->disk_sb.bdev);
		if (bdev)
			ra_pages += bdev->bd_disk->bdi->ra_pages;

		if (ca->mi.state != BCH_MEMBER_STATE_rw)
			continue;

		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again once its
		 * reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	bch2_set_ra_pages(c, ra_pages);

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->reserved = reserved_sectors;
	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}

u64 bch2_min_rw_member_capacity(struct bch_fs *c)
{
	u64 ret = U64_MAX;

	guard(rcu)();
	for_each_rw_member_rcu(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
	return ret;
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		scoped_guard(spinlock, &ob->lock) {
			if (ob->valid && !ob->on_partial_list &&
			    ob->dev == ca->dev_idx)
				return true;
		}
	}

	return false;
}

void bch2_dev_allocator_set_rw(struct bch_fs *c, struct bch_dev *ca, bool rw)
{
	/* BCH_DATA_free == all rw devs */

	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (rw &&
		    (i == BCH_DATA_free ||
		     (ca->mi.data_allowed & BIT(i))))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
		else
			clear_bit(ca->dev_idx, c->rw_devs[i].d);
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	/* First, remove device from allocation groups: */
	bch2_dev_allocator_set_rw(c, ca, false);

	c->rw_devs_change_count++;

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device is no longer available for allocation and the capacity
	 * has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	bch2_dev_allocator_set_rw(c, ca, true);
	c->rw_devs_change_count++;
}
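
/*
 * Per-device allocator background state: the discard, discard_fast and
 * invalidate work items initialized below are what bch2_dev_do_discards(),
 * bch2_discard_one_bucket_fast() and bch2_dev_do_invalidates() queue onto
 * c->write_ref_wq.
 */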

void bch2_dev_allocator_background_exit(struct bch_dev *ca)
{
	darray_exit(&ca->discard_buckets_in_flight);
}

void bch2_dev_allocator_background_init(struct bch_dev *ca)
{
	mutex_init(&ca->discard_buckets_in_flight_lock);
	INIT_WORK(&ca->discard_work, bch2_do_discards_work);
	INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
	INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
}